repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1 value | license stringclasses 7 values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2 classes |
|---|---|---|---|---|---|---|---|---|
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/fs/symlink_metadata.rs | tokio/src/fs/symlink_metadata.rs | use crate::fs::asyncify;
use std::fs::Metadata;
use std::io;
use std::path::Path;
/// Queries the file system metadata for a path.
///
/// As with the std function this delegates to, symbolic links are queried
/// directly rather than followed (see [std]).
///
/// This is an async version of [`std::fs::symlink_metadata`][std]
///
/// [std]: fn@std::fs::symlink_metadata
pub async fn symlink_metadata(path: impl AsRef<Path>) -> io::Result<Metadata> {
    // Take ownership of the path so the closure can outlive this frame.
    let path = path.as_ref().to_owned();
    // `move` added for consistency with the sibling `fs` wrappers
    // (`symlink_file`, `read_to_string`, `try_exists`, `remove_dir_all`),
    // which all move their captures into the blocking closure explicitly.
    asyncify(move || std::fs::symlink_metadata(path)).await
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/fs/symlink_file.rs | tokio/src/fs/symlink_file.rs | use crate::fs::asyncify;
use std::io;
use std::path::Path;
/// Creates a new file symbolic link on the filesystem.
///
/// The `link` path will be a file symbolic link pointing to the `original`
/// path.
///
/// This is an async version of [`std::os::windows::fs::symlink_file`][std]
///
/// [std]: https://doc.rust-lang.org/std/os/windows/fs/fn.symlink_file.html
pub async fn symlink_file(original: impl AsRef<Path>, link: impl AsRef<Path>) -> io::Result<()> {
    // Convert both borrowed paths into owned `PathBuf`s so the closure can
    // be sent to the blocking thread pool.
    let (original, link) = (original.as_ref().to_owned(), link.as_ref().to_owned());
    asyncify(move || std::os::windows::fs::symlink_file(original, link)).await
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/fs/read_to_string.rs | tokio/src/fs/read_to_string.rs | use crate::fs::asyncify;
use std::{io, path::Path};
/// Creates a future which will open a file for reading and read the entire
/// contents into a string and return said string.
///
/// This is the async equivalent of [`std::fs::read_to_string`][std].
///
/// This operation is implemented by running the equivalent blocking operation
/// on a separate thread pool using [`spawn_blocking`].
///
/// [`spawn_blocking`]: crate::task::spawn_blocking
/// [std]: fn@std::fs::read_to_string
///
/// # Examples
///
/// ```no_run
/// use tokio::fs;
///
/// # async fn dox() -> std::io::Result<()> {
/// let contents = fs::read_to_string("foo.txt").await?;
/// println!("foo.txt contains {} bytes", contents.len());
/// # Ok(())
/// # }
/// ```
pub async fn read_to_string(path: impl AsRef<Path>) -> io::Result<String> {
    // Own the path so it can move into the blocking closure.
    let owned = path.as_ref().to_owned();
    asyncify(move || std::fs::read_to_string(owned)).await
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/fs/try_exists.rs | tokio/src/fs/try_exists.rs | use crate::fs::asyncify;
use std::io;
use std::path::Path;
/// Returns `Ok(true)` if the path points at an existing entity.
///
/// This function will traverse symbolic links to query information about the
/// destination file. In case of broken symbolic links this will return `Ok(false)`.
///
/// This is the async equivalent of [`std::path::Path::try_exists`][std].
///
/// [std]: fn@std::path::Path::try_exists
///
/// # Examples
///
/// ```no_run
/// use tokio::fs;
///
/// # async fn dox() -> std::io::Result<()> {
/// fs::try_exists("foo.txt").await?;
/// # Ok(())
/// # }
/// ```
pub async fn try_exists(path: impl AsRef<Path>) -> io::Result<bool> {
    // Own the path so the closure can run on the blocking pool.
    let owned = path.as_ref().to_owned();
    asyncify(move || owned.try_exists()).await
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/fs/dir_builder.rs | tokio/src/fs/dir_builder.rs | use crate::fs::asyncify;
use std::io;
use std::path::Path;
/// A builder for creating directories in various manners.
///
/// This is a specialized version of [`std::fs::DirBuilder`] for usage on
/// the Tokio runtime.
#[derive(Debug, Default)]
pub struct DirBuilder {
    /// Indicates whether to create parent directories if they are missing.
    /// Defaults to `false` via `Default`.
    recursive: bool,
    /// Sets the Unix mode for newly created directories.
    /// `None` leaves the platform default in place; only present on Unix.
    #[cfg(unix)]
    pub(super) mode: Option<u32>,
}
impl DirBuilder {
/// Creates a new set of options with default mode/security settings for all
/// platforms and also non-recursive.
///
/// This is an async version of [`std::fs::DirBuilder::new`].
///
/// # Examples
///
/// ```no_run
/// use tokio::fs::DirBuilder;
///
/// let builder = DirBuilder::new();
/// ```
pub fn new() -> Self {
DirBuilder::default()
}
/// Indicates whether to create directories recursively (including all parent directories).
/// Parents that do not exist are created with the same security and permissions settings.
///
/// This option defaults to `false`.
///
/// This is an async version of [`std::fs::DirBuilder::recursive`].
///
/// # Examples
///
/// ```no_run
/// use tokio::fs::DirBuilder;
///
/// let mut builder = DirBuilder::new();
/// builder.recursive(true);
/// ```
pub fn recursive(&mut self, recursive: bool) -> &mut Self {
    // Store the flag; it is applied to a `std::fs::DirBuilder` in `create`.
    self.recursive = recursive;
    self
}
/// Creates the specified directory with the configured options.
///
/// It is considered an error if the directory already exists unless
/// recursive mode is enabled.
///
/// This is an async version of [`std::fs::DirBuilder::create`].
///
/// # Errors
///
/// An error will be returned under the following circumstances:
///
/// * Path already points to an existing file.
/// * Path already points to an existing directory and the mode is
/// non-recursive.
/// * The calling process doesn't have permissions to create the directory
/// or its missing parents.
/// * Other I/O error occurred.
///
/// # Examples
///
/// ```no_run
/// use tokio::fs::DirBuilder;
/// use std::io;
///
/// #[tokio::main]
/// async fn main() -> io::Result<()> {
/// DirBuilder::new()
/// .recursive(true)
/// .create("/tmp/foo/bar/baz")
/// .await?;
///
/// Ok(())
/// }
/// ```
pub async fn create(&self, path: impl AsRef<Path>) -> io::Result<()> {
    let path = path.as_ref().to_owned();

    // Mirror this builder's configuration onto a std `DirBuilder`, then
    // run the blocking create on the thread pool.
    let mut builder = std::fs::DirBuilder::new();
    builder.recursive(self.recursive);

    // The `mode` field only exists on Unix; apply it when set.
    #[cfg(unix)]
    if let Some(mode) = self.mode {
        std::os::unix::fs::DirBuilderExt::mode(&mut builder, mode);
    }

    asyncify(move || builder.create(path)).await
}
}
feature! {
#![unix]
impl DirBuilder {
/// Sets the mode to create new directories with.
///
/// This option defaults to 0o777.
///
/// # Examples
///
///
/// ```no_run
/// use tokio::fs::DirBuilder;
///
/// let mut builder = DirBuilder::new();
/// builder.mode(0o775);
/// ```
pub fn mode(&mut self, mode: u32) -> &mut Self {
    // Record the requested permission bits; they are applied via
    // `DirBuilderExt::mode` when `create` builds the std `DirBuilder`.
    self.mode = Some(mode);
    self
}
}
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/fs/open_options.rs | tokio/src/fs/open_options.rs | use crate::fs::{asyncify, File};
use std::io;
use std::path::Path;
cfg_io_uring! {
mod uring_open_options;
pub(crate) use uring_open_options::UringOpenOptions;
use crate::runtime::driver::op::Op;
}
#[cfg(test)]
mod mock_open_options;
#[cfg(test)]
use mock_open_options::MockOpenOptions as StdOpenOptions;
#[cfg(not(test))]
use std::fs::OpenOptions as StdOpenOptions;
#[cfg(unix)]
use std::os::unix::fs::OpenOptionsExt;
#[cfg(windows)]
use std::os::windows::fs::OpenOptionsExt;
/// Options and flags which can be used to configure how a file is opened.
///
/// This builder exposes the ability to configure how a [`File`] is opened and
/// what operations are permitted on the open file. The [`File::open`] and
/// [`File::create`] methods are aliases for commonly used options using this
/// builder.
///
/// Generally speaking, when using `OpenOptions`, you'll first call [`new`],
/// then chain calls to methods to set each option, then call [`open`], passing
/// the path of the file you're trying to open. This will give you a
/// [`io::Result`] with a [`File`] inside that you can further operate
/// on.
///
/// This is a specialized version of [`std::fs::OpenOptions`] for usage from
/// the Tokio runtime.
///
/// `From<std::fs::OpenOptions>` is implemented for more advanced configuration
/// than the methods provided here.
///
/// [`new`]: OpenOptions::new
/// [`open`]: OpenOptions::open
/// [`File`]: File
/// [`File::open`]: File::open
/// [`File::create`]: File::create
///
/// # Examples
///
/// Opening a file to read:
///
/// ```no_run
/// use tokio::fs::OpenOptions;
/// use std::io;
///
/// #[tokio::main]
/// async fn main() -> io::Result<()> {
/// let file = OpenOptions::new()
/// .read(true)
/// .open("foo.txt")
/// .await?;
///
/// Ok(())
/// }
/// ```
///
/// Opening a file for both reading and writing, as well as creating it if it
/// doesn't exist:
///
/// ```no_run
/// use tokio::fs::OpenOptions;
/// use std::io;
///
/// #[tokio::main]
/// async fn main() -> io::Result<()> {
/// let file = OpenOptions::new()
/// .read(true)
/// .write(true)
/// .create(true)
/// .open("foo.txt")
/// .await?;
///
/// Ok(())
/// }
/// ```
#[derive(Clone, Debug)]
pub struct OpenOptions {
    /// Backend-specific option storage; which variant is used is decided
    /// once, in [`OpenOptions::new`]. See `Kind`.
    inner: Kind,
}
/// Backend-specific option storage for `OpenOptions`.
#[derive(Debug, Clone)]
enum Kind {
    /// Options for the standard-library open path.
    Std(StdOpenOptions),
    /// Options for the io_uring open path; only compiled in when every one
    /// of the listed cfg requirements is met.
    #[cfg(all(
        tokio_unstable,
        feature = "io-uring",
        feature = "rt",
        feature = "fs",
        target_os = "linux"
    ))]
    Uring(UringOpenOptions),
}
impl OpenOptions {
/// Creates a blank new set of options ready for configuration.
///
/// All options are initially set to `false`.
///
/// This is an async version of [`std::fs::OpenOptions::new`][std]
///
/// [std]: std::fs::OpenOptions::new
///
/// # Examples
///
/// ```no_run
/// use tokio::fs::OpenOptions;
///
/// let mut options = OpenOptions::new();
/// let future = options.read(true).open("foo.txt");
/// ```
pub fn new() -> OpenOptions {
    // Prefer the io_uring backend whenever it is compiled in; at runtime
    // `open` still falls back to the std path if the driver reports that
    // the kernel cannot handle the `OpenAt` opcode.
    #[cfg(all(
        tokio_unstable,
        feature = "io-uring",
        feature = "rt",
        feature = "fs",
        target_os = "linux"
    ))]
    let inner = Kind::Uring(UringOpenOptions::new());

    // Otherwise (io_uring compiled out) use the std options directly.
    #[cfg(not(all(
        tokio_unstable,
        feature = "io-uring",
        feature = "rt",
        feature = "fs",
        target_os = "linux"
    )))]
    let inner = Kind::Std(StdOpenOptions::new());

    OpenOptions { inner }
}
/// Sets the option for read access.
///
/// This option, when true, will indicate that the file should be
/// `read`-able if opened.
///
/// This is an async version of [`std::fs::OpenOptions::read`][std]
///
/// [std]: std::fs::OpenOptions::read
///
/// # Examples
///
/// ```no_run
/// use tokio::fs::OpenOptions;
/// use std::io;
///
/// #[tokio::main]
/// async fn main() -> io::Result<()> {
/// let file = OpenOptions::new()
/// .read(true)
/// .open("foo.txt")
/// .await?;
///
/// Ok(())
/// }
/// ```
pub fn read(&mut self, read: bool) -> &mut OpenOptions {
    // Forward the flag to whichever backend was selected at construction;
    // the `Uring` arm only exists when io_uring support is compiled in.
    match &mut self.inner {
        Kind::Std(opts) => {
            opts.read(read);
        }
        #[cfg(all(
            tokio_unstable,
            feature = "io-uring",
            feature = "rt",
            feature = "fs",
            target_os = "linux"
        ))]
        Kind::Uring(opts) => {
            opts.read(read);
        }
    }
    self
}
/// Sets the option for write access.
///
/// This option, when true, will indicate that the file should be
/// `write`-able if opened.
///
/// This is an async version of [`std::fs::OpenOptions::write`][std]
///
/// [std]: std::fs::OpenOptions::write
///
/// # Examples
///
/// ```no_run
/// use tokio::fs::OpenOptions;
/// use std::io;
///
/// #[tokio::main]
/// async fn main() -> io::Result<()> {
/// let file = OpenOptions::new()
/// .write(true)
/// .open("foo.txt")
/// .await?;
///
/// Ok(())
/// }
/// ```
pub fn write(&mut self, write: bool) -> &mut OpenOptions {
    // Forward the flag to the active backend (std or io_uring).
    match &mut self.inner {
        Kind::Std(opts) => {
            opts.write(write);
        }
        #[cfg(all(
            tokio_unstable,
            feature = "io-uring",
            feature = "rt",
            feature = "fs",
            target_os = "linux"
        ))]
        Kind::Uring(opts) => {
            opts.write(write);
        }
    }
    self
}
/// Sets the option for the append mode.
///
/// This option, when true, means that writes will append to a file instead
/// of overwriting previous contents. Note that setting
/// `.write(true).append(true)` has the same effect as setting only
/// `.append(true)`.
///
/// For most filesystems, the operating system guarantees that all writes are
/// atomic: no writes get mangled because another process writes at the same
/// time.
///
/// One maybe obvious note when using append-mode: make sure that all data
/// that belongs together is written to the file in one operation. This
/// can be done by concatenating strings before passing them to [`write()`],
/// or using a buffered writer (with a buffer of adequate size),
/// and calling [`flush()`] when the message is complete.
///
/// If a file is opened with both read and append access, beware that after
/// opening, and after every write, the position for reading may be set at the
/// end of the file. So, before writing, save the current position (using
/// [`seek`]`(`[`SeekFrom`]`::`[`Current`]`(0))`), and restore it before the next read.
///
/// This is an async version of [`std::fs::OpenOptions::append`][std]
///
/// [std]: std::fs::OpenOptions::append
///
/// ## Note
///
/// This function doesn't create the file if it doesn't exist. Use the [`create`]
/// method to do so.
///
/// [`write()`]: crate::io::AsyncWriteExt::write
/// [`flush()`]: crate::io::AsyncWriteExt::flush
/// [`seek`]: crate::io::AsyncSeekExt::seek
/// [`SeekFrom`]: std::io::SeekFrom
/// [`Current`]: std::io::SeekFrom::Current
/// [`create`]: OpenOptions::create
///
/// # Examples
///
/// ```no_run
/// use tokio::fs::OpenOptions;
/// use std::io;
///
/// #[tokio::main]
/// async fn main() -> io::Result<()> {
/// let file = OpenOptions::new()
/// .append(true)
/// .open("foo.txt")
/// .await?;
///
/// Ok(())
/// }
/// ```
pub fn append(&mut self, append: bool) -> &mut OpenOptions {
    // Forward the flag to the active backend (std or io_uring).
    match &mut self.inner {
        Kind::Std(opts) => {
            opts.append(append);
        }
        #[cfg(all(
            tokio_unstable,
            feature = "io-uring",
            feature = "rt",
            feature = "fs",
            target_os = "linux"
        ))]
        Kind::Uring(opts) => {
            opts.append(append);
        }
    }
    self
}
/// Sets the option for truncating a previous file.
///
/// If a file is successfully opened with this option set it will truncate
/// the file to 0 length if it already exists.
///
/// The file must be opened with write access for truncate to work.
///
/// This is an async version of [`std::fs::OpenOptions::truncate`][std]
///
/// [std]: std::fs::OpenOptions::truncate
///
/// # Examples
///
/// ```no_run
/// use tokio::fs::OpenOptions;
/// use std::io;
///
/// #[tokio::main]
/// async fn main() -> io::Result<()> {
/// let file = OpenOptions::new()
/// .write(true)
/// .truncate(true)
/// .open("foo.txt")
/// .await?;
///
/// Ok(())
/// }
/// ```
pub fn truncate(&mut self, truncate: bool) -> &mut OpenOptions {
    // Forward the flag to the active backend (std or io_uring).
    match &mut self.inner {
        Kind::Std(opts) => {
            opts.truncate(truncate);
        }
        #[cfg(all(
            tokio_unstable,
            feature = "io-uring",
            feature = "rt",
            feature = "fs",
            target_os = "linux"
        ))]
        Kind::Uring(opts) => {
            opts.truncate(truncate);
        }
    }
    self
}
/// Sets the option for creating a new file.
///
/// This option indicates whether a new file will be created if the file
/// does not yet already exist.
///
/// In order for the file to be created, [`write`] or [`append`] access must
/// be used.
///
/// This is an async version of [`std::fs::OpenOptions::create`][std]
///
/// [std]: std::fs::OpenOptions::create
/// [`write`]: OpenOptions::write
/// [`append`]: OpenOptions::append
///
/// # Examples
///
/// ```no_run
/// use tokio::fs::OpenOptions;
/// use std::io;
///
/// #[tokio::main]
/// async fn main() -> io::Result<()> {
/// let file = OpenOptions::new()
/// .write(true)
/// .create(true)
/// .open("foo.txt")
/// .await?;
///
/// Ok(())
/// }
/// ```
pub fn create(&mut self, create: bool) -> &mut OpenOptions {
    // Forward the flag to the active backend (std or io_uring).
    match &mut self.inner {
        Kind::Std(opts) => {
            opts.create(create);
        }
        #[cfg(all(
            tokio_unstable,
            feature = "io-uring",
            feature = "rt",
            feature = "fs",
            target_os = "linux"
        ))]
        Kind::Uring(opts) => {
            opts.create(create);
        }
    }
    self
}
/// Sets the option to always create a new file.
///
/// This option indicates whether a new file will be created. No file is
/// allowed to exist at the target location, also no (dangling) symlink.
///
/// This option is useful because it is atomic. Otherwise between checking
/// whether a file exists and creating a new one, the file may have been
/// created by another process (a TOCTOU race condition / attack).
///
/// If `.create_new(true)` is set, [`.create()`] and [`.truncate()`] are
/// ignored.
///
/// The file must be opened with write or append access in order to create a
/// new file.
///
/// This is an async version of [`std::fs::OpenOptions::create_new`][std]
///
/// [std]: std::fs::OpenOptions::create_new
/// [`.create()`]: OpenOptions::create
/// [`.truncate()`]: OpenOptions::truncate
///
/// # Examples
///
/// ```no_run
/// use tokio::fs::OpenOptions;
/// use std::io;
///
/// #[tokio::main]
/// async fn main() -> io::Result<()> {
/// let file = OpenOptions::new()
/// .write(true)
/// .create_new(true)
/// .open("foo.txt")
/// .await?;
///
/// Ok(())
/// }
/// ```
pub fn create_new(&mut self, create_new: bool) -> &mut OpenOptions {
    // Forward the flag to the active backend (std or io_uring).
    match &mut self.inner {
        Kind::Std(opts) => {
            opts.create_new(create_new);
        }
        #[cfg(all(
            tokio_unstable,
            feature = "io-uring",
            feature = "rt",
            feature = "fs",
            target_os = "linux"
        ))]
        Kind::Uring(opts) => {
            opts.create_new(create_new);
        }
    }
    self
}
/// Opens a file at `path` with the options specified by `self`.
///
/// This is an async version of [`std::fs::OpenOptions::open`][std]
///
/// [std]: std::fs::OpenOptions::open
///
/// # Errors
///
/// This function will return an error under a number of different
/// circumstances. Some of these error conditions are listed here, together
/// with their [`ErrorKind`]. The mapping to [`ErrorKind`]s is not part of
/// the compatibility contract of the function, especially the `Other` kind
/// might change to more specific kinds in the future.
///
/// * [`NotFound`]: The specified file does not exist and neither `create`
/// or `create_new` is set.
/// * [`NotFound`]: One of the directory components of the file path does
/// not exist.
/// * [`PermissionDenied`]: The user lacks permission to get the specified
/// access rights for the file.
/// * [`PermissionDenied`]: The user lacks permission to open one of the
/// directory components of the specified path.
/// * [`AlreadyExists`]: `create_new` was specified and the file already
/// exists.
/// * [`InvalidInput`]: Invalid combinations of open options (truncate
/// without write access, no access mode set, etc.).
/// * [`Other`]: One of the directory components of the specified file path
/// was not, in fact, a directory.
/// * [`Other`]: Filesystem-level errors: full disk, write permission
/// requested on a read-only file system, exceeded disk quota, too many
/// open files, too long filename, too many symbolic links in the
/// specified path (Unix-like systems only), etc.
///
/// # io_uring support
///
/// On Linux, you can also use `io_uring` for executing system calls.
/// To enable `io_uring`, you need to specify the `--cfg tokio_unstable`
/// flag at compile time, enable the `io-uring` cargo feature, and set the
/// `Builder::enable_io_uring` runtime option.
///
/// Support for `io_uring` is currently experimental, so its behavior may
/// change or it may be removed in future versions.
///
/// # Examples
///
/// ```no_run
/// use tokio::fs::OpenOptions;
/// use std::io;
///
/// #[tokio::main]
/// async fn main() -> io::Result<()> {
/// let file = OpenOptions::new().open("foo.txt").await?;
/// Ok(())
/// }
/// ```
///
/// [`ErrorKind`]: std::io::ErrorKind
/// [`AlreadyExists`]: std::io::ErrorKind::AlreadyExists
/// [`InvalidInput`]: std::io::ErrorKind::InvalidInput
/// [`NotFound`]: std::io::ErrorKind::NotFound
/// [`Other`]: std::io::ErrorKind::Other
/// [`PermissionDenied`]: std::io::ErrorKind::PermissionDenied
pub async fn open(&self, path: impl AsRef<Path>) -> io::Result<File> {
    match &self.inner {
        // Standard path: clone the options and open on the blocking pool.
        Kind::Std(opts) => Self::std_open(opts, path).await,
        #[cfg(all(
            tokio_unstable,
            feature = "io-uring",
            feature = "rt",
            feature = "fs",
            target_os = "linux"
        ))]
        Kind::Uring(opts) => {
            let handle = crate::runtime::Handle::current();
            let driver_handle = handle.inner.driver().io();
            // Probe (and lazily initialize) io_uring support for the
            // `OpenAt` opcode. When the driver reports it is unavailable,
            // degrade gracefully to the std open path instead of erroring.
            if driver_handle.check_and_init(io_uring::opcode::OpenAt::CODE)? {
                Op::open(path.as_ref(), opts)?.await
            } else {
                // Convert the uring options into std options for fallback.
                let opts = opts.clone().into();
                Self::std_open(&opts, path).await
            }
        }
    }
}
/// Opens `path` with the given std options on the blocking thread pool and
/// wraps the resulting handle in a Tokio [`File`].
async fn std_open(opts: &StdOpenOptions, path: impl AsRef<Path>) -> io::Result<File> {
    // Clone everything the blocking closure needs so it owns its captures.
    let (opts, path) = (opts.clone(), path.as_ref().to_owned());
    asyncify(move || opts.open(path)).await.map(File::from_std)
}
/// Gives the Windows-only extension methods mutable access to the
/// underlying std options.
#[cfg(windows)]
pub(super) fn as_inner_mut(&mut self) -> &mut StdOpenOptions {
    match &mut self.inner {
        // On Windows the `Uring` variant is compiled out (it requires
        // `target_os = "linux"`), so this single-arm match is exhaustive.
        Kind::Std(ref mut opts) => opts,
    }
}
}
feature! {
#![unix]
impl OpenOptions {
/// Sets the mode bits that a new file will be created with.
///
/// If a new file is created as part of an `OpenOptions::open` call then this
/// specified `mode` will be used as the permission bits for the new file.
/// If no `mode` is set, the default of `0o666` will be used.
/// The operating system masks out bits with the system's `umask`, to produce
/// the final permissions.
///
/// # Examples
///
/// ```no_run
/// use tokio::fs::OpenOptions;
/// use std::io;
///
/// #[tokio::main]
/// async fn main() -> io::Result<()> {
/// let mut options = OpenOptions::new();
/// options.mode(0o644); // Give read/write for owner and read for others.
/// let file = options.open("foo.txt").await?;
///
/// Ok(())
/// }
/// ```
pub fn mode(&mut self, mode: u32) -> &mut OpenOptions {
    // Forward the mode bits to the active backend (std or io_uring).
    match &mut self.inner {
        Kind::Std(opts) => {
            opts.mode(mode);
        }
        #[cfg(all(
            tokio_unstable,
            feature = "io-uring",
            feature = "rt",
            feature = "fs",
            target_os = "linux"
        ))]
        Kind::Uring(opts) => {
            opts.mode(mode);
        }
    }
    self
}
/// Passes custom flags to the `flags` argument of `open`.
///
/// The bits that define the access mode are masked out with `O_ACCMODE`, to
/// ensure they do not interfere with the access mode set by Rusts options.
///
/// Custom flags can only set flags, not remove flags set by Rusts options.
/// This options overwrites any previously set custom flags.
///
/// # Examples
///
/// ```no_run
/// use tokio::fs::OpenOptions;
/// use std::io;
///
/// #[tokio::main]
/// async fn main() -> io::Result<()> {
/// let mut options = OpenOptions::new();
/// options.write(true);
/// if cfg!(unix) {
/// options.custom_flags(libc::O_NOFOLLOW);
/// }
/// let file = options.open("foo.txt").await?;
///
/// Ok(())
/// }
/// ```
pub fn custom_flags(&mut self, flags: i32) -> &mut OpenOptions {
    // Forward the raw `open(2)` flags to the active backend.
    match &mut self.inner {
        Kind::Std(opts) => {
            opts.custom_flags(flags);
        }
        #[cfg(all(
            tokio_unstable,
            feature = "io-uring",
            feature = "rt",
            feature = "fs",
            target_os = "linux"
        ))]
        Kind::Uring(opts) => {
            opts.custom_flags(flags);
        }
    }
    self
}
}
}
cfg_windows! {
impl OpenOptions {
/// Overrides the `dwDesiredAccess` argument to the call to [`CreateFile`]
/// with the specified value.
///
/// This will override the `read`, `write`, and `append` flags on the
/// `OpenOptions` structure. This method provides fine-grained control over
/// the permissions to read, write and append data, attributes (like hidden
/// and system), and extended attributes.
///
/// # Examples
///
/// ```no_run
/// use tokio::fs::OpenOptions;
///
/// # #[tokio::main]
/// # async fn main() -> std::io::Result<()> {
/// // Open without read and write permission, for example if you only need
/// // to call `stat` on the file
/// let file = OpenOptions::new().access_mode(0).open("foo.txt").await?;
/// # Ok(())
/// # }
/// ```
///
/// [`CreateFile`]: https://docs.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilea
pub fn access_mode(&mut self, access: u32) -> &mut OpenOptions {
    // Delegate to the std options (the only backend on Windows).
    self.as_inner_mut().access_mode(access);
    self
}
/// Overrides the `dwShareMode` argument to the call to [`CreateFile`] with
/// the specified value.
///
/// By default `share_mode` is set to
/// `FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE`. This allows
/// other processes to read, write, and delete/rename the same file
/// while it is open. Removing any of the flags will prevent other
/// processes from performing the corresponding operation until the file
/// handle is closed.
///
/// # Examples
///
/// ```no_run
/// use tokio::fs::OpenOptions;
///
/// # #[tokio::main]
/// # async fn main() -> std::io::Result<()> {
/// // Do not allow others to read or modify this file while we have it open
/// // for writing.
/// let file = OpenOptions::new()
/// .write(true)
/// .share_mode(0)
/// .open("foo.txt").await?;
/// # Ok(())
/// # }
/// ```
///
/// [`CreateFile`]: https://docs.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilea
pub fn share_mode(&mut self, share: u32) -> &mut OpenOptions {
    // Delegate to the std options (the only backend on Windows).
    self.as_inner_mut().share_mode(share);
    self
}
/// Sets extra flags for the `dwFileFlags` argument to the call to
/// [`CreateFile2`] to the specified value (or combines it with
/// `attributes` and `security_qos_flags` to set the `dwFlagsAndAttributes`
/// for [`CreateFile`]).
///
/// Custom flags can only set flags, not remove flags set by Rust's options.
/// This option overwrites any previously set custom flags.
///
/// # Examples
///
/// ```no_run
/// use windows_sys::Win32::Storage::FileSystem::FILE_FLAG_DELETE_ON_CLOSE;
/// use tokio::fs::OpenOptions;
///
/// # #[tokio::main]
/// # async fn main() -> std::io::Result<()> {
/// let file = OpenOptions::new()
/// .create(true)
/// .write(true)
/// .custom_flags(FILE_FLAG_DELETE_ON_CLOSE)
/// .open("foo.txt").await?;
/// # Ok(())
/// # }
/// ```
///
/// [`CreateFile`]: https://docs.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilea
/// [`CreateFile2`]: https://docs.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfile2
pub fn custom_flags(&mut self, flags: u32) -> &mut OpenOptions {
    // Delegate to the std options (the only backend on Windows).
    self.as_inner_mut().custom_flags(flags);
    self
}
/// Sets the `dwFileAttributes` argument to the call to [`CreateFile2`] to
/// the specified value (or combines it with `custom_flags` and
/// `security_qos_flags` to set the `dwFlagsAndAttributes` for
/// [`CreateFile`]).
///
/// If a _new_ file is created because it does not yet exist and
/// `.create(true)` or `.create_new(true)` are specified, the new file is
/// given the attributes declared with `.attributes()`.
///
/// If an _existing_ file is opened with `.create(true).truncate(true)`, its
/// existing attributes are preserved and combined with the ones declared
/// with `.attributes()`.
///
/// In all other cases the attributes get ignored.
///
/// # Examples
///
/// ```no_run
/// use windows_sys::Win32::Storage::FileSystem::FILE_ATTRIBUTE_HIDDEN;
/// use tokio::fs::OpenOptions;
///
/// # #[tokio::main]
/// # async fn main() -> std::io::Result<()> {
/// let file = OpenOptions::new()
/// .write(true)
/// .create(true)
/// .attributes(FILE_ATTRIBUTE_HIDDEN)
/// .open("foo.txt").await?;
/// # Ok(())
/// # }
/// ```
///
/// [`CreateFile`]: https://docs.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilea
/// [`CreateFile2`]: https://docs.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfile2
pub fn attributes(&mut self, attributes: u32) -> &mut OpenOptions {
    // Delegate to the std options (the only backend on Windows).
    self.as_inner_mut().attributes(attributes);
    self
}
/// Sets the `dwSecurityQosFlags` argument to the call to [`CreateFile2`] to
/// the specified value (or combines it with `custom_flags` and `attributes`
/// to set the `dwFlagsAndAttributes` for [`CreateFile`]).
///
/// By default `security_qos_flags` is not set. It should be specified when
/// opening a named pipe, to control to which degree a server process can
/// act on behalf of a client process (security impersonation level).
///
/// When `security_qos_flags` is not set, a malicious program can gain the
/// elevated privileges of a privileged Rust process when it allows opening
/// user-specified paths, by tricking it into opening a named pipe. So
/// arguably `security_qos_flags` should also be set when opening arbitrary
/// paths. However the bits can then conflict with other flags, specifically
/// `FILE_FLAG_OPEN_NO_RECALL`.
///
/// For information about possible values, see [Impersonation Levels] on the
/// Windows Dev Center site. The `SECURITY_SQOS_PRESENT` flag is set
/// automatically when using this method.
///
/// # Examples
///
/// ```no_run
/// use windows_sys::Win32::Storage::FileSystem::SECURITY_IDENTIFICATION;
/// use tokio::fs::OpenOptions;
///
/// # #[tokio::main]
/// # async fn main() -> std::io::Result<()> {
/// let file = OpenOptions::new()
/// .write(true)
/// .create(true)
///
/// // Sets the flag value to `SecurityIdentification`.
/// .security_qos_flags(SECURITY_IDENTIFICATION)
///
/// .open(r"\\.\pipe\MyPipe").await?;
/// # Ok(())
/// # }
/// ```
///
/// [`CreateFile`]: https://docs.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilea
/// [`CreateFile2`]: https://docs.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfile2
/// [Impersonation Levels]:
/// https://docs.microsoft.com/en-us/windows/win32/api/winnt/ne-winnt-security_impersonation_level
pub fn security_qos_flags(&mut self, flags: u32) -> &mut OpenOptions {
    // Delegate to the std options (the only backend on Windows).
    self.as_inner_mut().security_qos_flags(flags);
    self
}
}
}
impl From<StdOpenOptions> for OpenOptions {
    /// Wraps pre-configured `std::fs::OpenOptions`; always selects the std
    /// backend, even when io_uring support is compiled in.
    fn from(options: StdOpenOptions) -> OpenOptions {
        OpenOptions {
            inner: Kind::Std(options),
            // TODO: Add support for converting `StdOpenOptions` to `UringOpenOptions`
            // if user enables `io-uring` cargo feature. It is blocked by:
            // * https://github.com/rust-lang/rust/issues/74943
            // * https://github.com/rust-lang/rust/issues/76801
        }
    }
}
impl Default for OpenOptions {
    /// Equivalent to [`OpenOptions::new`]: all flags unset.
    fn default() -> Self {
        Self::new()
    }
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/fs/remove_dir_all.rs | tokio/src/fs/remove_dir_all.rs | use crate::fs::asyncify;
use std::io;
use std::path::Path;
/// Removes a directory at this path, after removing all its contents. Use carefully!
///
/// This is an async version of [`std::fs::remove_dir_all`][std]
///
/// [std]: fn@std::fs::remove_dir_all
pub async fn remove_dir_all(path: impl AsRef<Path>) -> io::Result<()> {
    // Take ownership so the blocking closure owns its capture.
    let dir = path.as_ref().to_owned();
    asyncify(move || std::fs::remove_dir_all(dir)).await
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/fs/file.rs | tokio/src/fs/file.rs | //! Types for working with [`File`].
//!
//! [`File`]: File
use crate::fs::{asyncify, OpenOptions};
use crate::io::blocking::{Buf, DEFAULT_MAX_BUF_SIZE};
use crate::io::{AsyncRead, AsyncSeek, AsyncWrite, ReadBuf};
use crate::sync::Mutex;
use std::cmp;
use std::fmt;
use std::fs::{Metadata, Permissions};
use std::future::Future;
use std::io::{self, Seek, SeekFrom};
use std::path::Path;
use std::pin::Pin;
use std::sync::Arc;
use std::task::{ready, Context, Poll};
#[cfg(test)]
use super::mocks::JoinHandle;
#[cfg(test)]
use super::mocks::MockFile as StdFile;
#[cfg(test)]
use super::mocks::{spawn_blocking, spawn_mandatory_blocking};
#[cfg(not(test))]
use crate::blocking::JoinHandle;
#[cfg(not(test))]
use crate::blocking::{spawn_blocking, spawn_mandatory_blocking};
#[cfg(not(test))]
use std::fs::File as StdFile;
/// A reference to an open file on the filesystem.
///
/// This is a specialized version of [`std::fs::File`] for usage from the
/// Tokio runtime.
///
/// An instance of a `File` can be read and/or written depending on what options
/// it was opened with. Files also implement [`AsyncSeek`] to alter the logical
/// cursor that the file contains internally.
///
/// A file will not be closed immediately when it goes out of scope if there
/// are any IO operations that have not yet completed. To ensure that a file is
/// closed immediately when it is dropped, you should call [`flush`] before
/// dropping it. Note that this does not ensure that the file has been fully
/// written to disk; the operating system might keep the changes around in an
/// in-memory buffer. See the [`sync_all`] method for telling the OS to write
/// the data to disk.
///
/// Reading and writing to a `File` is usually done using the convenience
/// methods found on the [`AsyncReadExt`] and [`AsyncWriteExt`] traits.
///
/// [`AsyncSeek`]: trait@crate::io::AsyncSeek
/// [`flush`]: fn@crate::io::AsyncWriteExt::flush
/// [`sync_all`]: fn@crate::fs::File::sync_all
/// [`AsyncReadExt`]: trait@crate::io::AsyncReadExt
/// [`AsyncWriteExt`]: trait@crate::io::AsyncWriteExt
///
/// # Examples
///
/// Create a new file and asynchronously write bytes to it:
///
/// ```no_run
/// use tokio::fs::File;
/// use tokio::io::AsyncWriteExt; // for write_all()
///
/// # async fn dox() -> std::io::Result<()> {
/// let mut file = File::create("foo.txt").await?;
/// file.write_all(b"hello, world!").await?;
/// # Ok(())
/// # }
/// ```
///
/// Read the contents of a file into a buffer:
///
/// ```no_run
/// use tokio::fs::File;
/// use tokio::io::AsyncReadExt; // for read_to_end()
///
/// # async fn dox() -> std::io::Result<()> {
/// let mut file = File::open("foo.txt").await?;
///
/// let mut contents = vec![];
/// file.read_to_end(&mut contents).await?;
///
/// println!("len = {}", contents.len());
/// # Ok(())
/// # }
/// ```
pub struct File {
    /// Shared handle to the blocking `std` file; cloned into the
    /// `spawn_blocking` closures so operations run off the async runtime.
    std: Arc<StdFile>,
    /// Serializes access to the read/write/seek state machine below.
    inner: Mutex<Inner>,
    /// Upper bound on bytes moved per background read/write operation.
    max_buf_size: usize,
}
struct Inner {
    /// Whether a background operation is in flight (`Busy`) or the reusable
    /// buffer is parked here (`Idle`).
    state: State,
    /// Errors from writes/flushes are returned in write/flush calls. If a write
    /// error is observed while performing a read, it is saved until the next
    /// write / flush call.
    last_write_err: Option<io::ErrorKind>,
    /// Last known seek position, reported by `poll_complete`.
    pos: u64,
}
/// Execution state of the background blocking task.
#[derive(Debug)]
enum State {
    /// No operation in flight; the reusable buffer is stored here.
    Idle(Option<Buf>),
    /// A blocking task is running; it yields the finished operation and
    /// hands the buffer back.
    Busy(JoinHandle<(Operation, Buf)>),
}
/// Outcome of a completed background task, tagged by what it was doing.
#[derive(Debug)]
enum Operation {
    /// Bytes read into the buffer (or the error encountered).
    Read(io::Result<usize>),
    /// Result of flushing buffered bytes to the file.
    Write(io::Result<()>),
    /// New cursor position after a seek.
    Seek(io::Result<u64>),
}
impl File {
/// Attempts to open a file in read-only mode.
///
/// See [`OpenOptions`] for more details.
///
/// # Errors
///
/// This function will return an error if called from outside of the Tokio
/// runtime or if path does not already exist. Other errors may also be
/// returned according to `OpenOptions::open`.
///
/// # Examples
///
/// ```no_run
/// use tokio::fs::File;
/// use tokio::io::AsyncReadExt;
///
/// # async fn dox() -> std::io::Result<()> {
/// let mut file = File::open("foo.txt").await?;
///
/// let mut contents = vec![];
/// file.read_to_end(&mut contents).await?;
///
/// println!("len = {}", contents.len());
/// # Ok(())
/// # }
/// ```
///
/// The [`read_to_end`] method is defined on the [`AsyncReadExt`] trait.
///
/// [`read_to_end`]: fn@crate::io::AsyncReadExt::read_to_end
/// [`AsyncReadExt`]: trait@crate::io::AsyncReadExt
pub async fn open(path: impl AsRef<Path>) -> io::Result<File> {
Self::options().read(true).open(path).await
}
/// Opens a file in write-only mode.
///
/// This function will create a file if it does not exist, and will truncate
/// it if it does.
///
/// See [`OpenOptions`] for more details.
///
/// # Errors
///
/// Results in an error if called from outside of the Tokio runtime or if
/// the underlying [`create`] call results in an error.
///
/// [`create`]: std::fs::File::create
///
/// # Examples
///
/// ```no_run
/// use tokio::fs::File;
/// use tokio::io::AsyncWriteExt;
///
/// # async fn dox() -> std::io::Result<()> {
/// let mut file = File::create("foo.txt").await?;
/// file.write_all(b"hello, world!").await?;
/// # Ok(())
/// # }
/// ```
///
/// The [`write_all`] method is defined on the [`AsyncWriteExt`] trait.
///
/// [`write_all`]: fn@crate::io::AsyncWriteExt::write_all
/// [`AsyncWriteExt`]: trait@crate::io::AsyncWriteExt
pub async fn create(path: impl AsRef<Path>) -> io::Result<File> {
Self::options()
.write(true)
.create(true)
.truncate(true)
.open(path)
.await
}
/// Opens a file in read-write mode.
///
/// This function will create a file if it does not exist, or return an error
/// if it does. This way, if the call succeeds, the file returned is guaranteed
/// to be new.
///
/// This option is useful because it is atomic. Otherwise between checking
/// whether a file exists and creating a new one, the file may have been
/// created by another process (a TOCTOU race condition / attack).
///
/// This can also be written using `File::options().read(true).write(true).create_new(true).open(...)`.
///
/// See [`OpenOptions`] for more details.
///
/// # Examples
///
/// ```no_run
/// use tokio::fs::File;
/// use tokio::io::AsyncWriteExt;
///
/// # async fn dox() -> std::io::Result<()> {
/// let mut file = File::create_new("foo.txt").await?;
/// file.write_all(b"hello, world!").await?;
/// # Ok(())
/// # }
/// ```
///
/// The [`write_all`] method is defined on the [`AsyncWriteExt`] trait.
///
/// [`write_all`]: fn@crate::io::AsyncWriteExt::write_all
/// [`AsyncWriteExt`]: trait@crate::io::AsyncWriteExt
pub async fn create_new<P: AsRef<Path>>(path: P) -> std::io::Result<File> {
Self::options()
.read(true)
.write(true)
.create_new(true)
.open(path)
.await
}
/// Returns a new [`OpenOptions`] object.
///
/// This function returns a new `OpenOptions` object that you can use to
/// open or create a file with specific options if `open()` or `create()`
/// are not appropriate.
///
/// It is equivalent to `OpenOptions::new()`, but allows you to write more
/// readable code. Instead of
/// `OpenOptions::new().append(true).open("example.log")`,
/// you can write `File::options().append(true).open("example.log")`. This
/// also avoids the need to import `OpenOptions`.
///
/// See the [`OpenOptions::new`] function for more details.
///
/// # Examples
///
/// ```no_run
/// use tokio::fs::File;
/// use tokio::io::AsyncWriteExt;
///
/// # async fn dox() -> std::io::Result<()> {
/// let mut f = File::options().append(true).open("example.log").await?;
/// f.write_all(b"new line\n").await?;
/// # Ok(())
/// # }
/// ```
    #[must_use]
    pub fn options() -> OpenOptions {
        // Plain constructor; exists so callers need not import `OpenOptions`.
        OpenOptions::new()
    }
/// Converts a [`std::fs::File`] to a [`tokio::fs::File`](File).
///
/// # Examples
///
/// ```no_run
/// // This line could block. It is not recommended to do this on the Tokio
/// // runtime.
/// let std_file = std::fs::File::open("foo.txt").unwrap();
/// let file = tokio::fs::File::from_std(std_file);
/// ```
pub fn from_std(std: StdFile) -> File {
File {
std: Arc::new(std),
inner: Mutex::new(Inner {
state: State::Idle(Some(Buf::with_capacity(0))),
last_write_err: None,
pos: 0,
}),
max_buf_size: DEFAULT_MAX_BUF_SIZE,
}
}
/// Attempts to sync all OS-internal metadata to disk.
///
/// This function will attempt to ensure that all in-core data reaches the
/// filesystem before returning.
///
/// # Examples
///
/// ```no_run
/// use tokio::fs::File;
/// use tokio::io::AsyncWriteExt;
///
/// # async fn dox() -> std::io::Result<()> {
/// let mut file = File::create("foo.txt").await?;
/// file.write_all(b"hello, world!").await?;
/// file.sync_all().await?;
/// # Ok(())
/// # }
/// ```
///
/// The [`write_all`] method is defined on the [`AsyncWriteExt`] trait.
///
/// [`write_all`]: fn@crate::io::AsyncWriteExt::write_all
/// [`AsyncWriteExt`]: trait@crate::io::AsyncWriteExt
pub async fn sync_all(&self) -> io::Result<()> {
let mut inner = self.inner.lock().await;
inner.complete_inflight().await;
let std = self.std.clone();
asyncify(move || std.sync_all()).await
}
/// This function is similar to `sync_all`, except that it may not
/// synchronize file metadata to the filesystem.
///
/// This is intended for use cases that must synchronize content, but don't
/// need the metadata on disk. The goal of this method is to reduce disk
/// operations.
///
/// Note that some platforms may simply implement this in terms of `sync_all`.
///
/// # Examples
///
/// ```no_run
/// use tokio::fs::File;
/// use tokio::io::AsyncWriteExt;
///
/// # async fn dox() -> std::io::Result<()> {
/// let mut file = File::create("foo.txt").await?;
/// file.write_all(b"hello, world!").await?;
/// file.sync_data().await?;
/// # Ok(())
/// # }
/// ```
///
/// The [`write_all`] method is defined on the [`AsyncWriteExt`] trait.
///
/// [`write_all`]: fn@crate::io::AsyncWriteExt::write_all
/// [`AsyncWriteExt`]: trait@crate::io::AsyncWriteExt
pub async fn sync_data(&self) -> io::Result<()> {
let mut inner = self.inner.lock().await;
inner.complete_inflight().await;
let std = self.std.clone();
asyncify(move || std.sync_data()).await
}
/// Truncates or extends the underlying file, updating the size of this file to become size.
///
/// If the size is less than the current file's size, then the file will be
/// shrunk. If it is greater than the current file's size, then the file
/// will be extended to size and have all of the intermediate data filled in
/// with 0s.
///
/// # Errors
///
/// This function will return an error if the file is not opened for
/// writing.
///
/// # Examples
///
/// ```no_run
/// use tokio::fs::File;
/// use tokio::io::AsyncWriteExt;
///
/// # async fn dox() -> std::io::Result<()> {
/// let mut file = File::create("foo.txt").await?;
/// file.write_all(b"hello, world!").await?;
/// file.set_len(10).await?;
/// # Ok(())
/// # }
/// ```
///
/// The [`write_all`] method is defined on the [`AsyncWriteExt`] trait.
///
/// [`write_all`]: fn@crate::io::AsyncWriteExt::write_all
/// [`AsyncWriteExt`]: trait@crate::io::AsyncWriteExt
    pub async fn set_len(&self, size: u64) -> io::Result<()> {
        let mut inner = self.inner.lock().await;
        inner.complete_inflight().await;
        // After `complete_inflight` the state is guaranteed `Idle`, so taking
        // the buffer cannot fail.
        let mut buf = match inner.state {
            State::Idle(ref mut buf_cell) => buf_cell.take().unwrap(),
            _ => unreachable!(),
        };
        // Unread buffered data means the OS cursor is ahead of the logical
        // position; rewind by the discarded amount before truncating.
        let seek = if !buf.is_empty() {
            Some(SeekFrom::Current(buf.discard_read()))
        } else {
            None
        };
        let std = self.std.clone();
        inner.state = State::Busy(spawn_blocking(move || {
            let res = if let Some(seek) = seek {
                (&*std).seek(seek).and_then(|_| std.set_len(size))
            } else {
                std.set_len(size)
            }
            .map(|()| 0); // the value is discarded later
            // Return the result as a seek
            (Operation::Seek(res), buf)
        }));
        // Unlike the poll-based paths, await the freshly spawned task right
        // here so `set_len` only returns once the truncation has happened.
        let (op, buf) = match inner.state {
            State::Idle(_) => unreachable!(),
            State::Busy(ref mut rx) => rx.await?,
        };
        inner.state = State::Idle(Some(buf));
        match op {
            // NOTE(review): the mapped value above is always 0, so on success
            // this resets `inner.pos` to 0 — confirm that is intended.
            Operation::Seek(res) => res.map(|pos| {
                inner.pos = pos;
            }),
            _ => unreachable!(),
        }
    }
/// Queries metadata about the underlying file.
///
/// # Examples
///
/// ```no_run
/// use tokio::fs::File;
///
/// # async fn dox() -> std::io::Result<()> {
/// let file = File::open("foo.txt").await?;
/// let metadata = file.metadata().await?;
///
/// println!("{:?}", metadata);
/// # Ok(())
/// # }
/// ```
pub async fn metadata(&self) -> io::Result<Metadata> {
let std = self.std.clone();
asyncify(move || std.metadata()).await
}
/// Creates a new `File` instance that shares the same underlying file handle
/// as the existing `File` instance. Reads, writes, and seeks will affect both
/// File instances simultaneously.
///
/// # Examples
///
/// ```no_run
/// use tokio::fs::File;
///
/// # async fn dox() -> std::io::Result<()> {
/// let file = File::open("foo.txt").await?;
/// let file_clone = file.try_clone().await?;
/// # Ok(())
/// # }
/// ```
pub async fn try_clone(&self) -> io::Result<File> {
self.inner.lock().await.complete_inflight().await;
let std = self.std.clone();
let std_file = asyncify(move || std.try_clone()).await?;
let mut file = File::from_std(std_file);
file.set_max_buf_size(self.max_buf_size);
Ok(file)
}
/// Destructures `File` into a [`std::fs::File`]. This function is
/// async to allow any in-flight operations to complete.
///
/// Use `File::try_into_std` to attempt conversion immediately.
///
/// # Examples
///
/// ```no_run
/// use tokio::fs::File;
///
/// # async fn dox() -> std::io::Result<()> {
/// let tokio_file = File::open("foo.txt").await?;
/// let std_file = tokio_file.into_std().await;
/// # Ok(())
/// # }
/// ```
    pub async fn into_std(mut self) -> StdFile {
        // Drain any in-flight background operation first: the blocking task
        // holds a clone of `self.std`, so the `Arc` can only be unwrapped
        // once that task has finished.
        self.inner.get_mut().complete_inflight().await;
        Arc::try_unwrap(self.std).expect("Arc::try_unwrap failed")
    }
/// Tries to immediately destructure `File` into a [`std::fs::File`].
///
/// # Errors
///
/// This function will return an error containing the file if some
/// operation is in-flight.
///
/// # Examples
///
/// ```no_run
/// use tokio::fs::File;
///
/// # async fn dox() -> std::io::Result<()> {
/// let tokio_file = File::open("foo.txt").await?;
/// let std_file = tokio_file.try_into_std().unwrap();
/// # Ok(())
/// # }
/// ```
    #[allow(clippy::result_large_err)]
    pub fn try_into_std(mut self) -> Result<StdFile, Self> {
        match Arc::try_unwrap(self.std) {
            Ok(file) => Ok(file),
            Err(std_file_arc) => {
                // Another clone of the handle exists (e.g. an in-flight
                // blocking task). Restore the moved-out field so `self` is
                // whole again, and hand it back to the caller.
                self.std = std_file_arc;
                Err(self)
            }
        }
    }
/// Changes the permissions on the underlying file.
///
/// # Platform-specific behavior
///
/// This function currently corresponds to the `fchmod` function on Unix and
/// the `SetFileInformationByHandle` function on Windows. Note that, this
/// [may change in the future][changes].
///
/// [changes]: https://doc.rust-lang.org/std/io/index.html#platform-specific-behavior
///
/// # Errors
///
/// This function will return an error if the user lacks permission change
/// attributes on the underlying file. It may also return an error in other
/// os-specific unspecified cases.
///
/// # Examples
///
/// ```no_run
/// use tokio::fs::File;
///
/// # async fn dox() -> std::io::Result<()> {
/// let file = File::open("foo.txt").await?;
/// let mut perms = file.metadata().await?.permissions();
/// perms.set_readonly(true);
/// file.set_permissions(perms).await?;
/// # Ok(())
/// # }
/// ```
pub async fn set_permissions(&self, perm: Permissions) -> io::Result<()> {
let std = self.std.clone();
asyncify(move || std.set_permissions(perm)).await
}
/// Set the maximum buffer size for the underlying [`AsyncRead`] / [`AsyncWrite`] operation.
///
/// Although Tokio uses a sensible default value for this buffer size, this function would be
/// useful for changing that default depending on the situation.
///
/// # Examples
///
/// ```no_run
/// use tokio::fs::File;
/// use tokio::io::AsyncWriteExt;
///
/// # async fn dox() -> std::io::Result<()> {
/// let mut file = File::open("foo.txt").await?;
///
/// // Set maximum buffer size to 8 MiB
/// file.set_max_buf_size(8 * 1024 * 1024);
///
/// let mut buf = vec![1; 1024 * 1024 * 1024];
///
/// // Write the 1 GiB buffer in chunks up to 8 MiB each.
/// file.write_all(&mut buf).await?;
/// # Ok(())
/// # }
/// ```
    pub fn set_max_buf_size(&mut self, max_buf_size: usize) {
        // Read at the start of each read/write poll, so the new value applies
        // to the next operation; an already-spawned background task keeps the
        // size it was started with.
        self.max_buf_size = max_buf_size;
    }
    /// Get the maximum buffer size for the underlying [`AsyncRead`] / [`AsyncWrite`] operation.
    ///
    /// See [`File::set_max_buf_size`] for changing it.
    pub fn max_buf_size(&self) -> usize {
        self.max_buf_size
    }
}
impl AsyncRead for File {
    fn poll_read(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        dst: &mut ReadBuf<'_>,
    ) -> Poll<io::Result<()>> {
        ready!(crate::trace::trace_leaf(cx));
        let me = self.get_mut();
        let inner = me.inner.get_mut();
        loop {
            match inner.state {
                State::Idle(ref mut buf_cell) => {
                    let mut buf = buf_cell.take().unwrap();
                    // Serve leftover bytes from a previous background read
                    // (or short-circuit a zero-capacity destination) without
                    // spawning a new blocking task.
                    if !buf.is_empty() || dst.remaining() == 0 {
                        buf.copy_to(dst);
                        *buf_cell = Some(buf);
                        return Poll::Ready(Ok(()));
                    }
                    let std = me.std.clone();
                    // Never read more than the destination can hold or the
                    // configured cap, whichever is smaller.
                    let max_buf_size = cmp::min(dst.remaining(), me.max_buf_size);
                    inner.state = State::Busy(spawn_blocking(move || {
                        // SAFETY: the `Read` implementation of `std` does not
                        // read from the buffer it is borrowing and correctly
                        // reports the length of the data written into the buffer.
                        let res = unsafe { buf.read_from(&mut &*std, max_buf_size) };
                        (Operation::Read(res), buf)
                    }));
                }
                State::Busy(ref mut rx) => {
                    let (op, mut buf) = ready!(Pin::new(rx).poll(cx))?;
                    match op {
                        Operation::Read(Ok(_)) => {
                            buf.copy_to(dst);
                            inner.state = State::Idle(Some(buf));
                            return Poll::Ready(Ok(()));
                        }
                        Operation::Read(Err(e)) => {
                            assert!(buf.is_empty());
                            inner.state = State::Idle(Some(buf));
                            return Poll::Ready(Err(e));
                        }
                        Operation::Write(Ok(())) => {
                            assert!(buf.is_empty());
                            inner.state = State::Idle(Some(buf));
                            // A previous write finished cleanly; loop back and
                            // start the actual read.
                            continue;
                        }
                        Operation::Write(Err(e)) => {
                            // A read call must not swallow a write error; park
                            // it for the next write/flush (see
                            // `Inner::last_write_err`) and fall through to
                            // retry the read on the next loop iteration.
                            assert!(inner.last_write_err.is_none());
                            inner.last_write_err = Some(e.kind());
                            inner.state = State::Idle(Some(buf));
                        }
                        Operation::Seek(result) => {
                            assert!(buf.is_empty());
                            inner.state = State::Idle(Some(buf));
                            // Record the new cursor position, then retry.
                            if let Ok(pos) = result {
                                inner.pos = pos;
                            }
                            continue;
                        }
                    }
                }
            }
        }
    }
}
impl AsyncSeek for File {
    fn start_seek(self: Pin<&mut Self>, mut pos: SeekFrom) -> io::Result<()> {
        let me = self.get_mut();
        let inner = me.inner.get_mut();
        match inner.state {
            // A seek cannot be queued behind another operation; the caller
            // must drive `poll_complete` first.
            State::Busy(_) => Err(io::Error::new(
                io::ErrorKind::Other,
                "other file operation is pending, call poll_complete before start_seek",
            )),
            State::Idle(ref mut buf_cell) => {
                let mut buf = buf_cell.take().unwrap();
                // Factor in any unread data from the buf: the OS cursor is
                // ahead of the logical position by the discarded amount, so a
                // relative seek must be adjusted by it.
                if !buf.is_empty() {
                    let n = buf.discard_read();
                    if let SeekFrom::Current(ref mut offset) = pos {
                        *offset += n;
                    }
                }
                let std = me.std.clone();
                inner.state = State::Busy(spawn_blocking(move || {
                    let res = (&*std).seek(pos);
                    (Operation::Seek(res), buf)
                }));
                Ok(())
            }
        }
    }
    fn poll_complete(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<u64>> {
        ready!(crate::trace::trace_leaf(cx));
        let inner = self.inner.get_mut();
        loop {
            match inner.state {
                // Nothing in flight: report the last known position.
                State::Idle(_) => return Poll::Ready(Ok(inner.pos)),
                State::Busy(ref mut rx) => {
                    let (op, buf) = ready!(Pin::new(rx).poll(cx))?;
                    inner.state = State::Idle(Some(buf));
                    match op {
                        Operation::Read(_) => {}
                        Operation::Write(Err(e)) => {
                            // Park the write error for the next write/flush
                            // call instead of surfacing it from a seek.
                            assert!(inner.last_write_err.is_none());
                            inner.last_write_err = Some(e.kind());
                        }
                        Operation::Write(_) => {}
                        Operation::Seek(res) => {
                            if let Ok(pos) = res {
                                inner.pos = pos;
                            }
                            return Poll::Ready(res);
                        }
                    }
                }
            }
        }
    }
}
impl AsyncWrite for File {
    fn poll_write(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        src: &[u8],
    ) -> Poll<io::Result<usize>> {
        ready!(crate::trace::trace_leaf(cx));
        let me = self.get_mut();
        let inner = me.inner.get_mut();
        // Surface a write error saved by an earlier read/flush before
        // accepting new data.
        if let Some(e) = inner.last_write_err.take() {
            return Poll::Ready(Err(e.into()));
        }
        loop {
            match inner.state {
                State::Idle(ref mut buf_cell) => {
                    let mut buf = buf_cell.take().unwrap();
                    // Unread buffered data means the OS cursor is ahead of
                    // the logical position; rewind before writing.
                    let seek = if !buf.is_empty() {
                        Some(SeekFrom::Current(buf.discard_read()))
                    } else {
                        None
                    };
                    let n = buf.copy_from(src, me.max_buf_size);
                    let std = me.std.clone();
                    // `spawn_mandatory_blocking` (not `spawn_blocking`) so
                    // buffered bytes still reach the file during runtime
                    // shutdown; it yields `None` if the task can't be queued.
                    let blocking_task_join_handle = spawn_mandatory_blocking(move || {
                        let res = if let Some(seek) = seek {
                            (&*std).seek(seek).and_then(|_| buf.write_to(&mut &*std))
                        } else {
                            buf.write_to(&mut &*std)
                        };
                        (Operation::Write(res), buf)
                    })
                    .ok_or_else(|| {
                        io::Error::new(io::ErrorKind::Other, "background task failed")
                    })?;
                    inner.state = State::Busy(blocking_task_join_handle);
                    // The data is only buffered at this point; report the
                    // copied length immediately (errors show up on the next
                    // write or flush).
                    return Poll::Ready(Ok(n));
                }
                State::Busy(ref mut rx) => {
                    let (op, buf) = ready!(Pin::new(rx).poll(cx))?;
                    inner.state = State::Idle(Some(buf));
                    match op {
                        Operation::Read(_) => {
                            // We don't care about the result here. The fact
                            // that the cursor has advanced will be reflected in
                            // the next iteration of the loop
                            continue;
                        }
                        Operation::Write(res) => {
                            // If the previous write was successful, continue.
                            // Otherwise, error.
                            res?;
                            continue;
                        }
                        Operation::Seek(_) => {
                            // Ignore the seek
                            continue;
                        }
                    }
                }
            }
        }
    }
    // NOTE(review): this mirrors `poll_write` exactly except for
    // `copy_from_bufs`; consider factoring the shared state machine into a
    // private helper to keep the two paths from drifting apart.
    fn poll_write_vectored(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        bufs: &[io::IoSlice<'_>],
    ) -> Poll<Result<usize, io::Error>> {
        ready!(crate::trace::trace_leaf(cx));
        let me = self.get_mut();
        let inner = me.inner.get_mut();
        if let Some(e) = inner.last_write_err.take() {
            return Poll::Ready(Err(e.into()));
        }
        loop {
            match inner.state {
                State::Idle(ref mut buf_cell) => {
                    let mut buf = buf_cell.take().unwrap();
                    let seek = if !buf.is_empty() {
                        Some(SeekFrom::Current(buf.discard_read()))
                    } else {
                        None
                    };
                    // Gather from as many slices as fit under the cap.
                    let n = buf.copy_from_bufs(bufs, me.max_buf_size);
                    let std = me.std.clone();
                    let blocking_task_join_handle = spawn_mandatory_blocking(move || {
                        let res = if let Some(seek) = seek {
                            (&*std).seek(seek).and_then(|_| buf.write_to(&mut &*std))
                        } else {
                            buf.write_to(&mut &*std)
                        };
                        (Operation::Write(res), buf)
                    })
                    .ok_or_else(|| {
                        io::Error::new(io::ErrorKind::Other, "background task failed")
                    })?;
                    inner.state = State::Busy(blocking_task_join_handle);
                    return Poll::Ready(Ok(n));
                }
                State::Busy(ref mut rx) => {
                    let (op, buf) = ready!(Pin::new(rx).poll(cx))?;
                    inner.state = State::Idle(Some(buf));
                    match op {
                        Operation::Read(_) => {
                            // We don't care about the result here. The fact
                            // that the cursor has advanced will be reflected in
                            // the next iteration of the loop
                            continue;
                        }
                        Operation::Write(res) => {
                            // If the previous write was successful, continue.
                            // Otherwise, error.
                            res?;
                            continue;
                        }
                        Operation::Seek(_) => {
                            // Ignore the seek
                            continue;
                        }
                    }
                }
            }
        }
    }
    fn is_write_vectored(&self) -> bool {
        // `poll_write_vectored` genuinely gathers multiple slices via
        // `copy_from_bufs`, so advertise vectored-write support.
        true
    }
    fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
        ready!(crate::trace::trace_leaf(cx));
        let inner = self.inner.get_mut();
        inner.poll_flush(cx)
    }
    fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
        ready!(crate::trace::trace_leaf(cx));
        // Files have no protocol-level shutdown; flushing pending writes is
        // all that is required.
        self.poll_flush(cx)
    }
}
impl From<StdFile> for File {
    /// Wraps a blocking `std` file handle; equivalent to [`File::from_std`].
    fn from(std: StdFile) -> Self {
        Self::from_std(std)
    }
}
impl fmt::Debug for File {
    /// Formats as `tokio::fs::File { std: .. }`, exposing only the wrapped
    /// standard-library handle.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut builder = f.debug_struct("tokio::fs::File");
        builder.field("std", &self.std);
        builder.finish()
    }
}
#[cfg(unix)]
impl std::os::unix::io::AsRawFd for File {
    fn as_raw_fd(&self) -> std::os::unix::io::RawFd {
        self.std.as_raw_fd()
    }
}
#[cfg(unix)]
impl std::os::unix::io::AsFd for File {
    fn as_fd(&self) -> std::os::unix::io::BorrowedFd<'_> {
        // SAFETY: `self.std` owns the file descriptor and lives at least as
        // long as the `&self` borrow, so the raw fd stays valid for the
        // lifetime of the returned `BorrowedFd<'_>`.
        unsafe {
            std::os::unix::io::BorrowedFd::borrow_raw(std::os::unix::io::AsRawFd::as_raw_fd(self))
        }
    }
}
#[cfg(unix)]
impl std::os::unix::io::FromRawFd for File {
    unsafe fn from_raw_fd(fd: std::os::unix::io::RawFd) -> Self {
        // Safety: exactly the same safety contract as
        // `std::os::unix::io::FromRawFd::from_raw_fd`.
        unsafe { StdFile::from_raw_fd(fd).into() }
    }
}
cfg_windows! {
    use crate::os::windows::io::{AsRawHandle, FromRawHandle, RawHandle, AsHandle, BorrowedHandle};
    impl AsRawHandle for File {
        fn as_raw_handle(&self) -> RawHandle {
            self.std.as_raw_handle()
        }
    }
    impl AsHandle for File {
        fn as_handle(&self) -> BorrowedHandle<'_> {
            // SAFETY: `self.std` owns the handle and lives at least as long
            // as the `&self` borrow, so the raw handle stays valid for the
            // lifetime of the returned `BorrowedHandle<'_>`.
            unsafe {
                BorrowedHandle::borrow_raw(
                    AsRawHandle::as_raw_handle(self),
                )
            }
        }
    }
    impl FromRawHandle for File {
        unsafe fn from_raw_handle(handle: RawHandle) -> Self {
            // Safety: exactly the same safety contract as
            // `FromRawHandle::from_raw_handle`.
            unsafe { StdFile::from_raw_handle(handle).into() }
        }
    }
}
impl Inner {
    /// Drives any background operation to completion, stashing a write error
    /// in `last_write_err` (to be reported by the next write/flush) rather
    /// than returning it.
    async fn complete_inflight(&mut self) {
        use std::future::poll_fn;
        poll_fn(|cx| self.poll_complete_inflight(cx)).await;
    }
    fn poll_complete_inflight(&mut self, cx: &mut Context<'_>) -> Poll<()> {
        ready!(crate::trace::trace_leaf(cx));
        match self.poll_flush(cx) {
            Poll::Ready(Err(e)) => {
                // Park the error rather than dropping it; the next
                // write/flush call will surface it.
                self.last_write_err = Some(e.kind());
                Poll::Ready(())
            }
            Poll::Ready(Ok(())) => Poll::Ready(()),
            Poll::Pending => Poll::Pending,
        }
    }
    /// Reports any previously saved write error, then waits for the in-flight
    /// operation (if any) to finish; only write results are propagated.
    fn poll_flush(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
        if let Some(e) = self.last_write_err.take() {
            return Poll::Ready(Err(e.into()));
        }
        let (op, buf) = match self.state {
            State::Idle(_) => return Poll::Ready(Ok(())),
            State::Busy(ref mut rx) => ready!(Pin::new(rx).poll(cx))?,
        };
        // The buffer is not used here
        self.state = State::Idle(Some(buf));
        match op {
            Operation::Read(_) => Poll::Ready(Ok(())),
            Operation::Write(res) => Poll::Ready(res),
            Operation::Seek(_) => Poll::Ready(Ok(())),
        }
    }
}
#[cfg(test)]
mod tests;
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/fs/copy.rs | tokio/src/fs/copy.rs | use crate::fs::asyncify;
use std::path::Path;
/// Copies the contents of one file to another. This function will also copy the permission bits
/// of the original file to the destination file.
/// This function will overwrite the contents of `to`.
///
/// This is the async equivalent of [`std::fs::copy`].
///
/// # Examples
///
/// ```no_run
/// use tokio::fs;
///
/// # async fn dox() -> std::io::Result<()> {
/// fs::copy("foo.txt", "bar.txt").await?;
/// # Ok(())
/// # }
/// ```
/// Copies `from` to `to` (contents and permission bits) on the blocking-task
/// pool, returning the number of bytes copied.
pub async fn copy(from: impl AsRef<Path>, to: impl AsRef<Path>) -> Result<u64, std::io::Error> {
    let src = from.as_ref().to_path_buf();
    let dst = to.as_ref().to_path_buf();
    asyncify(move || std::fs::copy(src, dst)).await
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/fs/mod.rs | tokio/src/fs/mod.rs | #![cfg(not(loom))]
//! Asynchronous file utilities.
//!
//! This module contains utility methods for working with the file system
//! asynchronously. This includes reading/writing to files, and working with
//! directories.
//!
//! Be aware that most operating systems do not provide asynchronous file system
//! APIs. Because of that, Tokio will use ordinary blocking file operations
//! behind the scenes. This is done using the [`spawn_blocking`] threadpool to
//! run them in the background.
//!
//! The `tokio::fs` module should only be used for ordinary files. Trying to use
//! it with e.g., a named pipe on Linux can result in surprising behavior,
//! such as hangs during runtime shutdown. For special files, you should use a
//! dedicated type such as [`tokio::net::unix::pipe`] or [`AsyncFd`] instead.
//!
//! Currently, Tokio will always use [`spawn_blocking`] on all platforms, but it
//! may be changed to use asynchronous file system APIs such as io_uring in the
//! future.
//!
//! # Usage
//!
//! The easiest way to use this module is to use the utility functions that
//! operate on entire files:
//!
//! * [`tokio::fs::read`](fn@crate::fs::read)
//! * [`tokio::fs::read_to_string`](fn@crate::fs::read_to_string)
//! * [`tokio::fs::write`](fn@crate::fs::write)
//!
//! The two `read` functions read the entire file and return its contents.
//! The `write` function takes the contents of the file and writes those
//! contents to the file. It overwrites the existing file, if any.
//!
//! For example, to read the file:
//!
//! ```
//! # async fn dox() -> std::io::Result<()> {
//! let contents = tokio::fs::read_to_string("my_file.txt").await?;
//!
//! println!("File has {} lines.", contents.lines().count());
//! # Ok(())
//! # }
//! ```
//!
//! To overwrite the file:
//!
//! ```
//! # async fn dox() -> std::io::Result<()> {
//! let contents = "First line.\nSecond line.\nThird line.\n";
//!
//! tokio::fs::write("my_file.txt", contents.as_bytes()).await?;
//! # Ok(())
//! # }
//! ```
//!
//! ## Using `File`
//!
//! The main type for interacting with files is [`File`]. It can be used to read
//! from and write to a given file. This is done using the [`AsyncRead`] and
//! [`AsyncWrite`] traits. This type is generally used when you want to do
//! something more complex than just reading or writing the entire contents in
//! one go.
//!
//! **Note:** It is important to use [`flush`] when writing to a Tokio
//! [`File`]. This is because calls to `write` will return before the write has
//! finished, and [`flush`] will wait for the write to finish. (The write will
//! happen even if you don't flush; it will just happen later.) This is
//! different from [`std::fs::File`], and is due to the fact that `File` uses
//! `spawn_blocking` behind the scenes.
//!
//! For example, to count the number of lines in a file without loading the
//! entire file into memory:
//!
//! ```no_run
//! use tokio::fs::File;
//! use tokio::io::AsyncReadExt;
//!
//! # async fn dox() -> std::io::Result<()> {
//! let mut file = File::open("my_file.txt").await?;
//!
//! let mut chunk = vec![0; 4096];
//! let mut number_of_lines = 0;
//! loop {
//! let len = file.read(&mut chunk).await?;
//! if len == 0 {
//! // Length of zero means end of file.
//! break;
//! }
//! for &b in &chunk[..len] {
//! if b == b'\n' {
//! number_of_lines += 1;
//! }
//! }
//! }
//!
//! println!("File has {} lines.", number_of_lines);
//! # Ok(())
//! # }
//! ```
//!
//! For example, to write a file line-by-line:
//!
//! ```no_run
//! use tokio::fs::File;
//! use tokio::io::AsyncWriteExt;
//!
//! # async fn dox() -> std::io::Result<()> {
//! let mut file = File::create("my_file.txt").await?;
//!
//! file.write_all(b"First line.\n").await?;
//! file.write_all(b"Second line.\n").await?;
//! file.write_all(b"Third line.\n").await?;
//!
//! // Remember to call `flush` after writing!
//! file.flush().await?;
//! # Ok(())
//! # }
//! ```
//!
//! ## Tuning your file IO
//!
//! Tokio's file uses [`spawn_blocking`] behind the scenes, and this has serious
//! performance consequences. To get good performance with file IO on Tokio, it
//! is recommended to batch your operations into as few `spawn_blocking` calls
//! as possible.
//!
//! One example of this difference can be seen by comparing the two reading
//! examples above. The first example uses [`tokio::fs::read`], which reads the
//! entire file in a single `spawn_blocking` call, and then returns it. The
//! second example will read the file in chunks using many `spawn_blocking`
//! calls. This means that the second example will most likely be more expensive
//! for large files. (Of course, using chunks may be necessary for very large
//! files that don't fit in memory.)
//!
//! The following examples will show some strategies for this:
//!
//! When creating a file, write the data to a `String` or `Vec<u8>` and then
//! write the entire file in a single `spawn_blocking` call with
//! `tokio::fs::write`.
//!
//! ```no_run
//! # async fn dox() -> std::io::Result<()> {
//! let mut contents = String::new();
//!
//! contents.push_str("First line.\n");
//! contents.push_str("Second line.\n");
//! contents.push_str("Third line.\n");
//!
//! tokio::fs::write("my_file.txt", contents.as_bytes()).await?;
//! # Ok(())
//! # }
//! ```
//!
//! Use [`BufReader`] and [`BufWriter`] to buffer many small reads or writes
//! into a few large ones. This example will most likely only perform one
//! `spawn_blocking` call.
//!
//! ```no_run
//! use tokio::fs::File;
//! use tokio::io::{AsyncWriteExt, BufWriter};
//!
//! # async fn dox() -> std::io::Result<()> {
//! let mut file = BufWriter::new(File::create("my_file.txt").await?);
//!
//! file.write_all(b"First line.\n").await?;
//! file.write_all(b"Second line.\n").await?;
//! file.write_all(b"Third line.\n").await?;
//!
//! // Due to the BufWriter, the actual write and spawn_blocking
//! // call happens when you flush.
//! file.flush().await?;
//! # Ok(())
//! # }
//! ```
//!
//! Manually use [`std::fs`] inside [`spawn_blocking`].
//!
//! ```no_run
//! use std::fs::File;
//! use std::io::{self, Write};
//! use tokio::task::spawn_blocking;
//!
//! # async fn dox() -> std::io::Result<()> {
//! spawn_blocking(move || {
//! let mut file = File::create("my_file.txt")?;
//!
//! file.write_all(b"First line.\n")?;
//! file.write_all(b"Second line.\n")?;
//! file.write_all(b"Third line.\n")?;
//!
//! // Unlike Tokio's file, the std::fs file does
//! // not need flush.
//!
//! io::Result::Ok(())
//! }).await.unwrap()?;
//! # Ok(())
//! # }
//! ```
//!
//! It's also good to be aware of [`File::set_max_buf_size`], which controls the
//! maximum amount of bytes that Tokio's [`File`] will read or write in a single
//! [`spawn_blocking`] call. The default is two megabytes, but this is subject
//! to change.
//!
//! [`spawn_blocking`]: fn@crate::task::spawn_blocking
//! [`AsyncRead`]: trait@crate::io::AsyncRead
//! [`AsyncWrite`]: trait@crate::io::AsyncWrite
//! [`BufReader`]: struct@crate::io::BufReader
//! [`BufWriter`]: struct@crate::io::BufWriter
//! [`tokio::net::unix::pipe`]: crate::net::unix::pipe
//! [`AsyncFd`]: crate::io::unix::AsyncFd
//! [`flush`]: crate::io::AsyncWriteExt::flush
//! [`tokio::fs::read`]: fn@crate::fs::read
mod canonicalize;
pub use self::canonicalize::canonicalize;
mod create_dir;
pub use self::create_dir::create_dir;
mod create_dir_all;
pub use self::create_dir_all::create_dir_all;
mod dir_builder;
pub use self::dir_builder::DirBuilder;
mod file;
pub use self::file::File;
mod hard_link;
pub use self::hard_link::hard_link;
mod metadata;
pub use self::metadata::metadata;
mod open_options;
pub use self::open_options::OpenOptions;
mod read;
pub use self::read::read;
mod read_dir;
pub use self::read_dir::{read_dir, DirEntry, ReadDir};
mod read_link;
pub use self::read_link::read_link;
mod read_to_string;
pub use self::read_to_string::read_to_string;
mod remove_dir;
pub use self::remove_dir::remove_dir;
mod remove_dir_all;
pub use self::remove_dir_all::remove_dir_all;
mod remove_file;
pub use self::remove_file::remove_file;
mod rename;
pub use self::rename::rename;
mod set_permissions;
pub use self::set_permissions::set_permissions;
mod symlink_metadata;
pub use self::symlink_metadata::symlink_metadata;
mod write;
pub use self::write::write;
mod copy;
pub use self::copy::copy;
mod try_exists;
pub use self::try_exists::try_exists;
#[cfg(test)]
mod mocks;
feature! {
#![unix]
mod symlink;
pub use self::symlink::symlink;
}
cfg_windows! {
mod symlink_dir;
pub use self::symlink_dir::symlink_dir;
mod symlink_file;
pub use self::symlink_file::symlink_file;
}
cfg_io_uring! {
pub(crate) mod read_uring;
pub(crate) use self::read_uring::read_uring;
pub(crate) use self::open_options::UringOpenOptions;
}
use std::io;
#[cfg(not(test))]
use crate::blocking::spawn_blocking;
#[cfg(test)]
use mocks::spawn_blocking;
/// Runs the provided blocking, fallible closure on the blocking thread
/// pool and awaits its result.
///
/// A failure to join the background task (e.g. the task panicked or the
/// pool shut down) is surfaced as an `io::Error` rather than a panic.
pub(crate) async fn asyncify<F, T>(f: F) -> io::Result<T>
where
    F: FnOnce() -> io::Result<T> + Send + 'static,
    T: Send + 'static,
{
    // Flatten the join result: a join failure becomes an I/O error, and a
    // successful join yields the closure's own io::Result.
    spawn_blocking(f)
        .await
        .map_err(|_| io::Error::new(io::ErrorKind::Other, "background task failed"))?
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/fs/hard_link.rs | tokio/src/fs/hard_link.rs | use crate::fs::asyncify;
use std::io;
use std::path::Path;
/// Creates a new hard link on the filesystem.
///
/// This is an async version of [`std::fs::hard_link`].
///
/// The `link` path will be a link pointing to the `original` path. Note that systems
/// often require these two paths to both be located on the same filesystem.
///
/// # Platform-specific behavior
///
/// This function currently corresponds to the `link` function on Unix
/// and the `CreateHardLink` function on Windows.
/// Note that, this [may change in the future][changes].
///
/// [changes]: https://doc.rust-lang.org/std/io/index.html#platform-specific-behavior
///
/// # Errors
///
/// This function will return an error in the following situations, but is not
/// limited to just these cases:
///
/// * The `original` path is not a file or doesn't exist.
///
/// # Examples
///
/// ```no_run
/// use tokio::fs;
///
/// #[tokio::main]
/// async fn main() -> std::io::Result<()> {
/// fs::hard_link("a.txt", "b.txt").await?; // Hard link a.txt to b.txt
/// Ok(())
/// }
/// ```
pub async fn hard_link(original: impl AsRef<Path>, link: impl AsRef<Path>) -> io::Result<()> {
    // Take owned copies so the paths can move into the 'static closure.
    let (src, dst) = (original.as_ref().to_owned(), link.as_ref().to_owned());
    // Run the blocking std call on the blocking thread pool.
    asyncify(move || std::fs::hard_link(src, dst)).await
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/fs/read_uring.rs | tokio/src/fs/read_uring.rs | use crate::fs::OpenOptions;
use crate::runtime::driver::op::Op;
use std::io;
use std::io::ErrorKind;
use std::os::fd::OwnedFd;
use std::path::Path;
// This algorithm is inspired by the Rust std lib, version 1.90.0:
// https://doc.rust-lang.org/1.90.0/src/std/io/mod.rs.html#409

// Size of the fixed probe read used to detect EOF on an exactly-sized
// buffer without forcing a capacity grow (see `small_probe_read`).
const PROBE_SIZE: usize = 32;
const PROBE_SIZE_U32: u32 = PROBE_SIZE as u32;

// Max bytes we can read using an io_uring submission at a time.
// SAFETY: cannot be higher than u32::MAX for a safe cast (see `op_read`).
// Set to read at most 64 MiB at a time.
const MAX_READ_SIZE: usize = 64 * 1024 * 1024;
/// Reads the entire file at `path` into a `Vec<u8>` using io_uring reads.
///
/// The file size, when available, is used to pre-reserve the buffer so
/// most files are read without reallocating.
pub(crate) async fn read_uring(path: &Path) -> io::Result<Vec<u8>> {
    let file = OpenOptions::new().read(true).open(path).await?;

    // TODO: use io uring in the future to obtain metadata
    let size_hint = match file.metadata().await {
        Ok(meta) => Some(meta.len() as usize),
        Err(_) => None,
    };

    let fd: OwnedFd = file
        .try_into_std()
        .expect("unexpected in-flight operation detected")
        .into();

    let mut buf = Vec::new();
    if let Some(hint) = size_hint {
        buf.try_reserve(hint)?;
    }

    read_to_end_uring(fd, buf).await
}
/// Repeatedly issues io_uring reads at increasing offsets until EOF,
/// growing `buf` as needed, and returns the filled buffer.
async fn read_to_end_uring(mut fd: OwnedFd, mut buf: Vec<u8>) -> io::Result<Vec<u8>> {
    // Byte offset of the next read; advanced inside `op_read`.
    let mut offset = 0;
    let start_cap = buf.capacity();

    loop {
        if buf.len() == buf.capacity() && buf.capacity() == start_cap && buf.len() >= PROBE_SIZE {
            // The buffer might be an exact fit. Let's read into a probe buffer
            // and see if it returns `Ok(0)`. If so, we've avoided an
            // unnecessary increasing of the capacity. But if not, append the
            // probe buffer to the primary buffer and let its capacity grow.
            let (r_fd, r_buf, is_eof) = small_probe_read(fd, buf, &mut offset).await?;

            if is_eof {
                return Ok(r_buf);
            }

            buf = r_buf;
            fd = r_fd;
        }

        // buf is full, need more capacity
        if buf.len() == buf.capacity() {
            buf.try_reserve(PROBE_SIZE)?;
        }

        // prepare the spare capacity to be read into, capped at MAX_READ_SIZE
        let buf_len = usize::min(buf.spare_capacity_mut().len(), MAX_READ_SIZE);

        // buf_len cannot be greater than u32::MAX because MAX_READ_SIZE
        // is less than u32::MAX
        let read_len = u32::try_from(buf_len).expect("buf_len must always fit in u32");

        // read into spare capacity
        let (r_fd, r_buf, is_eof) = op_read(fd, buf, &mut offset, read_len).await?;

        if is_eof {
            return Ok(r_buf);
        }

        fd = r_fd;
        buf = r_buf;
    }
}
/// Performs a PROBE_SIZE read by temporarily reusing the last PROBE_SIZE
/// bytes of `buf` as scratch space, so an exactly-full buffer can detect
/// EOF without growing its capacity first.
async fn small_probe_read(
    fd: OwnedFd,
    mut buf: Vec<u8>,
    offset: &mut u64,
) -> io::Result<(OwnedFd, Vec<u8>, bool)> {
    let read_len = PROBE_SIZE_U32;
    // Stash the buffer's tail so its space can host the probe read.
    let mut temp_arr = [0; PROBE_SIZE];
    // we don't call this function if the buffer's length < PROBE_SIZE
    let back_bytes_len = buf.len() - PROBE_SIZE;
    temp_arr.copy_from_slice(&buf[back_bytes_len..]);
    // We're decreasing the length of the buffer and len is greater
    // than PROBE_SIZE. So we can read into the discarded length
    buf.truncate(back_bytes_len);

    let (r_fd, mut r_buf, is_eof) = op_read(fd, buf, offset, read_len).await?;

    // If `size_read` returns zero due to reasons such as the buffer's exact fit,
    // then this `try_reserve` does not perform allocation.
    r_buf.try_reserve(PROBE_SIZE)?;
    // Reinsert the stashed tail ahead of any probe bytes just read.
    r_buf.splice(back_bytes_len..back_bytes_len, temp_arr);

    Ok((r_fd, r_buf, is_eof))
}
/// Performs one positional read of up to `read_len` bytes into `buf`'s
/// spare capacity, retrying transparently if the syscall is interrupted.
///
/// On success, advances `offset` by the number of bytes read and returns
/// the file descriptor, the buffer, and whether EOF (a zero-byte read)
/// was reached.
async fn op_read(
    mut fd: OwnedFd,
    mut buf: Vec<u8>,
    offset: &mut u64,
    read_len: u32,
) -> io::Result<(OwnedFd, Vec<u8>, bool)> {
    loop {
        // The op consumes and returns ownership of the fd and buffer.
        let (res, ret_fd, ret_buf) = Op::read(fd, buf, read_len, *offset).await;

        match res {
            Ok(size_read) => {
                *offset += size_read as u64;
                return Ok((ret_fd, ret_buf, size_read == 0));
            }
            // EINTR: reclaim ownership and retry the same read.
            Err(err) if err.kind() == ErrorKind::Interrupted => {
                fd = ret_fd;
                buf = ret_buf;
            }
            Err(err) => return Err(err),
        }
    }
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/fs/remove_file.rs | tokio/src/fs/remove_file.rs | use crate::fs::asyncify;
use std::io;
use std::path::Path;
/// Removes a file from the filesystem.
///
/// Note that there is no guarantee that the file is immediately deleted (e.g.
/// depending on platform, other open file descriptors may prevent immediate
/// removal).
///
/// This is an async version of [`std::fs::remove_file`].
pub async fn remove_file(path: impl AsRef<Path>) -> io::Result<()> {
    // Owned copy so the path can move into the 'static blocking closure.
    let target = path.as_ref().to_owned();
    asyncify(move || std::fs::remove_file(target)).await
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/fs/read.rs | tokio/src/fs/read.rs | use crate::fs::asyncify;
use std::{io, path::Path};
/// Reads the entire contents of a file into a bytes vector.
///
/// This is an async version of [`std::fs::read`].
///
/// This is a convenience function for using [`File::open`] and [`read_to_end`]
/// with fewer imports and without an intermediate variable. It pre-allocates a
/// buffer based on the file size when available, so it is generally faster than
/// reading into a vector created with `Vec::new()`.
///
/// This operation is implemented by running the equivalent blocking operation
/// on a separate thread pool using [`spawn_blocking`].
///
/// [`File::open`]: super::File::open
/// [`read_to_end`]: crate::io::AsyncReadExt::read_to_end
/// [`spawn_blocking`]: crate::task::spawn_blocking
///
/// # Errors
///
/// This function will return an error if `path` does not already exist.
/// Other errors may also be returned according to [`OpenOptions::open`].
///
/// [`OpenOptions::open`]: super::OpenOptions::open
///
/// It will also return an error if it encounters while reading an error
/// of a kind other than [`ErrorKind::Interrupted`].
///
/// [`ErrorKind::Interrupted`]: std::io::ErrorKind::Interrupted
///
/// # io_uring support
///
/// On Linux, you can also use io_uring for executing system calls. To enable
/// io_uring, you need to specify the `--cfg tokio_unstable` flag at compile time,
/// enable the io-uring cargo feature, and set the `Builder::enable_io_uring`
/// runtime option.
///
/// Support for io_uring is currently experimental, so its behavior may change
/// or it may be removed in future versions.
///
/// # Examples
///
/// ```no_run
/// use tokio::fs;
/// use std::net::SocketAddr;
///
/// #[tokio::main]
/// async fn main() -> Result<(), Box<dyn std::error::Error + 'static>> {
/// let contents = fs::read("address.txt").await?;
/// let foo: SocketAddr = String::from_utf8_lossy(&contents).parse()?;
/// Ok(())
/// }
/// ```
pub async fn read(path: impl AsRef<Path>) -> io::Result<Vec<u8>> {
    let path = path.as_ref().to_owned();

    // io_uring fast path: only compiled in on Linux with tokio_unstable +
    // the io-uring/rt/fs features, and only taken when the runtime driver
    // reports the Read opcode as available.
    #[cfg(all(
        tokio_unstable,
        feature = "io-uring",
        feature = "rt",
        feature = "fs",
        target_os = "linux"
    ))]
    {
        use crate::fs::read_uring;

        let handle = crate::runtime::Handle::current();
        let driver_handle = handle.inner.driver().io();
        if driver_handle.check_and_init(io_uring::opcode::Read::CODE)? {
            return read_uring(&path).await;
        }
    }

    // Default path: run the blocking std read on the blocking thread pool.
    asyncify(move || std::fs::read(path)).await
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/fs/set_permissions.rs | tokio/src/fs/set_permissions.rs | use crate::fs::asyncify;
use std::fs::Permissions;
use std::io;
use std::path::Path;
/// Changes the permissions found on a file or a directory.
///
/// This is an async version of [`std::fs::set_permissions`][std]
///
/// [std]: fn@std::fs::set_permissions
pub async fn set_permissions(path: impl AsRef<Path>, perm: Permissions) -> io::Result<()> {
    // Owned copy so the path can move into the 'static blocking closure.
    let owned_path = path.as_ref().to_owned();
    asyncify(move || std::fs::set_permissions(owned_path, perm)).await
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/fs/create_dir.rs | tokio/src/fs/create_dir.rs | use crate::fs::asyncify;
use std::io;
use std::path::Path;
/// Creates a new, empty directory at the provided path.
///
/// This is an async version of [`std::fs::create_dir`].
///
/// # Platform-specific behavior
///
/// This function currently corresponds to the `mkdir` function on Unix
/// and the `CreateDirectory` function on Windows.
/// Note that, this [may change in the future][changes].
///
/// [changes]: https://doc.rust-lang.org/std/io/index.html#platform-specific-behavior
///
/// **NOTE**: If a parent of the given path doesn't exist, this function will
/// return an error. To create a directory and all its missing parents at the
/// same time, use the [`create_dir_all`] function.
///
/// # Errors
///
/// This function will return an error in the following situations, but is not
/// limited to just these cases:
///
/// * User lacks permissions to create directory at `path`.
/// * A parent of the given path doesn't exist. (To create a directory and all
/// its missing parents at the same time, use the [`create_dir_all`]
/// function.)
/// * `path` already exists.
///
/// [`create_dir_all`]: super::create_dir_all()
///
/// # Examples
///
/// ```no_run
/// use tokio::fs;
/// use std::io;
///
/// #[tokio::main]
/// async fn main() -> io::Result<()> {
/// fs::create_dir("/some/dir").await?;
/// Ok(())
/// }
/// ```
pub async fn create_dir(path: impl AsRef<Path>) -> io::Result<()> {
    // Owned copy so the path can move into the 'static blocking closure.
    let dir = path.as_ref().to_owned();
    asyncify(move || std::fs::create_dir(dir)).await
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/fs/read_dir.rs | tokio/src/fs/read_dir.rs | use crate::fs::asyncify;
use std::collections::VecDeque;
use std::ffi::OsString;
use std::fs::{FileType, Metadata};
use std::future::Future;
use std::io;
use std::path::{Path, PathBuf};
use std::pin::Pin;
use std::sync::Arc;
use std::task::{ready, Context, Poll};
#[cfg(test)]
use super::mocks::spawn_blocking;
#[cfg(test)]
use super::mocks::JoinHandle;
#[cfg(not(test))]
use crate::blocking::spawn_blocking;
#[cfg(not(test))]
use crate::blocking::JoinHandle;
const CHUNK_SIZE: usize = 32;
/// Returns a stream over the entries within a directory.
///
/// This is an async version of [`std::fs::read_dir`].
///
/// This operation is implemented by running the equivalent blocking
/// operation on a separate thread pool using [`spawn_blocking`].
///
/// [`spawn_blocking`]: crate::task::spawn_blocking
pub async fn read_dir(path: impl AsRef<Path>) -> io::Result<ReadDir> {
    let path = path.as_ref().to_owned();

    // Open the directory AND fetch the first chunk of entries in a single
    // trip to the blocking pool.
    asyncify(move || -> io::Result<ReadDir> {
        let mut std_iter = std::fs::read_dir(path)?;
        let mut chunk = VecDeque::with_capacity(CHUNK_SIZE);
        let more = ReadDir::next_chunk(&mut chunk, &mut std_iter);
        Ok(ReadDir(State::Idle(Some((chunk, std_iter, more)))))
    })
    .await
}
/// Reads the entries in a directory.
///
/// This struct is returned from the [`read_dir`] function of this module and
/// will yield instances of [`DirEntry`]. Through a [`DirEntry`] information
/// like the entry's path and possibly other metadata can be learned.
///
/// A `ReadDir` can be turned into a `Stream` with [`ReadDirStream`].
///
/// [`ReadDirStream`]: https://docs.rs/tokio-stream/0.1/tokio_stream/wrappers/struct.ReadDirStream.html
///
/// # Errors
///
/// This stream will return an [`Err`] if there's some sort of intermittent
/// IO error during iteration.
///
/// [`read_dir`]: read_dir
/// [`DirEntry`]: DirEntry
/// [`Err`]: std::result::Result::Err
#[derive(Debug)]
#[must_use = "streams do nothing unless polled"]
pub struct ReadDir(State);

#[derive(Debug)]
enum State {
    // Not currently fetching. Holds the buffered chunk of entries, the
    // underlying std iterator, and a flag for whether more entries may
    // remain (set to `false` once `next_chunk` exhausts the iterator).
    // The Option is `take`n while transitioning to Pending.
    Idle(Option<(VecDeque<io::Result<DirEntry>>, std::fs::ReadDir, bool)>),
    // A spawn_blocking task is fetching the next chunk of entries.
    Pending(JoinHandle<(VecDeque<io::Result<DirEntry>>, std::fs::ReadDir, bool)>),
}
impl ReadDir {
/// Returns the next entry in the directory stream.
///
/// # Cancel safety
///
/// This method is cancellation safe.
pub async fn next_entry(&mut self) -> io::Result<Option<DirEntry>> {
use std::future::poll_fn;
poll_fn(|cx| self.poll_next_entry(cx)).await
}
/// Polls for the next directory entry in the stream.
///
/// This method returns:
///
/// * `Poll::Pending` if the next directory entry is not yet available.
/// * `Poll::Ready(Ok(Some(entry)))` if the next directory entry is available.
/// * `Poll::Ready(Ok(None))` if there are no more directory entries in this
/// stream.
/// * `Poll::Ready(Err(err))` if an IO error occurred while reading the next
/// directory entry.
///
/// When the method returns `Poll::Pending`, the `Waker` in the provided
/// `Context` is scheduled to receive a wakeup when the next directory entry
/// becomes available on the underlying IO resource.
///
/// Note that on multiple calls to `poll_next_entry`, only the `Waker` from
/// the `Context` passed to the most recent call is scheduled to receive a
/// wakeup.
    pub fn poll_next_entry(&mut self, cx: &mut Context<'_>) -> Poll<io::Result<Option<DirEntry>>> {
        loop {
            match self.0 {
                State::Idle(ref mut data) => {
                    let (buf, _, ref remain) = data.as_mut().unwrap();

                    // Serve entries from the buffered chunk first.
                    if let Some(ent) = buf.pop_front() {
                        return Poll::Ready(ent.map(Some));
                    } else if !remain {
                        // Chunk drained and the underlying iterator is
                        // exhausted: the stream is finished.
                        return Poll::Ready(Ok(None));
                    }

                    // Chunk drained but more entries may remain: fetch the
                    // next chunk on the blocking pool and go Pending.
                    let (mut buf, mut std, _) = data.take().unwrap();
                    self.0 = State::Pending(spawn_blocking(move || {
                        let remain = ReadDir::next_chunk(&mut buf, &mut std);
                        (buf, std, remain)
                    }));
                }
                State::Pending(ref mut rx) => {
                    // Wait for the in-flight chunk, then loop around to
                    // serve entries out of it.
                    self.0 = State::Idle(Some(ready!(Pin::new(rx).poll(cx))?));
                }
            }
        }
    }
    // Pulls up to CHUNK_SIZE entries from the blocking iterator into `buf`.
    // Returns `false` once the iterator is exhausted, `true` if more
    // entries may remain.
    fn next_chunk(buf: &mut VecDeque<io::Result<DirEntry>>, std: &mut std::fs::ReadDir) -> bool {
        for _ in 0..CHUNK_SIZE {
            let ret = match std.next() {
                Some(ret) => ret,
                None => return false,
            };

            let success = ret.is_ok();

            buf.push_back(ret.map(|std| DirEntry {
                // NOTE(review): this target list mirrors the cfg on the
                // `DirEntry::file_type` field; presumably these platforms
                // cannot report the file type from the directory entry —
                // confirm against std's platform support.
                #[cfg(not(any(
                    target_os = "solaris",
                    target_os = "illumos",
                    target_os = "haiku",
                    target_os = "vxworks",
                    target_os = "aix",
                    target_os = "nto",
                    target_os = "vita",
                )))]
                file_type: std.file_type().ok(),
                std: Arc::new(std),
            }));

            // Stop buffering after an error so it is surfaced promptly to
            // the caller instead of being queued behind later entries.
            if !success {
                break;
            }
        }

        true
    }
}
feature! {
    #![unix]

    use std::os::unix::fs::DirEntryExt;

    impl DirEntry {
        /// Returns the underlying `d_ino` field in the contained `dirent`
        /// structure.
        ///
        /// # Examples
        ///
        /// ```
        /// use tokio::fs;
        ///
        /// # #[tokio::main]
        /// # async fn main() -> std::io::Result<()> {
        /// let mut entries = fs::read_dir(".").await?;
        /// while let Some(entry) = entries.next_entry().await? {
        ///     // Here, `entry` is a `DirEntry`.
        ///     println!("{:?}: {}", entry.file_name(), entry.ino());
        /// }
        /// # Ok(())
        /// # }
        /// ```
        pub fn ino(&self) -> u64 {
            // Unix-only: delegates to std's DirEntryExt for the raw inode.
            self.as_inner().ino()
        }
    }
}
/// Entries returned by the [`ReadDir`] stream.
///
/// [`ReadDir`]: struct@ReadDir
///
/// This is a specialized version of [`std::fs::DirEntry`] for usage from the
/// Tokio runtime.
///
/// An instance of `DirEntry` represents an entry inside of a directory on the
/// filesystem. Each entry can be inspected via methods to learn about the full
/// path or possibly other metadata through per-platform extension traits.
#[derive(Debug)]
pub struct DirEntry {
    // File type captured eagerly when the entry was read (see
    // `ReadDir::next_chunk`), on platforms where std can provide it from
    // the directory entry itself; `None` means it must be looked up
    // lazily via the std entry.
    #[cfg(not(any(
        target_os = "solaris",
        target_os = "illumos",
        target_os = "haiku",
        target_os = "vxworks",
        target_os = "aix",
        target_os = "nto",
        target_os = "vita",
    )))]
    file_type: Option<FileType>,
    // Shared handle to the underlying std entry; `Arc` so `metadata` and
    // `file_type` can move a clone into the blocking pool.
    std: Arc<std::fs::DirEntry>,
}
impl DirEntry {
/// Returns the full path to the file that this entry represents.
///
/// The full path is created by joining the original path to `read_dir`
/// with the filename of this entry.
///
/// # Examples
///
/// ```no_run
/// use tokio::fs;
///
/// # async fn dox() -> std::io::Result<()> {
/// let mut entries = fs::read_dir(".").await?;
///
/// while let Some(entry) = entries.next_entry().await? {
/// println!("{:?}", entry.path());
/// }
/// # Ok(())
/// # }
/// ```
///
/// This prints output like:
///
/// ```text
/// "./whatever.txt"
/// "./foo.html"
/// "./hello_world.rs"
/// ```
///
/// The exact text, of course, depends on what files you have in `.`.
pub fn path(&self) -> PathBuf {
self.std.path()
}
/// Returns the bare file name of this directory entry without any other
/// leading path component.
///
/// # Examples
///
/// ```
/// use tokio::fs;
///
/// # async fn dox() -> std::io::Result<()> {
/// let mut entries = fs::read_dir(".").await?;
///
/// while let Some(entry) = entries.next_entry().await? {
/// println!("{:?}", entry.file_name());
/// }
/// # Ok(())
/// # }
/// ```
pub fn file_name(&self) -> OsString {
self.std.file_name()
}
/// Returns the metadata for the file that this entry points at.
///
/// This function will not traverse symlinks if this entry points at a
/// symlink.
///
/// # Platform-specific behavior
///
/// On Windows this function is cheap to call (no extra system calls
/// needed), but on Unix platforms this function is the equivalent of
/// calling `symlink_metadata` on the path.
///
/// # Examples
///
/// ```
/// use tokio::fs;
///
/// # async fn dox() -> std::io::Result<()> {
/// let mut entries = fs::read_dir(".").await?;
///
/// while let Some(entry) = entries.next_entry().await? {
/// if let Ok(metadata) = entry.metadata().await {
/// // Now let's show our entry's permissions!
/// println!("{:?}: {:?}", entry.path(), metadata.permissions());
/// } else {
/// println!("Couldn't get file type for {:?}", entry.path());
/// }
/// }
/// # Ok(())
/// # }
/// ```
pub async fn metadata(&self) -> io::Result<Metadata> {
let std = self.std.clone();
asyncify(move || std.metadata()).await
}
/// Returns the file type for the file that this entry points at.
///
/// This function will not traverse symlinks if this entry points at a
/// symlink.
///
/// # Platform-specific behavior
///
/// On Windows and most Unix platforms this function is free (no extra
/// system calls needed), but some Unix platforms may require the equivalent
/// call to `symlink_metadata` to learn about the target file type.
///
/// # Examples
///
/// ```
/// use tokio::fs;
///
/// # async fn dox() -> std::io::Result<()> {
/// let mut entries = fs::read_dir(".").await?;
///
/// while let Some(entry) = entries.next_entry().await? {
/// if let Ok(file_type) = entry.file_type().await {
/// // Now let's show our entry's file type!
/// println!("{:?}: {:?}", entry.path(), file_type);
/// } else {
/// println!("Couldn't get file type for {:?}", entry.path());
/// }
/// }
/// # Ok(())
/// # }
/// ```
    pub async fn file_type(&self) -> io::Result<FileType> {
        // Fast path: use the file type cached when the entry was read,
        // where the platform provides it.
        #[cfg(not(any(
            target_os = "solaris",
            target_os = "illumos",
            target_os = "haiku",
            target_os = "vxworks",
            target_os = "aix",
            target_os = "nto",
            target_os = "vita",
        )))]
        if let Some(file_type) = self.file_type {
            return Ok(file_type);
        }

        // Slow path: query the filesystem on the blocking thread pool.
        let std = self.std.clone();
        asyncify(move || std.file_type()).await
    }
    /// Returns a reference to the underlying `std::fs::DirEntry`.
    // Only used by the unix-only `ino` accessor defined in the
    // `feature!` block above.
    #[cfg(unix)]
    pub(super) fn as_inner(&self) -> &std::fs::DirEntry {
        &self.std
    }
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/fs/mocks.rs | tokio/src/fs/mocks.rs | //! Mock version of std::fs::File;
use mockall::mock;
use crate::sync::oneshot;
#[cfg(all(test, unix))]
use std::os::fd::{AsRawFd, FromRawFd, OwnedFd};
use std::{
cell::RefCell,
collections::VecDeque,
fs::{Metadata, Permissions},
future::Future,
io::{self, Read, Seek, SeekFrom, Write},
path::PathBuf,
pin::Pin,
task::{Context, Poll},
};
mock! {
    #[derive(Debug)]
    pub File {
        pub fn create(pb: PathBuf) -> io::Result<Self>;
        // These inner_ methods exist because std::fs::File has two
        // implementations for each of these methods: one on "&mut self" and
        // one on "&&self". Defining both of those in terms of an inner_ method
        // allows us to specify the expectation the same way, regardless of
        // which method is used.
        pub fn inner_flush(&self) -> io::Result<()>;
        pub fn inner_read(&self, dst: &mut [u8]) -> io::Result<usize>;
        pub fn inner_seek(&self, pos: SeekFrom) -> io::Result<u64>;
        pub fn inner_write(&self, src: &[u8]) -> io::Result<usize>;
        pub fn metadata(&self) -> io::Result<Metadata>;
        pub fn open(pb: PathBuf) -> io::Result<Self>;
        pub fn set_len(&self, size: u64) -> io::Result<()>;
        pub fn set_permissions(&self, _perm: Permissions) -> io::Result<()>;
        pub fn set_max_buf_size(&self, max_buf_size: usize);
        pub fn sync_all(&self) -> io::Result<()>;
        pub fn sync_data(&self) -> io::Result<()>;
        pub fn try_clone(&self) -> io::Result<Self>;
    }

    // Raw-handle conversions so the mock can stand in for std::fs::File
    // in platform-specific code paths.
    #[cfg(windows)]
    impl std::os::windows::io::AsRawHandle for File {
        fn as_raw_handle(&self) -> std::os::windows::io::RawHandle;
    }
    #[cfg(windows)]
    impl std::os::windows::io::FromRawHandle for File {
        unsafe fn from_raw_handle(h: std::os::windows::io::RawHandle) -> Self;
    }

    #[cfg(unix)]
    impl std::os::unix::io::AsRawFd for File {
        fn as_raw_fd(&self) -> std::os::unix::io::RawFd;
    }
    #[cfg(unix)]
    impl std::os::unix::io::FromRawFd for File {
        unsafe fn from_raw_fd(h: std::os::unix::io::RawFd) -> Self;
    }
}
impl Read for MockFile {
    fn read(&mut self, dst: &mut [u8]) -> io::Result<usize> {
        // Placate Miri. Tokio will call this method with an uninitialized
        // buffer, which is ok because std::io::Read::read implementations don't usually read
        // from their input buffers. But Mockall 0.12-0.13 will try to Debug::fmt the
        // buffer, even if there is no failure, triggering an uninitialized data access alert from
        // Miri. Initialize the data here just to prevent those Miri alerts.
        // This can be removed after upgrading to Mockall 0.14.
        dst.fill(0);
        self.inner_read(dst)
    }
}

// Shared-reference impls mirror std::fs::File's "&&self" methods (see the
// inner_ comment in the mock! definition above); both delegate to the same
// inner_ expectations.
impl Read for &'_ MockFile {
    fn read(&mut self, dst: &mut [u8]) -> io::Result<usize> {
        // Placate Miri. Tokio will call this method with an uninitialized
        // buffer, which is ok because std::io::Read::read implementations don't usually read
        // from their input buffers. But Mockall 0.12-0.13 will try to Debug::fmt the
        // buffer, even if there is no failure, triggering an uninitialized data access alert from
        // Miri. Initialize the data here just to prevent those Miri alerts.
        // This can be removed after upgrading to Mockall 0.14.
        dst.fill(0);
        self.inner_read(dst)
    }
}

impl Seek for &'_ MockFile {
    fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
        self.inner_seek(pos)
    }
}

impl Write for &'_ MockFile {
    fn write(&mut self, src: &[u8]) -> io::Result<usize> {
        self.inner_write(src)
    }

    fn flush(&mut self) -> io::Result<()> {
        self.inner_flush()
    }
}
#[cfg(all(test, unix))]
impl From<MockFile> for OwnedFd {
    #[inline]
    fn from(file: MockFile) -> OwnedFd {
        // SAFETY(review): test-only shim; assumes the value returned by the
        // mock's `as_raw_fd` expectation may be treated as an owned
        // descriptor here — confirm tests never hand out a live fd that is
        // also closed elsewhere.
        unsafe { OwnedFd::from_raw_fd(file.as_raw_fd()) }
    }
}
tokio_thread_local! {
    // FIFO of queued "blocking" tasks. Tests drain it explicitly via
    // `pool::run_one`, making scheduling deterministic.
    static QUEUE: RefCell<VecDeque<Box<dyn FnOnce() + Send>>> = RefCell::new(VecDeque::new())
}

// Mock of the runtime's JoinHandle: resolves once the queued task has run
// and sent its result over the oneshot channel.
#[derive(Debug)]
pub(super) struct JoinHandle<T> {
    rx: oneshot::Receiver<T>,
}
/// Mock `spawn_blocking`: enqueues the closure on the thread-local QUEUE
/// instead of running it, and returns a handle that resolves once a test
/// drives the queue via `pool::run_one`.
pub(super) fn spawn_blocking<F, R>(f: F) -> JoinHandle<R>
where
    F: FnOnce() -> R + Send + 'static,
    R: Send + 'static,
{
    let (tx, rx) = oneshot::channel();

    QUEUE.with(|cell| {
        cell.borrow_mut().push_back(Box::new(move || {
            // The receiver may have been dropped; ignore send failures.
            let _ = tx.send(f());
        }))
    });

    JoinHandle { rx }
}
/// Mock `spawn_mandatory_blocking`: identical to the mock `spawn_blocking`
/// except for the `Option` wrapper — in the mock, "mandatory" spawning
/// always succeeds.
pub(super) fn spawn_mandatory_blocking<F, R>(f: F) -> Option<JoinHandle<R>>
where
    F: FnOnce() -> R + Send + 'static,
    R: Send + 'static,
{
    let (tx, rx) = oneshot::channel();

    QUEUE.with(|cell| {
        cell.borrow_mut().push_back(Box::new(move || {
            // The receiver may have been dropped; ignore send failures.
            let _ = tx.send(f());
        }))
    });

    Some(JoinHandle { rx })
}
impl<T> Future for JoinHandle<T> {
    type Output = Result<T, io::Error>;

    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        // Completes when the queued task sends its result; a dropped
        // sender (task never ran) is a test bug, so panic loudly.
        match Pin::new(&mut self.rx).poll(cx) {
            Poll::Pending => Poll::Pending,
            Poll::Ready(Ok(value)) => Poll::Ready(Ok(value)),
            Poll::Ready(Err(e)) => panic!("error = {e:?}"),
        }
    }
}
pub(super) mod pool {
    use super::*;

    /// Number of mock blocking tasks currently queued.
    pub(in super::super) fn len() -> usize {
        QUEUE.with(|cell| cell.borrow().len())
    }

    /// Pops the oldest queued task and runs it; panics if none is queued.
    pub(in super::super) fn run_one() {
        let next = QUEUE.with(|cell| cell.borrow_mut().pop_front());
        match next {
            Some(task) => task(),
            None => panic!("expected task to run, but none ready"),
        }
    }
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/fs/metadata.rs | tokio/src/fs/metadata.rs | use crate::fs::asyncify;
use std::fs::Metadata;
use std::io;
use std::path::Path;
/// Given a path, queries the file system to get information about a file,
/// directory, etc.
///
/// This is an async version of [`std::fs::metadata`].
///
/// This function will traverse symbolic links to query information about the
/// destination file.
///
/// # Platform-specific behavior
///
/// This function currently corresponds to the `stat` function on Unix and the
/// `GetFileAttributesEx` function on Windows. Note that, this [may change in
/// the future][changes].
///
/// [changes]: https://doc.rust-lang.org/std/io/index.html#platform-specific-behavior
///
/// # Errors
///
/// This function will return an error in the following situations, but is not
/// limited to just these cases:
///
/// * The user lacks permissions to perform `metadata` call on `path`.
/// * `path` does not exist.
///
/// # Examples
///
/// ```rust,no_run
/// use tokio::fs;
///
/// #[tokio::main]
/// async fn main() -> std::io::Result<()> {
/// let attr = fs::metadata("/some/file/path.txt").await?;
/// // inspect attr ...
/// Ok(())
/// }
/// ```
pub async fn metadata(path: impl AsRef<Path>) -> io::Result<Metadata> {
    // Owned copy so the path can move into the 'static blocking closure.
    let owned = path.as_ref().to_owned();
    asyncify(move || std::fs::metadata(owned)).await
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/fs/symlink_dir.rs | tokio/src/fs/symlink_dir.rs | use crate::fs::asyncify;
use std::io;
use std::path::Path;
/// Creates a new directory symlink on the filesystem.
///
/// The `link` path will be a directory symbolic link pointing to the `original`
/// path.
///
/// This is an async version of [`std::os::windows::fs::symlink_dir`][std]
///
/// [std]: https://doc.rust-lang.org/std/os/windows/fs/fn.symlink_dir.html
pub async fn symlink_dir(original: impl AsRef<Path>, link: impl AsRef<Path>) -> io::Result<()> {
    // Take owned copies so the paths can move into the 'static closure.
    let (target, link_path) = (original.as_ref().to_owned(), link.as_ref().to_owned());
    asyncify(move || std::os::windows::fs::symlink_dir(target, link_path)).await
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/fs/file/tests.rs | tokio/src/fs/file/tests.rs | use super::*;
use crate::{
fs::mocks::*,
io::{AsyncReadExt, AsyncSeekExt, AsyncWriteExt},
};
use mockall::{predicate::eq, Sequence};
use tokio_test::{assert_pending, assert_ready_err, assert_ready_ok, task};
const HELLO: &[u8] = b"hello world...";
const FOO: &[u8] = b"foo bar baz...";
#[test]
fn open_read() {
    // One poll enqueues the blocking read; draining the mock pool
    // completes it, wakes the task, and delivers the mock's bytes.
    let mut file = MockFile::default();
    file.expect_inner_read().once().returning(|buf| {
        buf[0..HELLO.len()].copy_from_slice(HELLO);
        Ok(HELLO.len())
    });
    let mut file = File::from_std(file);

    let mut buf = [0; 1024];
    let mut t = task::spawn(file.read(&mut buf));

    // Nothing is queued until the future is polled once.
    assert_eq!(0, pool::len());
    assert_pending!(t.poll());
    assert_eq!(1, pool::len());

    pool::run_one();
    assert!(t.is_woken());

    let n = assert_ready_ok!(t.poll());
    assert_eq!(n, HELLO.len());
    assert_eq!(&buf[..n], HELLO);
}
#[test]
fn read_twice_before_dispatch() {
    // Polling the read future twice before the pool runs must not enqueue
    // a second blocking task: the in-flight operation is reused.
    let mut file = MockFile::default();
    file.expect_inner_read().once().returning(|buf| {
        buf[0..HELLO.len()].copy_from_slice(HELLO);
        Ok(HELLO.len())
    });
    let mut file = File::from_std(file);

    let mut buf = [0; 1024];
    let mut t = task::spawn(file.read(&mut buf));

    assert_pending!(t.poll());
    assert_pending!(t.poll());

    // Still only one queued task despite the double poll.
    assert_eq!(pool::len(), 1);
    pool::run_one();

    assert!(t.is_woken());

    let n = assert_ready_ok!(t.poll());
    assert_eq!(&buf[..n], HELLO);
}
#[test]
fn read_with_smaller_buf() {
    // Start a read, drop its future, then read with a smaller buffer:
    // the bytes fetched by the first operation are served from the file's
    // internal buffer across subsequent reads, with no new blocking call.
    let mut file = MockFile::default();
    file.expect_inner_read().once().returning(|buf| {
        buf[0..HELLO.len()].copy_from_slice(HELLO);
        Ok(HELLO.len())
    });

    let mut file = File::from_std(file);

    {
        let mut buf = [0; 32];
        let mut t = task::spawn(file.read(&mut buf));
        assert_pending!(t.poll());
    }

    // The blocking read completes after the original future was dropped.
    pool::run_one();

    {
        // A 4-byte buffer receives only the first 4 buffered bytes.
        let mut buf = [0; 4];
        let mut t = task::spawn(file.read(&mut buf));
        let n = assert_ready_ok!(t.poll());
        assert_eq!(n, 4);
        assert_eq!(&buf[..], &HELLO[..n]);
    }

    // Calling again immediately succeeds with the rest of the buffer
    let mut buf = [0; 32];
    let mut t = task::spawn(file.read(&mut buf));
    let n = assert_ready_ok!(t.poll());
    assert_eq!(n, 10);
    assert_eq!(&buf[..n], &HELLO[4..]);

    assert_eq!(0, pool::len());
}
#[test]
fn read_with_bigger_buf() {
let mut seq = Sequence::new();
let mut file = MockFile::default();
file.expect_inner_read()
.once()
.in_sequence(&mut seq)
.returning(|buf| {
buf[0..4].copy_from_slice(&HELLO[..4]);
Ok(4)
});
file.expect_inner_read()
.once()
.in_sequence(&mut seq)
.returning(|buf| {
buf[0..HELLO.len() - 4].copy_from_slice(&HELLO[4..]);
Ok(HELLO.len() - 4)
});
let mut file = File::from_std(file);
{
let mut buf = [0; 4];
let mut t = task::spawn(file.read(&mut buf));
assert_pending!(t.poll());
}
pool::run_one();
{
let mut buf = [0; 32];
let mut t = task::spawn(file.read(&mut buf));
let n = assert_ready_ok!(t.poll());
assert_eq!(n, 4);
assert_eq!(&buf[..n], &HELLO[..n]);
}
// Calling again immediately succeeds with the rest of the buffer
let mut buf = [0; 32];
let mut t = task::spawn(file.read(&mut buf));
assert_pending!(t.poll());
assert_eq!(1, pool::len());
pool::run_one();
assert!(t.is_woken());
let n = assert_ready_ok!(t.poll());
assert_eq!(n, 10);
assert_eq!(&buf[..n], &HELLO[4..]);
assert_eq!(0, pool::len());
}
#[test]
fn read_err_then_read_success() {
let mut file = MockFile::default();
let mut seq = Sequence::new();
file.expect_inner_read()
.once()
.in_sequence(&mut seq)
.returning(|_| Err(io::ErrorKind::Other.into()));
file.expect_inner_read()
.once()
.in_sequence(&mut seq)
.returning(|buf| {
buf[0..HELLO.len()].copy_from_slice(HELLO);
Ok(HELLO.len())
});
let mut file = File::from_std(file);
{
let mut buf = [0; 32];
let mut t = task::spawn(file.read(&mut buf));
assert_pending!(t.poll());
pool::run_one();
assert_ready_err!(t.poll());
}
{
let mut buf = [0; 32];
let mut t = task::spawn(file.read(&mut buf));
assert_pending!(t.poll());
pool::run_one();
let n = assert_ready_ok!(t.poll());
assert_eq!(n, HELLO.len());
assert_eq!(&buf[..n], HELLO);
}
}
#[test]
fn open_write() {
let mut file = MockFile::default();
file.expect_inner_write()
.once()
.with(eq(HELLO))
.returning(|buf| Ok(buf.len()));
let mut file = File::from_std(file);
let mut t = task::spawn(file.write(HELLO));
assert_eq!(0, pool::len());
assert_ready_ok!(t.poll());
assert_eq!(1, pool::len());
pool::run_one();
assert!(!t.is_woken());
let mut t = task::spawn(file.flush());
assert_ready_ok!(t.poll());
}
#[test]
fn flush_while_idle() {
let file = MockFile::default();
let mut file = File::from_std(file);
let mut t = task::spawn(file.flush());
assert_ready_ok!(t.poll());
}
#[test]
#[cfg_attr(miri, ignore)] // takes a really long time with miri
fn read_with_buffer_larger_than_max() {
// Chunks
let chunk_a = crate::io::blocking::DEFAULT_MAX_BUF_SIZE;
let chunk_b = chunk_a * 2;
let chunk_c = chunk_a * 3;
let chunk_d = chunk_a * 4;
assert_eq!(chunk_d / 1024 / 1024, 8);
let mut data = vec![];
for i in 0..(chunk_d - 1) {
data.push((i % 151) as u8);
}
let data = Arc::new(data);
let d0 = data.clone();
let d1 = data.clone();
let d2 = data.clone();
let d3 = data.clone();
let mut seq = Sequence::new();
let mut file = MockFile::default();
file.expect_inner_read()
.once()
.in_sequence(&mut seq)
.returning(move |buf| {
buf[0..chunk_a].copy_from_slice(&d0[0..chunk_a]);
Ok(chunk_a)
});
file.expect_inner_read()
.once()
.in_sequence(&mut seq)
.returning(move |buf| {
buf[..chunk_a].copy_from_slice(&d1[chunk_a..chunk_b]);
Ok(chunk_b - chunk_a)
});
file.expect_inner_read()
.once()
.in_sequence(&mut seq)
.returning(move |buf| {
buf[..chunk_a].copy_from_slice(&d2[chunk_b..chunk_c]);
Ok(chunk_c - chunk_b)
});
file.expect_inner_read()
.once()
.in_sequence(&mut seq)
.returning(move |buf| {
buf[..chunk_a - 1].copy_from_slice(&d3[chunk_c..]);
Ok(chunk_a - 1)
});
let mut file = File::from_std(file);
let mut actual = vec![0; chunk_d];
let mut pos = 0;
while pos < data.len() {
let mut t = task::spawn(file.read(&mut actual[pos..]));
assert_pending!(t.poll());
pool::run_one();
assert!(t.is_woken());
let n = assert_ready_ok!(t.poll());
assert!(n <= chunk_a);
pos += n;
}
assert_eq!(&data[..], &actual[..data.len()]);
}
#[test]
#[cfg_attr(miri, ignore)] // takes a really long time with miri
fn write_with_buffer_larger_than_max() {
// Chunks
let chunk_a = crate::io::blocking::DEFAULT_MAX_BUF_SIZE;
let chunk_b = chunk_a * 2;
let chunk_c = chunk_a * 3;
let chunk_d = chunk_a * 4;
assert_eq!(chunk_d / 1024 / 1024, 8);
let mut data = vec![];
for i in 0..(chunk_d - 1) {
data.push((i % 151) as u8);
}
let data = Arc::new(data);
let d0 = data.clone();
let d1 = data.clone();
let d2 = data.clone();
let d3 = data.clone();
let mut file = MockFile::default();
let mut seq = Sequence::new();
file.expect_inner_write()
.once()
.in_sequence(&mut seq)
.withf(move |buf| buf == &d0[0..chunk_a])
.returning(|buf| Ok(buf.len()));
file.expect_inner_write()
.once()
.in_sequence(&mut seq)
.withf(move |buf| buf == &d1[chunk_a..chunk_b])
.returning(|buf| Ok(buf.len()));
file.expect_inner_write()
.once()
.in_sequence(&mut seq)
.withf(move |buf| buf == &d2[chunk_b..chunk_c])
.returning(|buf| Ok(buf.len()));
file.expect_inner_write()
.once()
.in_sequence(&mut seq)
.withf(move |buf| buf == &d3[chunk_c..chunk_d - 1])
.returning(|buf| Ok(buf.len()));
let mut file = File::from_std(file);
let mut rem = &data[..];
let mut first = true;
while !rem.is_empty() {
let mut task = task::spawn(file.write(rem));
if !first {
assert_pending!(task.poll());
pool::run_one();
assert!(task.is_woken());
}
first = false;
let n = assert_ready_ok!(task.poll());
rem = &rem[n..];
}
pool::run_one();
}
#[test]
fn write_twice_before_dispatch() {
let mut file = MockFile::default();
let mut seq = Sequence::new();
file.expect_inner_write()
.once()
.in_sequence(&mut seq)
.with(eq(HELLO))
.returning(|buf| Ok(buf.len()));
file.expect_inner_write()
.once()
.in_sequence(&mut seq)
.with(eq(FOO))
.returning(|buf| Ok(buf.len()));
let mut file = File::from_std(file);
let mut t = task::spawn(file.write(HELLO));
assert_ready_ok!(t.poll());
let mut t = task::spawn(file.write(FOO));
assert_pending!(t.poll());
assert_eq!(pool::len(), 1);
pool::run_one();
assert!(t.is_woken());
assert_ready_ok!(t.poll());
let mut t = task::spawn(file.flush());
assert_pending!(t.poll());
assert_eq!(pool::len(), 1);
pool::run_one();
assert!(t.is_woken());
assert_ready_ok!(t.poll());
}
#[test]
fn incomplete_read_followed_by_write() {
let mut file = MockFile::default();
let mut seq = Sequence::new();
file.expect_inner_read()
.once()
.in_sequence(&mut seq)
.returning(|buf| {
buf[0..HELLO.len()].copy_from_slice(HELLO);
Ok(HELLO.len())
});
file.expect_inner_seek()
.once()
.with(eq(SeekFrom::Current(-(HELLO.len() as i64))))
.in_sequence(&mut seq)
.returning(|_| Ok(0));
file.expect_inner_write()
.once()
.with(eq(FOO))
.returning(|_| Ok(FOO.len()));
let mut file = File::from_std(file);
let mut buf = [0; 32];
let mut t = task::spawn(file.read(&mut buf));
assert_pending!(t.poll());
pool::run_one();
let mut t = task::spawn(file.write(FOO));
assert_ready_ok!(t.poll());
assert_eq!(pool::len(), 1);
pool::run_one();
let mut t = task::spawn(file.flush());
assert_ready_ok!(t.poll());
}
#[test]
fn incomplete_partial_read_followed_by_write() {
let mut file = MockFile::default();
let mut seq = Sequence::new();
file.expect_inner_read()
.once()
.in_sequence(&mut seq)
.returning(|buf| {
buf[0..HELLO.len()].copy_from_slice(HELLO);
Ok(HELLO.len())
});
file.expect_inner_seek()
.once()
.in_sequence(&mut seq)
.with(eq(SeekFrom::Current(-10)))
.returning(|_| Ok(0));
file.expect_inner_write()
.once()
.in_sequence(&mut seq)
.with(eq(FOO))
.returning(|_| Ok(FOO.len()));
let mut file = File::from_std(file);
let mut buf = [0; 32];
let mut t = task::spawn(file.read(&mut buf));
assert_pending!(t.poll());
pool::run_one();
let mut buf = [0; 4];
let mut t = task::spawn(file.read(&mut buf));
assert_ready_ok!(t.poll());
let mut t = task::spawn(file.write(FOO));
assert_ready_ok!(t.poll());
assert_eq!(pool::len(), 1);
pool::run_one();
let mut t = task::spawn(file.flush());
assert_ready_ok!(t.poll());
}
#[test]
fn incomplete_read_followed_by_flush() {
let mut file = MockFile::default();
let mut seq = Sequence::new();
file.expect_inner_read()
.once()
.in_sequence(&mut seq)
.returning(|buf| {
buf[0..HELLO.len()].copy_from_slice(HELLO);
Ok(HELLO.len())
});
file.expect_inner_seek()
.once()
.in_sequence(&mut seq)
.with(eq(SeekFrom::Current(-(HELLO.len() as i64))))
.returning(|_| Ok(0));
file.expect_inner_write()
.once()
.in_sequence(&mut seq)
.with(eq(FOO))
.returning(|_| Ok(FOO.len()));
let mut file = File::from_std(file);
let mut buf = [0; 32];
let mut t = task::spawn(file.read(&mut buf));
assert_pending!(t.poll());
pool::run_one();
let mut t = task::spawn(file.flush());
assert_ready_ok!(t.poll());
let mut t = task::spawn(file.write(FOO));
assert_ready_ok!(t.poll());
pool::run_one();
}
#[test]
fn incomplete_flush_followed_by_write() {
let mut file = MockFile::default();
let mut seq = Sequence::new();
file.expect_inner_write()
.once()
.in_sequence(&mut seq)
.with(eq(HELLO))
.returning(|_| Ok(HELLO.len()));
file.expect_inner_write()
.once()
.in_sequence(&mut seq)
.with(eq(FOO))
.returning(|_| Ok(FOO.len()));
let mut file = File::from_std(file);
let mut t = task::spawn(file.write(HELLO));
let n = assert_ready_ok!(t.poll());
assert_eq!(n, HELLO.len());
let mut t = task::spawn(file.flush());
assert_pending!(t.poll());
// TODO: Move under write
pool::run_one();
let mut t = task::spawn(file.write(FOO));
assert_ready_ok!(t.poll());
pool::run_one();
let mut t = task::spawn(file.flush());
assert_ready_ok!(t.poll());
}
#[test]
fn read_err() {
let mut file = MockFile::default();
file.expect_inner_read()
.once()
.returning(|_| Err(io::ErrorKind::Other.into()));
let mut file = File::from_std(file);
let mut buf = [0; 1024];
let mut t = task::spawn(file.read(&mut buf));
assert_pending!(t.poll());
pool::run_one();
assert!(t.is_woken());
assert_ready_err!(t.poll());
}
#[test]
fn write_write_err() {
let mut file = MockFile::default();
file.expect_inner_write()
.once()
.returning(|_| Err(io::ErrorKind::Other.into()));
let mut file = File::from_std(file);
let mut t = task::spawn(file.write(HELLO));
assert_ready_ok!(t.poll());
pool::run_one();
let mut t = task::spawn(file.write(FOO));
assert_ready_err!(t.poll());
}
#[test]
fn write_read_write_err() {
let mut file = MockFile::default();
let mut seq = Sequence::new();
file.expect_inner_write()
.once()
.in_sequence(&mut seq)
.returning(|_| Err(io::ErrorKind::Other.into()));
file.expect_inner_read()
.once()
.in_sequence(&mut seq)
.returning(|buf| {
buf[0..HELLO.len()].copy_from_slice(HELLO);
Ok(HELLO.len())
});
let mut file = File::from_std(file);
let mut t = task::spawn(file.write(HELLO));
assert_ready_ok!(t.poll());
pool::run_one();
let mut buf = [0; 1024];
let mut t = task::spawn(file.read(&mut buf));
assert_pending!(t.poll());
pool::run_one();
let mut t = task::spawn(file.write(FOO));
assert_ready_err!(t.poll());
}
#[test]
fn write_read_flush_err() {
let mut file = MockFile::default();
let mut seq = Sequence::new();
file.expect_inner_write()
.once()
.in_sequence(&mut seq)
.returning(|_| Err(io::ErrorKind::Other.into()));
file.expect_inner_read()
.once()
.in_sequence(&mut seq)
.returning(|buf| {
buf[0..HELLO.len()].copy_from_slice(HELLO);
Ok(HELLO.len())
});
let mut file = File::from_std(file);
let mut t = task::spawn(file.write(HELLO));
assert_ready_ok!(t.poll());
pool::run_one();
let mut buf = [0; 1024];
let mut t = task::spawn(file.read(&mut buf));
assert_pending!(t.poll());
pool::run_one();
let mut t = task::spawn(file.flush());
assert_ready_err!(t.poll());
}
#[test]
fn write_seek_write_err() {
let mut file = MockFile::default();
let mut seq = Sequence::new();
file.expect_inner_write()
.once()
.in_sequence(&mut seq)
.returning(|_| Err(io::ErrorKind::Other.into()));
file.expect_inner_seek()
.once()
.with(eq(SeekFrom::Start(0)))
.in_sequence(&mut seq)
.returning(|_| Ok(0));
let mut file = File::from_std(file);
let mut t = task::spawn(file.write(HELLO));
assert_ready_ok!(t.poll());
pool::run_one();
{
let mut t = task::spawn(file.seek(SeekFrom::Start(0)));
assert_pending!(t.poll());
}
pool::run_one();
let mut t = task::spawn(file.write(FOO));
assert_ready_err!(t.poll());
}
#[test]
fn write_seek_flush_err() {
let mut file = MockFile::default();
let mut seq = Sequence::new();
file.expect_inner_write()
.once()
.in_sequence(&mut seq)
.returning(|_| Err(io::ErrorKind::Other.into()));
file.expect_inner_seek()
.once()
.with(eq(SeekFrom::Start(0)))
.in_sequence(&mut seq)
.returning(|_| Ok(0));
let mut file = File::from_std(file);
let mut t = task::spawn(file.write(HELLO));
assert_ready_ok!(t.poll());
pool::run_one();
{
let mut t = task::spawn(file.seek(SeekFrom::Start(0)));
assert_pending!(t.poll());
}
pool::run_one();
let mut t = task::spawn(file.flush());
assert_ready_err!(t.poll());
}
#[test]
fn sync_all_ordered_after_write() {
let mut file = MockFile::default();
let mut seq = Sequence::new();
file.expect_inner_write()
.once()
.in_sequence(&mut seq)
.with(eq(HELLO))
.returning(|_| Ok(HELLO.len()));
file.expect_sync_all().once().returning(|| Ok(()));
let mut file = File::from_std(file);
let mut t = task::spawn(file.write(HELLO));
assert_ready_ok!(t.poll());
let mut t = task::spawn(file.sync_all());
assert_pending!(t.poll());
assert_eq!(1, pool::len());
pool::run_one();
assert!(t.is_woken());
assert_pending!(t.poll());
assert_eq!(1, pool::len());
pool::run_one();
assert!(t.is_woken());
assert_ready_ok!(t.poll());
}
#[test]
fn sync_all_err_ordered_after_write() {
let mut file = MockFile::default();
let mut seq = Sequence::new();
file.expect_inner_write()
.once()
.in_sequence(&mut seq)
.with(eq(HELLO))
.returning(|_| Ok(HELLO.len()));
file.expect_sync_all()
.once()
.returning(|| Err(io::ErrorKind::Other.into()));
let mut file = File::from_std(file);
let mut t = task::spawn(file.write(HELLO));
assert_ready_ok!(t.poll());
let mut t = task::spawn(file.sync_all());
assert_pending!(t.poll());
assert_eq!(1, pool::len());
pool::run_one();
assert!(t.is_woken());
assert_pending!(t.poll());
assert_eq!(1, pool::len());
pool::run_one();
assert!(t.is_woken());
assert_ready_err!(t.poll());
}
#[test]
fn sync_data_ordered_after_write() {
let mut file = MockFile::default();
let mut seq = Sequence::new();
file.expect_inner_write()
.once()
.in_sequence(&mut seq)
.with(eq(HELLO))
.returning(|_| Ok(HELLO.len()));
file.expect_sync_data().once().returning(|| Ok(()));
let mut file = File::from_std(file);
let mut t = task::spawn(file.write(HELLO));
assert_ready_ok!(t.poll());
let mut t = task::spawn(file.sync_data());
assert_pending!(t.poll());
assert_eq!(1, pool::len());
pool::run_one();
assert!(t.is_woken());
assert_pending!(t.poll());
assert_eq!(1, pool::len());
pool::run_one();
assert!(t.is_woken());
assert_ready_ok!(t.poll());
}
#[test]
fn sync_data_err_ordered_after_write() {
let mut file = MockFile::default();
let mut seq = Sequence::new();
file.expect_inner_write()
.once()
.in_sequence(&mut seq)
.with(eq(HELLO))
.returning(|_| Ok(HELLO.len()));
file.expect_sync_data()
.once()
.returning(|| Err(io::ErrorKind::Other.into()));
let mut file = File::from_std(file);
let mut t = task::spawn(file.write(HELLO));
assert_ready_ok!(t.poll());
let mut t = task::spawn(file.sync_data());
assert_pending!(t.poll());
assert_eq!(1, pool::len());
pool::run_one();
assert!(t.is_woken());
assert_pending!(t.poll());
assert_eq!(1, pool::len());
pool::run_one();
assert!(t.is_woken());
assert_ready_err!(t.poll());
}
#[test]
fn open_set_len_ok() {
let mut file = MockFile::default();
file.expect_set_len().with(eq(123)).returning(|_| Ok(()));
let file = File::from_std(file);
let mut t = task::spawn(file.set_len(123));
assert_pending!(t.poll());
pool::run_one();
assert!(t.is_woken());
assert_ready_ok!(t.poll());
}
#[test]
fn open_set_len_err() {
let mut file = MockFile::default();
file.expect_set_len()
.with(eq(123))
.returning(|_| Err(io::ErrorKind::Other.into()));
let file = File::from_std(file);
let mut t = task::spawn(file.set_len(123));
assert_pending!(t.poll());
pool::run_one();
assert!(t.is_woken());
assert_ready_err!(t.poll());
}
#[test]
fn partial_read_set_len_ok() {
let mut file = MockFile::default();
let mut seq = Sequence::new();
file.expect_inner_read()
.once()
.in_sequence(&mut seq)
.returning(|buf| {
buf[0..HELLO.len()].copy_from_slice(HELLO);
Ok(HELLO.len())
});
file.expect_inner_seek()
.once()
.with(eq(SeekFrom::Current(-(HELLO.len() as i64))))
.in_sequence(&mut seq)
.returning(|_| Ok(0));
file.expect_set_len()
.once()
.in_sequence(&mut seq)
.with(eq(123))
.returning(|_| Ok(()));
file.expect_inner_read()
.once()
.in_sequence(&mut seq)
.returning(|buf| {
buf[0..FOO.len()].copy_from_slice(FOO);
Ok(FOO.len())
});
let mut buf = [0; 32];
let mut file = File::from_std(file);
{
let mut t = task::spawn(file.read(&mut buf));
assert_pending!(t.poll());
}
pool::run_one();
{
let mut t = task::spawn(file.set_len(123));
assert_pending!(t.poll());
pool::run_one();
assert_ready_ok!(t.poll());
}
let mut t = task::spawn(file.read(&mut buf));
assert_pending!(t.poll());
pool::run_one();
let n = assert_ready_ok!(t.poll());
assert_eq!(n, FOO.len());
assert_eq!(&buf[..n], FOO);
}
#[test]
fn busy_file_seek_error() {
let mut file = MockFile::default();
let mut seq = Sequence::new();
file.expect_inner_write()
.once()
.in_sequence(&mut seq)
.returning(|_| Err(io::ErrorKind::Other.into()));
let mut file = crate::io::BufReader::new(File::from_std(file));
{
let mut t = task::spawn(file.write(HELLO));
assert_ready_ok!(t.poll());
}
pool::run_one();
let mut t = task::spawn(file.seek(SeekFrom::Start(0)));
assert_ready_err!(t.poll());
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/fs/open_options/uring_open_options.rs | tokio/src/fs/open_options/uring_open_options.rs | use std::{io, os::unix::fs::OpenOptionsExt};
#[cfg(test)]
use super::mock_open_options::MockOpenOptions as StdOpenOptions;
#[cfg(not(test))]
use std::fs::OpenOptions as StdOpenOptions;
#[derive(Debug, Clone)]
pub(crate) struct UringOpenOptions {
pub(crate) read: bool,
pub(crate) write: bool,
pub(crate) append: bool,
pub(crate) truncate: bool,
pub(crate) create: bool,
pub(crate) create_new: bool,
pub(crate) mode: libc::mode_t,
pub(crate) custom_flags: libc::c_int,
}
impl UringOpenOptions {
pub(crate) fn new() -> Self {
Self {
read: false,
write: false,
append: false,
truncate: false,
create: false,
create_new: false,
mode: 0o666,
custom_flags: 0,
}
}
pub(crate) fn append(&mut self, append: bool) -> &mut Self {
self.append = append;
self
}
pub(crate) fn create(&mut self, create: bool) -> &mut Self {
self.create = create;
self
}
pub(crate) fn create_new(&mut self, create_new: bool) -> &mut Self {
self.create_new = create_new;
self
}
pub(crate) fn read(&mut self, read: bool) -> &mut Self {
self.read = read;
self
}
pub(crate) fn write(&mut self, write: bool) -> &mut Self {
self.write = write;
self
}
pub(crate) fn truncate(&mut self, truncate: bool) -> &mut Self {
self.truncate = truncate;
self
}
pub(crate) fn mode(&mut self, mode: u32) -> &mut Self {
self.mode = mode as libc::mode_t;
self
}
pub(crate) fn custom_flags(&mut self, flags: i32) -> &mut Self {
self.custom_flags = flags;
self
}
// Equivalent to https://github.com/rust-lang/rust/blob/64c81fd10509924ca4da5d93d6052a65b75418a5/library/std/src/sys/fs/unix.rs#L1118-L1127
pub(crate) fn access_mode(&self) -> io::Result<libc::c_int> {
match (self.read, self.write, self.append) {
(true, false, false) => Ok(libc::O_RDONLY),
(false, true, false) => Ok(libc::O_WRONLY),
(true, true, false) => Ok(libc::O_RDWR),
(false, _, true) => Ok(libc::O_WRONLY | libc::O_APPEND),
(true, _, true) => Ok(libc::O_RDWR | libc::O_APPEND),
(false, false, false) => Err(io::Error::from_raw_os_error(libc::EINVAL)),
}
}
// Equivalent to https://github.com/rust-lang/rust/blob/64c81fd10509924ca4da5d93d6052a65b75418a5/library/std/src/sys/fs/unix.rs#L1129-L1151
pub(crate) fn creation_mode(&self) -> io::Result<libc::c_int> {
match (self.write, self.append) {
(true, false) => {}
(false, false) => {
if self.truncate || self.create || self.create_new {
return Err(io::Error::from_raw_os_error(libc::EINVAL));
}
}
(_, true) => {
if self.truncate && !self.create_new {
return Err(io::Error::from_raw_os_error(libc::EINVAL));
}
}
}
Ok(match (self.create, self.truncate, self.create_new) {
(false, false, false) => 0,
(true, false, false) => libc::O_CREAT,
(false, true, false) => libc::O_TRUNC,
(true, true, false) => libc::O_CREAT | libc::O_TRUNC,
(_, _, true) => libc::O_CREAT | libc::O_EXCL,
})
}
}
impl From<UringOpenOptions> for StdOpenOptions {
fn from(value: UringOpenOptions) -> Self {
let mut std = StdOpenOptions::new();
std.append(value.append);
std.create(value.create);
std.create_new(value.create_new);
std.read(value.read);
std.truncate(value.truncate);
std.write(value.write);
std.mode(value.mode);
std.custom_flags(value.custom_flags);
std
}
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/fs/open_options/mock_open_options.rs | tokio/src/fs/open_options/mock_open_options.rs | #![allow(unreachable_pub)]
//! Mock version of `std::fs::OpenOptions`;
use mockall::mock;
use crate::fs::mocks::MockFile;
#[cfg(unix)]
use std::os::unix::fs::OpenOptionsExt;
#[cfg(windows)]
use std::os::windows::fs::OpenOptionsExt;
use std::{io, path::Path};
mock! {
#[derive(Debug)]
pub OpenOptions {
pub fn append(&mut self, append: bool) -> &mut Self;
pub fn create(&mut self, create: bool) -> &mut Self;
pub fn create_new(&mut self, create_new: bool) -> &mut Self;
pub fn open<P: AsRef<Path> + 'static>(&self, path: P) -> io::Result<MockFile>;
pub fn read(&mut self, read: bool) -> &mut Self;
pub fn truncate(&mut self, truncate: bool) -> &mut Self;
pub fn write(&mut self, write: bool) -> &mut Self;
}
impl Clone for OpenOptions {
fn clone(&self) -> Self;
}
#[cfg(unix)]
impl OpenOptionsExt for OpenOptions {
fn custom_flags(&mut self, flags: i32) -> &mut Self;
fn mode(&mut self, mode: u32) -> &mut Self;
}
#[cfg(windows)]
impl OpenOptionsExt for OpenOptions {
fn access_mode(&mut self, access: u32) -> &mut Self;
fn share_mode(&mut self, val: u32) -> &mut Self;
fn custom_flags(&mut self, flags: u32) -> &mut Self;
fn attributes(&mut self, val: u32) -> &mut Self;
fn security_qos_flags(&mut self, flags: u32) -> &mut Self;
}
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/sync/notify.rs | tokio/src/sync/notify.rs | // Allow `unreachable_pub` warnings when sync is not enabled
// due to the usage of `Notify` within the `rt` feature set.
// When this module is compiled with `sync` enabled we will warn on
// this lint. When `rt` is enabled we use `pub(crate)` which
// triggers this warning but it is safe to ignore in this case.
#![cfg_attr(not(feature = "sync"), allow(unreachable_pub, dead_code))]
use crate::loom::cell::UnsafeCell;
use crate::loom::sync::atomic::AtomicUsize;
use crate::loom::sync::Mutex;
use crate::util::linked_list::{self, GuardedLinkedList, LinkedList};
use crate::util::WakeList;
use std::future::Future;
use std::marker::PhantomPinned;
use std::panic::{RefUnwindSafe, UnwindSafe};
use std::pin::Pin;
use std::ptr::NonNull;
use std::sync::atomic::Ordering::{self, Acquire, Relaxed, Release, SeqCst};
use std::sync::Arc;
use std::task::{Context, Poll, Waker};
type WaitList = LinkedList<Waiter, <Waiter as linked_list::Link>::Target>;
type GuardedWaitList = GuardedLinkedList<Waiter, <Waiter as linked_list::Link>::Target>;
/// Notifies a single task to wake up.
///
/// `Notify` provides a basic mechanism to notify a single task of an event.
/// `Notify` itself does not carry any data. Instead, it is to be used to signal
/// another task to perform an operation.
///
/// A `Notify` can be thought of as a [`Semaphore`] starting with 0 permits. The
/// [`notified().await`] method waits for a permit to become available, and
/// [`notify_one()`] sets a permit **if there currently are no available
/// permits**.
///
/// The synchronization details of `Notify` are similar to
/// [`thread::park`][park] and [`Thread::unpark`][unpark] from std. A [`Notify`]
/// value contains a single permit. [`notified().await`] waits for the permit to
/// be made available, consumes the permit, and resumes. [`notify_one()`] sets
/// the permit, waking a pending task if there is one.
///
/// If `notify_one()` is called **before** `notified().await`, then the next
/// call to `notified().await` will complete immediately, consuming the permit.
/// Any subsequent calls to `notified().await` will wait for a new permit.
///
/// If `notify_one()` is called **multiple** times before `notified().await`,
/// only a **single** permit is stored. The next call to `notified().await` will
/// complete immediately, but the one after will wait for a new permit.
///
/// # Examples
///
/// Basic usage.
///
/// ```
/// use tokio::sync::Notify;
/// use std::sync::Arc;
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let notify = Arc::new(Notify::new());
/// let notify2 = notify.clone();
///
/// let handle = tokio::spawn(async move {
/// notify2.notified().await;
/// println!("received notification");
/// });
///
/// println!("sending notification");
/// notify.notify_one();
///
/// // Wait for task to receive notification.
/// handle.await.unwrap();
/// # }
/// ```
///
/// Unbound multi-producer single-consumer (mpsc) channel.
///
/// No wakeups can be lost when using this channel because the call to
/// `notify_one()` will store a permit in the `Notify`, which the following call
/// to `notified()` will consume.
///
/// ```
/// use tokio::sync::Notify;
///
/// use std::collections::VecDeque;
/// use std::sync::Mutex;
///
/// struct Channel<T> {
/// values: Mutex<VecDeque<T>>,
/// notify: Notify,
/// }
///
/// impl<T> Channel<T> {
/// pub fn send(&self, value: T) {
/// self.values.lock().unwrap()
/// .push_back(value);
///
/// // Notify the consumer a value is available
/// self.notify.notify_one();
/// }
///
/// // This is a single-consumer channel, so several concurrent calls to
/// // `recv` are not allowed.
/// pub async fn recv(&self) -> T {
/// loop {
/// // Drain values
/// if let Some(value) = self.values.lock().unwrap().pop_front() {
/// return value;
/// }
///
/// // Wait for values to be available
/// self.notify.notified().await;
/// }
/// }
/// }
/// ```
///
/// Unbound multi-producer multi-consumer (mpmc) channel.
///
/// The call to [`enable`] is important because otherwise if you have two
/// calls to `recv` and two calls to `send` in parallel, the following could
/// happen:
///
/// 1. Both calls to `try_recv` return `None`.
/// 2. Both new elements are added to the vector.
/// 3. The `notify_one` method is called twice, adding only a single
/// permit to the `Notify`.
/// 4. Both calls to `recv` reach the `Notified` future. One of them
/// consumes the permit, and the other sleeps forever.
///
/// By adding the `Notified` futures to the list by calling `enable` before
/// `try_recv`, the `notify_one` calls in step three would remove the
/// futures from the list and mark them notified instead of adding a permit
/// to the `Notify`. This ensures that both futures are woken.
///
/// Notice that this failure can only happen if there are two concurrent calls
/// to `recv`. This is why the mpsc example above does not require a call to
/// `enable`.
///
/// ```
/// use tokio::sync::Notify;
///
/// use std::collections::VecDeque;
/// use std::sync::Mutex;
///
/// struct Channel<T> {
/// messages: Mutex<VecDeque<T>>,
/// notify_on_sent: Notify,
/// }
///
/// impl<T> Channel<T> {
/// pub fn send(&self, msg: T) {
/// let mut locked_queue = self.messages.lock().unwrap();
/// locked_queue.push_back(msg);
/// drop(locked_queue);
///
/// // Send a notification to one of the calls currently
/// // waiting in a call to `recv`.
/// self.notify_on_sent.notify_one();
/// }
///
/// pub fn try_recv(&self) -> Option<T> {
/// let mut locked_queue = self.messages.lock().unwrap();
/// locked_queue.pop_front()
/// }
///
/// pub async fn recv(&self) -> T {
/// let future = self.notify_on_sent.notified();
/// tokio::pin!(future);
///
/// loop {
/// // Make sure that no wakeup is lost if we get
/// // `None` from `try_recv`.
/// future.as_mut().enable();
///
/// if let Some(msg) = self.try_recv() {
/// return msg;
/// }
///
/// // Wait for a call to `notify_one`.
/// //
/// // This uses `.as_mut()` to avoid consuming the future,
/// // which lets us call `Pin::set` below.
/// future.as_mut().await;
///
/// // Reset the future in case another call to
/// // `try_recv` got the message before us.
/// future.set(self.notify_on_sent.notified());
/// }
/// }
/// }
/// ```
///
/// [park]: std::thread::park
/// [unpark]: std::thread::Thread::unpark
/// [`notified().await`]: Notify::notified()
/// [`notify_one()`]: Notify::notify_one()
/// [`enable`]: Notified::enable()
/// [`Semaphore`]: crate::sync::Semaphore
#[derive(Debug)]
pub struct Notify {
// `state` uses 2 bits to store one of `EMPTY`,
// `WAITING` or `NOTIFIED`. The rest of the bits
// are used to store the number of times `notify_waiters`
// was called.
//
// Throughout the code there are two assumptions:
// - state can be transitioned *from* `WAITING` only if
// `waiters` lock is held
// - number of times `notify_waiters` was called can
// be modified only if `waiters` lock is held
state: AtomicUsize,
waiters: Mutex<WaitList>,
}
#[derive(Debug)]
struct Waiter {
/// Intrusive linked-list pointers.
pointers: linked_list::Pointers<Waiter>,
/// Waiting task's waker. Depending on the value of `notification`,
/// this field is either protected by the `waiters` lock in
/// `Notify`, or it is exclusively owned by the enclosing `Waiter`.
waker: UnsafeCell<Option<Waker>>,
/// Notification for this waiter. Uses 2 bits to store if and how was
/// notified, 1 bit for storing if it was woken up using FIFO or LIFO, and
/// the rest of it is unused.
/// * if it's `None`, then `waker` is protected by the `waiters` lock.
/// * if it's `Some`, then `waker` is exclusively owned by the
/// enclosing `Waiter` and can be accessed without locking.
notification: AtomicNotification,
/// Should not be `Unpin`.
_p: PhantomPinned,
}
impl Waiter {
fn new() -> Waiter {
Waiter {
pointers: linked_list::Pointers::new(),
waker: UnsafeCell::new(None),
notification: AtomicNotification::none(),
_p: PhantomPinned,
}
}
}
generate_addr_of_methods! {
impl<> Waiter {
unsafe fn addr_of_pointers(self: NonNull<Self>) -> NonNull<linked_list::Pointers<Waiter>> {
&self.pointers
}
}
}
// No notification.
const NOTIFICATION_NONE: usize = 0b000;
// Notification type used by `notify_one`.
const NOTIFICATION_ONE: usize = 0b001;
// Notification type used by `notify_last`.
const NOTIFICATION_LAST: usize = 0b101;
// Notification type used by `notify_waiters`.
const NOTIFICATION_ALL: usize = 0b010;
/// Notification for a `Waiter`.
/// This struct is equivalent to `Option<Notification>`, but uses
/// `AtomicUsize` inside for atomic operations.
#[derive(Debug)]
struct AtomicNotification(AtomicUsize);
impl AtomicNotification {
fn none() -> Self {
AtomicNotification(AtomicUsize::new(NOTIFICATION_NONE))
}
/// Store-release a notification.
/// This method should be called exactly once.
fn store_release(&self, notification: Notification) {
let data: usize = match notification {
Notification::All => NOTIFICATION_ALL,
Notification::One(NotifyOneStrategy::Fifo) => NOTIFICATION_ONE,
Notification::One(NotifyOneStrategy::Lifo) => NOTIFICATION_LAST,
};
self.0.store(data, Release);
}
fn load(&self, ordering: Ordering) -> Option<Notification> {
let data = self.0.load(ordering);
match data {
NOTIFICATION_NONE => None,
NOTIFICATION_ONE => Some(Notification::One(NotifyOneStrategy::Fifo)),
NOTIFICATION_LAST => Some(Notification::One(NotifyOneStrategy::Lifo)),
NOTIFICATION_ALL => Some(Notification::All),
_ => unreachable!(),
}
}
/// Clears the notification.
/// This method is used by a `Notified` future to consume the
/// notification. It uses relaxed ordering and should be only
/// used once the atomic notification is no longer shared.
fn clear(&self) {
self.0.store(NOTIFICATION_NONE, Relaxed);
}
}
#[derive(Debug, PartialEq, Eq)]
#[repr(usize)]
enum NotifyOneStrategy {
Fifo,
Lifo,
}
#[derive(Debug, PartialEq, Eq)]
#[repr(usize)]
enum Notification {
One(NotifyOneStrategy),
All,
}
/// List used in `Notify::notify_waiters`. It wraps a guarded linked list
/// and gates the access to it on `notify.waiters` mutex. It also empties
/// the list on drop.
struct NotifyWaitersList<'a> {
list: GuardedWaitList,
is_empty: bool,
notify: &'a Notify,
}
impl<'a> NotifyWaitersList<'a> {
fn new(
unguarded_list: WaitList,
guard: Pin<&'a Waiter>,
notify: &'a Notify,
) -> NotifyWaitersList<'a> {
let guard_ptr = NonNull::from(guard.get_ref());
let list = unguarded_list.into_guarded(guard_ptr);
NotifyWaitersList {
list,
is_empty: false,
notify,
}
}
/// Removes the last element from the guarded list. Modifying this list
/// requires an exclusive access to the main list in `Notify`.
fn pop_back_locked(&mut self, _waiters: &mut WaitList) -> Option<NonNull<Waiter>> {
let result = self.list.pop_back();
if result.is_none() {
// Save information about emptiness to avoid waiting for lock
// in the destructor.
self.is_empty = true;
}
result
}
}
impl Drop for NotifyWaitersList<'_> {
fn drop(&mut self) {
// If the list is not empty, we unlink all waiters from it.
// We do not wake the waiters to avoid double panics.
if !self.is_empty {
let _lock_guard = self.notify.waiters.lock();
while let Some(waiter) = self.list.pop_back() {
// Safety: we never make mutable references to waiters.
let waiter = unsafe { waiter.as_ref() };
waiter.notification.store_release(Notification::All);
}
}
}
}
/// Future returned from [`Notify::notified()`].
///
/// This future is fused, so once it has completed, any future calls to poll
/// will immediately return `Poll::Ready`.
#[derive(Debug)]
#[must_use = "futures do nothing unless you `.await` or poll them"]
pub struct Notified<'a> {
/// The `Notify` being received on.
notify: &'a Notify,
/// The current state of the receiving process.
state: State,
/// Number of calls to `notify_waiters` at the time of creation.
notify_waiters_calls: usize,
/// Entry in the waiter `LinkedList`.
waiter: Waiter,
}
unsafe impl<'a> Send for Notified<'a> {}
unsafe impl<'a> Sync for Notified<'a> {}
/// Future returned from [`Notify::notified_owned()`].
///
/// This future is fused, so once it has completed, any future calls to poll
/// will immediately return `Poll::Ready`.
#[derive(Debug)]
#[must_use = "futures do nothing unless you `.await` or poll them"]
pub struct OwnedNotified {
/// The `Notify` being received on.
notify: Arc<Notify>,
/// The current state of the receiving process.
state: State,
/// Number of calls to `notify_waiters` at the time of creation.
notify_waiters_calls: usize,
/// Entry in the waiter `LinkedList`.
waiter: Waiter,
}
unsafe impl Sync for OwnedNotified {}
/// A custom `project` implementation is used in place of `pin-project-lite`
/// as a custom drop for [`Notified`] and [`OwnedNotified`] implementation
/// is needed.
struct NotifiedProject<'a> {
notify: &'a Notify,
state: &'a mut State,
notify_waiters_calls: &'a usize,
waiter: &'a Waiter,
}
#[derive(Debug)]
enum State {
Init,
Waiting,
Done,
}
const NOTIFY_WAITERS_SHIFT: usize = 2;
const STATE_MASK: usize = (1 << NOTIFY_WAITERS_SHIFT) - 1;
const NOTIFY_WAITERS_CALLS_MASK: usize = !STATE_MASK;
/// Initial "idle" state.
const EMPTY: usize = 0;
/// One or more threads are currently waiting to be notified.
const WAITING: usize = 1;
/// Pending notification.
const NOTIFIED: usize = 2;
fn set_state(data: usize, state: usize) -> usize {
(data & NOTIFY_WAITERS_CALLS_MASK) | (state & STATE_MASK)
}
fn get_state(data: usize) -> usize {
data & STATE_MASK
}
fn get_num_notify_waiters_calls(data: usize) -> usize {
(data & NOTIFY_WAITERS_CALLS_MASK) >> NOTIFY_WAITERS_SHIFT
}
fn inc_num_notify_waiters_calls(data: usize) -> usize {
data + (1 << NOTIFY_WAITERS_SHIFT)
}
fn atomic_inc_num_notify_waiters_calls(data: &AtomicUsize) {
data.fetch_add(1 << NOTIFY_WAITERS_SHIFT, SeqCst);
}
impl Notify {
/// Create a new `Notify`, initialized without a permit.
///
/// # Examples
///
/// ```
/// use tokio::sync::Notify;
///
/// let notify = Notify::new();
/// ```
pub fn new() -> Notify {
Notify {
state: AtomicUsize::new(0),
waiters: Mutex::new(LinkedList::new()),
}
}
/// Create a new `Notify`, initialized without a permit.
///
/// When using the `tracing` [unstable feature], a `Notify` created with
/// `const_new` will not be instrumented. As such, it will not be visible
/// in [`tokio-console`]. Instead, [`Notify::new`] should be used to create
/// an instrumented object if that is needed.
///
/// # Examples
///
/// ```
/// use tokio::sync::Notify;
///
/// static NOTIFY: Notify = Notify::const_new();
/// ```
///
/// [`tokio-console`]: https://github.com/tokio-rs/console
/// [unstable feature]: crate#unstable-features
#[cfg(not(all(loom, test)))]
pub const fn const_new() -> Notify {
Notify {
state: AtomicUsize::new(0),
waiters: Mutex::const_new(LinkedList::new()),
}
}
/// Wait for a notification.
///
/// Equivalent to:
///
/// ```ignore
/// async fn notified(&self);
/// ```
///
/// Each `Notify` value holds a single permit. If a permit is available from
/// an earlier call to [`notify_one()`], then `notified().await` will complete
/// immediately, consuming that permit. Otherwise, `notified().await` waits
/// for a permit to be made available by the next call to `notify_one()`.
///
/// The `Notified` future is not guaranteed to receive wakeups from calls to
/// `notify_one()` if it has not yet been polled. See the documentation for
/// [`Notified::enable()`] for more details.
///
/// The `Notified` future is guaranteed to receive wakeups from
/// `notify_waiters()` as soon as it has been created, even if it has not
/// yet been polled.
///
/// [`notify_one()`]: Notify::notify_one
/// [`Notified::enable()`]: Notified::enable
///
/// # Cancel safety
///
/// This method uses a queue to fairly distribute notifications in the order
/// they were requested. Cancelling a call to `notified` makes you lose your
/// place in the queue.
///
/// # Examples
///
/// ```
/// use tokio::sync::Notify;
/// use std::sync::Arc;
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let notify = Arc::new(Notify::new());
/// let notify2 = notify.clone();
///
/// tokio::spawn(async move {
/// notify2.notified().await;
/// println!("received notification");
/// });
///
/// println!("sending notification");
/// notify.notify_one();
/// # }
/// ```
pub fn notified(&self) -> Notified<'_> {
// we load the number of times notify_waiters
// was called and store that in the future.
let state = self.state.load(SeqCst);
Notified {
notify: self,
state: State::Init,
notify_waiters_calls: get_num_notify_waiters_calls(state),
waiter: Waiter::new(),
}
}
/// Wait for a notification with an owned `Future`.
///
/// Unlike [`Self::notified`] which returns a future tied to the `Notify`'s
/// lifetime, `notified_owned` creates a self-contained future that owns its
/// notification state, making it safe to move between threads.
///
/// See [`Self::notified`] for more details.
///
/// # Cancel safety
///
/// This method uses a queue to fairly distribute notifications in the order
/// they were requested. Cancelling a call to `notified_owned` makes you lose your
/// place in the queue.
///
/// # Examples
///
/// ```
/// use std::sync::Arc;
/// use tokio::sync::Notify;
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let notify = Arc::new(Notify::new());
///
/// for _ in 0..10 {
/// let notified = notify.clone().notified_owned();
/// tokio::spawn(async move {
/// notified.await;
/// println!("received notification");
/// });
/// }
///
/// println!("sending notification");
/// notify.notify_waiters();
/// # }
/// ```
pub fn notified_owned(self: Arc<Self>) -> OwnedNotified {
// we load the number of times notify_waiters
// was called and store that in the future.
let state = self.state.load(SeqCst);
OwnedNotified {
notify: self,
state: State::Init,
notify_waiters_calls: get_num_notify_waiters_calls(state),
waiter: Waiter::new(),
}
}
/// Notifies the first waiting task.
///
/// If a task is currently waiting, that task is notified. Otherwise, a
/// permit is stored in this `Notify` value and the **next** call to
/// [`notified().await`] will complete immediately consuming the permit made
/// available by this call to `notify_one()`.
///
/// At most one permit may be stored by `Notify`. Many sequential calls to
/// `notify_one` will result in a single permit being stored. The next call to
/// `notified().await` will complete immediately, but the one after that
/// will wait.
///
/// [`notified().await`]: Notify::notified()
///
/// # Examples
///
/// ```
/// use tokio::sync::Notify;
/// use std::sync::Arc;
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let notify = Arc::new(Notify::new());
/// let notify2 = notify.clone();
///
/// tokio::spawn(async move {
/// notify2.notified().await;
/// println!("received notification");
/// });
///
/// println!("sending notification");
/// notify.notify_one();
/// # }
/// ```
// Alias for old name in 0.x
#[cfg_attr(docsrs, doc(alias = "notify"))]
pub fn notify_one(&self) {
self.notify_with_strategy(NotifyOneStrategy::Fifo);
}
/// Notifies the last waiting task.
///
/// This function behaves similar to `notify_one`. The only difference is that it wakes
/// the most recently added waiter instead of the oldest waiter.
///
/// Check the [`notify_one()`] documentation for more info and
/// examples.
///
/// [`notify_one()`]: Notify::notify_one
pub fn notify_last(&self) {
self.notify_with_strategy(NotifyOneStrategy::Lifo);
}
fn notify_with_strategy(&self, strategy: NotifyOneStrategy) {
// Load the current state
let mut curr = self.state.load(SeqCst);
// If the state is `EMPTY`, transition to `NOTIFIED` and return.
while let EMPTY | NOTIFIED = get_state(curr) {
// The compare-exchange from `NOTIFIED` -> `NOTIFIED` is intended. A
// happens-before synchronization must happen between this atomic
// operation and a task calling `notified().await`.
let new = set_state(curr, NOTIFIED);
let res = self.state.compare_exchange(curr, new, SeqCst, SeqCst);
match res {
// No waiters, no further work to do
Ok(_) => return,
Err(actual) => {
curr = actual;
}
}
}
// There are waiters, the lock must be acquired to notify.
let mut waiters = self.waiters.lock();
// The state must be reloaded while the lock is held. The state may only
// transition out of WAITING while the lock is held.
curr = self.state.load(SeqCst);
if let Some(waker) = notify_locked(&mut waiters, &self.state, curr, strategy) {
drop(waiters);
waker.wake();
}
}
/// Notifies all waiting tasks.
///
/// If a task is currently waiting, that task is notified. Unlike with
/// `notify_one()`, no permit is stored to be used by the next call to
/// `notified().await`. The purpose of this method is to notify all
/// already registered waiters. Registering for notification is done by
/// acquiring an instance of the `Notified` future via calling `notified()`.
///
/// # Examples
///
/// ```
/// use tokio::sync::Notify;
/// use std::sync::Arc;
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let notify = Arc::new(Notify::new());
/// let notify2 = notify.clone();
///
/// let notified1 = notify.notified();
/// let notified2 = notify.notified();
///
/// let handle = tokio::spawn(async move {
/// println!("sending notifications");
/// notify2.notify_waiters();
/// });
///
/// notified1.await;
/// notified2.await;
/// println!("received notifications");
/// # }
/// ```
pub fn notify_waiters(&self) {
self.lock_waiter_list().notify_waiters();
}
fn inner_notify_waiters<'a>(
&'a self,
curr: usize,
mut waiters: crate::loom::sync::MutexGuard<'a, LinkedList<Waiter, Waiter>>,
) {
if matches!(get_state(curr), EMPTY | NOTIFIED) {
// There are no waiting tasks. All we need to do is increment the
// number of times this method was called.
atomic_inc_num_notify_waiters_calls(&self.state);
return;
}
// Increment the number of times this method was called
// and transition to empty.
let new_state = set_state(inc_num_notify_waiters_calls(curr), EMPTY);
self.state.store(new_state, SeqCst);
// It is critical for `GuardedLinkedList` safety that the guard node is
// pinned in memory and is not dropped until the guarded list is dropped.
let guard = Waiter::new();
pin!(guard);
// We move all waiters to a secondary list. It uses a `GuardedLinkedList`
// underneath to allow every waiter to safely remove itself from it.
//
// * This list will be still guarded by the `waiters` lock.
// `NotifyWaitersList` wrapper makes sure we hold the lock to modify it.
// * This wrapper will empty the list on drop. It is critical for safety
// that we will not leave any list entry with a pointer to the local
// guard node after this function returns / panics.
let mut list = NotifyWaitersList::new(std::mem::take(&mut *waiters), guard.as_ref(), self);
let mut wakers = WakeList::new();
'outer: loop {
while wakers.can_push() {
match list.pop_back_locked(&mut waiters) {
Some(waiter) => {
// Safety: we never make mutable references to waiters.
let waiter = unsafe { waiter.as_ref() };
// Safety: we hold the lock, so we can access the waker.
if let Some(waker) =
unsafe { waiter.waker.with_mut(|waker| (*waker).take()) }
{
wakers.push(waker);
}
// This waiter is unlinked and will not be shared ever again, release it.
waiter.notification.store_release(Notification::All);
}
None => {
break 'outer;
}
}
}
// Release the lock before notifying.
drop(waiters);
// One of the wakers may panic, but the remaining waiters will still
// be unlinked from the list in `NotifyWaitersList` destructor.
wakers.wake_all();
// Acquire the lock again.
waiters = self.waiters.lock();
}
// Release the lock before notifying
drop(waiters);
wakers.wake_all();
}
pub(crate) fn lock_waiter_list(&self) -> NotifyGuard<'_> {
let guarded_waiters = self.waiters.lock();
// The state must be loaded while the lock is held. The state may only
// transition out of WAITING while the lock is held.
let current_state = self.state.load(SeqCst);
NotifyGuard {
guarded_notify: self,
guarded_waiters,
current_state,
}
}
}
impl Default for Notify {
fn default() -> Notify {
Notify::new()
}
}
impl UnwindSafe for Notify {}
impl RefUnwindSafe for Notify {}
fn notify_locked(
waiters: &mut WaitList,
state: &AtomicUsize,
curr: usize,
strategy: NotifyOneStrategy,
) -> Option<Waker> {
match get_state(curr) {
EMPTY | NOTIFIED => {
let res = state.compare_exchange(curr, set_state(curr, NOTIFIED), SeqCst, SeqCst);
match res {
Ok(_) => None,
Err(actual) => {
let actual_state = get_state(actual);
assert!(actual_state == EMPTY || actual_state == NOTIFIED);
state.store(set_state(actual, NOTIFIED), SeqCst);
None
}
}
}
WAITING => {
// At this point, it is guaranteed that the state will not
// concurrently change as holding the lock is required to
// transition **out** of `WAITING`.
//
// Get a pending waiter using one of the available dequeue strategies.
let waiter = match strategy {
NotifyOneStrategy::Fifo => waiters.pop_back().unwrap(),
NotifyOneStrategy::Lifo => waiters.pop_front().unwrap(),
};
// Safety: we never make mutable references to waiters.
let waiter = unsafe { waiter.as_ref() };
// Safety: we hold the lock, so we can access the waker.
let waker = unsafe { waiter.waker.with_mut(|waker| (*waker).take()) };
// This waiter is unlinked and will not be shared ever again, release it.
waiter
.notification
.store_release(Notification::One(strategy));
if waiters.is_empty() {
// As this the **final** waiter in the list, the state
// must be transitioned to `EMPTY`. As transitioning
// **from** `WAITING` requires the lock to be held, a
// `store` is sufficient.
state.store(set_state(curr, EMPTY), SeqCst);
}
waker
}
_ => unreachable!(),
}
}
// ===== impl Notified =====
impl Notified<'_> {
/// Adds this future to the list of futures that are ready to receive
/// wakeups from calls to [`notify_one`].
///
/// Polling the future also adds it to the list, so this method should only
/// be used if you want to add the future to the list before the first call
/// to `poll`. (In fact, this method is equivalent to calling `poll` except
/// that no `Waker` is registered.)
///
/// This has no effect on notifications sent using [`notify_waiters`], which
/// are received as long as they happen after the creation of the `Notified`
/// regardless of whether `enable` or `poll` has been called.
///
/// This method returns true if the `Notified` is ready. This happens in the
/// following situations:
///
/// 1. The `notify_waiters` method was called between the creation of the
/// `Notified` and the call to this method.
/// 2. This is the first call to `enable` or `poll` on this future, and the
/// `Notify` was holding a permit from a previous call to `notify_one`.
/// The call consumes the permit in that case.
/// 3. The future has previously been enabled or polled, and it has since
/// then been marked ready by either consuming a permit from the
/// `Notify`, or by a call to `notify_one` or `notify_waiters` that
/// removed it from the list of futures ready to receive wakeups.
///
/// If this method returns true, any future calls to poll on the same future
/// will immediately return `Poll::Ready`.
///
/// # Examples
///
/// Unbound multi-producer multi-consumer (mpmc) channel.
///
/// The call to `enable` is important because otherwise if you have two
/// calls to `recv` and two calls to `send` in parallel, the following could
/// happen:
///
/// 1. Both calls to `try_recv` return `None`.
/// 2. Both new elements are added to the vector.
/// 3. The `notify_one` method is called twice, adding only a single
/// permit to the `Notify`.
/// 4. Both calls to `recv` reach the `Notified` future. One of them
/// consumes the permit, and the other sleeps forever.
///
/// By adding the `Notified` futures to the list by calling `enable` before
/// `try_recv`, the `notify_one` calls in step three would remove the
/// futures from the list and mark them notified instead of adding a permit
/// to the `Notify`. This ensures that both futures are woken.
///
/// ```
/// use tokio::sync::Notify;
///
/// use std::collections::VecDeque;
/// use std::sync::Mutex;
///
/// struct Channel<T> {
/// messages: Mutex<VecDeque<T>>,
/// notify_on_sent: Notify,
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | true |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/sync/rwlock.rs | tokio/src/sync/rwlock.rs | use crate::sync::batch_semaphore::{Semaphore, TryAcquireError};
use crate::sync::mutex::TryLockError;
#[cfg(all(tokio_unstable, feature = "tracing"))]
use crate::util::trace;
use std::cell::UnsafeCell;
use std::marker;
use std::marker::PhantomData;
use std::sync::Arc;
pub(crate) mod owned_read_guard;
pub(crate) mod owned_write_guard;
pub(crate) mod owned_write_guard_mapped;
pub(crate) mod read_guard;
pub(crate) mod write_guard;
pub(crate) mod write_guard_mapped;
pub(crate) use owned_read_guard::OwnedRwLockReadGuard;
pub(crate) use owned_write_guard::OwnedRwLockWriteGuard;
pub(crate) use owned_write_guard_mapped::OwnedRwLockMappedWriteGuard;
pub(crate) use read_guard::RwLockReadGuard;
pub(crate) use write_guard::RwLockWriteGuard;
pub(crate) use write_guard_mapped::RwLockMappedWriteGuard;
#[cfg(not(loom))]
const MAX_READS: u32 = u32::MAX >> 3;
#[cfg(loom)]
const MAX_READS: u32 = 10;
/// An asynchronous reader-writer lock.
///
/// This type of lock allows a number of readers or at most one writer at any
/// point in time. The write portion of this lock typically allows modification
/// of the underlying data (exclusive access) and the read portion of this lock
/// typically allows for read-only access (shared access).
///
/// In comparison, a [`Mutex`] does not distinguish between readers or writers
/// that acquire the lock, therefore causing any tasks waiting for the lock to
/// become available to yield. An `RwLock` will allow any number of readers to
/// acquire the lock as long as a writer is not holding the lock.
///
/// The priority policy of Tokio's read-write lock is _fair_ (or
/// [_write-preferring_]), in order to ensure that readers cannot starve
/// writers. Fairness is ensured using a first-in, first-out queue for the tasks
/// awaiting the lock; if a task that wishes to acquire the write lock is at the
/// head of the queue, read locks will not be given out until the write lock has
/// been released. This is in contrast to the Rust standard library's
/// `std::sync::RwLock`, where the priority policy is dependent on the
/// operating system's implementation.
///
/// The type parameter `T` represents the data that this lock protects. It is
/// required that `T` satisfies [`Send`] to be shared across threads. The RAII guards
/// returned from the locking methods implement [`Deref`](trait@std::ops::Deref)
/// (and [`DerefMut`](trait@std::ops::DerefMut)
/// for the `write` methods) to allow access to the content of the lock.
///
/// # Examples
///
/// ```
/// use tokio::sync::RwLock;
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let lock = RwLock::new(5);
///
/// // many reader locks can be held at once
/// {
/// let r1 = lock.read().await;
/// let r2 = lock.read().await;
/// assert_eq!(*r1, 5);
/// assert_eq!(*r2, 5);
/// } // read locks are dropped at this point
///
/// // only one write lock may be held, however
/// {
/// let mut w = lock.write().await;
/// *w += 1;
/// assert_eq!(*w, 6);
/// } // write lock is dropped here
/// # }
/// ```
///
/// [`Mutex`]: struct@super::Mutex
/// [`RwLock`]: struct@RwLock
/// [`RwLockReadGuard`]: struct@RwLockReadGuard
/// [`RwLockWriteGuard`]: struct@RwLockWriteGuard
/// [`Send`]: trait@std::marker::Send
/// [_write-preferring_]: https://en.wikipedia.org/wiki/Readers%E2%80%93writer_lock#Priority_policies
pub struct RwLock<T: ?Sized> {
#[cfg(all(tokio_unstable, feature = "tracing"))]
resource_span: tracing::Span,
// maximum number of concurrent readers
mr: u32,
//semaphore to coordinate read and write access to T
s: Semaphore,
//inner data T
c: UnsafeCell<T>,
}
#[test]
#[cfg(not(loom))]
fn bounds() {
fn check_send<T: Send>() {}
fn check_sync<T: Sync>() {}
fn check_unpin<T: Unpin>() {}
// This has to take a value, since the async fn's return type is unnameable.
fn check_send_sync_val<T: Send + Sync>(_t: T) {}
check_send::<RwLock<u32>>();
check_sync::<RwLock<u32>>();
check_unpin::<RwLock<u32>>();
check_send::<RwLockReadGuard<'_, u32>>();
check_sync::<RwLockReadGuard<'_, u32>>();
check_unpin::<RwLockReadGuard<'_, u32>>();
check_send::<OwnedRwLockReadGuard<u32, i32>>();
check_sync::<OwnedRwLockReadGuard<u32, i32>>();
check_unpin::<OwnedRwLockReadGuard<u32, i32>>();
check_send::<RwLockWriteGuard<'_, u32>>();
check_sync::<RwLockWriteGuard<'_, u32>>();
check_unpin::<RwLockWriteGuard<'_, u32>>();
check_send::<RwLockMappedWriteGuard<'_, u32>>();
check_sync::<RwLockMappedWriteGuard<'_, u32>>();
check_unpin::<RwLockMappedWriteGuard<'_, u32>>();
check_send::<OwnedRwLockWriteGuard<u32>>();
check_sync::<OwnedRwLockWriteGuard<u32>>();
check_unpin::<OwnedRwLockWriteGuard<u32>>();
check_send::<OwnedRwLockMappedWriteGuard<u32, i32>>();
check_sync::<OwnedRwLockMappedWriteGuard<u32, i32>>();
check_unpin::<OwnedRwLockMappedWriteGuard<u32, i32>>();
let rwlock = Arc::new(RwLock::new(0));
check_send_sync_val(rwlock.read());
check_send_sync_val(Arc::clone(&rwlock).read_owned());
check_send_sync_val(rwlock.write());
check_send_sync_val(Arc::clone(&rwlock).write_owned());
}
// As long as T: Send + Sync, it's fine to send and share RwLock<T> between threads.
// If T were not Send, sending and sharing a RwLock<T> would be bad, since you can access T through
// RwLock<T>.
unsafe impl<T> Send for RwLock<T> where T: ?Sized + Send {}
unsafe impl<T> Sync for RwLock<T> where T: ?Sized + Send + Sync {}
// NB: These impls need to be explicit since we're storing a raw pointer.
// Safety: Stores a raw pointer to `T`, so if `T` is `Sync`, the lock guard over
// `T` is `Send`.
unsafe impl<T> Send for RwLockReadGuard<'_, T> where T: ?Sized + Sync {}
unsafe impl<T> Sync for RwLockReadGuard<'_, T> where T: ?Sized + Send + Sync {}
// T is required to be `Send` because an OwnedRwLockReadGuard can be used to drop the value held in
// the RwLock, unlike RwLockReadGuard.
unsafe impl<T, U> Send for OwnedRwLockReadGuard<T, U>
where
T: ?Sized + Send + Sync,
U: ?Sized + Sync,
{
}
unsafe impl<T, U> Sync for OwnedRwLockReadGuard<T, U>
where
T: ?Sized + Send + Sync,
U: ?Sized + Send + Sync,
{
}
unsafe impl<T> Sync for RwLockWriteGuard<'_, T> where T: ?Sized + Send + Sync {}
unsafe impl<T> Sync for OwnedRwLockWriteGuard<T> where T: ?Sized + Send + Sync {}
unsafe impl<T> Sync for RwLockMappedWriteGuard<'_, T> where T: ?Sized + Send + Sync {}
unsafe impl<T, U> Sync for OwnedRwLockMappedWriteGuard<T, U>
where
T: ?Sized + Send + Sync,
U: ?Sized + Send + Sync,
{
}
// Safety: Stores a raw pointer to `T`, so if `T` is `Sync`, the lock guard over
// `T` is `Send` - but since this is also provides mutable access, we need to
// make sure that `T` is `Send` since its value can be sent across thread
// boundaries.
unsafe impl<T> Send for RwLockWriteGuard<'_, T> where T: ?Sized + Send + Sync {}
unsafe impl<T> Send for OwnedRwLockWriteGuard<T> where T: ?Sized + Send + Sync {}
unsafe impl<T> Send for RwLockMappedWriteGuard<'_, T> where T: ?Sized + Send + Sync {}
unsafe impl<T, U> Send for OwnedRwLockMappedWriteGuard<T, U>
where
T: ?Sized + Send + Sync,
U: ?Sized + Send + Sync,
{
}
impl<T: ?Sized> RwLock<T> {
/// Creates a new instance of an `RwLock<T>` which is unlocked.
///
/// # Examples
///
/// ```
/// use tokio::sync::RwLock;
///
/// let lock = RwLock::new(5);
/// ```
#[track_caller]
pub fn new(value: T) -> RwLock<T>
where
T: Sized,
{
#[cfg(all(tokio_unstable, feature = "tracing"))]
let resource_span = {
let location = std::panic::Location::caller();
let resource_span = tracing::trace_span!(
parent: None,
"runtime.resource",
concrete_type = "RwLock",
kind = "Sync",
loc.file = location.file(),
loc.line = location.line(),
loc.col = location.column(),
);
resource_span.in_scope(|| {
tracing::trace!(
target: "runtime::resource::state_update",
max_readers = MAX_READS,
);
tracing::trace!(
target: "runtime::resource::state_update",
write_locked = false,
);
tracing::trace!(
target: "runtime::resource::state_update",
current_readers = 0,
);
});
resource_span
};
#[cfg(all(tokio_unstable, feature = "tracing"))]
let s = resource_span.in_scope(|| Semaphore::new(MAX_READS as usize));
#[cfg(any(not(tokio_unstable), not(feature = "tracing")))]
let s = Semaphore::new(MAX_READS as usize);
RwLock {
mr: MAX_READS,
c: UnsafeCell::new(value),
s,
#[cfg(all(tokio_unstable, feature = "tracing"))]
resource_span,
}
}
/// Creates a new instance of an `RwLock<T>` which is unlocked
/// and allows a maximum of `max_reads` concurrent readers.
///
/// # Examples
///
/// ```
/// use tokio::sync::RwLock;
///
/// let lock = RwLock::with_max_readers(5, 1024);
/// ```
///
/// # Panics
///
/// Panics if `max_reads` is more than `u32::MAX >> 3`.
#[track_caller]
pub fn with_max_readers(value: T, max_reads: u32) -> RwLock<T>
where
T: Sized,
{
assert!(
max_reads <= MAX_READS,
"a RwLock may not be created with more than {MAX_READS} readers"
);
#[cfg(all(tokio_unstable, feature = "tracing"))]
let resource_span = {
let location = std::panic::Location::caller();
let resource_span = tracing::trace_span!(
parent: None,
"runtime.resource",
concrete_type = "RwLock",
kind = "Sync",
loc.file = location.file(),
loc.line = location.line(),
loc.col = location.column(),
);
resource_span.in_scope(|| {
tracing::trace!(
target: "runtime::resource::state_update",
max_readers = max_reads,
);
tracing::trace!(
target: "runtime::resource::state_update",
write_locked = false,
);
tracing::trace!(
target: "runtime::resource::state_update",
current_readers = 0,
);
});
resource_span
};
#[cfg(all(tokio_unstable, feature = "tracing"))]
let s = resource_span.in_scope(|| Semaphore::new(max_reads as usize));
#[cfg(any(not(tokio_unstable), not(feature = "tracing")))]
let s = Semaphore::new(max_reads as usize);
RwLock {
mr: max_reads,
c: UnsafeCell::new(value),
s,
#[cfg(all(tokio_unstable, feature = "tracing"))]
resource_span,
}
}
/// Creates a new instance of an `RwLock<T>` which is unlocked.
///
/// When using the `tracing` [unstable feature], a `RwLock` created with
/// `const_new` will not be instrumented. As such, it will not be visible
/// in [`tokio-console`]. Instead, [`RwLock::new`] should be used to create
/// an instrumented object if that is needed.
///
/// # Examples
///
/// ```
/// use tokio::sync::RwLock;
///
/// static LOCK: RwLock<i32> = RwLock::const_new(5);
/// ```
///
/// [`tokio-console`]: https://github.com/tokio-rs/console
/// [unstable feature]: crate#unstable-features
#[cfg(not(all(loom, test)))]
pub const fn const_new(value: T) -> RwLock<T>
where
T: Sized,
{
RwLock {
mr: MAX_READS,
c: UnsafeCell::new(value),
s: Semaphore::const_new(MAX_READS as usize),
#[cfg(all(tokio_unstable, feature = "tracing"))]
resource_span: tracing::Span::none(),
}
}
/// Creates a new instance of an `RwLock<T>` which is unlocked
/// and allows a maximum of `max_reads` concurrent readers.
///
/// # Examples
///
/// ```
/// use tokio::sync::RwLock;
///
/// static LOCK: RwLock<i32> = RwLock::const_with_max_readers(5, 1024);
/// ```
#[cfg(not(all(loom, test)))]
pub const fn const_with_max_readers(value: T, max_reads: u32) -> RwLock<T>
where
T: Sized,
{
assert!(max_reads <= MAX_READS);
RwLock {
mr: max_reads,
c: UnsafeCell::new(value),
s: Semaphore::const_new(max_reads as usize),
#[cfg(all(tokio_unstable, feature = "tracing"))]
resource_span: tracing::Span::none(),
}
}
/// Locks this `RwLock` with shared read access, causing the current task
/// to yield until the lock has been acquired.
///
/// The calling task will yield until there are no writers which hold the
/// lock. There may be other readers inside the lock when the task resumes.
///
/// Note that under the priority policy of [`RwLock`], read locks are not
/// granted until prior write locks, to prevent starvation. Therefore
/// deadlock may occur if a read lock is held by the current task, a write
/// lock attempt is made, and then a subsequent read lock attempt is made
/// by the current task.
///
/// Returns an RAII guard which will drop this read access of the `RwLock`
/// when dropped.
///
/// # Cancel safety
///
/// This method uses a queue to fairly distribute locks in the order they
/// were requested. Cancelling a call to `read` makes you lose your place in
/// the queue.
///
/// # Examples
///
/// ```
/// use std::sync::Arc;
/// use tokio::sync::RwLock;
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let lock = Arc::new(RwLock::new(1));
/// let c_lock = lock.clone();
///
/// let n = lock.read().await;
/// assert_eq!(*n, 1);
///
/// tokio::spawn(async move {
/// // While main has an active read lock, we acquire one too.
/// let r = c_lock.read().await;
/// assert_eq!(*r, 1);
/// }).await.expect("The spawned task has panicked");
///
/// // Drop the guard after the spawned task finishes.
/// drop(n);
/// # }
/// ```
pub async fn read(&self) -> RwLockReadGuard<'_, T> {
    let acquire_fut = async {
        // Each reader holds exactly one semaphore permit; a writer holds
        // all `mr` permits (see `write`), so this waits while a writer —
        // or a writer queued ahead of us — owns the lock.
        self.s.acquire(1).await.unwrap_or_else(|_| {
            // The semaphore was closed. but, we never explicitly close it, and we have a
            // handle to it through the Arc, which means that this can never happen.
            unreachable!()
        });
        RwLockReadGuard {
            s: &self.s,
            data: self.c.get(),
            marker: PhantomData,
            #[cfg(all(tokio_unstable, feature = "tracing"))]
            resource_span: self.resource_span.clone(),
        }
    };
    // When instrumented, wrap the acquire future so each poll is traced.
    #[cfg(all(tokio_unstable, feature = "tracing"))]
    let acquire_fut = trace::async_op(
        move || acquire_fut,
        self.resource_span.clone(),
        "RwLock::read",
        "poll",
        false,
    );
    #[allow(clippy::let_and_return)] // this lint triggers when disabling tracing
    let guard = acquire_fut.await;
    // Record the reader-count increase for tooling (e.g. tokio-console).
    #[cfg(all(tokio_unstable, feature = "tracing"))]
    self.resource_span.in_scope(|| {
        tracing::trace!(
            target: "runtime::resource::state_update",
            current_readers = 1,
            current_readers.op = "add",
        )
    });
    guard
}
/// Blockingly locks this `RwLock` with shared read access.
///
/// This method is intended for use cases where you
/// need to use this rwlock in asynchronous code as well as in synchronous code.
///
/// Returns an RAII guard which will drop the read access of this `RwLock` when dropped.
///
/// # Panics
///
/// This function panics if called within an asynchronous execution context.
///
/// - If you find yourself in an asynchronous execution context and needing
/// to call some (synchronous) function which performs one of these
/// `blocking_` operations, then consider wrapping that call inside
/// [`spawn_blocking()`][crate::runtime::Handle::spawn_blocking]
/// (or [`block_in_place()`][crate::task::block_in_place]).
///
/// # Examples
///
/// ```
/// # #[cfg(not(target_family = "wasm"))]
/// # {
/// use std::sync::Arc;
/// use tokio::sync::RwLock;
///
/// #[tokio::main]
/// async fn main() {
/// let rwlock = Arc::new(RwLock::new(1));
/// let mut write_lock = rwlock.write().await;
///
/// let blocking_task = tokio::task::spawn_blocking({
/// let rwlock = Arc::clone(&rwlock);
/// move || {
/// // This shall block until the `write_lock` is released.
/// let read_lock = rwlock.blocking_read();
/// assert_eq!(*read_lock, 0);
/// }
/// });
///
/// *write_lock -= 1;
/// drop(write_lock); // release the lock.
///
/// // Await the completion of the blocking task.
/// blocking_task.await.unwrap();
///
/// // Assert uncontended.
/// assert!(rwlock.try_write().is_ok());
/// }
/// # }
/// ```
#[track_caller]
#[cfg(feature = "sync")]
pub fn blocking_read(&self) -> RwLockReadGuard<'_, T> {
    // Drive the async `read()` to completion on the current thread.
    // `block_on` panics when called from within an async runtime context,
    // which is the panic documented above.
    crate::future::block_on(self.read())
}
/// Locks this `RwLock` with shared read access, causing the current task
/// to yield until the lock has been acquired.
///
/// The calling task will yield until there are no writers which hold the
/// lock. There may be other readers inside the lock when the task resumes.
///
/// This method is identical to [`RwLock::read`], except that the returned
/// guard references the `RwLock` with an [`Arc`] rather than by borrowing
/// it. Therefore, the `RwLock` must be wrapped in an `Arc` to call this
/// method, and the guard will live for the `'static` lifetime, as it keeps
/// the `RwLock` alive by holding an `Arc`.
///
/// Note that under the priority policy of [`RwLock`], read locks are not
/// granted until prior write locks, to prevent starvation. Therefore
/// deadlock may occur if a read lock is held by the current task, a write
/// lock attempt is made, and then a subsequent read lock attempt is made
/// by the current task.
///
/// Returns an RAII guard which will drop this read access of the `RwLock`
/// when dropped.
///
/// # Cancel safety
///
/// This method uses a queue to fairly distribute locks in the order they
/// were requested. Cancelling a call to `read_owned` makes you lose your
/// place in the queue.
///
/// # Examples
///
/// ```
/// use std::sync::Arc;
/// use tokio::sync::RwLock;
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let lock = Arc::new(RwLock::new(1));
/// let c_lock = lock.clone();
///
/// let n = lock.read_owned().await;
/// assert_eq!(*n, 1);
///
/// tokio::spawn(async move {
///     // While main has an active read lock, we acquire one too.
///     let r = c_lock.read_owned().await;
///     assert_eq!(*r, 1);
/// }).await.expect("The spawned task has panicked");
///
/// // Drop the guard after the spawned task finishes.
/// drop(n);
/// # }
/// ```
pub async fn read_owned(self: Arc<Self>) -> OwnedRwLockReadGuard<T> {
    // Clone the span up front because `self` is moved into the future below.
    #[cfg(all(tokio_unstable, feature = "tracing"))]
    let resource_span = self.resource_span.clone();
    let acquire_fut = async {
        // One permit per reader; a writer holds all `mr` permits.
        self.s.acquire(1).await.unwrap_or_else(|_| {
            // The semaphore was closed. but, we never explicitly close it, and we have a
            // handle to it through the Arc, which means that this can never happen.
            unreachable!()
        });
        OwnedRwLockReadGuard {
            #[cfg(all(tokio_unstable, feature = "tracing"))]
            resource_span: self.resource_span.clone(),
            data: self.c.get(),
            // The guard takes over this `Arc`, keeping the lock alive.
            lock: self,
            _p: PhantomData,
        }
    };
    #[cfg(all(tokio_unstable, feature = "tracing"))]
    let acquire_fut = trace::async_op(
        move || acquire_fut,
        resource_span,
        "RwLock::read_owned",
        "poll",
        false,
    );
    #[allow(clippy::let_and_return)] // this lint triggers when disabling tracing
    let guard = acquire_fut.await;
    // Record the reader-count increase for tooling (e.g. tokio-console).
    #[cfg(all(tokio_unstable, feature = "tracing"))]
    guard.resource_span.in_scope(|| {
        tracing::trace!(
            target: "runtime::resource::state_update",
            current_readers = 1,
            current_readers.op = "add",
        )
    });
    guard
}
/// Attempts to acquire this `RwLock` with shared read access.
///
/// If the access couldn't be acquired immediately, returns [`TryLockError`].
/// Otherwise, an RAII guard is returned which will release read access
/// when dropped.
///
/// [`TryLockError`]: TryLockError
///
/// # Examples
///
/// ```
/// use std::sync::Arc;
/// use tokio::sync::RwLock;
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let lock = Arc::new(RwLock::new(1));
/// let c_lock = lock.clone();
///
/// let v = lock.try_read().unwrap();
/// assert_eq!(*v, 1);
///
/// tokio::spawn(async move {
/// // While main has an active read lock, we acquire one too.
/// let n = c_lock.read().await;
/// assert_eq!(*n, 1);
/// }).await.expect("The spawned task has panicked");
///
/// // Drop the guard when spawned task finishes.
/// drop(v);
/// # }
/// ```
pub fn try_read(&self) -> Result<RwLockReadGuard<'_, T>, TryLockError> {
    match self.s.try_acquire(1) {
        // Success: `try_acquire` yields `()`; the permit is released again
        // when the guard constructed below is dropped.
        Ok(permit) => permit,
        Err(TryAcquireError::NoPermits) => return Err(TryLockError(())),
        // This type never closes its semaphore, so `Closed` cannot occur.
        Err(TryAcquireError::Closed) => unreachable!(),
    }
    let guard = RwLockReadGuard {
        s: &self.s,
        data: self.c.get(),
        marker: marker::PhantomData,
        #[cfg(all(tokio_unstable, feature = "tracing"))]
        resource_span: self.resource_span.clone(),
    };
    // Record the reader-count increase for tooling (e.g. tokio-console).
    #[cfg(all(tokio_unstable, feature = "tracing"))]
    self.resource_span.in_scope(|| {
        tracing::trace!(
            target: "runtime::resource::state_update",
            current_readers = 1,
            current_readers.op = "add",
        )
    });
    Ok(guard)
}
/// Attempts to acquire this `RwLock` with shared read access.
///
/// If the access couldn't be acquired immediately, returns [`TryLockError`].
/// Otherwise, an RAII guard is returned which will release read access
/// when dropped.
///
/// This method is identical to [`RwLock::try_read`], except that the
/// returned guard references the `RwLock` with an [`Arc`] rather than by
/// borrowing it. Therefore, the `RwLock` must be wrapped in an `Arc` to
/// call this method, and the guard will live for the `'static` lifetime,
/// as it keeps the `RwLock` alive by holding an `Arc`.
///
/// [`TryLockError`]: TryLockError
///
/// # Examples
///
/// ```
/// use std::sync::Arc;
/// use tokio::sync::RwLock;
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let lock = Arc::new(RwLock::new(1));
/// let c_lock = lock.clone();
///
/// let v = lock.try_read_owned().unwrap();
/// assert_eq!(*v, 1);
///
/// tokio::spawn(async move {
/// // While main has an active read lock, we acquire one too.
/// let n = c_lock.read_owned().await;
/// assert_eq!(*n, 1);
/// }).await.expect("The spawned task has panicked");
///
/// // Drop the guard when spawned task finishes.
/// drop(v);
/// # }
/// ```
pub fn try_read_owned(self: Arc<Self>) -> Result<OwnedRwLockReadGuard<T>, TryLockError> {
    match self.s.try_acquire(1) {
        // Success: `try_acquire` yields `()`; the permit is released again
        // when the guard constructed below is dropped.
        Ok(permit) => permit,
        Err(TryAcquireError::NoPermits) => return Err(TryLockError(())),
        // This type never closes its semaphore, so `Closed` cannot occur.
        Err(TryAcquireError::Closed) => unreachable!(),
    }
    let guard = OwnedRwLockReadGuard {
        #[cfg(all(tokio_unstable, feature = "tracing"))]
        resource_span: self.resource_span.clone(),
        data: self.c.get(),
        // The guard takes over this `Arc`, keeping the lock alive.
        lock: self,
        _p: PhantomData,
    };
    // Record the reader-count increase for tooling (e.g. tokio-console).
    #[cfg(all(tokio_unstable, feature = "tracing"))]
    guard.resource_span.in_scope(|| {
        tracing::trace!(
            target: "runtime::resource::state_update",
            current_readers = 1,
            current_readers.op = "add",
        )
    });
    Ok(guard)
}
/// Locks this `RwLock` with exclusive write access, causing the current
/// task to yield until the lock has been acquired.
///
/// The calling task will yield while other writers or readers currently
/// have access to the lock.
///
/// Returns an RAII guard which will drop the write access of this `RwLock`
/// when dropped.
///
/// # Cancel safety
///
/// This method uses a queue to fairly distribute locks in the order they
/// were requested. Cancelling a call to `write` makes you lose your place
/// in the queue.
///
/// # Examples
///
/// ```
/// use tokio::sync::RwLock;
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let lock = RwLock::new(1);
///
/// let mut n = lock.write().await;
/// *n = 2;
/// # }
/// ```
pub async fn write(&self) -> RwLockWriteGuard<'_, T> {
    let acquire_fut = async {
        // Exclusive access is modeled as holding every reader permit at
        // once (`mr` = max readers), so this waits until all readers and
        // any earlier writer have released the lock.
        self.s.acquire(self.mr as usize).await.unwrap_or_else(|_| {
            // The semaphore was closed. but, we never explicitly close it, and we have a
            // handle to it through the Arc, which means that this can never happen.
            unreachable!()
        });
        RwLockWriteGuard {
            // The guard must return exactly this many permits on drop.
            permits_acquired: self.mr,
            s: &self.s,
            data: self.c.get(),
            marker: marker::PhantomData,
            #[cfg(all(tokio_unstable, feature = "tracing"))]
            resource_span: self.resource_span.clone(),
        }
    };
    // When instrumented, wrap the acquire future so each poll is traced.
    #[cfg(all(tokio_unstable, feature = "tracing"))]
    let acquire_fut = trace::async_op(
        move || acquire_fut,
        self.resource_span.clone(),
        "RwLock::write",
        "poll",
        false,
    );
    #[allow(clippy::let_and_return)] // this lint triggers when disabling tracing
    let guard = acquire_fut.await;
    // Record the write-locked state for tooling (e.g. tokio-console).
    #[cfg(all(tokio_unstable, feature = "tracing"))]
    self.resource_span.in_scope(|| {
        tracing::trace!(
            target: "runtime::resource::state_update",
            write_locked = true,
            write_locked.op = "override",
        )
    });
    guard
}
/// Blockingly locks this `RwLock` with exclusive write access.
///
/// This method is intended for use cases where you
/// need to use this rwlock in asynchronous code as well as in synchronous code.
///
/// Returns an RAII guard which will drop the write access of this `RwLock` when dropped.
///
/// # Panics
///
/// This function panics if called within an asynchronous execution context.
///
/// - If you find yourself in an asynchronous execution context and needing
///   to call some (synchronous) function which performs one of these
///   `blocking_` operations, then consider wrapping that call inside
///   [`spawn_blocking()`][crate::runtime::Handle::spawn_blocking]
///   (or [`block_in_place()`][crate::task::block_in_place]).
///
/// # Examples
///
/// ```
/// # #[cfg(not(target_family = "wasm"))]
/// # {
/// use std::sync::Arc;
/// use tokio::sync::RwLock;
///
/// #[tokio::main]
/// async fn main() {
///     let rwlock = Arc::new(RwLock::new(1));
///     let read_lock = rwlock.read().await;
///
///     let blocking_task = tokio::task::spawn_blocking({
///         let rwlock = Arc::clone(&rwlock);
///         move || {
///             // This shall block until the `read_lock` is released.
///             let mut write_lock = rwlock.blocking_write();
///             *write_lock = 2;
///         }
///     });
///
///     assert_eq!(*read_lock, 1);
///     // Release the last outstanding read lock.
///     drop(read_lock);
///
///     // Await the completion of the blocking task.
///     blocking_task.await.unwrap();
///
///     // Assert uncontended.
///     let read_lock = rwlock.try_read().unwrap();
///     assert_eq!(*read_lock, 2);
/// }
/// # }
/// ```
#[track_caller]
#[cfg(feature = "sync")]
pub fn blocking_write(&self) -> RwLockWriteGuard<'_, T> {
    // Drive the async `write()` to completion on the current thread.
    // `block_on` panics when called from within an async runtime context,
    // which is the panic documented above.
    crate::future::block_on(self.write())
}
/// Locks this `RwLock` with exclusive write access, causing the current
/// task to yield until the lock has been acquired.
///
/// The calling task will yield while other writers or readers currently
/// have access to the lock.
///
/// This method is identical to [`RwLock::write`], except that the returned
/// guard references the `RwLock` with an [`Arc`] rather than by borrowing
/// it. Therefore, the `RwLock` must be wrapped in an `Arc` to call this
/// method, and the guard will live for the `'static` lifetime, as it keeps
/// the `RwLock` alive by holding an `Arc`.
///
/// Returns an RAII guard which will drop the write access of this `RwLock`
/// when dropped.
///
/// # Cancel safety
///
/// This method uses a queue to fairly distribute locks in the order they
/// were requested. Cancelling a call to `write_owned` makes you lose your
/// place in the queue.
///
/// # Examples
///
/// ```
/// use std::sync::Arc;
/// use tokio::sync::RwLock;
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let lock = Arc::new(RwLock::new(1));
///
/// let mut n = lock.write_owned().await;
/// *n = 2;
///}
/// ```
pub async fn write_owned(self: Arc<Self>) -> OwnedRwLockWriteGuard<T> {
#[cfg(all(tokio_unstable, feature = "tracing"))]
let resource_span = self.resource_span.clone();
let acquire_fut = async {
self.s.acquire(self.mr as usize).await.unwrap_or_else(|_| {
// The semaphore was closed. but, we never explicitly close it, and we have a
// handle to it through the Arc, which means that this can never happen.
unreachable!()
});
OwnedRwLockWriteGuard {
#[cfg(all(tokio_unstable, feature = "tracing"))]
resource_span: self.resource_span.clone(),
permits_acquired: self.mr,
data: self.c.get(),
lock: self,
_p: PhantomData,
}
};
#[cfg(all(tokio_unstable, feature = "tracing"))]
let acquire_fut = trace::async_op(
move || acquire_fut,
resource_span,
"RwLock::write_owned",
"poll",
false,
);
#[allow(clippy::let_and_return)] // this lint triggers when disabling tracing
let guard = acquire_fut.await;
#[cfg(all(tokio_unstable, feature = "tracing"))]
guard.resource_span.in_scope(|| {
tracing::trace!(
target: "runtime::resource::state_update",
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | true |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/sync/mutex.rs | tokio/src/sync/mutex.rs | #![cfg_attr(not(feature = "sync"), allow(unreachable_pub, dead_code))]
use crate::sync::batch_semaphore as semaphore;
#[cfg(all(tokio_unstable, feature = "tracing"))]
use crate::util::trace;
use std::cell::UnsafeCell;
use std::error::Error;
use std::marker::PhantomData;
use std::ops::{Deref, DerefMut};
use std::sync::Arc;
use std::{fmt, mem, ptr};
/// An asynchronous `Mutex`-like type.
///
/// This type acts similarly to [`std::sync::Mutex`], with two major
/// differences: [`lock`] is an async method so does not block, and the lock
/// guard is designed to be held across `.await` points.
///
/// Tokio's Mutex operates on a guaranteed FIFO basis.
/// This means that the order in which tasks call the [`lock`] method is
/// the exact order in which they will acquire the lock.
///
/// # Which kind of mutex should you use?
///
/// Contrary to popular belief, it is ok and often preferred to use the ordinary
/// [`Mutex`][std] from the standard library in asynchronous code.
///
/// The feature that the async mutex offers over the blocking mutex is the
/// ability to keep it locked across an `.await` point. This makes the async
/// mutex more expensive than the blocking mutex, so the blocking mutex should
/// be preferred in the cases where it can be used. The primary use case for the
/// async mutex is to provide shared mutable access to IO resources such as a
/// database connection. If the value behind the mutex is just data, it's
/// usually appropriate to use a blocking mutex such as the one in the standard
/// library or [`parking_lot`].
///
/// Note that, although the compiler will not prevent the std `Mutex` from holding
/// its guard across `.await` points in situations where the task is not movable
/// between threads, this virtually never leads to correct concurrent code in
/// practice as it can easily lead to deadlocks.
///
/// A common pattern is to wrap the `Arc<Mutex<...>>` in a struct that provides
/// non-async methods for performing operations on the data within, and only
/// lock the mutex inside these methods. The [mini-redis] example provides an
/// illustration of this pattern.
///
/// Additionally, when you _do_ want shared access to an IO resource, it is
/// often better to spawn a task to manage the IO resource, and to use message
/// passing to communicate with that task.
///
/// [std]: std::sync::Mutex
/// [`parking_lot`]: https://docs.rs/parking_lot
/// [mini-redis]: https://github.com/tokio-rs/mini-redis/blob/master/src/db.rs
///
/// # Examples:
///
/// ```rust,no_run
/// use tokio::sync::Mutex;
/// use std::sync::Arc;
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let data1 = Arc::new(Mutex::new(0));
/// let data2 = Arc::clone(&data1);
///
/// tokio::spawn(async move {
/// let mut lock = data2.lock().await;
/// *lock += 1;
/// });
///
/// let mut lock = data1.lock().await;
/// *lock += 1;
/// # }
/// ```
///
///
/// ```rust,no_run
/// use tokio::sync::Mutex;
/// use std::sync::Arc;
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let count = Arc::new(Mutex::new(0));
///
/// for i in 0..5 {
/// let my_count = Arc::clone(&count);
/// tokio::spawn(async move {
/// for j in 0..10 {
/// let mut lock = my_count.lock().await;
/// *lock += 1;
/// println!("{} {} {}", i, j, lock);
/// }
/// });
/// }
///
/// loop {
/// if *count.lock().await >= 50 {
/// break;
/// }
/// }
/// println!("Count hit 50.");
/// # }
/// ```
/// There are a few things of note here to pay attention to in this example.
/// 1. The mutex is wrapped in an [`Arc`] to allow it to be shared across
/// threads.
/// 2. Each spawned task obtains a lock and releases it on every iteration.
/// 3. Mutation of the data protected by the Mutex is done by de-referencing
/// the obtained lock as seen on lines 13 and 20.
///
/// Tokio's Mutex works in a simple FIFO (first in, first out) style where all
/// calls to [`lock`] complete in the order they were performed. In that way the
/// Mutex is "fair" and predictable in how it distributes the locks to inner
/// data. Locks are released and reacquired after every iteration, so basically,
/// each thread goes to the back of the line after it increments the value once.
/// Note that there's some unpredictability to the timing between when the
/// threads are started, but once they are going they alternate predictably.
/// Finally, since there is only a single valid lock at any given time, there is
/// no possibility of a race condition when mutating the inner value.
///
/// Note that in contrast to [`std::sync::Mutex`], this implementation does not
/// poison the mutex when a thread holding the [`MutexGuard`] panics. In such a
/// case, the mutex will be unlocked. If the panic is caught, this might leave
/// the data protected by the mutex in an inconsistent state.
///
/// [`Mutex`]: struct@Mutex
/// [`MutexGuard`]: struct@MutexGuard
/// [`Arc`]: struct@std::sync::Arc
/// [`std::sync::Mutex`]: struct@std::sync::Mutex
/// [`Send`]: trait@std::marker::Send
/// [`lock`]: method@Mutex::lock
pub struct Mutex<T: ?Sized> {
    // Tracing/console instrumentation handle (unstable `tracing` feature only).
    #[cfg(all(tokio_unstable, feature = "tracing"))]
    resource_span: tracing::Span,
    // Binary semaphore: created with a single permit (see `new`/`const_new`),
    // so holding the permit is holding the lock.
    s: semaphore::Semaphore,
    // The protected value. `UnsafeCell` because access is synchronized by `s`
    // rather than by the type system. Kept last so `T: ?Sized` is allowed.
    c: UnsafeCell<T>,
}
/// A handle to a held `Mutex`. The guard can be held across any `.await` point
/// as it is [`Send`].
///
/// As long as you have this guard, you have exclusive access to the underlying
/// `T`. The guard internally borrows the `Mutex`, so the mutex will not be
/// dropped while a guard exists.
///
/// The lock is automatically released whenever the guard is dropped, at which
/// point `lock` will succeed yet again.
#[clippy::has_significant_drop]
#[must_use = "if unused the Mutex will immediately unlock"]
pub struct MutexGuard<'a, T: ?Sized> {
    // When changing the fields in this struct, make sure to update the
    // `skip_drop` method.
    #[cfg(all(tokio_unstable, feature = "tracing"))]
    resource_span: tracing::Span,
    // Borrow of the locked mutex; `Drop` (defined elsewhere in this file)
    // releases the lock through it.
    lock: &'a Mutex<T>,
}
/// An owned handle to a held `Mutex`.
///
/// This guard is only available from a `Mutex` that is wrapped in an [`Arc`]. It
/// is identical to `MutexGuard`, except that rather than borrowing the `Mutex`,
/// it clones the `Arc`, incrementing the reference count. This means that
/// unlike `MutexGuard`, it will have the `'static` lifetime.
///
/// As long as you have this guard, you have exclusive access to the underlying
/// `T`. The guard internally keeps a reference-counted pointer to the original
/// `Mutex`, so even if the lock goes away, the guard remains valid.
///
/// The lock is automatically released whenever the guard is dropped, at which
/// point `lock` will succeed yet again.
///
/// [`Arc`]: std::sync::Arc
#[clippy::has_significant_drop]
pub struct OwnedMutexGuard<T: ?Sized> {
    // When changing the fields in this struct, make sure to update the
    // `skip_drop` method.
    #[cfg(all(tokio_unstable, feature = "tracing"))]
    resource_span: tracing::Span,
    // Clone of the `Arc`, keeping the mutex alive for the guard's lifetime.
    lock: Arc<Mutex<T>>,
}
/// A handle to a held `Mutex` that has had a function applied to it via [`MutexGuard::map`].
///
/// This can be used to hold a subfield of the protected data.
///
/// [`MutexGuard::map`]: method@MutexGuard::map
#[clippy::has_significant_drop]
#[must_use = "if unused the Mutex will immediately unlock"]
pub struct MappedMutexGuard<'a, T: ?Sized> {
    // When changing the fields in this struct, make sure to update the
    // `skip_drop` method.
    #[cfg(all(tokio_unstable, feature = "tracing"))]
    resource_span: tracing::Span,
    // The mutex's semaphore; its permit is returned when the guard is dropped.
    s: &'a semaphore::Semaphore,
    // Raw pointer to the mapped (sub)field of the protected value.
    data: *mut T,
    // Needed to tell the borrow checker that we are holding a `&mut T`
    marker: PhantomData<&'a mut T>,
}
/// An owned handle to a held `Mutex` that has had a function applied to it via
/// [`OwnedMutexGuard::map`].
///
/// This can be used to hold a subfield of the protected data.
///
/// [`OwnedMutexGuard::map`]: method@OwnedMutexGuard::map
#[clippy::has_significant_drop]
#[must_use = "if unused the Mutex will immediately unlock"]
pub struct OwnedMappedMutexGuard<T: ?Sized, U: ?Sized = T> {
    // When changing the fields in this struct, make sure to update the
    // `skip_drop` method.
    #[cfg(all(tokio_unstable, feature = "tracing"))]
    resource_span: tracing::Span,
    // Raw pointer to the mapped (sub)value of type `U` inside the protected `T`.
    data: *mut U,
    // Clone of the `Arc`, keeping the mutex alive for the guard's lifetime.
    lock: Arc<Mutex<T>>,
}
// Each `*Inner` type below must mirror the fields of its corresponding guard
// so the guard can be taken apart without running its `Drop` implementation.
/// A helper type used when taking apart a `MutexGuard` without running its
/// Drop implementation.
#[allow(dead_code)] // Unused fields are still used in Drop.
struct MutexGuardInner<'a, T: ?Sized> {
    #[cfg(all(tokio_unstable, feature = "tracing"))]
    resource_span: tracing::Span,
    lock: &'a Mutex<T>,
}
/// A helper type used when taking apart a `OwnedMutexGuard` without running
/// its Drop implementation.
struct OwnedMutexGuardInner<T: ?Sized> {
    #[cfg(all(tokio_unstable, feature = "tracing"))]
    resource_span: tracing::Span,
    lock: Arc<Mutex<T>>,
}
/// A helper type used when taking apart a `MappedMutexGuard` without running
/// its Drop implementation.
#[allow(dead_code)] // Unused fields are still used in Drop.
struct MappedMutexGuardInner<'a, T: ?Sized> {
    #[cfg(all(tokio_unstable, feature = "tracing"))]
    resource_span: tracing::Span,
    s: &'a semaphore::Semaphore,
    data: *mut T,
}
/// A helper type used when taking apart a `OwnedMappedMutexGuard` without running
/// its Drop implementation.
#[allow(dead_code)] // Unused fields are still used in Drop.
struct OwnedMappedMutexGuardInner<T: ?Sized, U: ?Sized> {
    #[cfg(all(tokio_unstable, feature = "tracing"))]
    resource_span: tracing::Span,
    data: *mut U,
    lock: Arc<Mutex<T>>,
}
// As long as T: Send, it's fine to send and share Mutex<T> between threads.
// If T was not Send, sending and sharing a Mutex<T> would be bad, since you can
// access T through Mutex<T>.
unsafe impl<T> Send for Mutex<T> where T: ?Sized + Send {}
unsafe impl<T> Sync for Mutex<T> where T: ?Sized + Send {}
// SAFETY: a shared guard hands out references to the protected `T` (see the
// guard docs above), so sharing a guard between threads requires `T: Sync`.
// `T: Send` is also required: the thread that drops the guard releases the
// lock, after which a different thread may gain exclusive access to `T`.
unsafe impl<T> Sync for MutexGuard<'_, T> where T: ?Sized + Send + Sync {}
unsafe impl<T> Sync for OwnedMutexGuard<T> where T: ?Sized + Send + Sync {}
// SAFETY: a mapped guard stores a raw pointer into the protected value; it is
// shared (`Sync`) or moved (`Send`) across threads exactly under the bounds
// the pointed-to type imposes.
unsafe impl<'a, T> Sync for MappedMutexGuard<'a, T> where T: ?Sized + Sync + 'a {}
unsafe impl<'a, T> Send for MappedMutexGuard<'a, T> where T: ?Sized + Send + 'a {}
// SAFETY: as for `MappedMutexGuard`, but both the outer `T` (kept alive via
// the `Arc`) and the mapped `U` must satisfy the bounds.
unsafe impl<T, U> Sync for OwnedMappedMutexGuard<T, U>
where
    T: ?Sized + Send + Sync,
    U: ?Sized + Send + Sync,
{
}
unsafe impl<T, U> Send for OwnedMappedMutexGuard<T, U>
where
    T: ?Sized + Send,
    U: ?Sized + Send,
{
}
/// Error returned from the [`Mutex::try_lock`], [`RwLock::try_read`] and
/// [`RwLock::try_write`] functions.
///
/// `Mutex::try_lock` operation will only fail if the mutex is already locked.
///
/// `RwLock::try_read` operation will only fail if the lock is currently held
/// by an exclusive writer.
///
/// `RwLock::try_write` operation will only fail if the lock is currently held
/// by any reader or by an exclusive writer.
///
/// [`Mutex::try_lock`]: Mutex::try_lock
/// [`RwLock::try_read`]: fn@super::RwLock::try_read
/// [`RwLock::try_write`]: fn@super::RwLock::try_write
#[derive(Debug)]
// The `pub(super)` unit field keeps construction of this error restricted to
// the parent `sync` module.
pub struct TryLockError(pub(super) ());
// User-facing rendering of the error; the message text must stay stable since
// callers may surface it directly.
impl fmt::Display for TryLockError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str("operation would block")
    }
}

// `TryLockError` carries no source error, so the default `Error` methods apply.
impl Error for TryLockError {}
#[test]
#[cfg(not(loom))]
fn bounds() {
    // Compile-time trait assertions: if any of these bounds stop holding,
    // this test fails to compile rather than at runtime.
    fn check_send<T: Send>() {}
    fn check_unpin<T: Unpin>() {}
    // This has to take a value, since the async fn's return type is unnameable.
    fn check_send_sync_val<T: Send + Sync>(_t: T) {}
    fn check_send_sync<T: Send + Sync>() {}
    fn check_static<T: 'static>() {}
    fn check_static_val<T: 'static>(_t: T) {}
    check_send::<MutexGuard<'_, u32>>();
    check_send::<OwnedMutexGuard<u32>>();
    check_unpin::<Mutex<u32>>();
    check_send_sync::<Mutex<u32>>();
    check_static::<OwnedMutexGuard<u32>>();
    let mutex = Mutex::new(1);
    // The `lock()` future itself must be Send + Sync so it can be awaited
    // from a multi-threaded runtime.
    check_send_sync_val(mutex.lock());
    let arc_mutex = Arc::new(Mutex::new(1));
    check_send_sync_val(arc_mutex.clone().lock_owned());
    check_static_val(arc_mutex.lock_owned());
}
impl<T: ?Sized> Mutex<T> {
/// Creates a new lock in an unlocked state ready for use.
///
/// # Examples
///
/// ```
/// use tokio::sync::Mutex;
///
/// let lock = Mutex::new(5);
/// ```
#[track_caller]
pub fn new(t: T) -> Self
where
    T: Sized,
{
    // With the unstable `tracing` feature, record the construction site so
    // the mutex can be identified in tools such as tokio-console.
    #[cfg(all(tokio_unstable, feature = "tracing"))]
    let resource_span = {
        let location = std::panic::Location::caller();
        tracing::trace_span!(
            parent: None,
            "runtime.resource",
            concrete_type = "Mutex",
            kind = "Sync",
            loc.file = location.file(),
            loc.line = location.line(),
            loc.col = location.column(),
        )
    };
    #[cfg(all(tokio_unstable, feature = "tracing"))]
    let s = resource_span.in_scope(|| {
        // Emit the initial (unlocked) state before any task can observe it.
        tracing::trace!(
            target: "runtime::resource::state_update",
            locked = false,
        );
        semaphore::Semaphore::new(1)
    });
    // A single permit: holding the permit is holding the lock.
    #[cfg(any(not(tokio_unstable), not(feature = "tracing")))]
    let s = semaphore::Semaphore::new(1);
    Self {
        c: UnsafeCell::new(t),
        s,
        #[cfg(all(tokio_unstable, feature = "tracing"))]
        resource_span,
    }
}
/// Creates a new lock in an unlocked state ready for use.
///
/// When using the `tracing` [unstable feature], a `Mutex` created with
/// `const_new` will not be instrumented. As such, it will not be visible
/// in [`tokio-console`]. Instead, [`Mutex::new`] should be used to create
/// an instrumented object if that is needed.
///
/// # Examples
///
/// ```
/// use tokio::sync::Mutex;
///
/// static LOCK: Mutex<i32> = Mutex::const_new(5);
/// ```
///
/// [`tokio-console`]: https://github.com/tokio-rs/console
/// [unstable feature]: crate#unstable-features
#[cfg(not(all(loom, test)))]
pub const fn const_new(t: T) -> Self
where
    T: Sized,
{
    Self {
        c: UnsafeCell::new(t),
        // A single permit, as in `new`: the mutex starts unlocked.
        s: semaphore::Semaphore::const_new(1),
        // Const context: no tracing span is created (see the docs above),
        // so this mutex is not instrumented.
        #[cfg(all(tokio_unstable, feature = "tracing"))]
        resource_span: tracing::Span::none(),
    }
}
/// Locks this mutex, causing the current task to yield until the lock has
/// been acquired. When the lock has been acquired, function returns a
/// [`MutexGuard`].
///
/// If the mutex is available to be acquired immediately, then this call
/// will typically not yield to the runtime. However, this is not guaranteed
/// under all circumstances.
///
/// # Cancel safety
///
/// This method uses a queue to fairly distribute locks in the order they
/// were requested. Cancelling a call to `lock` makes you lose your place in
/// the queue.
///
/// # Examples
///
/// ```
/// use tokio::sync::Mutex;
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let mutex = Mutex::new(1);
///
/// let mut n = mutex.lock().await;
/// *n = 2;
/// # }
/// ```
pub async fn lock(&self) -> MutexGuard<'_, T> {
    let acquire_fut = async {
        // Wait for the mutex's single permit; tasks are served in FIFO
        // order (see the type-level docs).
        self.acquire().await;
        MutexGuard {
            lock: self,
            #[cfg(all(tokio_unstable, feature = "tracing"))]
            resource_span: self.resource_span.clone(),
        }
    };
    // When instrumented, wrap the acquire future so each poll is traced.
    #[cfg(all(tokio_unstable, feature = "tracing"))]
    let acquire_fut = trace::async_op(
        move || acquire_fut,
        self.resource_span.clone(),
        "Mutex::lock",
        "poll",
        false,
    );
    #[allow(clippy::let_and_return)] // this lint triggers when disabling tracing
    let guard = acquire_fut.await;
    // Record the locked state for tooling (e.g. tokio-console).
    #[cfg(all(tokio_unstable, feature = "tracing"))]
    self.resource_span.in_scope(|| {
        tracing::trace!(
            target: "runtime::resource::state_update",
            locked = true,
        );
    });
    guard
}
/// Blockingly locks this `Mutex`. When the lock has been acquired, function returns a
/// [`MutexGuard`].
///
/// This method is intended for use cases where you
/// need to use this mutex in asynchronous code as well as in synchronous code.
///
/// # Panics
///
/// This function panics if called within an asynchronous execution context.
///
/// - If you find yourself in an asynchronous execution context and needing
/// to call some (synchronous) function which performs one of these
/// `blocking_` operations, then consider wrapping that call inside
/// [`spawn_blocking()`][crate::runtime::Handle::spawn_blocking]
/// (or [`block_in_place()`][crate::task::block_in_place]).
///
/// # Examples
///
/// ```
/// # #[cfg(not(target_family = "wasm"))]
/// # {
/// use std::sync::Arc;
/// use tokio::sync::Mutex;
///
/// #[tokio::main]
/// async fn main() {
/// let mutex = Arc::new(Mutex::new(1));
/// let lock = mutex.lock().await;
///
/// let mutex1 = Arc::clone(&mutex);
/// let blocking_task = tokio::task::spawn_blocking(move || {
/// // This shall block until the `lock` is released.
/// let mut n = mutex1.blocking_lock();
/// *n = 2;
/// });
///
/// assert_eq!(*lock, 1);
/// // Release the lock.
/// drop(lock);
///
/// // Await the completion of the blocking task.
/// blocking_task.await.unwrap();
///
/// // Assert uncontended.
/// let n = mutex.try_lock().unwrap();
/// assert_eq!(*n, 2);
/// }
/// # }
/// ```
#[track_caller]
#[cfg(feature = "sync")]
#[cfg_attr(docsrs, doc(alias = "lock_blocking"))]
pub fn blocking_lock(&self) -> MutexGuard<'_, T> {
    // Drive the async `lock()` to completion on the current thread.
    // `block_on` panics when called from within an async runtime context,
    // which is the panic documented above.
    crate::future::block_on(self.lock())
}
/// Blockingly locks this `Mutex`. When the lock has been acquired, function returns an
/// [`OwnedMutexGuard`].
///
/// This method is identical to [`Mutex::blocking_lock`], except that the returned
/// guard references the `Mutex` with an [`Arc`] rather than by borrowing
/// it. Therefore, the `Mutex` must be wrapped in an `Arc` to call this
/// method, and the guard will live for the `'static` lifetime, as it keeps
/// the `Mutex` alive by holding an `Arc`.
///
/// # Panics
///
/// This function panics if called within an asynchronous execution context.
///
/// - If you find yourself in an asynchronous execution context and needing
/// to call some (synchronous) function which performs one of these
/// `blocking_` operations, then consider wrapping that call inside
/// [`spawn_blocking()`][crate::runtime::Handle::spawn_blocking]
/// (or [`block_in_place()`][crate::task::block_in_place]).
///
/// # Examples
///
/// ```
/// # #[cfg(not(target_family = "wasm"))]
/// # {
/// use std::sync::Arc;
/// use tokio::sync::Mutex;
///
/// #[tokio::main]
/// async fn main() {
/// let mutex = Arc::new(Mutex::new(1));
/// let lock = mutex.lock().await;
///
/// let mutex1 = Arc::clone(&mutex);
/// let blocking_task = tokio::task::spawn_blocking(move || {
/// // This shall block until the `lock` is released.
/// let mut n = mutex1.blocking_lock_owned();
/// *n = 2;
/// });
///
/// assert_eq!(*lock, 1);
/// // Release the lock.
/// drop(lock);
///
/// // Await the completion of the blocking task.
/// blocking_task.await.unwrap();
///
/// // Assert uncontended.
/// let n = mutex.try_lock().unwrap();
/// assert_eq!(*n, 2);
/// }
/// # }
/// ```
#[track_caller]
#[cfg(feature = "sync")]
pub fn blocking_lock_owned(self: Arc<Self>) -> OwnedMutexGuard<T> {
crate::future::block_on(self.lock_owned())
}
/// Locks this mutex, causing the current task to yield until the lock has
/// been acquired. When the lock has been acquired, this returns an
/// [`OwnedMutexGuard`].
///
/// If the mutex is available to be acquired immediately, then this call
/// will typically not yield to the runtime. However, this is not guaranteed
/// under all circumstances.
///
/// This method is identical to [`Mutex::lock`], except that the returned
/// guard references the `Mutex` with an [`Arc`] rather than by borrowing
/// it. Therefore, the `Mutex` must be wrapped in an `Arc` to call this
/// method, and the guard will live for the `'static` lifetime, as it keeps
/// the `Mutex` alive by holding an `Arc`.
///
/// # Cancel safety
///
/// This method uses a queue to fairly distribute locks in the order they
/// were requested. Cancelling a call to `lock_owned` makes you lose your
/// place in the queue.
///
/// # Examples
///
/// ```
/// use tokio::sync::Mutex;
/// use std::sync::Arc;
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let mutex = Arc::new(Mutex::new(1));
///
/// let mut n = mutex.clone().lock_owned().await;
/// *n = 2;
/// # }
/// ```
///
/// [`Arc`]: std::sync::Arc
    pub async fn lock_owned(self: Arc<Self>) -> OwnedMutexGuard<T> {
        #[cfg(all(tokio_unstable, feature = "tracing"))]
        let resource_span = self.resource_span.clone();
        // Acquire the single semaphore permit, then move the `Arc` into the
        // guard so the guard keeps the mutex alive for `'static`.
        let acquire_fut = async {
            self.acquire().await;
            OwnedMutexGuard {
                #[cfg(all(tokio_unstable, feature = "tracing"))]
                resource_span: self.resource_span.clone(),
                lock: self,
            }
        };
        // With tracing enabled, wrap the acquire future so polls are recorded
        // against this mutex's resource span.
        #[cfg(all(tokio_unstable, feature = "tracing"))]
        let acquire_fut = trace::async_op(
            move || acquire_fut,
            resource_span,
            "Mutex::lock_owned",
            "poll",
            false,
        );
        #[allow(clippy::let_and_return)] // this lint triggers when disabling tracing
        let guard = acquire_fut.await;
        // Record the state transition only after the lock is actually held.
        #[cfg(all(tokio_unstable, feature = "tracing"))]
        guard.resource_span.in_scope(|| {
            tracing::trace!(
                target: "runtime::resource::state_update",
                locked = true,
            );
        });
        guard
    }
async fn acquire(&self) {
crate::trace::async_trace_leaf().await;
self.s.acquire(1).await.unwrap_or_else(|_| {
// The semaphore was closed. but, we never explicitly close it, and
// we own it exclusively, which means that this can never happen.
unreachable!()
});
}
/// Attempts to acquire the lock, and returns [`TryLockError`] if the
/// lock is currently held somewhere else.
///
/// [`TryLockError`]: TryLockError
/// # Examples
///
/// ```
/// use tokio::sync::Mutex;
/// # async fn dox() -> Result<(), tokio::sync::TryLockError> {
///
/// let mutex = Mutex::new(1);
///
/// let n = mutex.try_lock()?;
/// assert_eq!(*n, 1);
/// # Ok(())
/// # }
/// ```
pub fn try_lock(&self) -> Result<MutexGuard<'_, T>, TryLockError> {
match self.s.try_acquire(1) {
Ok(()) => {
let guard = MutexGuard {
lock: self,
#[cfg(all(tokio_unstable, feature = "tracing"))]
resource_span: self.resource_span.clone(),
};
#[cfg(all(tokio_unstable, feature = "tracing"))]
self.resource_span.in_scope(|| {
tracing::trace!(
target: "runtime::resource::state_update",
locked = true,
);
});
Ok(guard)
}
Err(_) => Err(TryLockError(())),
}
}
/// Returns a mutable reference to the underlying data.
///
/// Since this call borrows the `Mutex` mutably, no actual locking needs to
/// take place -- the mutable borrow statically guarantees no locks exist.
///
/// # Examples
///
/// ```
/// use tokio::sync::Mutex;
///
/// fn main() {
/// let mut mutex = Mutex::new(1);
///
/// let n = mutex.get_mut();
/// *n = 2;
/// }
/// ```
    pub fn get_mut(&mut self) -> &mut T {
        // `&mut self` guarantees exclusive access, so no locking is needed.
        self.c.get_mut()
    }
/// Attempts to acquire the lock, and returns [`TryLockError`] if the lock
/// is currently held somewhere else.
///
/// This method is identical to [`Mutex::try_lock`], except that the
/// returned guard references the `Mutex` with an [`Arc`] rather than by
/// borrowing it. Therefore, the `Mutex` must be wrapped in an `Arc` to call
/// this method, and the guard will live for the `'static` lifetime, as it
/// keeps the `Mutex` alive by holding an `Arc`.
///
/// [`TryLockError`]: TryLockError
/// [`Arc`]: std::sync::Arc
/// # Examples
///
/// ```
/// use tokio::sync::Mutex;
/// use std::sync::Arc;
/// # async fn dox() -> Result<(), tokio::sync::TryLockError> {
///
/// let mutex = Arc::new(Mutex::new(1));
///
/// let n = mutex.clone().try_lock_owned()?;
/// assert_eq!(*n, 1);
/// # Ok(())
/// # }
pub fn try_lock_owned(self: Arc<Self>) -> Result<OwnedMutexGuard<T>, TryLockError> {
match self.s.try_acquire(1) {
Ok(()) => {
let guard = OwnedMutexGuard {
#[cfg(all(tokio_unstable, feature = "tracing"))]
resource_span: self.resource_span.clone(),
lock: self,
};
#[cfg(all(tokio_unstable, feature = "tracing"))]
guard.resource_span.in_scope(|| {
tracing::trace!(
target: "runtime::resource::state_update",
locked = true,
);
});
Ok(guard)
}
Err(_) => Err(TryLockError(())),
}
}
/// Consumes the mutex, returning the underlying data.
/// # Examples
///
/// ```
/// use tokio::sync::Mutex;
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let mutex = Mutex::new(1);
///
/// let n = mutex.into_inner();
/// assert_eq!(n, 1);
/// # }
/// ```
    pub fn into_inner(self) -> T
    where
        T: Sized,
    {
        // Consuming the mutex by value proves no guards can exist, so the
        // inner cell can be unwrapped without locking.
        self.c.into_inner()
    }
}
impl<T> From<T> for Mutex<T> {
fn from(s: T) -> Self {
Self::new(s)
}
}
impl<T> Default for Mutex<T>
where
T: Default,
{
fn default() -> Self {
Self::new(T::default())
}
}
impl<T: ?Sized> std::fmt::Debug for Mutex<T>
where
    T: std::fmt::Debug,
{
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut builder = f.debug_struct("Mutex");
        // Show the data only if the lock can be taken without waiting;
        // otherwise print a placeholder instead of blocking.
        if let Ok(inner) = self.try_lock() {
            builder.field("data", &&*inner);
        } else {
            builder.field("data", &format_args!("<locked>"));
        }
        builder.finish()
    }
}
// === impl MutexGuard ===
impl<'a, T: ?Sized> MutexGuard<'a, T> {
    // Disassembles the guard without running its `Drop` impl, so the
    // semaphore permit is NOT released; ownership of the lock transfers to
    // the returned `MutexGuardInner`.
    fn skip_drop(self) -> MutexGuardInner<'a, T> {
        let me = mem::ManuallyDrop::new(self);
        // SAFETY: This duplicates the `resource_span` and then forgets the
        // original. In the end, we have not duplicated or forgotten any values.
        MutexGuardInner {
            #[cfg(all(tokio_unstable, feature = "tracing"))]
            resource_span: unsafe { std::ptr::read(&me.resource_span) },
            lock: me.lock,
        }
    }
/// Makes a new [`MappedMutexGuard`] for a component of the locked data.
///
/// This operation cannot fail as the [`MutexGuard`] passed in already locked the mutex.
///
/// This is an associated function that needs to be used as `MutexGuard::map(...)`. A method
/// would interfere with methods of the same name on the contents of the locked data.
///
/// # Examples
///
/// ```
/// use tokio::sync::{Mutex, MutexGuard};
///
/// #[derive(Debug, Clone, Copy, PartialEq, Eq)]
/// struct Foo(u32);
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let foo = Mutex::new(Foo(1));
///
/// {
/// let mut mapped = MutexGuard::map(foo.lock().await, |f| &mut f.0);
/// *mapped = 2;
/// }
///
/// assert_eq!(Foo(2), *foo.lock().await);
/// # }
/// ```
///
/// [`MutexGuard`]: struct@MutexGuard
/// [`MappedMutexGuard`]: struct@MappedMutexGuard
#[inline]
pub fn map<U, F>(mut this: Self, f: F) -> MappedMutexGuard<'a, U>
where
U: ?Sized,
F: FnOnce(&mut T) -> &mut U,
{
let data = f(&mut *this) as *mut U;
let inner = this.skip_drop();
MappedMutexGuard {
s: &inner.lock.s,
data,
marker: PhantomData,
#[cfg(all(tokio_unstable, feature = "tracing"))]
resource_span: inner.resource_span,
}
}
/// Attempts to make a new [`MappedMutexGuard`] for a component of the locked data. The
/// original guard is returned if the closure returns `None`.
///
/// This operation cannot fail as the [`MutexGuard`] passed in already locked the mutex.
///
/// This is an associated function that needs to be used as `MutexGuard::try_map(...)`. A
/// method would interfere with methods of the same name on the contents of the locked data.
///
/// # Examples
///
/// ```
/// use tokio::sync::{Mutex, MutexGuard};
///
/// #[derive(Debug, Clone, Copy, PartialEq, Eq)]
/// struct Foo(u32);
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let foo = Mutex::new(Foo(1));
///
/// {
/// let mut mapped = MutexGuard::try_map(foo.lock().await, |f| Some(&mut f.0))
/// .expect("should not fail");
/// *mapped = 2;
/// }
///
/// assert_eq!(Foo(2), *foo.lock().await);
/// # }
/// ```
///
/// [`MutexGuard`]: struct@MutexGuard
/// [`MappedMutexGuard`]: struct@MappedMutexGuard
#[inline]
pub fn try_map<U, F>(mut this: Self, f: F) -> Result<MappedMutexGuard<'a, U>, Self>
where
U: ?Sized,
F: FnOnce(&mut T) -> Option<&mut U>,
{
let data = match f(&mut *this) {
Some(data) => data as *mut U,
None => return Err(this),
};
let inner = this.skip_drop();
Ok(MappedMutexGuard {
s: &inner.lock.s,
data,
marker: PhantomData,
#[cfg(all(tokio_unstable, feature = "tracing"))]
resource_span: inner.resource_span,
})
}
/// Returns a reference to the original `Mutex`.
///
/// ```
/// use tokio::sync::{Mutex, MutexGuard};
///
/// async fn unlock_and_relock<'l>(guard: MutexGuard<'l, u32>) -> MutexGuard<'l, u32> {
/// println!("1. contains: {:?}", *guard);
/// let mutex = MutexGuard::mutex(&guard);
/// drop(guard);
/// let guard = mutex.lock().await;
/// println!("2. contains: {:?}", *guard);
/// guard
/// }
/// #
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// # let mutex = Mutex::new(0u32);
/// # let guard = mutex.lock().await;
/// # let _guard = unlock_and_relock(guard).await;
/// # }
/// ```
    #[inline]
    pub fn mutex(this: &Self) -> &'a Mutex<T> {
        // The guard borrows the mutex for `'a`, so the returned reference may
        // outlive `this` itself.
        this.lock
    }
}
impl<T: ?Sized> Drop for MutexGuard<'_, T> {
    fn drop(&mut self) {
        // Returning the single permit to the semaphore is what unlocks the
        // mutex and allows the next waiter to proceed.
        self.lock.s.release(1);
        #[cfg(all(tokio_unstable, feature = "tracing"))]
        self.resource_span.in_scope(|| {
            tracing::trace!(
                target: "runtime::resource::state_update",
                locked = false,
            );
        });
    }
}
impl<T: ?Sized> Deref for MutexGuard<'_, T> {
type Target = T;
fn deref(&self) -> &Self::Target {
unsafe { &*self.lock.c.get() }
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | true |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/sync/once_cell.rs | tokio/src/sync/once_cell.rs | use super::{Semaphore, SemaphorePermit, TryAcquireError};
use crate::loom::cell::UnsafeCell;
use std::error::Error;
use std::fmt;
use std::future::Future;
use std::mem::MaybeUninit;
use std::ops::Drop;
use std::ptr;
use std::sync::atomic::{AtomicBool, Ordering};
// This file contains an implementation of an OnceCell. The principle
// behind the safety of the cell is that any thread with an `&OnceCell` may
// access the `value` field according the following rules:
//
// 1. When `value_set` is false, the `value` field may be modified by the
// thread holding the permit on the semaphore.
// 2. When `value_set` is true, the `value` field may be accessed immutably by
// any thread.
//
// It is an invariant that if the semaphore is closed, then `value_set` is true.
// The reverse does not necessarily hold — but if not, the semaphore may not
// have any available permits.
//
// A thread with a `&mut OnceCell` may modify the value in any way it wants as
// long as the invariants are upheld.
/// A thread-safe cell that can be written to only once.
///
/// A `OnceCell` is typically used for global variables that need to be
/// initialized once on first use, but need no further changes. The `OnceCell`
/// in Tokio allows the initialization procedure to be asynchronous.
///
/// # Examples
///
/// ```
/// use tokio::sync::OnceCell;
///
/// async fn some_computation() -> u32 {
/// 1 + 1
/// }
///
/// static ONCE: OnceCell<u32> = OnceCell::const_new();
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let result = ONCE.get_or_init(some_computation).await;
/// assert_eq!(*result, 2);
/// # }
/// ```
///
/// It is often useful to write a wrapper method for accessing the value.
///
/// ```
/// use tokio::sync::OnceCell;
///
/// static ONCE: OnceCell<u32> = OnceCell::const_new();
///
/// async fn get_global_integer() -> &'static u32 {
/// ONCE.get_or_init(|| async {
/// 1 + 1
/// }).await
/// }
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let result = get_global_integer().await;
/// assert_eq!(*result, 2);
/// # }
/// ```
pub struct OnceCell<T> {
    // True once `value` has been written; loaded with `Acquire` so readers
    // that observe `true` also observe the initialized data.
    value_set: AtomicBool,
    // Storage for the value; only valid while `value_set` is true.
    value: UnsafeCell<MaybeUninit<T>>,
    // Guards initialization: holds one permit while the cell is empty and is
    // closed once the value has been set.
    semaphore: Semaphore,
}
impl<T> Default for OnceCell<T> {
fn default() -> OnceCell<T> {
OnceCell::new()
}
}
impl<T: fmt::Debug> fmt::Debug for OnceCell<T> {
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        // `get()` yields `Option<&T>`, so an empty cell prints as `None`.
        let mut builder = fmt.debug_struct("OnceCell");
        builder.field("value", &self.get());
        builder.finish()
    }
}
impl<T: Clone> Clone for OnceCell<T> {
    fn clone(&self) -> OnceCell<T> {
        // The clone is initialized exactly when `self` is, holding a clone of
        // the contained value.
        let value = self.get().cloned();
        OnceCell::new_with(value)
    }
}
impl<T: PartialEq> PartialEq for OnceCell<T> {
    fn eq(&self, other: &OnceCell<T>) -> bool {
        // Equal when both cells are empty, or both hold equal values.
        self.get().eq(&other.get())
    }
}
impl<T: Eq> Eq for OnceCell<T> {}
impl<T> Drop for OnceCell<T> {
    fn drop(&mut self) {
        // Only drop the inner value if it was actually written; otherwise the
        // `MaybeUninit` holds uninitialized memory that must not be touched.
        if self.initialized_mut() {
            unsafe {
                self.value
                    .with_mut(|ptr| ptr::drop_in_place((*ptr).as_mut_ptr()));
            };
        }
    }
}
impl<T> From<T> for OnceCell<T> {
    fn from(value: T) -> Self {
        // Construct a cell that starts fully initialized: the flag is already
        // set and the semaphore is closed, so no task can ever acquire the
        // initialization permit.
        OnceCell {
            value_set: AtomicBool::new(true),
            value: UnsafeCell::new(MaybeUninit::new(value)),
            semaphore: Semaphore::new_closed(),
        }
    }
}
impl<T> OnceCell<T> {
    /// Creates a new empty `OnceCell` instance.
    pub fn new() -> Self {
        OnceCell {
            value_set: AtomicBool::new(false),
            value: UnsafeCell::new(MaybeUninit::uninit()),
            // One permit: whichever task acquires it gets to initialize.
            semaphore: Semaphore::new(1),
        }
    }
/// Creates a new empty `OnceCell` instance.
///
/// Equivalent to `OnceCell::new`, except that it can be used in static
/// variables.
///
/// When using the `tracing` [unstable feature], a `OnceCell` created with
/// `const_new` will not be instrumented. As such, it will not be visible
/// in [`tokio-console`]. Instead, [`OnceCell::new`] should be used to
/// create an instrumented object if that is needed.
///
/// # Example
///
/// ```
/// use tokio::sync::OnceCell;
///
/// static ONCE: OnceCell<u32> = OnceCell::const_new();
///
/// async fn get_global_integer() -> &'static u32 {
/// ONCE.get_or_init(|| async {
/// 1 + 1
/// }).await
/// }
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let result = get_global_integer().await;
/// assert_eq!(*result, 2);
/// # }
/// ```
///
/// [`tokio-console`]: https://github.com/tokio-rs/console
/// [unstable feature]: crate#unstable-features
    #[cfg(not(all(loom, test)))]
    pub const fn const_new() -> Self {
        OnceCell {
            value_set: AtomicBool::new(false),
            value: UnsafeCell::new(MaybeUninit::uninit()),
            // One permit: whichever task acquires it gets to initialize.
            semaphore: Semaphore::const_new(1),
        }
    }
/// Creates a new `OnceCell` that contains the provided value, if any.
///
/// If the `Option` is `None`, this is equivalent to `OnceCell::new`.
///
/// [`OnceCell::new`]: crate::sync::OnceCell::new
// Once https://github.com/rust-lang/rust/issues/73255 lands
// and tokio MSRV is bumped to the rustc version with it stabilised,
// we can make this function available in const context,
// by creating `Semaphore::const_new_closed`.
pub fn new_with(value: Option<T>) -> Self {
if let Some(v) = value {
OnceCell::from(v)
} else {
OnceCell::new()
}
}
/// Creates a new `OnceCell` that contains the provided value.
///
/// # Example
///
/// When using the `tracing` [unstable feature], a `OnceCell` created with
/// `const_new_with` will not be instrumented. As such, it will not be
/// visible in [`tokio-console`]. Instead, [`OnceCell::new_with`] should be
/// used to create an instrumented object if that is needed.
///
/// ```
/// use tokio::sync::OnceCell;
///
/// static ONCE: OnceCell<u32> = OnceCell::const_new_with(1);
///
/// async fn get_global_integer() -> &'static u32 {
/// ONCE.get_or_init(|| async {
/// 1 + 1
/// }).await
/// }
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let result = get_global_integer().await;
/// assert_eq!(*result, 1);
/// # }
/// ```
///
/// [`tokio-console`]: https://github.com/tokio-rs/console
/// [unstable feature]: crate#unstable-features
    #[cfg(not(all(loom, test)))]
    pub const fn const_new_with(value: T) -> Self {
        OnceCell {
            value_set: AtomicBool::new(true),
            value: UnsafeCell::new(MaybeUninit::new(value)),
            // Closed semaphore: no task can ever acquire the init permit.
            semaphore: Semaphore::const_new_closed(),
        }
    }
    /// Returns `true` if the `OnceCell` currently contains a value, and `false`
    /// otherwise.
    pub fn initialized(&self) -> bool {
        // Using acquire ordering so any threads that read a true from this
        // atomic is able to read the value.
        self.value_set.load(Ordering::Acquire)
    }
    /// Returns `true` if the `OnceCell` currently contains a value, and `false`
    /// otherwise.
    fn initialized_mut(&mut self) -> bool {
        // `&mut self` gives exclusive access, so a plain (non-atomic) read of
        // the flag suffices.
        *self.value_set.get_mut()
    }
    // SAFETY: The OnceCell must not be empty.
    unsafe fn get_unchecked(&self) -> &T {
        unsafe { &*self.value.with(|ptr| (*ptr).as_ptr()) }
    }
    // SAFETY: The OnceCell must not be empty.
    unsafe fn get_unchecked_mut(&mut self) -> &mut T {
        // SAFETY:
        //
        // 1. The caller guarantees that the OnceCell is initialized.
        // 2. The `&mut self` guarantees that there are no other references to the value.
        unsafe { &mut *self.value.with_mut(|ptr| (*ptr).as_mut_ptr()) }
    }
    // Writes `value` into the cell and publishes it. The caller must hold the
    // semaphore's only permit, which is consumed here.
    fn set_value(&self, value: T, permit: SemaphorePermit<'_>) -> &T {
        // SAFETY: We are holding the only permit on the semaphore.
        unsafe {
            self.value.with_mut(|ptr| (*ptr).as_mut_ptr().write(value));
        }
        // Using release ordering so any threads that read a true from this
        // atomic is able to read the value we just stored.
        self.value_set.store(true, Ordering::Release);
        // Closing the semaphore signals "initialized" to all waiters (see the
        // invariant comment at the top of this file).
        self.semaphore.close();
        // Forget (rather than release) the permit so it is never returned.
        permit.forget();
        // SAFETY: We just initialized the cell.
        unsafe { self.get_unchecked() }
    }
/// Returns a reference to the value currently stored in the `OnceCell`, or
/// `None` if the `OnceCell` is empty.
pub fn get(&self) -> Option<&T> {
if self.initialized() {
Some(unsafe { self.get_unchecked() })
} else {
None
}
}
/// Returns a mutable reference to the value currently stored in the
/// `OnceCell`, or `None` if the `OnceCell` is empty.
///
/// Since this call borrows the `OnceCell` mutably, it is safe to mutate the
/// value inside the `OnceCell` — the mutable borrow statically guarantees
/// no other references exist.
pub fn get_mut(&mut self) -> Option<&mut T> {
if self.initialized_mut() {
Some(unsafe { self.get_unchecked_mut() })
} else {
None
}
}
    /// Sets the value of the `OnceCell` to the given value if the `OnceCell` is
    /// empty.
    ///
    /// If the `OnceCell` already has a value, this call will fail with an
    /// [`SetError::AlreadyInitializedError`].
    ///
    /// If the `OnceCell` is empty, but some other task is currently trying to
    /// set the value, this call will fail with [`SetError::InitializingError`].
    ///
    /// [`SetError::AlreadyInitializedError`]: crate::sync::SetError::AlreadyInitializedError
    /// [`SetError::InitializingError`]: crate::sync::SetError::InitializingError
    pub fn set(&self, value: T) -> Result<(), SetError<T>> {
        // Fast path: the cell has already been initialized.
        if self.initialized() {
            return Err(SetError::AlreadyInitializedError(value));
        }
        // Another task might be initializing the cell, in which case
        // `try_acquire` will return an error. If we succeed to acquire the
        // permit, then we can set the value.
        match self.semaphore.try_acquire() {
            Ok(permit) => {
                debug_assert!(!self.initialized());
                self.set_value(value, permit);
                Ok(())
            }
            Err(TryAcquireError::NoPermits) => {
                // Some other task is holding the permit. That task is
                // currently trying to initialize the value.
                Err(SetError::InitializingError(value))
            }
            Err(TryAcquireError::Closed) => {
                // The semaphore was closed. Some other task has initialized
                // the value.
                Err(SetError::AlreadyInitializedError(value))
            }
        }
    }
    /// Gets the value currently in the `OnceCell`, or initialize it with the
    /// given asynchronous operation.
    ///
    /// If some other task is currently working on initializing the `OnceCell`,
    /// this call will wait for that other task to finish, then return the value
    /// that the other task produced.
    ///
    /// If the provided operation is cancelled or panics, the initialization
    /// attempt is cancelled. If there are other tasks waiting for the value to
    /// be initialized, one of them will start another attempt at initializing
    /// the value.
    ///
    /// This will deadlock if `f` tries to initialize the cell recursively.
    pub async fn get_or_init<F, Fut>(&self, f: F) -> &T
    where
        F: FnOnce() -> Fut,
        Fut: Future<Output = T>,
    {
        crate::trace::async_trace_leaf().await;
        // Fast path: the cell is already initialized, so the value can be
        // returned without touching the semaphore.
        if self.initialized() {
            // SAFETY: The OnceCell has been fully initialized.
            unsafe { self.get_unchecked() }
        } else {
            // Here we try to acquire the semaphore permit. Holding the permit
            // will allow us to set the value of the OnceCell, and prevents
            // other tasks from initializing the OnceCell while we are holding
            // it.
            match self.semaphore.acquire().await {
                Ok(permit) => {
                    debug_assert!(!self.initialized());
                    // If `f()` panics or `select!` is called, this
                    // `get_or_init` call is aborted and the semaphore permit is
                    // dropped.
                    let value = f().await;
                    self.set_value(value, permit)
                }
                Err(_) => {
                    debug_assert!(self.initialized());
                    // SAFETY: The semaphore has been closed. This only happens
                    // when the OnceCell is fully initialized.
                    unsafe { self.get_unchecked() }
                }
            }
        }
    }
    /// Gets the value currently in the `OnceCell`, or initialize it with the
    /// given asynchronous operation.
    ///
    /// If some other task is currently working on initializing the `OnceCell`,
    /// this call will wait for that other task to finish, then return the value
    /// that the other task produced.
    ///
    /// If the provided operation returns an error, is cancelled or panics, the
    /// initialization attempt is cancelled. If there are other tasks waiting
    /// for the value to be initialized, one of them will start another attempt
    /// at initializing the value.
    ///
    /// This will deadlock if `f` tries to initialize the cell recursively.
    pub async fn get_or_try_init<E, F, Fut>(&self, f: F) -> Result<&T, E>
    where
        F: FnOnce() -> Fut,
        Fut: Future<Output = Result<T, E>>,
    {
        crate::trace::async_trace_leaf().await;
        // Fast path: the cell is already initialized, so the value can be
        // returned without touching the semaphore.
        if self.initialized() {
            // SAFETY: The OnceCell has been fully initialized.
            unsafe { Ok(self.get_unchecked()) }
        } else {
            // Here we try to acquire the semaphore permit. Holding the permit
            // will allow us to set the value of the OnceCell, and prevents
            // other tasks from initializing the OnceCell while we are holding
            // it.
            match self.semaphore.acquire().await {
                Ok(permit) => {
                    debug_assert!(!self.initialized());
                    // If `f()` panics or `select!` is called, this
                    // `get_or_try_init` call is aborted and the semaphore
                    // permit is dropped.
                    let value = f().await;
                    match value {
                        Ok(value) => Ok(self.set_value(value, permit)),
                        Err(e) => Err(e),
                    }
                }
                Err(_) => {
                    debug_assert!(self.initialized());
                    // SAFETY: The semaphore has been closed. This only happens
                    // when the OnceCell is fully initialized.
                    unsafe { Ok(self.get_unchecked()) }
                }
            }
        }
    }
    /// Takes the value from the cell, destroying the cell in the process.
    /// Returns `None` if the cell is empty.
    pub fn into_inner(mut self) -> Option<T> {
        if self.initialized_mut() {
            // Set to uninitialized for the destructor of `OnceCell` to work properly
            *self.value_set.get_mut() = false;
            // SAFETY: the flag was true, so the value was written; clearing
            // the flag above prevents `Drop` from freeing it a second time.
            Some(unsafe { self.value.with(|ptr| ptr::read(ptr).assume_init()) })
        } else {
            None
        }
    }
/// Takes ownership of the current value, leaving the cell empty. Returns
/// `None` if the cell is empty.
pub fn take(&mut self) -> Option<T> {
std::mem::take(self).into_inner()
}
}
// Since `get` gives us access to immutable references of the OnceCell, OnceCell
// can only be Sync if T is Sync, otherwise OnceCell would allow sharing
// references of !Sync values across threads. We need T to be Send in order for
// OnceCell to be Sync because we can use `set` on `&OnceCell<T>` to send values
// (of type T) across threads.
unsafe impl<T: Sync + Send> Sync for OnceCell<T> {}
// Access to OnceCell's value is guarded by the semaphore permit
// and atomic operations on `value_set`, so as long as T itself is Send
// it's safe to send it to another thread
unsafe impl<T: Send> Send for OnceCell<T> {}
/// Errors that can be returned from [`OnceCell::set`].
///
/// [`OnceCell::set`]: crate::sync::OnceCell::set
#[derive(Debug, PartialEq, Eq)]
pub enum SetError<T> {
    /// The cell was already initialized when [`OnceCell::set`] was called.
    ///
    /// [`OnceCell::set`]: crate::sync::OnceCell::set
    AlreadyInitializedError(T),
    /// The cell is currently being initialized.
    InitializingError(T),
}
impl<T> fmt::Display for SetError<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Render just the variant name; the payload is intentionally omitted.
        let name = match self {
            SetError::AlreadyInitializedError(_) => "AlreadyInitializedError",
            SetError::InitializingError(_) => "InitializingError",
        };
        f.write_str(name)
    }
}
impl<T: fmt::Debug> Error for SetError<T> {}
impl<T> SetError<T> {
    /// Whether `SetError` is `SetError::AlreadyInitializedError`.
    pub fn is_already_init_err(&self) -> bool {
        matches!(self, SetError::AlreadyInitializedError(_))
    }
    /// Whether `SetError` is `SetError::InitializingError`
    pub fn is_initializing_err(&self) -> bool {
        matches!(self, SetError::InitializingError(_))
    }
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/sync/broadcast.rs | tokio/src/sync/broadcast.rs | //! A multi-producer, multi-consumer broadcast queue. Each sent value is seen by
//! all consumers.
//!
//! A [`Sender`] is used to broadcast values to **all** connected [`Receiver`]
//! values. [`Sender`] handles are clone-able, allowing concurrent send and
//! receive actions. [`Sender`] and [`Receiver`] are both `Send` and `Sync` as
//! long as `T` is `Send`.
//!
//! When a value is sent, **all** [`Receiver`] handles are notified and will
//! receive the value. The value is stored once inside the channel and cloned on
//! demand for each receiver. Once all receivers have received a clone of the
//! value, the value is released from the channel.
//!
//! A channel is created by calling [`channel`], specifying the maximum number
//! of messages the channel can retain at any given time.
//!
//! New [`Receiver`] handles are created by calling [`Sender::subscribe`]. The
//! returned [`Receiver`] will receive values sent **after** the call to
//! `subscribe`.
//!
//! This channel is also suitable for the single-producer multi-consumer
//! use-case, where a single sender broadcasts values to many receivers.
//!
//! ## Lagging
//!
//! As sent messages must be retained until **all** [`Receiver`] handles receive
//! a clone, broadcast channels are susceptible to the "slow receiver" problem.
//! In this case, all but one receiver are able to receive values at the rate
//! they are sent. Because one receiver is stalled, the channel starts to fill
//! up.
//!
//! This broadcast channel implementation handles this case by setting a hard
//! upper bound on the number of values the channel may retain at any given
//! time. This upper bound is passed to the [`channel`] function as an argument.
//!
//! If a value is sent when the channel is at capacity, the oldest value
//! currently held by the channel is released. This frees up space for the new
//! value. Any receiver that has not yet seen the released value will return
//! [`RecvError::Lagged`] the next time [`recv`] is called.
//!
//! Once [`RecvError::Lagged`] is returned, the lagging receiver's position is
//! updated to the oldest value contained by the channel. The next call to
//! [`recv`] will return this value.
//!
//! This behavior enables a receiver to detect when it has lagged so far behind
//! that data has been dropped. The caller may decide how to respond to this:
//! either by aborting its task or by tolerating lost messages and resuming
//! consumption of the channel.
//!
//! ## Closing
//!
//! When **all** [`Sender`] handles have been dropped, no new values may be
//! sent. At this point, the channel is "closed". Once a receiver has received
//! all values retained by the channel, the next call to [`recv`] will return
//! with [`RecvError::Closed`].
//!
//! When a [`Receiver`] handle is dropped, any messages not read by the receiver
//! will be marked as read. If this receiver was the only one not to have read
//! that message, the message will be dropped at this point.
//!
//! [`Sender`]: crate::sync::broadcast::Sender
//! [`Sender::subscribe`]: crate::sync::broadcast::Sender::subscribe
//! [`Receiver`]: crate::sync::broadcast::Receiver
//! [`channel`]: crate::sync::broadcast::channel
//! [`RecvError::Lagged`]: crate::sync::broadcast::error::RecvError::Lagged
//! [`RecvError::Closed`]: crate::sync::broadcast::error::RecvError::Closed
//! [`recv`]: crate::sync::broadcast::Receiver::recv
//!
//! # Examples
//!
//! Basic usage
//!
//! ```
//! use tokio::sync::broadcast;
//!
//! # #[tokio::main(flavor = "current_thread")]
//! # async fn main() {
//! let (tx, mut rx1) = broadcast::channel(16);
//! let mut rx2 = tx.subscribe();
//!
//! tokio::spawn(async move {
//! assert_eq!(rx1.recv().await.unwrap(), 10);
//! assert_eq!(rx1.recv().await.unwrap(), 20);
//! });
//!
//! tokio::spawn(async move {
//! assert_eq!(rx2.recv().await.unwrap(), 10);
//! assert_eq!(rx2.recv().await.unwrap(), 20);
//! });
//!
//! tx.send(10).unwrap();
//! tx.send(20).unwrap();
//! # }
//! ```
//!
//! Handling lag
//!
//! ```
//! use tokio::sync::broadcast;
//!
//! # #[tokio::main(flavor = "current_thread")]
//! # async fn main() {
//! let (tx, mut rx) = broadcast::channel(2);
//!
//! tx.send(10).unwrap();
//! tx.send(20).unwrap();
//! tx.send(30).unwrap();
//!
//! // The receiver lagged behind
//! assert!(rx.recv().await.is_err());
//!
//! // At this point, we can abort or continue with lost messages
//!
//! assert_eq!(20, rx.recv().await.unwrap());
//! assert_eq!(30, rx.recv().await.unwrap());
//! # }
//! ```
use crate::loom::cell::UnsafeCell;
use crate::loom::sync::atomic::{AtomicBool, AtomicUsize};
use crate::loom::sync::{Arc, Mutex, MutexGuard};
use crate::task::coop::cooperative;
use crate::util::linked_list::{self, GuardedLinkedList, LinkedList};
use crate::util::WakeList;
use std::fmt;
use std::future::Future;
use std::marker::PhantomPinned;
use std::pin::Pin;
use std::ptr::NonNull;
use std::sync::atomic::Ordering::{AcqRel, Acquire, Relaxed, Release, SeqCst};
use std::task::{ready, Context, Poll, Waker};
/// Sending-half of the [`broadcast`] channel.
///
/// May be used from many threads. Messages can be sent with
/// [`send`][Sender::send].
///
/// # Examples
///
/// ```
/// use tokio::sync::broadcast;
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let (tx, mut rx1) = broadcast::channel(16);
/// let mut rx2 = tx.subscribe();
///
/// tokio::spawn(async move {
///     assert_eq!(rx1.recv().await.unwrap(), 10);
///     assert_eq!(rx1.recv().await.unwrap(), 20);
/// });
///
/// tokio::spawn(async move {
///     assert_eq!(rx2.recv().await.unwrap(), 10);
///     assert_eq!(rx2.recv().await.unwrap(), 20);
/// });
///
/// tx.send(10).unwrap();
/// tx.send(20).unwrap();
/// # }
/// ```
///
/// [`broadcast`]: crate::sync::broadcast
pub struct Sender<T> {
    /// Channel state shared with every other sender and receiver handle of
    /// this channel.
    shared: Arc<Shared<T>>,
}
/// A sender that does not prevent the channel from being closed.
///
/// If all [`Sender`] instances of a channel were dropped and only `WeakSender`
/// instances remain, the channel is closed.
///
/// In order to send messages, the `WeakSender` needs to be upgraded using
/// [`WeakSender::upgrade`], which returns `Option<Sender>`. It returns `None`
/// if all `Sender`s have been dropped, and otherwise it returns a `Sender`.
///
/// [`Sender`]: Sender
/// [`WeakSender::upgrade`]: WeakSender::upgrade
///
/// # Examples
///
/// ```
/// use tokio::sync::broadcast::channel;
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let (tx, _rx) = channel::<i32>(15);
/// let tx_weak = tx.downgrade();
///
/// // Upgrading will succeed because `tx` still exists.
/// assert!(tx_weak.upgrade().is_some());
///
/// // If we drop `tx`, then it will fail.
/// drop(tx);
/// assert!(tx_weak.clone().upgrade().is_none());
/// # }
/// ```
pub struct WeakSender<T> {
    /// Shared channel state. Weak senders are counted separately (via
    /// `num_weak_tx`, see [`Sender::downgrade`]) and do not keep the
    /// channel open.
    shared: Arc<Shared<T>>,
}
/// Receiving-half of the [`broadcast`] channel.
///
/// Must not be used concurrently. Messages may be retrieved using
/// [`recv`][Receiver::recv].
///
/// To turn this receiver into a `Stream`, you can use the [`BroadcastStream`]
/// wrapper.
///
/// [`BroadcastStream`]: https://docs.rs/tokio-stream/0.1/tokio_stream/wrappers/struct.BroadcastStream.html
///
/// # Examples
///
/// ```
/// use tokio::sync::broadcast;
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let (tx, mut rx1) = broadcast::channel(16);
/// let mut rx2 = tx.subscribe();
///
/// tokio::spawn(async move {
///     assert_eq!(rx1.recv().await.unwrap(), 10);
///     assert_eq!(rx1.recv().await.unwrap(), 20);
/// });
///
/// tokio::spawn(async move {
///     assert_eq!(rx2.recv().await.unwrap(), 10);
///     assert_eq!(rx2.recv().await.unwrap(), 20);
/// });
///
/// tx.send(10).unwrap();
/// tx.send(20).unwrap();
/// # }
/// ```
///
/// [`broadcast`]: crate::sync::broadcast
pub struct Receiver<T> {
    /// State shared with all receivers and senders.
    shared: Arc<Shared<T>>,

    /// Next position to read from. This is a monotonically increasing send
    /// position (compare `Tail::pos`), not a buffer index.
    next: u64,
}
pub mod error {
    //! Broadcast error types
    use std::fmt;

    /// Error returned by the [`send`] function on a [`Sender`].
    ///
    /// A **send** operation can only fail if there are no active receivers,
    /// implying that the message could never be received. The error contains the
    /// message being sent as a payload so it can be recovered.
    ///
    /// [`send`]: crate::sync::broadcast::Sender::send
    /// [`Sender`]: crate::sync::broadcast::Sender
    #[derive(Debug)]
    pub struct SendError<T>(pub T);

    impl<T> fmt::Display for SendError<T> {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            f.write_str("channel closed")
        }
    }

    impl<T: fmt::Debug> std::error::Error for SendError<T> {}

    /// An error returned from the [`recv`] function on a [`Receiver`].
    ///
    /// [`recv`]: crate::sync::broadcast::Receiver::recv
    /// [`Receiver`]: crate::sync::broadcast::Receiver
    #[derive(Debug, PartialEq, Eq, Clone)]
    pub enum RecvError {
        /// There are no more active senders implying no further messages will ever
        /// be sent.
        Closed,

        /// The receiver lagged too far behind. Attempting to receive again will
        /// return the oldest message still retained by the channel.
        ///
        /// Includes the number of skipped messages.
        Lagged(u64),
    }

    impl fmt::Display for RecvError {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            match self {
                Self::Closed => f.write_str("channel closed"),
                Self::Lagged(missed) => write!(f, "channel lagged by {missed}"),
            }
        }
    }

    impl std::error::Error for RecvError {}

    /// An error returned from the [`try_recv`] function on a [`Receiver`].
    ///
    /// [`try_recv`]: crate::sync::broadcast::Receiver::try_recv
    /// [`Receiver`]: crate::sync::broadcast::Receiver
    #[derive(Debug, PartialEq, Eq, Clone)]
    pub enum TryRecvError {
        /// The channel is currently empty. There are still active
        /// [`Sender`] handles, so data may yet become available.
        ///
        /// [`Sender`]: crate::sync::broadcast::Sender
        Empty,

        /// There are no more active senders implying no further messages will ever
        /// be sent.
        Closed,

        /// The receiver lagged too far behind and has been forcibly disconnected.
        /// Attempting to receive again will return the oldest message still
        /// retained by the channel.
        ///
        /// Includes the number of skipped messages.
        Lagged(u64),
    }

    impl fmt::Display for TryRecvError {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            match self {
                Self::Empty => f.write_str("channel empty"),
                Self::Closed => f.write_str("channel closed"),
                Self::Lagged(missed) => write!(f, "channel lagged by {missed}"),
            }
        }
    }

    impl std::error::Error for TryRecvError {}
}
use self::error::{RecvError, SendError, TryRecvError};
use super::Notify;
/// Data shared between senders and receivers.
struct Shared<T> {
    /// Slots in the channel. The buffer length is always a power of two
    /// (see `Sender::new_with_receiver_count`), so positions map to indexes
    /// with a bitwise mask.
    buffer: Box<[Mutex<Slot<T>>]>,

    /// Mask a position -> index.
    mask: usize,

    /// Tail of the queue. Includes the rx wait list.
    tail: Mutex<Tail>,

    /// Number of outstanding Sender handles.
    num_tx: AtomicUsize,

    /// Number of outstanding weak Sender handles.
    num_weak_tx: AtomicUsize,

    /// Notify when the last subscribed [`Receiver`] drops.
    notify_last_rx_drop: Notify,
}
/// Next position to write a value.
struct Tail {
    /// Next position to write to. Monotonically increasing (with wrapping);
    /// never reset.
    pos: u64,

    /// Number of active receivers.
    rx_cnt: usize,

    /// True if the channel is closed. Cleared again by `new_receiver` when a
    /// receiver subscribes to a channel that had none.
    closed: bool,

    /// Receivers waiting for a value.
    waiters: LinkedList<Waiter, <Waiter as linked_list::Link>::Target>,
}
/// Slot in the buffer.
struct Slot<T> {
    /// Remaining number of receivers that are expected to see this value.
    ///
    /// When this goes to zero, the value is released.
    ///
    /// An atomic is used as it is mutated concurrently with the slot read lock
    /// acquired.
    rem: AtomicUsize,

    /// Uniquely identifies the `send` stored in the slot.
    pos: u64,

    /// The value being broadcast.
    ///
    /// The value is set by `send` when the write lock is held. When a reader
    /// drops, `rem` is decremented. When it hits zero, the value is dropped.
    val: Option<T>,
}
/// An entry in the wait queue.
struct Waiter {
    /// True if queued. Atomic because it is checked while the tail lock is
    /// held as well as from the waiter side (see `notify_rx`).
    queued: AtomicBool,

    /// Task waiting on the broadcast channel.
    waker: Option<Waker>,

    /// Intrusive linked-list pointers.
    pointers: linked_list::Pointers<Waiter>,

    /// Should not be `Unpin` — entries are linked into an intrusive list, so
    /// they must stay at a stable address while queued.
    _p: PhantomPinned,
}
impl Waiter {
fn new() -> Self {
Self {
queued: AtomicBool::new(false),
waker: None,
pointers: linked_list::Pointers::new(),
_p: PhantomPinned,
}
}
}
// Generates `Waiter::addr_of_pointers`, a raw-pointer projection to the
// `pointers` field. Going through a raw pointer avoids materializing a
// reference to the whole `Waiter` while it is linked into the intrusive list.
generate_addr_of_methods! {
    impl<> Waiter {
        unsafe fn addr_of_pointers(self: NonNull<Self>) -> NonNull<linked_list::Pointers<Waiter>> {
            &self.pointers
        }
    }
}
/// Holds the lock over a single slot while a receiver inspects its value.
/// (The guard's methods/Drop impl live elsewhere in this file.)
struct RecvGuard<'a, T> {
    slot: MutexGuard<'a, Slot<T>>,
}
/// Receive a value future.
struct Recv<'a, T> {
    /// Receiver being waited on.
    receiver: &'a mut Receiver<T>,

    /// Entry in the waiter `LinkedList`.
    waiter: WaiterCell,
}
// The wrapper around `UnsafeCell` isolates the unsafe impl `Send` and `Sync`
// from `Recv`.
struct WaiterCell(UnsafeCell<Waiter>);

// SAFETY: The `Waiter` inside is only mutated while the channel's tail lock
// is held or while the owning `Recv` has exclusive access — NOTE(review):
// invariant upheld by `Recv`'s poll/drop paths (not visible here); confirm.
unsafe impl Send for WaiterCell {}
unsafe impl Sync for WaiterCell {}
/// Max number of receivers. Reserve space to lock (the top two bits of the
/// count are kept free: `usize::MAX >> 2`).
const MAX_RECEIVERS: usize = usize::MAX >> 2;
/// Create a bounded, multi-producer, multi-consumer channel where each sent
/// value is broadcasted to all active receivers.
///
/// **Note:** The actual capacity may be greater than the provided `capacity`.
///
/// All data sent on [`Sender`] will become available on every active
/// [`Receiver`] in the same order as it was sent.
///
/// The `Sender` can be cloned to `send` to the same channel from multiple
/// points in the process or it can be used concurrently from an `Arc`. New
/// `Receiver` handles are created by calling [`Sender::subscribe`].
///
/// If all [`Receiver`] handles are dropped, the `send` method will return a
/// [`SendError`]. Similarly, if all [`Sender`] handles are dropped, the [`recv`]
/// method will return a [`RecvError`].
///
/// [`Sender`]: crate::sync::broadcast::Sender
/// [`Sender::subscribe`]: crate::sync::broadcast::Sender::subscribe
/// [`Receiver`]: crate::sync::broadcast::Receiver
/// [`recv`]: crate::sync::broadcast::Receiver::recv
/// [`SendError`]: crate::sync::broadcast::error::SendError
/// [`RecvError`]: crate::sync::broadcast::error::RecvError
///
/// # Examples
///
/// ```
/// use tokio::sync::broadcast;
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let (tx, mut rx1) = broadcast::channel(16);
/// let mut rx2 = tx.subscribe();
///
/// tokio::spawn(async move {
/// assert_eq!(rx1.recv().await.unwrap(), 10);
/// assert_eq!(rx1.recv().await.unwrap(), 20);
/// });
///
/// tokio::spawn(async move {
/// assert_eq!(rx2.recv().await.unwrap(), 10);
/// assert_eq!(rx2.recv().await.unwrap(), 20);
/// });
///
/// tx.send(10).unwrap();
/// tx.send(20).unwrap();
/// # }
/// ```
///
/// # Panics
///
/// This will panic if `capacity` is equal to `0`.
///
/// This pre-allocates space for `capacity` messages. Allocation failure may result in a panic or
/// [an allocation error](std::alloc::handle_alloc_error).
#[track_caller]
pub fn channel<T: Clone>(capacity: usize) -> (Sender<T>, Receiver<T>) {
// SAFETY: In the line below we are creating one extra receiver, so there will be 1 in total.
let tx = unsafe { Sender::new_with_receiver_count(1, capacity) };
let rx = Receiver {
shared: tx.shared.clone(),
next: 0,
};
(tx, rx)
}
impl<T> Sender<T> {
    /// Creates the sending-half of the [`broadcast`] channel.
    ///
    /// See the documentation of [`broadcast::channel`] for more information on this method.
    ///
    /// [`broadcast`]: crate::sync::broadcast
    /// [`broadcast::channel`]: crate::sync::broadcast::channel
    #[track_caller]
    pub fn new(capacity: usize) -> Self {
        // SAFETY: We don't create extra receivers, so there are 0.
        unsafe { Self::new_with_receiver_count(0, capacity) }
    }

    /// Creates the sending-half of the [`broadcast`](self) channel, and provide the receiver
    /// count.
    ///
    /// See the documentation of [`broadcast::channel`](self::channel) for more errors when
    /// calling this function.
    ///
    /// # Safety:
    ///
    /// The caller must ensure that the amount of receivers for this Sender is correct before
    /// the channel functionalities are used, the count is zero by default, as this function
    /// does not create any receivers by itself.
    #[track_caller]
    unsafe fn new_with_receiver_count(receiver_count: usize, mut capacity: usize) -> Self {
        assert!(capacity > 0, "broadcast channel capacity cannot be zero");
        assert!(
            capacity <= usize::MAX >> 1,
            "broadcast channel capacity exceeded `usize::MAX / 2`"
        );

        // Round to a power of two, so positions map to indexes via `mask`.
        capacity = capacity.next_power_of_two();

        let mut buffer = Vec::with_capacity(capacity);

        for i in 0..capacity {
            buffer.push(Mutex::new(Slot {
                rem: AtomicUsize::new(0),
                // Seed each slot with the position it would have held one
                // full "lap" ago, so a fresh receiver (next == 0) never
                // mistakes an empty slot for a valid message.
                pos: (i as u64).wrapping_sub(capacity as u64),
                val: None,
            }));
        }

        let shared = Arc::new(Shared {
            buffer: buffer.into_boxed_slice(),
            mask: capacity - 1,
            tail: Mutex::new(Tail {
                pos: 0,
                rx_cnt: receiver_count,
                // With no receivers the channel starts closed; `new_receiver`
                // re-opens it when someone subscribes.
                closed: receiver_count == 0,
                waiters: LinkedList::new(),
            }),
            num_tx: AtomicUsize::new(1),
            num_weak_tx: AtomicUsize::new(0),
            notify_last_rx_drop: Notify::new(),
        });

        Sender { shared }
    }

    /// Attempts to send a value to all active [`Receiver`] handles, returning
    /// it back if it could not be sent.
    ///
    /// A successful send occurs when there is at least one active [`Receiver`]
    /// handle. An unsuccessful send would be one where all associated
    /// [`Receiver`] handles have already been dropped.
    ///
    /// # Return
    ///
    /// On success, the number of subscribed [`Receiver`] handles is returned.
    /// This does not mean that this number of receivers will see the message as
    /// a receiver may drop or lag ([see lagging](self#lagging)) before receiving
    /// the message.
    ///
    /// # Note
    ///
    /// A return value of `Ok` **does not** mean that the sent value will be
    /// observed by all or any of the active [`Receiver`] handles. [`Receiver`]
    /// handles may be dropped before receiving the sent message.
    ///
    /// A return value of `Err` **does not** mean that future calls to `send`
    /// will fail. New [`Receiver`] handles may be created by calling
    /// [`subscribe`].
    ///
    /// [`Receiver`]: crate::sync::broadcast::Receiver
    /// [`subscribe`]: crate::sync::broadcast::Sender::subscribe
    ///
    /// # Examples
    ///
    /// ```
    /// use tokio::sync::broadcast;
    ///
    /// # #[tokio::main(flavor = "current_thread")]
    /// # async fn main() {
    /// let (tx, mut rx1) = broadcast::channel(16);
    /// let mut rx2 = tx.subscribe();
    ///
    /// tokio::spawn(async move {
    ///     assert_eq!(rx1.recv().await.unwrap(), 10);
    ///     assert_eq!(rx1.recv().await.unwrap(), 20);
    /// });
    ///
    /// tokio::spawn(async move {
    ///     assert_eq!(rx2.recv().await.unwrap(), 10);
    ///     assert_eq!(rx2.recv().await.unwrap(), 20);
    /// });
    ///
    /// tx.send(10).unwrap();
    /// tx.send(20).unwrap();
    /// # }
    /// ```
    pub fn send(&self, value: T) -> Result<usize, SendError<T>> {
        // Holding the tail lock for the whole send serializes senders.
        let mut tail = self.shared.tail.lock();

        if tail.rx_cnt == 0 {
            return Err(SendError(value));
        }

        // Position to write into
        let pos = tail.pos;
        let rem = tail.rx_cnt;
        let idx = (pos & self.shared.mask as u64) as usize;

        // Update the tail position
        tail.pos = tail.pos.wrapping_add(1);

        // Get the slot
        let mut slot = self.shared.buffer[idx].lock();

        // Track the position
        slot.pos = pos;

        // Set remaining receivers
        slot.rem.with_mut(|v| *v = rem);

        // Write the value
        slot.val = Some(value);

        // Release the slot lock before notifying the receivers.
        drop(slot);

        // Notify and release the mutex. This must happen after the slot lock is
        // released, otherwise the writer lock bit could be cleared while another
        // thread is in the critical section.
        self.shared.notify_rx(tail);

        Ok(rem)
    }

    /// Creates a new [`Receiver`] handle that will receive values sent **after**
    /// this call to `subscribe`.
    ///
    /// # Examples
    ///
    /// ```
    /// use tokio::sync::broadcast;
    ///
    /// # #[tokio::main(flavor = "current_thread")]
    /// # async fn main() {
    /// let (tx, _rx) = broadcast::channel(16);
    ///
    /// // Will not be seen
    /// tx.send(10).unwrap();
    ///
    /// let mut rx = tx.subscribe();
    ///
    /// tx.send(20).unwrap();
    ///
    /// let value = rx.recv().await.unwrap();
    /// assert_eq!(20, value);
    /// # }
    /// ```
    pub fn subscribe(&self) -> Receiver<T> {
        let shared = self.shared.clone();
        new_receiver(shared)
    }

    /// Converts the `Sender` to a [`WeakSender`] that does not count
    /// towards RAII semantics, i.e. if all `Sender` instances of the
    /// channel were dropped and only `WeakSender` instances remain,
    /// the channel is closed.
    #[must_use = "Downgrade creates a WeakSender without destroying the original non-weak sender."]
    pub fn downgrade(&self) -> WeakSender<T> {
        self.shared.num_weak_tx.fetch_add(1, Relaxed);
        WeakSender {
            shared: self.shared.clone(),
        }
    }

    /// Returns the number of queued values.
    ///
    /// A value is queued until it has either been seen by all receivers that were alive at the time
    /// it was sent, or has been evicted from the queue by subsequent sends that exceeded the
    /// queue's capacity.
    ///
    /// # Note
    ///
    /// In contrast to [`Receiver::len`], this method only reports queued values and not values that
    /// have been evicted from the queue before being seen by all receivers.
    ///
    /// # Examples
    ///
    /// ```
    /// use tokio::sync::broadcast;
    ///
    /// # #[tokio::main(flavor = "current_thread")]
    /// # async fn main() {
    /// let (tx, mut rx1) = broadcast::channel(16);
    /// let mut rx2 = tx.subscribe();
    ///
    /// tx.send(10).unwrap();
    /// tx.send(20).unwrap();
    /// tx.send(30).unwrap();
    ///
    /// assert_eq!(tx.len(), 3);
    ///
    /// rx1.recv().await.unwrap();
    ///
    /// // The len is still 3 since rx2 hasn't seen the first value yet.
    /// assert_eq!(tx.len(), 3);
    ///
    /// rx2.recv().await.unwrap();
    ///
    /// assert_eq!(tx.len(), 2);
    /// # }
    /// ```
    pub fn len(&self) -> usize {
        let tail = self.shared.tail.lock();

        // Scanning from the oldest slot towards the tail, fully-released
        // slots (`rem == 0`) precede slots still queued, so binary search
        // for the first slot that still has pending receivers.
        let base_idx = (tail.pos & self.shared.mask as u64) as usize;
        let mut low = 0;
        let mut high = self.shared.buffer.len();
        while low < high {
            let mid = low + (high - low) / 2;
            let idx = base_idx.wrapping_add(mid) & self.shared.mask;
            if self.shared.buffer[idx].lock().rem.load(SeqCst) == 0 {
                low = mid + 1;
            } else {
                high = mid;
            }
        }

        // `low` counts the released slots; the rest are queued.
        self.shared.buffer.len() - low
    }

    /// Returns true if there are no queued values.
    ///
    /// # Examples
    ///
    /// ```
    /// use tokio::sync::broadcast;
    ///
    /// # #[tokio::main(flavor = "current_thread")]
    /// # async fn main() {
    /// let (tx, mut rx1) = broadcast::channel(16);
    /// let mut rx2 = tx.subscribe();
    ///
    /// assert!(tx.is_empty());
    ///
    /// tx.send(10).unwrap();
    ///
    /// assert!(!tx.is_empty());
    ///
    /// rx1.recv().await.unwrap();
    ///
    /// // The queue is still not empty since rx2 hasn't seen the value.
    /// assert!(!tx.is_empty());
    ///
    /// rx2.recv().await.unwrap();
    ///
    /// assert!(tx.is_empty());
    /// # }
    /// ```
    pub fn is_empty(&self) -> bool {
        let tail = self.shared.tail.lock();

        // Check the most recently written slot (`pos - 1`): if even that one
        // has been seen by all receivers, nothing is queued.
        let idx = (tail.pos.wrapping_sub(1) & self.shared.mask as u64) as usize;
        self.shared.buffer[idx].lock().rem.load(SeqCst) == 0
    }

    /// Returns the number of active receivers.
    ///
    /// An active receiver is a [`Receiver`] handle returned from [`channel`] or
    /// [`subscribe`]. These are the handles that will receive values sent on
    /// this [`Sender`].
    ///
    /// # Note
    ///
    /// It is not guaranteed that a sent message will reach this number of
    /// receivers. Active receivers may never call [`recv`] again before
    /// dropping.
    ///
    /// [`recv`]: crate::sync::broadcast::Receiver::recv
    /// [`Receiver`]: crate::sync::broadcast::Receiver
    /// [`Sender`]: crate::sync::broadcast::Sender
    /// [`subscribe`]: crate::sync::broadcast::Sender::subscribe
    /// [`channel`]: crate::sync::broadcast::channel
    ///
    /// # Examples
    ///
    /// ```
    /// use tokio::sync::broadcast;
    ///
    /// # #[tokio::main(flavor = "current_thread")]
    /// # async fn main() {
    /// let (tx, _rx1) = broadcast::channel(16);
    ///
    /// assert_eq!(1, tx.receiver_count());
    ///
    /// let mut _rx2 = tx.subscribe();
    ///
    /// assert_eq!(2, tx.receiver_count());
    ///
    /// tx.send(10).unwrap();
    /// # }
    /// ```
    pub fn receiver_count(&self) -> usize {
        let tail = self.shared.tail.lock();
        tail.rx_cnt
    }

    /// Returns `true` if senders belong to the same channel.
    ///
    /// # Examples
    ///
    /// ```
    /// use tokio::sync::broadcast;
    ///
    /// # #[tokio::main(flavor = "current_thread")]
    /// # async fn main() {
    /// let (tx, _rx) = broadcast::channel::<()>(16);
    /// let tx2 = tx.clone();
    ///
    /// assert!(tx.same_channel(&tx2));
    ///
    /// let (tx3, _rx3) = broadcast::channel::<()>(16);
    ///
    /// assert!(!tx3.same_channel(&tx2));
    /// # }
    /// ```
    pub fn same_channel(&self, other: &Self) -> bool {
        Arc::ptr_eq(&self.shared, &other.shared)
    }

    /// A future which completes when the number of [Receiver]s subscribed to this `Sender` reaches
    /// zero.
    ///
    /// # Examples
    ///
    /// ```
    /// use futures::FutureExt;
    /// use tokio::sync::broadcast;
    ///
    /// # #[tokio::main(flavor = "current_thread")]
    /// # async fn main() {
    /// let (tx, mut rx1) = broadcast::channel::<u32>(16);
    /// let mut rx2 = tx.subscribe();
    ///
    /// let _ = tx.send(10);
    ///
    /// assert_eq!(rx1.recv().await.unwrap(), 10);
    /// drop(rx1);
    /// assert!(tx.closed().now_or_never().is_none());
    ///
    /// assert_eq!(rx2.recv().await.unwrap(), 10);
    /// drop(rx2);
    /// assert!(tx.closed().now_or_never().is_some());
    /// # }
    /// ```
    pub async fn closed(&self) {
        loop {
            // Create the `Notified` future *before* checking `closed`, so a
            // close that happens between the check and the await is not missed.
            let notified = self.shared.notify_last_rx_drop.notified();

            {
                // Ensure the lock drops if the channel isn't closed
                let tail = self.shared.tail.lock();

                if tail.closed {
                    return;
                }
            }

            notified.await;
        }
    }

    /// Marks the channel closed and wakes every parked receiver so it can
    /// observe the closure.
    fn close_channel(&self) {
        let mut tail = self.shared.tail.lock();
        tail.closed = true;

        self.shared.notify_rx(tail);
    }

    /// Returns the number of [`Sender`] handles.
    pub fn strong_count(&self) -> usize {
        self.shared.num_tx.load(Acquire)
    }

    /// Returns the number of [`WeakSender`] handles.
    pub fn weak_count(&self) -> usize {
        self.shared.num_weak_tx.load(Acquire)
    }
}
/// Create a new `Receiver` which reads starting from the tail.
fn new_receiver<T>(shared: Arc<Shared<T>>) -> Receiver<T> {
    let next;
    {
        let mut tail = shared.tail.lock();

        assert!(tail.rx_cnt != MAX_RECEIVERS, "max receivers");

        if tail.rx_cnt == 0 {
            // Potentially need to re-open the channel, if a new receiver has been added between calls
            // to poll(). Note that we use rx_cnt == 0 instead of is_closed since is_closed also
            // applies if the sender has been dropped
            tail.closed = false;
        }

        tail.rx_cnt = tail.rx_cnt.checked_add(1).expect("overflow");
        next = tail.pos;
    } // tail lock released here

    Receiver { shared, next }
}
/// List used in `Shared::notify_rx`. It wraps a guarded linked list
/// and gates the access to it on the `Shared.tail` mutex. It also empties
/// the list on drop.
struct WaitersList<'a, T> {
    /// The guarded list of waiters currently being notified.
    list: GuardedLinkedList<Waiter, <Waiter as linked_list::Link>::Target>,
    /// Set once the list has been observed empty so the destructor can skip
    /// re-acquiring the tail lock.
    is_empty: bool,
    /// Channel state; its `tail` mutex guards mutation of the list.
    shared: &'a Shared<T>,
}
impl<'a, T> Drop for WaitersList<'a, T> {
    fn drop(&mut self) {
        // If the list is not empty, we unlink all waiters from it.
        // We do not wake the waiters to avoid double panics.
        if !self.is_empty {
            // Unlinking requires exclusive access, which the tail lock grants.
            let _lock_guard = self.shared.tail.lock();
            while self.list.pop_back().is_some() {}
        }
    }
}
impl<'a, T> WaitersList<'a, T> {
    /// Converts `unguarded_list` into a guarded list anchored on the pinned
    /// `guard` node. The guard must outlive the returned list (it is the
    /// sentinel the list's links point at).
    fn new(
        unguarded_list: LinkedList<Waiter, <Waiter as linked_list::Link>::Target>,
        guard: Pin<&'a Waiter>,
        shared: &'a Shared<T>,
    ) -> Self {
        let guard_ptr = NonNull::from(guard.get_ref());
        let list = unguarded_list.into_guarded(guard_ptr);
        WaitersList {
            list,
            is_empty: false,
            shared,
        }
    }

    /// Removes the last element from the guarded list. Modifying this list
    /// requires an exclusive access to the main list in `Notify`.
    fn pop_back_locked(&mut self, _tail: &mut Tail) -> Option<NonNull<Waiter>> {
        let result = self.list.pop_back();
        if result.is_none() {
            // Save information about emptiness to avoid waiting for lock
            // in the destructor.
            self.is_empty = true;
        }
        result
    }
}
impl<T> Shared<T> {
fn notify_rx<'a, 'b: 'a>(&'b self, mut tail: MutexGuard<'a, Tail>) {
// It is critical for `GuardedLinkedList` safety that the guard node is
// pinned in memory and is not dropped until the guarded list is dropped.
let guard = Waiter::new();
pin!(guard);
// We move all waiters to a secondary list. It uses a `GuardedLinkedList`
// underneath to allow every waiter to safely remove itself from it.
//
// * This list will be still guarded by the `waiters` lock.
// `NotifyWaitersList` wrapper makes sure we hold the lock to modify it.
// * This wrapper will empty the list on drop. It is critical for safety
// that we will not leave any list entry with a pointer to the local
// guard node after this function returns / panics.
let mut list = WaitersList::new(std::mem::take(&mut tail.waiters), guard.as_ref(), self);
let mut wakers = WakeList::new();
'outer: loop {
while wakers.can_push() {
match list.pop_back_locked(&mut tail) {
Some(waiter) => {
unsafe {
// Safety: accessing `waker` is safe because
// the tail lock is held.
if let Some(waker) = (*waiter.as_ptr()).waker.take() {
wakers.push(waker);
}
// Safety: `queued` is atomic.
let queued = &(*waiter.as_ptr()).queued;
// `Relaxed` suffices because the tail lock is held.
assert!(queued.load(Relaxed));
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | true |
use super::Notify;
use crate::loom::cell::UnsafeCell;
use crate::loom::sync::atomic::AtomicBool;
use std::error::Error;
use std::fmt;
use std::future::{poll_fn, Future};
use std::mem::MaybeUninit;
use std::ops::Drop;
use std::ptr;
use std::sync::atomic::Ordering;
use std::task::Poll;
// This file contains an implementation of a `SetOnce`. The value of a
// `SetOnce` can only be written once, during initialization.
//
// 1. While `value_set` is false, `value` is not initialized and the `wait()`
//    future keeps waiting.
// 2. Once `value_set` is true, the `wait()` future completes and `get()`
//    returns `Some(&T)`.
//
// The value cannot be changed after `set()` succeeds. Subsequent calls to
// `set()` return a `SetOnceError`.
/// A thread-safe cell that can be written to only once.
///
/// A `SetOnce` is inspired by python's [`asyncio.Event`] type. It can be
/// used to wait until the value of the `SetOnce` is set like an "Event" mechanism.
///
/// # Example
///
/// ```
/// use tokio::sync::{SetOnce, SetOnceError};
///
/// static ONCE: SetOnce<u32> = SetOnce::const_new();
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() -> Result<(), SetOnceError<u32>> {
///
/// // set the value inside a task somewhere...
/// tokio::spawn(async move { ONCE.set(20) });
///
/// // checking with .get doesn't block main thread
/// println!("{:?}", ONCE.get());
///
/// // wait until the value is set, blocks the thread
/// println!("{:?}", ONCE.wait().await);
///
/// Ok(())
/// # }
/// ```
///
/// A `SetOnce` is typically used for global variables that need to be
/// initialized once on first use, but need no further changes. The `SetOnce`
/// in Tokio allows the initialization procedure to be asynchronous.
///
/// # Example
///
/// ```
/// use tokio::sync::{SetOnce, SetOnceError};
/// use std::sync::Arc;
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() -> Result<(), SetOnceError<u32>> {
/// let once = SetOnce::new();
///
/// let arc = Arc::new(once);
/// let first_cl = Arc::clone(&arc);
/// let second_cl = Arc::clone(&arc);
///
/// // set the value inside a task
/// tokio::spawn(async move { first_cl.set(20) }).await.unwrap()?;
///
/// // wait inside task to not block the main thread
/// tokio::spawn(async move {
///     // wait inside async context for the value to be set
///     assert_eq!(*second_cl.wait().await, 20);
/// }).await.unwrap();
///
/// // subsequent set calls will fail
/// assert!(arc.set(30).is_err());
///
/// println!("{:?}", arc.get());
///
/// Ok(())
/// # }
/// ```
///
/// [`asyncio.Event`]: https://docs.python.org/3/library/asyncio-sync.html#asyncio.Event
pub struct SetOnce<T> {
    // True once `value` has been initialized; written with `Release` in
    // `set` and read with `Acquire` in `initialized`.
    value_set: AtomicBool,
    // Storage for the value; only valid when `value_set` is true.
    value: UnsafeCell<MaybeUninit<T>>,
    // Wakes tasks parked in `wait()` when the value is set.
    notify: Notify,
}
impl<T> Default for SetOnce<T> {
fn default() -> SetOnce<T> {
SetOnce::new()
}
}
impl<T: fmt::Debug> fmt::Debug for SetOnce<T> {
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Render as `SetOnce { value: Some(..) }` / `.. value: None ..`.
        let mut builder = fmt.debug_struct("SetOnce");
        builder.field("value", &self.get());
        builder.finish()
    }
}
impl<T: Clone> Clone for SetOnce<T> {
    /// Produces a new cell holding a clone of the current value, or an empty
    /// cell if this one has not been set yet.
    fn clone(&self) -> SetOnce<T> {
        match self.get() {
            Some(value) => SetOnce::from(value.clone()),
            None => SetOnce::new(),
        }
    }
}
impl<T: PartialEq> PartialEq for SetOnce<T> {
    /// Two cells are equal when both are unset, or both hold equal values.
    fn eq(&self, other: &SetOnce<T>) -> bool {
        match (self.get(), other.get()) {
            (Some(a), Some(b)) => a == b,
            (None, None) => true,
            _ => false,
        }
    }
}

impl<T: Eq> Eq for SetOnce<T> {}
impl<T> Drop for SetOnce<T> {
    fn drop(&mut self) {
        // TODO: Use get_mut()
        // `Relaxed` is sufficient here: `drop` has `&mut self`, so no other
        // thread can be accessing the cell concurrently.
        if self.value_set.load(Ordering::Relaxed) {
            // SAFETY: If the value_set is true, then the value is initialized
            // then there is a value to be dropped and this is safe
            unsafe { self.value.with_mut(|ptr| ptr::drop_in_place(ptr as *mut T)) }
        }
    }
}
impl<T> From<T> for SetOnce<T> {
    /// Creates a `SetOnce` that is already initialized with `value`.
    fn from(value: T) -> Self {
        Self {
            notify: Notify::new(),
            value: UnsafeCell::new(MaybeUninit::new(value)),
            value_set: AtomicBool::new(true),
        }
    }
}
impl<T> SetOnce<T> {
/// Creates a new empty `SetOnce` instance.
pub fn new() -> Self {
Self {
value_set: AtomicBool::new(false),
value: UnsafeCell::new(MaybeUninit::uninit()),
notify: Notify::new(),
}
}
/// Creates a new empty `SetOnce` instance.
///
/// Equivalent to `SetOnce::new`, except that it can be used in static
/// variables.
///
/// When using the `tracing` [unstable feature], a `SetOnce` created with
/// `const_new` will not be instrumented. As such, it will not be visible
/// in [`tokio-console`]. Instead, [`SetOnce::new`] should be used to
/// create an instrumented object if that is needed.
///
/// # Example
///
/// ```
/// use tokio::sync::{SetOnce, SetOnceError};
///
/// static ONCE: SetOnce<u32> = SetOnce::const_new();
///
/// fn get_global_integer() -> Result<Option<&'static u32>, SetOnceError<u32>> {
/// ONCE.set(2)?;
/// Ok(ONCE.get())
/// }
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() -> Result<(), SetOnceError<u32>> {
/// let result = get_global_integer()?;
///
/// assert_eq!(result, Some(&2));
/// Ok(())
/// # }
/// ```
///
/// [`tokio-console`]: https://github.com/tokio-rs/console
/// [unstable feature]: crate#unstable-features
#[cfg(not(all(loom, test)))]
pub const fn const_new() -> Self {
Self {
value_set: AtomicBool::new(false),
value: UnsafeCell::new(MaybeUninit::uninit()),
notify: Notify::const_new(),
}
}
/// Creates a new `SetOnce` that contains the provided value, if any.
///
/// If the `Option` is `None`, this is equivalent to `SetOnce::new`.
///
/// [`SetOnce::new`]: crate::sync::SetOnce::new
pub fn new_with(value: Option<T>) -> Self {
if let Some(v) = value {
SetOnce::from(v)
} else {
SetOnce::new()
}
}
/// Creates a new `SetOnce` that contains the provided value.
///
/// # Example
///
/// When using the `tracing` [unstable feature], a `SetOnce` created with
/// `const_new_with` will not be instrumented. As such, it will not be
/// visible in [`tokio-console`]. Instead, [`SetOnce::new_with`] should be
/// used to create an instrumented object if that is needed.
///
/// ```
/// use tokio::sync::SetOnce;
///
/// static ONCE: SetOnce<u32> = SetOnce::const_new_with(1);
///
/// fn get_global_integer() -> Option<&'static u32> {
/// ONCE.get()
/// }
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let result = get_global_integer();
///
/// assert_eq!(result, Some(&1));
/// # }
/// ```
///
/// [`tokio-console`]: https://github.com/tokio-rs/console
/// [unstable feature]: crate#unstable-features
#[cfg(not(all(loom, test)))]
pub const fn const_new_with(value: T) -> Self {
Self {
value_set: AtomicBool::new(true),
value: UnsafeCell::new(MaybeUninit::new(value)),
notify: Notify::const_new(),
}
}
/// Returns `true` if the `SetOnce` currently contains a value, and `false`
/// otherwise.
pub fn initialized(&self) -> bool {
// Using acquire ordering so we're able to read/catch any writes that
// are done with `Ordering::Release`
self.value_set.load(Ordering::Acquire)
}
// SAFETY: The SetOnce must not be empty.
unsafe fn get_unchecked(&self) -> &T {
unsafe { &*self.value.with(|ptr| (*ptr).as_ptr()) }
}
/// Returns a reference to the value currently stored in the `SetOnce`, or
/// `None` if the `SetOnce` is empty.
pub fn get(&self) -> Option<&T> {
if self.initialized() {
// SAFETY: the SetOnce is initialized, so we can safely
// call get_unchecked and return the value
Some(unsafe { self.get_unchecked() })
} else {
None
}
}
/// Sets the value of the `SetOnce` to the given value if the `SetOnce` is
/// empty.
///
/// If the `SetOnce` already has a value, this call will fail with an
/// [`SetOnceError`].
///
/// [`SetOnceError`]: crate::sync::SetOnceError
pub fn set(&self, value: T) -> Result<(), SetOnceError<T>> {
if self.initialized() {
return Err(SetOnceError(value));
}
// SAFETY: lock notify to ensure only one caller of set
// can run at a time.
let guard = self.notify.lock_waiter_list();
if self.initialized() {
return Err(SetOnceError(value));
}
// SAFETY: We have locked the mutex and checked if the value is
// initialized or not, so we can safely write to the value
unsafe {
self.value.with_mut(|ptr| (*ptr).as_mut_ptr().write(value));
}
// Using release ordering so any threads that read a true from this
// atomic is able to read the value we just stored.
self.value_set.store(true, Ordering::Release);
// notify the waiting wakers that the value is set
guard.notify_waiters();
Ok(())
}
    /// Takes the value from the cell, destroying the cell in the process.
    /// Returns `None` if the cell is empty.
    pub fn into_inner(self) -> Option<T> {
        // TODO: Use get_mut()
        // Relaxed is used because we own `self` by value here, so no other
        // thread can be racing on `value_set`.
        let value_set = self.value_set.load(Ordering::Relaxed);
        if value_set {
            // Since we have taken ownership of self, its drop implementation
            // will be called by the end of this function, to prevent a double
            // free we will set the value_set to false so that the drop
            // implementation does not try to drop the value again.
            self.value_set.store(false, Ordering::Relaxed);
            // SAFETY: The SetOnce is currently initialized, we can assume the
            // value is initialized and return that, when we return the value
            // we give the drop handler to the return scope.
            Some(unsafe { self.value.with_mut(|ptr| ptr::read(ptr).assume_init()) })
        } else {
            None
        }
    }
    /// Waits until the value is set.
    ///
    /// If the `SetOnce` is already initialized, it will return the value
    /// immediately.
    ///
    /// # Cancel safety
    ///
    /// This method is cancel safe.
    pub async fn wait(&self) -> &T {
        loop {
            // Fast path / loop re-check: return as soon as the value is
            // observable.
            if let Some(val) = self.get() {
                return val;
            }
            let notify_fut = self.notify.notified();
            pin!(notify_fut);
            poll_fn(|cx| {
                // Register under the notify's internal lock.
                let ret = notify_fut.as_mut().poll(cx);
                // Re-check after registering: if the value was stored in the
                // window between `get()` above and waker registration, we
                // must not go to sleep or the notification could be missed.
                if self.value_set.load(Ordering::Relaxed) {
                    return Poll::Ready(());
                }
                ret
            })
            .await;
        }
    }
}
// Since `get` gives us access to immutable references of the SetOnce, SetOnce
// can only be Sync if T is Sync, otherwise SetOnce would allow sharing
// references of !Sync values across threads. We need T to be Send in order for
// SetOnce to be Sync because we can use `set` on `&SetOnce<T>` to send values
// (of type T) across threads.
unsafe impl<T: Sync + Send> Sync for SetOnce<T> {}
// Access to SetOnce's value is guarded by the atomic boolean flag
// and atomic operations on `value_set`, so as long as T itself is Send
// it's safe to send it to another thread.
unsafe impl<T: Send> Send for SetOnce<T> {}
/// Error that can be returned from [`SetOnce::set`].
///
/// This error means that the `SetOnce` was already initialized when
/// set was called; the rejected value is handed back in the payload.
///
/// [`SetOnce::set`]: crate::sync::SetOnce::set
#[derive(Debug, PartialEq, Eq)]
pub struct SetOnceError<T>(pub T);
impl<T> fmt::Display for SetOnceError<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Fixed message; the payload is intentionally not formatted since
        // `T` need not implement `Display`.
        f.write_str("SetOnceError")
    }
}
impl<T: fmt::Debug> Error for SetOnceError<T> {}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/sync/oneshot.rs | tokio/src/sync/oneshot.rs | #![cfg_attr(not(feature = "sync"), allow(dead_code, unreachable_pub))]
//! A one-shot channel is used for sending a single message between
//! asynchronous tasks. The [`channel`] function is used to create a
//! [`Sender`] and [`Receiver`] handle pair that form the channel.
//!
//! The `Sender` handle is used by the producer to send the value.
//! The `Receiver` handle is used by the consumer to receive the value.
//!
//! Each handle can be used on separate tasks.
//!
//! Since the `send` method is not async, it can be used anywhere. This includes
//! sending between two runtimes, and using it from non-async code.
//!
//! If the [`Receiver`] is closed before receiving a message which has already
//! been sent, the message will remain in the channel until the receiver is
//! dropped, at which point the message will be dropped immediately.
//!
//! # Examples
//!
//! ```
//! use tokio::sync::oneshot;
//!
//! # #[tokio::main(flavor = "current_thread")]
//! # async fn main() {
//! let (tx, rx) = oneshot::channel();
//!
//! tokio::spawn(async move {
//! if let Err(_) = tx.send(3) {
//! println!("the receiver dropped");
//! }
//! });
//!
//! match rx.await {
//! Ok(v) => println!("got = {:?}", v),
//! Err(_) => println!("the sender dropped"),
//! }
//! # }
//! ```
//!
//! If the sender is dropped without sending, the receiver will fail with
//! [`error::RecvError`]:
//!
//! ```
//! use tokio::sync::oneshot;
//!
//! # #[tokio::main(flavor = "current_thread")]
//! # async fn main() {
//! let (tx, rx) = oneshot::channel::<u32>();
//!
//! tokio::spawn(async move {
//! drop(tx);
//! });
//!
//! match rx.await {
//! Ok(_) => panic!("This doesn't happen"),
//! Err(_) => println!("the sender dropped"),
//! }
//! # }
//! ```
//!
//! To use a `oneshot` channel in a `tokio::select!` loop, add `&mut` in front of
//! the channel.
//!
//! ```
//! use tokio::sync::oneshot;
//! use tokio::time::{interval, sleep, Duration};
//!
//! # #[tokio::main(flavor = "current_thread")]
//! # async fn _doc() {}
//! # #[tokio::main(flavor = "current_thread", start_paused = true)]
//! # async fn main() {
//! let (send, mut recv) = oneshot::channel();
//! let mut interval = interval(Duration::from_millis(100));
//!
//! # let handle =
//! tokio::spawn(async move {
//! sleep(Duration::from_secs(1)).await;
//! send.send("shut down").unwrap();
//! });
//!
//! loop {
//! tokio::select! {
//! _ = interval.tick() => println!("Another 100ms"),
//! msg = &mut recv => {
//! println!("Got message: {}", msg.unwrap());
//! break;
//! }
//! }
//! }
//! # handle.await.unwrap();
//! # }
//! ```
//!
//! To use a `Sender` from a destructor, put it in an [`Option`] and call
//! [`Option::take`].
//!
//! ```
//! use tokio::sync::oneshot;
//!
//! struct SendOnDrop {
//! sender: Option<oneshot::Sender<&'static str>>,
//! }
//! impl Drop for SendOnDrop {
//! fn drop(&mut self) {
//! if let Some(sender) = self.sender.take() {
//! // Using `let _ =` to ignore send errors.
//! let _ = sender.send("I got dropped!");
//! }
//! }
//! }
//!
//! # #[tokio::main(flavor = "current_thread")]
//! # async fn _doc() {}
//! # #[tokio::main(flavor = "current_thread")]
//! # async fn main() {
//! let (send, recv) = oneshot::channel();
//!
//! let send_on_drop = SendOnDrop { sender: Some(send) };
//! drop(send_on_drop);
//!
//! assert_eq!(recv.await, Ok("I got dropped!"));
//! # }
//! ```
use crate::loom::cell::UnsafeCell;
use crate::loom::sync::atomic::AtomicUsize;
use crate::loom::sync::Arc;
#[cfg(all(tokio_unstable, feature = "tracing"))]
use crate::util::trace;
use std::fmt;
use std::future::Future;
use std::mem::MaybeUninit;
use std::pin::Pin;
use std::sync::atomic::Ordering::{self, AcqRel, Acquire};
use std::task::Poll::{Pending, Ready};
use std::task::{ready, Context, Poll, Waker};
/// Sends a value to the associated [`Receiver`].
///
/// A pair of both a [`Sender`] and a [`Receiver`] are created by the
/// [`channel`](fn@channel) function.
///
/// # Examples
///
/// ```
/// use tokio::sync::oneshot;
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let (tx, rx) = oneshot::channel();
///
/// tokio::spawn(async move {
/// if let Err(_) = tx.send(3) {
/// println!("the receiver dropped");
/// }
/// });
///
/// match rx.await {
/// Ok(v) => println!("got = {:?}", v),
/// Err(_) => println!("the sender dropped"),
/// }
/// # }
/// ```
///
/// If the sender is dropped without sending, the receiver will fail with
/// [`error::RecvError`]:
///
/// ```
/// use tokio::sync::oneshot;
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let (tx, rx) = oneshot::channel::<u32>();
///
/// tokio::spawn(async move {
/// drop(tx);
/// });
///
/// match rx.await {
/// Ok(_) => panic!("This doesn't happen"),
/// Err(_) => println!("the sender dropped"),
/// }
/// # }
/// ```
///
/// To use a `Sender` from a destructor, put it in an [`Option`] and call
/// [`Option::take`].
///
/// ```
/// use tokio::sync::oneshot;
///
/// struct SendOnDrop {
/// sender: Option<oneshot::Sender<&'static str>>,
/// }
/// impl Drop for SendOnDrop {
/// fn drop(&mut self) {
/// if let Some(sender) = self.sender.take() {
/// // Using `let _ =` to ignore send errors.
/// let _ = sender.send("I got dropped!");
/// }
/// }
/// }
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn _doc() {}
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let (send, recv) = oneshot::channel();
///
/// let send_on_drop = SendOnDrop { sender: Some(send) };
/// drop(send_on_drop);
///
/// assert_eq!(recv.await, Ok("I got dropped!"));
/// # }
/// ```
///
/// [`Option`]: std::option::Option
/// [`Option::take`]: std::option::Option::take
#[derive(Debug)]
pub struct Sender<T> {
    /// Shared channel state. Always `Some` while the handle is live; taken
    /// (set to `None`) by `send`, which consumes the sender, so `Drop` can
    /// tell whether a value was sent.
    inner: Option<Arc<Inner<T>>>,
    /// Span tracking this channel resource (unstable tracing only).
    #[cfg(all(tokio_unstable, feature = "tracing"))]
    resource_span: tracing::Span,
}
/// Receives a value from the associated [`Sender`].
///
/// A pair of both a [`Sender`] and a [`Receiver`] are created by the
/// [`channel`](fn@channel) function.
///
/// This channel has no `recv` method because the receiver itself implements the
/// [`Future`] trait. To receive a `Result<T, `[`error::RecvError`]`>`, `.await` the `Receiver` object directly.
///
/// The `poll` method on the `Future` trait is allowed to spuriously return
/// `Poll::Pending` even if the message has been sent. If such a spurious
/// failure happens, then the caller will be woken when the spurious failure has
/// been resolved so that the caller can attempt to receive the message again.
/// Note that receiving such a wakeup does not guarantee that the next call will
/// succeed — it could fail with another spurious failure. (A spurious failure
/// does not mean that the message is lost. It is just delayed.)
///
/// [`Future`]: trait@std::future::Future
///
/// # Cancellation safety
///
/// The `Receiver` is cancel safe. If it is used as the event in a
/// [`tokio::select!`](crate::select) statement and some other branch
/// completes first, it is guaranteed that no message was received on this
/// channel.
///
/// # Examples
///
/// ```
/// use tokio::sync::oneshot;
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let (tx, rx) = oneshot::channel();
///
/// tokio::spawn(async move {
/// if let Err(_) = tx.send(3) {
/// println!("the receiver dropped");
/// }
/// });
///
/// match rx.await {
/// Ok(v) => println!("got = {:?}", v),
/// Err(_) => println!("the sender dropped"),
/// }
/// # }
/// ```
///
/// If the sender is dropped without sending, the receiver will fail with
/// [`error::RecvError`]:
///
/// ```
/// use tokio::sync::oneshot;
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let (tx, rx) = oneshot::channel::<u32>();
///
/// tokio::spawn(async move {
/// drop(tx);
/// });
///
/// match rx.await {
/// Ok(_) => panic!("This doesn't happen"),
/// Err(_) => println!("the sender dropped"),
/// }
/// # }
/// ```
///
/// To use a `Receiver` in a `tokio::select!` loop, add `&mut` in front of the
/// channel.
///
/// ```
/// use tokio::sync::oneshot;
/// use tokio::time::{interval, sleep, Duration};
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn _doc() {}
/// # #[tokio::main(flavor = "current_thread", start_paused = true)]
/// # async fn main() {
/// let (send, mut recv) = oneshot::channel();
/// let mut interval = interval(Duration::from_millis(100));
///
/// # let handle =
/// tokio::spawn(async move {
/// sleep(Duration::from_secs(1)).await;
/// send.send("shut down").unwrap();
/// });
///
/// loop {
/// tokio::select! {
/// _ = interval.tick() => println!("Another 100ms"),
/// msg = &mut recv => {
/// println!("Got message: {}", msg.unwrap());
/// break;
/// }
/// }
/// }
/// # handle.await.unwrap();
/// # }
/// ```
#[derive(Debug)]
pub struct Receiver<T> {
    /// Shared channel state. `None` once the receiver has terminated, i.e.
    /// has yielded its final `Poll::Ready` result.
    inner: Option<Arc<Inner<T>>>,
    /// Span tracking this channel resource (unstable tracing only).
    #[cfg(all(tokio_unstable, feature = "tracing"))]
    resource_span: tracing::Span,
    /// Span tracking the `Receiver::await` async op (unstable tracing only).
    #[cfg(all(tokio_unstable, feature = "tracing"))]
    async_op_span: tracing::Span,
    /// Span tracking individual polls of that async op (unstable tracing only).
    #[cfg(all(tokio_unstable, feature = "tracing"))]
    async_op_poll_span: tracing::Span,
}
pub mod error {
    //! `Oneshot` error types.
    use std::fmt;
    /// Error returned by the `Future` implementation for `Receiver`.
    ///
    /// This error is returned by the receiver when the sender is dropped
    /// without sending. The payload is a private unit, so the error can only
    /// be constructed within the oneshot module tree.
    #[derive(Debug, Eq, PartialEq, Clone)]
    pub struct RecvError(pub(super) ());
    /// Error returned by the `try_recv` function on `Receiver`.
    #[derive(Debug, Eq, PartialEq, Clone)]
    pub enum TryRecvError {
        /// The send half of the channel has not yet sent a value.
        Empty,
        /// The send half of the channel was dropped without sending a value.
        Closed,
    }
    // ===== impl RecvError =====
    impl fmt::Display for RecvError {
        fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
            fmt.write_str("channel closed")
        }
    }
    impl std::error::Error for RecvError {}
    // ===== impl TryRecvError =====
    impl fmt::Display for TryRecvError {
        fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
            let msg = match self {
                TryRecvError::Empty => "channel empty",
                TryRecvError::Closed => "channel closed",
            };
            fmt.write_str(msg)
        }
    }
    impl std::error::Error for TryRecvError {}
}
use self::error::*;
struct Inner<T> {
    /// Manages the state of the inner cell.
    ///
    /// The word packs bit flags (such as `VALUE_SENT`, `TX_TASK_SET` and
    /// `RX_TASK_SET`) that are read and updated through the `State` helper.
    state: AtomicUsize,
    /// The value. This is set by `Sender` and read by `Receiver`. The state of
    /// the cell is tracked by `state`.
    value: UnsafeCell<Option<T>>,
    /// The task to notify when the receiver drops without consuming the value.
    ///
    /// ## Safety
    ///
    /// The `TX_TASK_SET` bit in the `state` field is set if this field is
    /// initialized. If that bit is unset, this field may be uninitialized.
    tx_task: Task,
    /// The task to notify when the value is sent.
    ///
    /// ## Safety
    ///
    /// The `RX_TASK_SET` bit in the `state` field is set if this field is
    /// initialized. If that bit is unset, this field may be uninitialized.
    rx_task: Task,
}
/// Storage slot for a parked task's [`Waker`].
///
/// The slot may be uninitialized; whether it currently holds a valid waker is
/// tracked externally by the `TX_TASK_SET` / `RX_TASK_SET` bits in
/// `Inner::state` (see the safety notes on `Inner`).
struct Task(UnsafeCell<MaybeUninit<Waker>>);
impl Task {
    /// Returns whether the stored waker would wake the same task as the
    /// waker in `cx`.
    ///
    /// # Safety
    ///
    /// The caller must do the necessary synchronization to ensure that
    /// the [`Self::0`] contains the valid [`Waker`] during the call.
    unsafe fn will_wake(&self, cx: &mut Context<'_>) -> bool {
        unsafe { self.with_task(|w| w.will_wake(cx.waker())) }
    }
    /// Invokes `f` with a shared reference to the stored waker.
    ///
    /// # Safety
    ///
    /// The caller must do the necessary synchronization to ensure that
    /// the [`Self::0`] contains the valid [`Waker`] during the call.
    unsafe fn with_task<F, R>(&self, f: F) -> R
    where
        F: FnOnce(&Waker) -> R,
    {
        self.0.with(|ptr| {
            let waker: *const Waker = unsafe { (*ptr).as_ptr() };
            f(unsafe { &*waker })
        })
    }
    /// Drops the stored waker in place, leaving the slot uninitialized again.
    ///
    /// # Safety
    ///
    /// The caller must do the necessary synchronization to ensure that
    /// the [`Self::0`] contains the valid [`Waker`] during the call.
    unsafe fn drop_task(&self) {
        self.0.with_mut(|ptr| {
            let ptr: *mut Waker = unsafe { (*ptr).as_mut_ptr() };
            unsafe {
                ptr.drop_in_place();
            }
        });
    }
    /// Stores a clone of the waker from `cx` into the slot. The write does
    /// not drop any previous contents.
    ///
    /// # Safety
    ///
    /// The caller must do the necessary synchronization to ensure that
    /// the [`Self::0`] contains the valid [`Waker`] during the call.
    unsafe fn set_task(&self, cx: &mut Context<'_>) {
        self.0.with_mut(|ptr| {
            let ptr: *mut Waker = unsafe { (*ptr).as_mut_ptr() };
            unsafe {
                ptr.write(cx.waker().clone());
            }
        });
    }
}
/// Copyable snapshot of the bit flags packed into `Inner::state`.
///
/// Values are loaded from / stored to the `AtomicUsize` via associated
/// helpers (`State::load`, `State::set_tx_task`, ...) defined elsewhere in
/// this file.
#[derive(Clone, Copy)]
struct State(usize);
/// Creates a new one-shot channel for sending single values across asynchronous
/// tasks.
///
/// The function returns separate "send" and "receive" handles. The `Sender`
/// handle is used by the producer to send the value. The `Receiver` handle is
/// used by the consumer to receive the value.
///
/// Each handle can be used on separate tasks.
///
/// # Examples
///
/// ```
/// use tokio::sync::oneshot;
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let (tx, rx) = oneshot::channel();
///
/// tokio::spawn(async move {
/// if let Err(_) = tx.send(3) {
/// println!("the receiver dropped");
/// }
/// });
///
/// match rx.await {
/// Ok(v) => println!("got = {:?}", v),
/// Err(_) => println!("the sender dropped"),
/// }
/// # }
/// ```
#[track_caller]
pub fn channel<T>() -> (Sender<T>, Receiver<T>) {
    // With unstable tracing, create a resource span and record the initial
    // channel state: nothing dropped, nothing sent, nothing received.
    #[cfg(all(tokio_unstable, feature = "tracing"))]
    let resource_span = {
        let location = std::panic::Location::caller();
        let resource_span = tracing::trace_span!(
            parent: None,
            "runtime.resource",
            concrete_type = "Sender|Receiver",
            kind = "Sync",
            loc.file = location.file(),
            loc.line = location.line(),
            loc.col = location.column(),
        );
        resource_span.in_scope(|| {
            tracing::trace!(
                target: "runtime::resource::state_update",
                tx_dropped = false,
                tx_dropped.op = "override",
            )
        });
        resource_span.in_scope(|| {
            tracing::trace!(
                target: "runtime::resource::state_update",
                rx_dropped = false,
                rx_dropped.op = "override",
            )
        });
        resource_span.in_scope(|| {
            tracing::trace!(
                target: "runtime::resource::state_update",
                value_sent = false,
                value_sent.op = "override",
            )
        });
        resource_span.in_scope(|| {
            tracing::trace!(
                target: "runtime::resource::state_update",
                value_received = false,
                value_received.op = "override",
            )
        });
        resource_span
    };
    // Fresh channel: initial state word, no value, and both waker slots
    // uninitialized (their validity bits in `state` are unset).
    let inner = Arc::new(Inner {
        state: AtomicUsize::new(State::new().as_usize()),
        value: UnsafeCell::new(None),
        tx_task: Task(UnsafeCell::new(MaybeUninit::uninit())),
        rx_task: Task(UnsafeCell::new(MaybeUninit::uninit())),
    });
    let tx = Sender {
        inner: Some(inner.clone()),
        #[cfg(all(tokio_unstable, feature = "tracing"))]
        resource_span: resource_span.clone(),
    };
    #[cfg(all(tokio_unstable, feature = "tracing"))]
    let async_op_span = resource_span
        .in_scope(|| tracing::trace_span!("runtime.resource.async_op", source = "Receiver::await"));
    #[cfg(all(tokio_unstable, feature = "tracing"))]
    let async_op_poll_span =
        async_op_span.in_scope(|| tracing::trace_span!("runtime.resource.async_op.poll"));
    let rx = Receiver {
        inner: Some(inner),
        #[cfg(all(tokio_unstable, feature = "tracing"))]
        resource_span,
        #[cfg(all(tokio_unstable, feature = "tracing"))]
        async_op_span,
        #[cfg(all(tokio_unstable, feature = "tracing"))]
        async_op_poll_span,
    };
    (tx, rx)
}
impl<T> Sender<T> {
/// Attempts to send a value on this channel, returning it back if it could
/// not be sent.
///
/// This method consumes `self` as only one value may ever be sent on a `oneshot`
/// channel. It is not marked async because sending a message to an `oneshot`
/// channel never requires any form of waiting. Because of this, the `send`
/// method can be used in both synchronous and asynchronous code without
/// problems.
///
/// A successful send occurs when it is determined that the other end of the
/// channel has not hung up already. An unsuccessful send would be one where
/// the corresponding receiver has already been deallocated. Note that a
/// return value of `Err` means that the data will never be received, but
/// a return value of `Ok` does *not* mean that the data will be received.
/// It is possible for the corresponding receiver to hang up immediately
/// after this function returns `Ok`.
///
/// # Examples
///
/// Send a value to another task
///
/// ```
/// use tokio::sync::oneshot;
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let (tx, rx) = oneshot::channel();
///
/// tokio::spawn(async move {
/// if let Err(_) = tx.send(3) {
/// println!("the receiver dropped");
/// }
/// });
///
/// match rx.await {
/// Ok(v) => println!("got = {:?}", v),
/// Err(_) => println!("the sender dropped"),
/// }
/// # }
/// ```
    pub fn send(mut self, t: T) -> Result<(), T> {
        // `send` consumes the handle, so `inner` is always present here.
        // Taking it also prevents the `Drop` impl from calling `complete()`
        // a second time.
        let inner = self.inner.take().unwrap();
        inner.value.with_mut(|ptr| unsafe {
            // SAFETY: The receiver will not access the `UnsafeCell` unless the
            // channel has been marked as "complete" (the `VALUE_SENT` state bit
            // is set).
            // That bit is only set by the sender later on in this method, and
            // calling this method consumes `self`. Therefore, if it was possible to
            // call this method, we know that the `VALUE_SENT` bit is unset, and
            // the receiver is not currently accessing the `UnsafeCell`.
            *ptr = Some(t);
        });
        // If completing the channel fails, the value just written is
        // reclaimed and handed back to the caller as the error.
        if !inner.complete() {
            unsafe {
                // SAFETY: The receiver will not access the `UnsafeCell` unless
                // the channel has been marked as "complete". Calling
                // `complete()` will return true if this bit is set, and false
                // if it is not set. Thus, if `complete()` returned false, it is
                // safe for us to access the value, because we know that the
                // receiver will not.
                return Err(inner.consume_value().unwrap());
            }
        }
        #[cfg(all(tokio_unstable, feature = "tracing"))]
        self.resource_span.in_scope(|| {
            tracing::trace!(
                target: "runtime::resource::state_update",
                value_sent = true,
                value_sent.op = "override",
            )
        });
        Ok(())
    }
/// Waits for the associated [`Receiver`] handle to close.
///
/// A [`Receiver`] is closed by either calling [`close`] explicitly or the
/// [`Receiver`] value is dropped.
///
/// This function is useful when paired with `select!` to abort a
/// computation when the receiver is no longer interested in the result.
///
/// # Return
///
/// Returns a `Future` which must be awaited on.
///
/// [`Receiver`]: Receiver
/// [`close`]: Receiver::close
///
/// # Examples
///
/// Basic usage
///
/// ```
/// use tokio::sync::oneshot;
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let (mut tx, rx) = oneshot::channel::<()>();
///
/// tokio::spawn(async move {
/// drop(rx);
/// });
///
/// tx.closed().await;
/// println!("the receiver dropped");
/// # }
/// ```
///
/// Paired with select
///
/// ```
/// use tokio::sync::oneshot;
/// use tokio::time::{self, Duration};
///
/// async fn compute() -> String {
/// // Complex computation returning a `String`
/// # "hello".to_string()
/// }
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let (mut tx, rx) = oneshot::channel();
///
/// tokio::spawn(async move {
/// tokio::select! {
/// _ = tx.closed() => {
/// // The receiver dropped, no need to do any further work
/// }
/// value = compute() => {
/// // The send can fail if the channel was closed at the exact same
/// // time as when compute() finished, so just ignore the failure.
/// let _ = tx.send(value);
/// }
/// }
/// });
///
/// // Wait for up to 10 seconds
/// let _ = time::timeout(Duration::from_secs(10), rx).await;
/// # }
/// ```
    pub async fn closed(&mut self) {
        use std::future::poll_fn;
        // With unstable tracing enabled, wrap the poll loop in an async-op
        // span; otherwise poll `poll_closed` directly. Exactly one of the
        // two `closed` bindings below is compiled in.
        #[cfg(all(tokio_unstable, feature = "tracing"))]
        let resource_span = self.resource_span.clone();
        #[cfg(all(tokio_unstable, feature = "tracing"))]
        let closed = trace::async_op(
            || poll_fn(|cx| self.poll_closed(cx)),
            resource_span,
            "Sender::closed",
            "poll_closed",
            false,
        );
        #[cfg(not(all(tokio_unstable, feature = "tracing")))]
        let closed = poll_fn(|cx| self.poll_closed(cx));
        closed.await;
    }
/// Returns `true` if the associated [`Receiver`] handle has been dropped.
///
/// A [`Receiver`] is closed by either calling [`close`] explicitly or the
/// [`Receiver`] value is dropped.
///
/// If `true` is returned, a call to `send` will always result in an error.
///
/// [`Receiver`]: Receiver
/// [`close`]: Receiver::close
///
/// # Examples
///
/// ```
/// use tokio::sync::oneshot;
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let (tx, rx) = oneshot::channel();
///
/// assert!(!tx.is_closed());
///
/// drop(rx);
///
/// assert!(tx.is_closed());
/// assert!(tx.send("never received").is_err());
/// # }
/// ```
pub fn is_closed(&self) -> bool {
let inner = self.inner.as_ref().unwrap();
let state = State::load(&inner.state, Acquire);
state.is_closed()
}
/// Checks whether the `oneshot` channel has been closed, and if not, schedules the
/// `Waker` in the provided `Context` to receive a notification when the channel is
/// closed.
///
/// A [`Receiver`] is closed by either calling [`close`] explicitly, or when the
/// [`Receiver`] value is dropped.
///
/// Note that on multiple calls to poll, only the `Waker` from the `Context` passed
/// to the most recent call will be scheduled to receive a wakeup.
///
/// [`Receiver`]: struct@crate::sync::oneshot::Receiver
/// [`close`]: fn@crate::sync::oneshot::Receiver::close
///
/// # Return value
///
/// This function returns:
///
/// * `Poll::Pending` if the channel is still open.
/// * `Poll::Ready(())` if the channel is closed.
///
/// # Examples
///
/// ```
/// use tokio::sync::oneshot;
///
/// use std::future::poll_fn;
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let (mut tx, mut rx) = oneshot::channel::<()>();
///
/// tokio::spawn(async move {
/// rx.close();
/// });
///
/// poll_fn(|cx| tx.poll_closed(cx)).await;
///
/// println!("the receiver dropped");
/// # }
/// ```
    pub fn poll_closed(&mut self, cx: &mut Context<'_>) -> Poll<()> {
        ready!(crate::trace::trace_leaf(cx));
        // Keep track of task budget
        let coop = ready!(crate::task::coop::poll_proceed(cx));
        let inner = self.inner.as_ref().unwrap();
        let mut state = State::load(&inner.state, Acquire);
        if state.is_closed() {
            coop.made_progress();
            return Ready(());
        }
        if state.is_tx_task_set() {
            let will_notify = unsafe { inner.tx_task.will_wake(cx) };
            // A stored waker that would not wake the current task must be
            // swapped for the waker in `cx`.
            if !will_notify {
                state = State::unset_tx_task(&inner.state);
                if state.is_closed() {
                    // Set the flag again so that the waker is released in drop
                    State::set_tx_task(&inner.state);
                    coop.made_progress();
                    return Ready(());
                } else {
                    unsafe { inner.tx_task.drop_task() };
                }
            }
        }
        if !state.is_tx_task_set() {
            // Attempt to set the task
            unsafe {
                inner.tx_task.set_task(cx);
            }
            // Update the state
            state = State::set_tx_task(&inner.state);
            // The receiver may have closed the channel while the waker was
            // being stored; re-check so that wakeup is not missed.
            if state.is_closed() {
                coop.made_progress();
                return Ready(());
            }
        }
        Pending
    }
}
impl<T> Drop for Sender<T> {
    fn drop(&mut self) {
        // `inner` is `None` when `send` was called: the channel was already
        // completed there and nothing is left to do.
        if let Some(inner) = self.inner.as_ref() {
            inner.complete();
            #[cfg(all(tokio_unstable, feature = "tracing"))]
            self.resource_span.in_scope(|| {
                tracing::trace!(
                    target: "runtime::resource::state_update",
                    tx_dropped = true,
                    tx_dropped.op = "override",
                )
            });
        }
    }
}
impl<T> Receiver<T> {
/// Prevents the associated [`Sender`] handle from sending a value.
///
/// Any `send` operation which happens after calling `close` is guaranteed
/// to fail. After calling `close`, [`try_recv`] should be called to
/// receive a value if one was sent **before** the call to `close`
/// completed.
///
/// This function is useful to perform a graceful shutdown and ensure that a
/// value will not be sent into the channel and never received.
///
/// `close` is no-op if a message is already received or the channel
/// is already closed.
///
/// [`Sender`]: Sender
/// [`try_recv`]: Receiver::try_recv
///
/// # Examples
///
/// Prevent a value from being sent
///
/// ```
/// use tokio::sync::oneshot;
/// use tokio::sync::oneshot::error::TryRecvError;
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let (tx, mut rx) = oneshot::channel();
///
/// assert!(!tx.is_closed());
///
/// rx.close();
///
/// assert!(tx.is_closed());
/// assert!(tx.send("never received").is_err());
///
/// match rx.try_recv() {
/// Err(TryRecvError::Closed) => {}
/// _ => unreachable!(),
/// }
/// # }
/// ```
///
/// Receive a value sent **before** calling `close`
///
/// ```
/// use tokio::sync::oneshot;
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let (tx, mut rx) = oneshot::channel();
///
/// assert!(tx.send("will receive").is_ok());
///
/// rx.close();
///
/// let msg = rx.try_recv().unwrap();
/// assert_eq!(msg, "will receive");
/// # }
/// ```
    pub fn close(&mut self) {
        // `inner` is `None` once the receiver has terminated; closing is a
        // no-op in that case (matching the documented behavior above).
        if let Some(inner) = self.inner.as_ref() {
            inner.close();
            #[cfg(all(tokio_unstable, feature = "tracing"))]
            self.resource_span.in_scope(|| {
                tracing::trace!(
                    target: "runtime::resource::state_update",
                    rx_dropped = true,
                    rx_dropped.op = "override",
                )
            });
        }
    }
/// Checks if this receiver is terminated.
///
/// This function returns true if this receiver has already yielded a [`Poll::Ready`] result.
/// If so, this receiver should no longer be polled.
///
/// # Examples
///
/// Sending a value and polling it.
///
/// ```
/// use tokio::sync::oneshot;
///
/// use std::task::Poll;
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let (tx, mut rx) = oneshot::channel();
///
/// // A receiver is not terminated when it is initialized.
/// assert!(!rx.is_terminated());
///
    /// // A receiver is not terminated when it is polled and is still pending.
/// let poll = futures::poll!(&mut rx);
/// assert_eq!(poll, Poll::Pending);
/// assert!(!rx.is_terminated());
///
/// // A receiver is not terminated if a value has been sent, but not yet read.
/// tx.send(0).unwrap();
/// assert!(!rx.is_terminated());
///
/// // A receiver *is* terminated after it has been polled and yielded a value.
/// assert_eq!((&mut rx).await, Ok(0));
/// assert!(rx.is_terminated());
/// # }
/// ```
///
/// Dropping the sender.
///
/// ```
/// use tokio::sync::oneshot;
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let (tx, mut rx) = oneshot::channel::<()>();
///
/// // A receiver is not immediately terminated when the sender is dropped.
/// drop(tx);
/// assert!(!rx.is_terminated());
///
/// // A receiver *is* terminated after it has been polled and yielded an error.
/// let _ = (&mut rx).await.unwrap_err();
/// assert!(rx.is_terminated());
/// # }
/// ```
    pub fn is_terminated(&self) -> bool {
        // `inner` is cleared once this receiver has yielded a final
        // `Poll::Ready` result, so an absent `inner` marks termination.
        self.inner.is_none()
    }
/// Checks if a channel is empty.
///
/// This method returns `true` if the channel has no messages.
///
/// It is not necessarily safe to poll an empty receiver, which may have
/// already yielded a value. Use [`is_terminated()`][Self::is_terminated]
/// to check whether or not a receiver can be safely polled, instead.
///
/// # Examples
///
/// Sending a value.
///
/// ```
/// use tokio::sync::oneshot;
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let (tx, mut rx) = oneshot::channel();
/// assert!(rx.is_empty());
///
/// tx.send(0).unwrap();
/// assert!(!rx.is_empty());
///
/// let _ = (&mut rx).await;
/// assert!(rx.is_empty());
/// # }
/// ```
///
/// Dropping the sender.
///
/// ```
/// use tokio::sync::oneshot;
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let (tx, mut rx) = oneshot::channel::<()>();
///
/// // A channel is empty if the sender is dropped.
/// drop(tx);
/// assert!(rx.is_empty());
///
/// // A closed channel still yields an error, however.
/// (&mut rx).await.expect_err("should yield an error");
/// assert!(rx.is_empty());
/// # }
/// ```
///
/// Terminated channels are empty.
///
/// ```should_panic,ignore-wasm
/// use tokio::sync::oneshot;
///
/// #[tokio::main]
/// async fn main() {
/// let (tx, mut rx) = oneshot::channel();
/// tx.send(0).unwrap();
/// let _ = (&mut rx).await;
///
/// // NB: an empty channel is not necessarily safe to poll!
/// assert!(rx.is_empty());
/// let _ = (&mut rx).await;
/// }
/// ```
pub fn is_empty(&self) -> bool {
let Some(inner) = self.inner.as_ref() else {
// The channel has already terminated.
return true;
};
let state = State::load(&inner.state, Acquire);
if state.is_complete() {
// SAFETY: If `state.is_complete()` returns true, then the
// `VALUE_SENT` bit has been set and the sender side of the
// channel will no longer attempt to access the inner
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | true |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/sync/barrier.rs | tokio/src/sync/barrier.rs | use crate::loom::sync::Mutex;
use crate::sync::watch;
#[cfg(all(tokio_unstable, feature = "tracing"))]
use crate::util::trace;
/// A barrier enables multiple tasks to synchronize the beginning of some computation.
///
/// ```
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// use tokio::sync::Barrier;
/// use std::sync::Arc;
///
/// let mut handles = Vec::with_capacity(10);
/// let barrier = Arc::new(Barrier::new(10));
/// for _ in 0..10 {
/// let c = barrier.clone();
/// // The same messages will be printed together.
/// // You will NOT see any interleaving.
/// handles.push(tokio::spawn(async move {
/// println!("before wait");
/// let wait_result = c.wait().await;
/// println!("after wait");
/// wait_result
/// }));
/// }
///
/// // Will not resolve until all "after wait" messages have been printed
/// let mut num_leaders = 0;
/// for handle in handles {
/// let wait_result = handle.await.unwrap();
/// if wait_result.is_leader() {
/// num_leaders += 1;
/// }
/// }
///
/// // Exactly one barrier will resolve as the "leader"
/// assert_eq!(num_leaders, 1);
/// # }
/// ```
#[derive(Debug)]
pub struct Barrier {
    /// Shared mutable state: the watch sender plus arrival bookkeeping.
    state: Mutex<BarrierState>,
    /// Receiver half of the watch channel paired with `BarrierState::waker`;
    /// waiting tasks observe it to learn when the barrier releases.
    wait: watch::Receiver<usize>,
    /// Number of tasks that must rendezvous before the barrier releases
    /// (normalized to at least 1 in `new`).
    n: usize,
    /// Span tracking this barrier resource (unstable tracing only).
    #[cfg(all(tokio_unstable, feature = "tracing"))]
    resource_span: tracing::Span,
}
#[derive(Debug)]
struct BarrierState {
    /// Sender half of the watch channel used to wake waiting tasks.
    waker: watch::Sender<usize>,
    /// How many tasks have currently arrived at the barrier
    /// (starts at 0; updated in `wait_internal`).
    arrived: usize,
    /// Current barrier round; starts at 1 in `Barrier::new`.
    generation: usize,
}
impl Barrier {
    /// Creates a new barrier that can block a given number of tasks.
    ///
    /// A barrier will block `n`-1 tasks which call [`Barrier::wait`] and then wake up all
    /// tasks at once when the `n`th task calls `wait`.
    #[track_caller]
    pub fn new(mut n: usize) -> Barrier {
        // The watch channel carries the generation counter; waiters sleep on it
        // until the leader publishes a generation at least as new as theirs.
        let (waker, wait) = crate::sync::watch::channel(0);

        if n == 0 {
            // if n is 0, it's not clear what behavior the user wants.
            // in std::sync::Barrier, an n of 0 exhibits the same behavior as n == 1, where every
            // .wait() immediately unblocks, so we adopt that here as well.
            n = 1;
        }

        #[cfg(all(tokio_unstable, feature = "tracing"))]
        let resource_span = {
            let location = std::panic::Location::caller();
            let resource_span = tracing::trace_span!(
                parent: None,
                "runtime.resource",
                concrete_type = "Barrier",
                kind = "Sync",
                loc.file = location.file(),
                loc.line = location.line(),
                loc.col = location.column(),
            );

            resource_span.in_scope(|| {
                tracing::trace!(
                    target: "runtime::resource::state_update",
                    size = n,
                );

                tracing::trace!(
                    target: "runtime::resource::state_update",
                    arrived = 0,
                )
            });
            resource_span
        };

        Barrier {
            state: Mutex::new(BarrierState {
                waker,
                arrived: 0,
                generation: 1,
            }),
            n,
            wait,
            #[cfg(all(tokio_unstable, feature = "tracing"))]
            resource_span,
        }
    }

    /// Does not resolve until all tasks have rendezvoused here.
    ///
    /// Barriers are re-usable after all tasks have rendezvoused once, and can
    /// be used continuously.
    ///
    /// A single (arbitrary) future will receive a [`BarrierWaitResult`] that returns `true` from
    /// [`BarrierWaitResult::is_leader`] when returning from this function, and all other tasks
    /// will receive a result that will return `false` from `is_leader`.
    ///
    /// # Cancel safety
    ///
    /// This method is not cancel safe.
    pub async fn wait(&self) -> BarrierWaitResult {
        // When tracing is enabled the wait is wrapped in an instrumented
        // async op; otherwise it is awaited directly.
        #[cfg(all(tokio_unstable, feature = "tracing"))]
        return trace::async_op(
            || self.wait_internal(),
            self.resource_span.clone(),
            "Barrier::wait",
            "poll",
            false,
        )
        .await;

        #[cfg(any(not(tokio_unstable), not(feature = "tracing")))]
        return self.wait_internal().await;
    }

    async fn wait_internal(&self) -> BarrierWaitResult {
        crate::trace::async_trace_leaf().await;

        // NOTE: we are taking a _synchronous_ lock here.
        // It is okay to do so because the critical section is fast and never yields, so it cannot
        // deadlock even if another future is concurrently holding the lock.
        // It is _desirable_ to do so as synchronous Mutexes are, at least in theory, faster than
        // the asynchronous counter-parts, so we should use them where possible [citation needed].
        // NOTE: the extra scope here is so that the compiler doesn't think `state` is held across
        // a yield point, and thus marks the returned future as !Send.
        let generation = {
            let mut state = self.state.lock();
            let generation = state.generation;
            state.arrived += 1;
            #[cfg(all(tokio_unstable, feature = "tracing"))]
            tracing::trace!(
                target: "runtime::resource::state_update",
                arrived = 1,
                arrived.op = "add",
            );
            #[cfg(all(tokio_unstable, feature = "tracing"))]
            tracing::trace!(
                target: "runtime::resource::async_op::state_update",
                arrived = true,
            );
            if state.arrived == self.n {
                #[cfg(all(tokio_unstable, feature = "tracing"))]
                tracing::trace!(
                    target: "runtime::resource::async_op::state_update",
                    is_leader = true,
                );
                // we are the leader for this generation
                // wake everyone, increment the generation, and return
                state
                    .waker
                    .send(state.generation)
                    .expect("there is at least one receiver");
                state.arrived = 0;
                state.generation += 1;
                return BarrierWaitResult(true);
            }

            generation
        };

        // we're going to have to wait for the last of the generation to arrive
        let mut wait = self.wait.clone();

        loop {
            // Ignore the result: even on error the generation check below
            // decides whether to stop waiting.
            let _ = wait.changed().await;

            // note that the first time through the loop, this _will_ yield a generation
            // immediately, since we cloned a receiver that has never seen any values.
            if *wait.borrow() >= generation {
                break;
            }
        }

        BarrierWaitResult(false)
    }
}
/// A `BarrierWaitResult` is returned by `wait` when all tasks in the `Barrier` have rendezvoused.
#[derive(Debug, Clone)]
pub struct BarrierWaitResult(bool);

impl BarrierWaitResult {
    /// Returns `true` if this task from wait is the "leader task".
    ///
    /// Only one task will have `true` returned from their result, all other tasks will have
    /// `false` returned.
    pub fn is_leader(&self) -> bool {
        let BarrierWaitResult(is_leader) = *self;
        is_leader
    }
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/sync/watch.rs | tokio/src/sync/watch.rs | #![cfg_attr(not(feature = "sync"), allow(dead_code, unreachable_pub))]
//! A multi-producer, multi-consumer channel that only retains the *last* sent
//! value.
//!
//! This channel is useful for watching for changes to a value from multiple
//! points in the code base, for example, changes to configuration values.
//!
//! # Usage
//!
//! [`channel`] returns a [`Sender`] / [`Receiver`] pair. These are the producer
//! and consumer halves of the channel. The channel is created with an initial
//! value.
//!
//! Each [`Receiver`] independently tracks the last value *seen* by its caller.
//!
//! To access the **current** value stored in the channel and mark it as *seen*
//! by a given [`Receiver`], use [`Receiver::borrow_and_update()`].
//!
//! To access the current value **without** marking it as *seen*, use
//! [`Receiver::borrow()`]. (If the value has already been marked *seen*,
//! [`Receiver::borrow()`] is equivalent to [`Receiver::borrow_and_update()`].)
//!
//! For more information on when to use these methods, see
//! [here](#borrow_and_update-versus-borrow).
//!
//! ## Change notifications
//!
//! The [`Receiver`] half provides an asynchronous [`changed`] method. This
//! method is ready when a new, *unseen* value is sent via the [`Sender`] half.
//!
//! * [`Receiver::changed()`] returns:
//! * `Ok(())` on receiving a new value.
//! * `Err(`[`RecvError`](error::RecvError)`)` if the
//! channel has been closed __AND__ the current value is *seen*.
//! * If the current value is *unseen* when calling [`changed`], then
//! [`changed`] will return immediately. If the current value is *seen*, then
//! it will sleep until either a new message is sent via the [`Sender`] half,
//! or the [`Sender`] is dropped.
//! * On completion, the [`changed`] method marks the new value as *seen*.
//! * At creation, the initial value is considered *seen*. In other words,
//! [`Receiver::changed()`] will not return until a subsequent value is sent.
//! * New [`Receiver`] instances can be created with [`Sender::subscribe()`].
//! The current value at the time the [`Receiver`] is created is considered
//! *seen*.
//!
//! ## [`changed`] versus [`has_changed`]
//!
//! The [`Receiver`] half provides two methods for checking for changes
//! in the channel, [`has_changed`] and [`changed`].
//!
//! * [`has_changed`] is a *synchronous* method that checks whether the current
//! value is seen or not and returns a boolean. This method does __not__ mark the
//! value as seen.
//!
//! * [`changed`] is an *asynchronous* method that will return once an unseen
//! value is in the channel. This method does mark the value as seen.
//!
//! Note there are two behavioral differences on when these two methods return
//! an error.
//!
//! - [`has_changed`] errors if and only if the channel is closed.
//! - [`changed`] errors if the channel has been closed __AND__
//! the current value is seen.
//!
//! See the example below that shows how these methods have different fallibility.
//!
//! ## [`borrow_and_update`] versus [`borrow`]
//!
//! If the receiver intends to await notifications from [`changed`] in a loop,
//! [`Receiver::borrow_and_update()`] should be preferred over
//! [`Receiver::borrow()`]. This avoids a potential race where a new value is
//! sent between [`changed`] being ready and the value being read. (If
//! [`Receiver::borrow()`] is used, the loop may run twice with the same value.)
//!
//! If the receiver is only interested in the current value, and does not intend
//! to wait for changes, then [`Receiver::borrow()`] can be used. It may be more
//! convenient to use [`borrow`](Receiver::borrow) since it's an `&self`
//! method---[`borrow_and_update`](Receiver::borrow_and_update) requires `&mut
//! self`.
//!
//! # Examples
//!
//! The following example prints `hello! world! `.
//!
//! ```
//! use tokio::sync::watch;
//! use tokio::time::{Duration, sleep};
//!
//! # async fn dox() -> Result<(), Box<dyn std::error::Error>> {
//! let (tx, mut rx) = watch::channel("hello");
//!
//! tokio::spawn(async move {
//! // Use the equivalent of a "do-while" loop so the initial value is
//! // processed before awaiting the `changed()` future.
//! loop {
//! println!("{}! ", *rx.borrow_and_update());
//! if rx.changed().await.is_err() {
//! break;
//! }
//! }
//! });
//!
//! sleep(Duration::from_millis(100)).await;
//! tx.send("world")?;
//! # Ok(())
//! # }
//! ```
//!
//! Difference on fallibility of [`changed`] versus [`has_changed`].
//! ```
//! use tokio::sync::watch;
//!
//! #[tokio::main(flavor = "current_thread")]
//! # async fn main() {
//! let (tx, mut rx) = watch::channel("hello");
//! tx.send("goodbye").unwrap();
//! drop(tx);
//!
//! // `has_changed` does not mark the value as seen and errors
//! // since the channel is closed.
//! assert!(rx.has_changed().is_err());
//!
//! // `changed` returns Ok since the value is not already marked as seen
//! // even if the channel is closed.
//! assert!(rx.changed().await.is_ok());
//!
//! // The `changed` call above marks the value as seen.
//! // The next `changed` call now returns an error as the channel is closed
//! // AND the current value is seen.
//! assert!(rx.changed().await.is_err());
//! # }
//! ```
//!
//! # Closing
//!
//! [`Sender::is_closed`] and [`Sender::closed`] allow the producer to detect
//! when all [`Receiver`] handles have been dropped. This indicates that there
//! is no further interest in the values being produced and work can be stopped.
//!
//! The value in the channel will not be dropped until all senders and all
//! receivers have been dropped.
//!
//! # Thread safety
//!
//! Both [`Sender`] and [`Receiver`] are thread safe. They can be moved to other
//! threads and can be used in a concurrent environment. Clones of [`Receiver`]
//! handles may be moved to separate threads and also used concurrently.
//!
//! [`Sender`]: crate::sync::watch::Sender
//! [`Receiver`]: crate::sync::watch::Receiver
//! [`changed`]: crate::sync::watch::Receiver::changed
//! [`has_changed`]: crate::sync::watch::Receiver::has_changed
//! [`borrow`]: crate::sync::watch::Receiver::borrow
//! [`borrow_and_update`]: crate::sync::watch::Receiver::borrow_and_update
//! [`Receiver::changed()`]: crate::sync::watch::Receiver::changed
//! [`Receiver::borrow()`]: crate::sync::watch::Receiver::borrow
//! [`Receiver::borrow_and_update()`]:
//! crate::sync::watch::Receiver::borrow_and_update
//! [`channel`]: crate::sync::watch::channel
//! [`Sender::is_closed`]: crate::sync::watch::Sender::is_closed
//! [`Sender::closed`]: crate::sync::watch::Sender::closed
//! [`Sender::subscribe()`]: crate::sync::watch::Sender::subscribe
use crate::sync::notify::Notify;
use crate::task::coop::cooperative;
use crate::loom::sync::atomic::AtomicUsize;
use crate::loom::sync::atomic::Ordering::{AcqRel, Relaxed};
use crate::loom::sync::{Arc, RwLock, RwLockReadGuard};
use std::fmt;
use std::mem;
use std::ops;
use std::panic;
/// Receives values from the associated [`Sender`](struct@Sender).
///
/// Instances are created by the [`channel`](fn@channel) function.
///
/// To turn this receiver into a `Stream`, you can use the [`WatchStream`]
/// wrapper.
///
/// [`WatchStream`]: https://docs.rs/tokio-stream/0.1/tokio_stream/wrappers/struct.WatchStream.html
#[derive(Debug)]
pub struct Receiver<T> {
    /// Pointer to the shared state
    shared: Arc<Shared<T>>,

    /// Last observed version
    ///
    /// The current value counts as "seen" when this matches the version in
    /// `shared.state`; the two differing is what `has_changed` reports.
    version: Version,
}
/// Sends values to the associated [`Receiver`](struct@Receiver).
///
/// Instances are created by the [`channel`](fn@channel) function.
#[derive(Debug)]
pub struct Sender<T> {
    /// Pointer to the state shared with every `Receiver` and cloned `Sender`.
    shared: Arc<Shared<T>>,
}
impl<T> Clone for Sender<T> {
fn clone(&self) -> Self {
self.shared.ref_count_tx.fetch_add(1, Relaxed);
Self {
shared: self.shared.clone(),
}
}
}
impl<T: Default> Default for Sender<T> {
fn default() -> Self {
Self::new(T::default())
}
}
/// Returns a reference to the inner value.
///
/// Outstanding borrows hold a read lock on the inner value. This means that
/// long-lived borrows could cause the producer half to block. It is recommended
/// to keep the borrow as short-lived as possible. Additionally, if you are
/// running in an environment that allows `!Send` futures, you must ensure that
/// the returned `Ref` type is never held alive across an `.await` point,
/// otherwise, it can lead to a deadlock.
///
/// The priority policy of the lock is dependent on the underlying lock
/// implementation, and this type does not guarantee that any particular policy
/// will be used. In particular, a producer which is waiting to acquire the lock
/// in `send` might or might not block concurrent calls to `borrow`, e.g.:
///
/// <details><summary>Potential deadlock example</summary>
///
/// ```text
/// // Task 1 (on thread A)          |  // Task 2 (on thread B)
/// let _ref1 = rx.borrow();         |
///                                  |  // will block
///                                  |  let _ = tx.send(());
/// // may deadlock                  |
/// let _ref2 = rx.borrow();         |
/// ```
/// </details>
#[derive(Debug)]
pub struct Ref<'a, T> {
    /// Read guard over the shared value; the lock is released on drop.
    inner: RwLockReadGuard<'a, T>,
    /// Whether the borrowed value was unseen by the borrower at borrow time.
    has_changed: bool,
}
impl<'a, T> Ref<'a, T> {
    /// Indicates if the borrowed value is considered as _changed_ since the last
    /// time it has been marked as seen.
    ///
    /// Unlike [`Receiver::has_changed()`], this method does not fail if the channel is closed.
    ///
    /// When borrowed from the [`Sender`] this function will always return `false`.
    ///
    /// # Examples
    ///
    /// ```
    /// use tokio::sync::watch;
    ///
    /// # #[tokio::main(flavor = "current_thread")]
    /// # async fn main() {
    /// let (tx, mut rx) = watch::channel("hello");
    ///
    /// tx.send("goodbye").unwrap();
    /// // The sender does never consider the value as changed.
    /// assert!(!tx.borrow().has_changed());
    ///
    /// // Drop the sender immediately, just for testing purposes.
    /// drop(tx);
    ///
    /// // Even if the sender has already been dropped...
    /// assert!(rx.has_changed().is_err());
    /// // ...the modified value is still readable and detected as changed.
    /// assert_eq!(*rx.borrow(), "goodbye");
    /// assert!(rx.borrow().has_changed());
    ///
    /// // Read the changed value and mark it as seen.
    /// {
    ///     let received = rx.borrow_and_update();
    ///     assert_eq!(*received, "goodbye");
    ///     assert!(received.has_changed());
    ///     // Release the read lock when leaving this scope.
    /// }
    ///
    /// // Now the value has already been marked as seen and could
    /// // never be modified again (after the sender has been dropped).
    /// assert!(!rx.borrow().has_changed());
    /// # }
    /// ```
    pub fn has_changed(&self) -> bool {
        // Computed when this `Ref` was created, while the read lock was
        // already held, so it reflects the value actually borrowed.
        self.has_changed
    }
}
struct Shared<T> {
    /// The most recent value.
    value: RwLock<T>,

    /// The current version.
    ///
    /// The lowest bit represents a "closed" state. The rest of the bits
    /// represent the current version.
    state: AtomicState,

    /// Tracks the number of `Receiver` instances.
    ref_count_rx: AtomicUsize,

    /// Tracks the number of `Sender` instances.
    ref_count_tx: AtomicUsize,

    /// Notifies waiting receivers that the value changed.
    ///
    /// Sharded across several `Notify` objects to reduce lock contention
    /// (see the `big_notify` module).
    notify_rx: big_notify::BigNotify,

    /// Notifies any task listening for `Receiver` dropped events.
    notify_tx: Notify,
}
impl<T: fmt::Debug> fmt::Debug for Shared<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Load the packed state once so `version` and `is_closed` are decoded
        // from the same snapshot.
        let state = self.state.load();
        f.debug_struct("Shared")
            .field("value", &self.value)
            .field("version", &state.version())
            .field("is_closed", &state.is_closed())
            .field("ref_count_rx", &self.ref_count_rx)
            .finish()
    }
}
pub mod error {
    //! Watch error types.

    use std::error::Error;
    use std::fmt;

    /// Error produced when sending a value fails.
    ///
    /// Carries the value that could not be delivered back to the caller.
    #[derive(PartialEq, Eq, Clone, Copy)]
    pub struct SendError<T>(pub T);

    // ===== impl SendError =====

    impl<T> fmt::Debug for SendError<T> {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            // The payload is deliberately omitted so `T: Debug` is not required.
            f.debug_struct("SendError").finish_non_exhaustive()
        }
    }

    impl<T> fmt::Display for SendError<T> {
        fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
            fmt.write_str("channel closed")
        }
    }

    impl<T> Error for SendError<T> {}

    /// Error produced when receiving a change notification.
    #[derive(Debug, Clone)]
    pub struct RecvError(pub(super) ());

    // ===== impl RecvError =====

    impl fmt::Display for RecvError {
        fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
            fmt.write_str("channel closed")
        }
    }

    impl Error for RecvError {}
}
mod big_notify {
    use super::Notify;
    use crate::sync::notify::Notified;

    // To avoid contention on the lock inside the `Notify`, we store multiple
    // copies of it. Then, we use either circular access or randomness to spread
    // out threads over different `Notify` objects.
    //
    // Some simple benchmarks show that randomness performs slightly better than
    // circular access (probably due to contention on `next`), so we prefer to
    // use randomness when Tokio is compiled with a random number generator.
    //
    // When the random number generator is not available, we fall back to
    // circular access.
    pub(super) struct BigNotify {
        /// Round-robin cursor, only present when randomness is unavailable.
        #[cfg(not(all(not(loom), feature = "sync", any(feature = "rt", feature = "macros"))))]
        next: std::sync::atomic::AtomicUsize,
        /// The sharded `Notify` objects that waiters are spread across.
        inner: [Notify; 8],
    }

    impl BigNotify {
        /// Creates a `BigNotify` with all shards empty.
        pub(super) fn new() -> Self {
            Self {
                #[cfg(not(all(
                    not(loom),
                    feature = "sync",
                    any(feature = "rt", feature = "macros")
                )))]
                next: std::sync::atomic::AtomicUsize::new(0),
                inner: Default::default(),
            }
        }

        /// Wakes the waiters registered on every shard.
        pub(super) fn notify_waiters(&self) {
            for notify in &self.inner {
                notify.notify_waiters();
            }
        }

        /// This function implements the case where randomness is not available.
        #[cfg(not(all(not(loom), feature = "sync", any(feature = "rt", feature = "macros"))))]
        pub(super) fn notified(&self) -> Notified<'_> {
            let i = self.next.fetch_add(1, std::sync::atomic::Ordering::Relaxed) % 8;
            self.inner[i].notified()
        }

        /// This function implements the case where randomness is available.
        #[cfg(all(not(loom), feature = "sync", any(feature = "rt", feature = "macros")))]
        pub(super) fn notified(&self) -> Notified<'_> {
            let i = crate::runtime::context::thread_rng_n(8) as usize;
            self.inner[i].notified()
        }
    }
}
use self::state::{AtomicState, Version};
mod state {
    use crate::loom::sync::atomic::AtomicUsize;
    use crate::loom::sync::atomic::Ordering;

    const CLOSED_BIT: usize = 1;

    // Using 2 as the step size preserves the `CLOSED_BIT`.
    const STEP_SIZE: usize = 2;

    /// The version part of the state. The lowest bit is always zero.
    #[derive(Copy, Clone, Debug, Eq, PartialEq)]
    pub(super) struct Version(usize);

    /// Snapshot of the state. The first bit is used as the CLOSED bit.
    /// The remaining bits are used as the version.
    ///
    /// The CLOSED bit tracks whether all senders have been dropped. Dropping all
    /// receivers does not set it.
    #[derive(Copy, Clone, Debug)]
    pub(super) struct StateSnapshot(usize);

    /// The state stored in an atomic integer.
    ///
    /// The `Sender` uses `Release` ordering for storing a new state
    /// and the `Receiver`s use `Acquire` ordering for loading the
    /// current state. This ensures that written values are seen by
    /// the `Receiver`s for a proper handover.
    #[derive(Debug)]
    pub(super) struct AtomicState(AtomicUsize);

    impl Version {
        /// Decrements the version.
        ///
        /// Used by `Receiver::mark_changed` to make the current value appear
        /// unseen again.
        pub(super) fn decrement(&mut self) {
            // Using a wrapping decrement here is required to ensure that the
            // operation is consistent with `std::sync::atomic::AtomicUsize::fetch_add()`
            // which wraps on overflow.
            self.0 = self.0.wrapping_sub(STEP_SIZE);
        }

        /// The version assigned to a freshly created channel; the initial
        /// value of the channel is considered already seen.
        pub(super) const INITIAL: Self = Version(0);
    }

    impl StateSnapshot {
        /// Extract the version from the state.
        pub(super) fn version(self) -> Version {
            // Mask out the CLOSED bit so the result has a zero low bit.
            Version(self.0 & !CLOSED_BIT)
        }

        /// Is the closed bit set?
        pub(super) fn is_closed(self) -> bool {
            (self.0 & CLOSED_BIT) == CLOSED_BIT
        }
    }

    impl AtomicState {
        /// Create a new `AtomicState` that is not closed and which has the
        /// version set to `Version::INITIAL`.
        pub(super) fn new() -> Self {
            AtomicState(AtomicUsize::new(Version::INITIAL.0))
        }

        /// Load the current value of the state.
        ///
        /// Only used by the receiver and for debugging purposes.
        ///
        /// The receiver side (read-only) uses `Acquire` ordering for a proper handover
        /// of the shared value with the sender side (single writer). The state is always
        /// updated after modifying and before releasing the (exclusive) lock on the
        /// shared value.
        pub(super) fn load(&self) -> StateSnapshot {
            StateSnapshot(self.0.load(Ordering::Acquire))
        }

        /// Increment the version counter.
        pub(super) fn increment_version_while_locked(&self) {
            // Use `Release` ordering to ensure that the shared value
            // has been written before updating the version. The shared
            // value is still protected by an exclusive lock during this
            // method.
            self.0.fetch_add(STEP_SIZE, Ordering::Release);
        }

        /// Set the closed bit in the state.
        pub(super) fn set_closed(&self) {
            self.0.fetch_or(CLOSED_BIT, Ordering::Release);
        }
    }
}
/// Creates a new watch channel, returning the "send" and "receive" handles.
///
/// All values sent by [`Sender`] will become visible to the [`Receiver`] handles.
/// Only the last value sent is made available to the [`Receiver`] half. All
/// intermediate values are dropped.
///
/// # Examples
///
/// The following example prints `hello! world! `.
///
/// ```
/// use tokio::sync::watch;
/// use tokio::time::{Duration, sleep};
///
/// # async fn dox() -> Result<(), Box<dyn std::error::Error>> {
/// let (tx, mut rx) = watch::channel("hello");
///
/// tokio::spawn(async move {
///     // Use the equivalent of a "do-while" loop so the initial value is
///     // processed before awaiting the `changed()` future.
///     loop {
///         println!("{}! ", *rx.borrow_and_update());
///         if rx.changed().await.is_err() {
///             break;
///         }
///     }
/// });
///
/// sleep(Duration::from_millis(100)).await;
/// tx.send("world")?;
/// # Ok(())
/// # }
/// ```
///
/// [`Sender`]: struct@Sender
/// [`Receiver`]: struct@Receiver
pub fn channel<T>(init: T) -> (Sender<T>, Receiver<T>) {
    // All channel state lives in a single ref-counted allocation that both
    // halves point at. The counters start at 1 for the handles created here.
    let shared = Arc::new(Shared {
        value: RwLock::new(init),
        state: AtomicState::new(),
        ref_count_rx: AtomicUsize::new(1),
        ref_count_tx: AtomicUsize::new(1),
        notify_rx: big_notify::BigNotify::new(),
        notify_tx: Notify::new(),
    });

    let receiver = Receiver {
        shared: Arc::clone(&shared),
        // The initial value counts as already seen by this receiver.
        version: Version::INITIAL,
    };
    let sender = Sender { shared };

    (sender, receiver)
}
impl<T> Receiver<T> {
fn from_shared(version: Version, shared: Arc<Shared<T>>) -> Self {
// No synchronization necessary as this is only used as a counter and
// not memory access.
shared.ref_count_rx.fetch_add(1, Relaxed);
Self { shared, version }
}
/// Returns a reference to the most recently sent value.
///
/// This method does not mark the returned value as seen, so future calls to
/// [`changed`] may return immediately even if you have already seen the
/// value with a call to `borrow`.
///
/// Outstanding borrows hold a read lock on the inner value. This means that
/// long-lived borrows could cause the producer half to block. It is recommended
/// to keep the borrow as short-lived as possible. Additionally, if you are
/// running in an environment that allows `!Send` futures, you must ensure that
/// the returned `Ref` type is never held alive across an `.await` point,
/// otherwise, it can lead to a deadlock.
///
/// The priority policy of the lock is dependent on the underlying lock
/// implementation, and this type does not guarantee that any particular policy
/// will be used. In particular, a producer which is waiting to acquire the lock
/// in `send` might or might not block concurrent calls to `borrow`, e.g.:
///
/// <details><summary>Potential deadlock example</summary>
///
/// ```text
/// // Task 1 (on thread A) | // Task 2 (on thread B)
/// let _ref1 = rx.borrow(); |
/// | // will block
/// | let _ = tx.send(());
/// // may deadlock |
/// let _ref2 = rx.borrow(); |
/// ```
/// </details>
///
/// For more information on when to use this method versus
/// [`borrow_and_update`], see [here](self#borrow_and_update-versus-borrow).
///
/// [`changed`]: Receiver::changed
/// [`borrow_and_update`]: Receiver::borrow_and_update
///
/// # Examples
///
/// ```
/// use tokio::sync::watch;
///
/// let (_, rx) = watch::channel("hello");
/// assert_eq!(*rx.borrow(), "hello");
/// ```
pub fn borrow(&self) -> Ref<'_, T> {
let inner = self.shared.value.read();
// After obtaining a read-lock no concurrent writes could occur
// and the loaded version matches that of the borrowed reference.
let new_version = self.shared.state.load().version();
let has_changed = self.version != new_version;
Ref { inner, has_changed }
}
/// Returns a reference to the most recently sent value and marks that value
/// as seen.
///
/// This method marks the current value as seen. Subsequent calls to [`changed`]
/// will not return immediately until the [`Sender`] has modified the shared
/// value again.
///
/// Outstanding borrows hold a read lock on the inner value. This means that
/// long-lived borrows could cause the producer half to block. It is recommended
/// to keep the borrow as short-lived as possible. Additionally, if you are
/// running in an environment that allows `!Send` futures, you must ensure that
/// the returned `Ref` type is never held alive across an `.await` point,
/// otherwise, it can lead to a deadlock.
///
/// The priority policy of the lock is dependent on the underlying lock
/// implementation, and this type does not guarantee that any particular policy
/// will be used. In particular, a producer which is waiting to acquire the lock
/// in `send` might or might not block concurrent calls to `borrow`, e.g.:
///
/// <details><summary>Potential deadlock example</summary>
///
/// ```text
/// // Task 1 (on thread A) | // Task 2 (on thread B)
/// let _ref1 = rx1.borrow_and_update(); |
/// | // will block
/// | let _ = tx.send(());
/// // may deadlock |
/// let _ref2 = rx2.borrow_and_update(); |
/// ```
/// </details>
///
/// For more information on when to use this method versus [`borrow`], see
/// [here](self#borrow_and_update-versus-borrow).
///
/// [`changed`]: Receiver::changed
/// [`borrow`]: Receiver::borrow
pub fn borrow_and_update(&mut self) -> Ref<'_, T> {
let inner = self.shared.value.read();
// After obtaining a read-lock no concurrent writes could occur
// and the loaded version matches that of the borrowed reference.
let new_version = self.shared.state.load().version();
let has_changed = self.version != new_version;
// Mark the shared value as seen by updating the version
self.version = new_version;
Ref { inner, has_changed }
}
/// Checks if this channel contains a message that this receiver has not yet
/// seen. The current value will not be marked as seen.
///
/// Although this method is called `has_changed`, it does not check
/// messages for equality, so this call will return true even if the current
/// message is equal to the previous message.
///
/// # Errors
///
/// Returns a [`RecvError`](error::RecvError) if and only if the channel has been closed.
///
/// # Examples
///
/// ## Basic usage
///
/// ```
/// use tokio::sync::watch;
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let (tx, mut rx) = watch::channel("hello");
///
/// tx.send("goodbye").unwrap();
///
/// assert!(rx.has_changed().unwrap());
/// assert_eq!(*rx.borrow_and_update(), "goodbye");
///
/// // The value has been marked as seen
/// assert!(!rx.has_changed().unwrap());
/// # }
/// ```
///
/// ## Closed channel example
///
/// ```
/// use tokio::sync::watch;
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let (tx, rx) = watch::channel("hello");
/// tx.send("goodbye").unwrap();
///
/// drop(tx);
///
/// // The channel is closed
/// assert!(rx.has_changed().is_err());
/// # }
/// ```
pub fn has_changed(&self) -> Result<bool, error::RecvError> {
// Load the version from the state
let state = self.shared.state.load();
if state.is_closed() {
// All senders have dropped.
return Err(error::RecvError(()));
}
let new_version = state.version();
Ok(self.version != new_version)
}
/// Marks the state as changed.
///
/// After invoking this method [`has_changed()`](Self::has_changed)
/// returns `true` and [`changed()`](Self::changed) returns
/// immediately, regardless of whether a new value has been sent.
///
/// This is useful for triggering an initial change notification after
/// subscribing to synchronize new receivers.
    pub fn mark_changed(&mut self) {
        // Rewinding the locally observed version makes the current value
        // appear unseen, regardless of what the sender has published.
        self.version.decrement();
    }
/// Marks the state as unchanged.
///
/// The current value will be considered seen by the receiver.
///
/// This is useful if you are not interested in the current value
/// visible in the receiver.
pub fn mark_unchanged(&mut self) {
let current_version = self.shared.state.load().version();
self.version = current_version;
}
/// Waits for a change notification, then marks the current value as seen.
///
/// If the current value in the channel has not yet been marked seen when
/// this method is called, the method marks that value seen and returns
/// immediately. If the newest value has already been marked seen, then the
/// method sleeps until a new message is sent by a [`Sender`] connected to
/// this `Receiver`, or until all [`Sender`]s are dropped.
///
/// For more information, see
/// [*Change notifications*](self#change-notifications) in the module-level documentation.
///
/// # Errors
///
/// Returns a [`RecvError`](error::RecvError) if the channel has been closed __AND__
/// the current value is seen.
///
/// # Cancel safety
///
/// This method is cancel safe. If you use it as the event in a
/// [`tokio::select!`](crate::select) statement and some other branch
/// completes first, then it is guaranteed that no values have been marked
/// seen by this call to `changed`.
///
/// [`Sender`]: struct@Sender
///
/// # Examples
///
/// ```
/// use tokio::sync::watch;
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let (tx, mut rx) = watch::channel("hello");
///
/// tokio::spawn(async move {
/// tx.send("goodbye").unwrap();
/// });
///
/// assert!(rx.changed().await.is_ok());
/// assert_eq!(*rx.borrow_and_update(), "goodbye");
///
/// // The `tx` handle has been dropped
/// assert!(rx.changed().await.is_err());
/// # }
/// ```
    pub async fn changed(&mut self) -> Result<(), error::RecvError> {
        // Wrapped in `cooperative` so polling participates in the runtime's
        // cooperative scheduling budget (see `task::coop`).
        cooperative(changed_impl(&self.shared, &mut self.version)).await
    }
/// Waits for a value that satisfies the provided condition.
///
/// This method will call the provided closure whenever something is sent on
/// the channel. Once the closure returns `true`, this method will return a
/// reference to the value that was passed to the closure.
///
/// Before `wait_for` starts waiting for changes, it will call the closure
/// on the current value. If the closure returns `true` when given the
/// current value, then `wait_for` will immediately return a reference to
/// the current value. This is the case even if the current value is already
/// considered seen.
///
/// The watch channel only keeps track of the most recent value, so if
/// several messages are sent faster than `wait_for` is able to call the
/// closure, then it may skip some updates. Whenever the closure is called,
/// it will be called with the most recent value.
///
/// When this function returns, the value that was passed to the closure
/// when it returned `true` will be considered seen.
///
/// If the channel is closed, then `wait_for` will return a [`RecvError`].
/// Once this happens, no more messages can ever be sent on the channel.
/// When an error is returned, it is guaranteed that the closure has been
/// called on the last value, and that it returned `false` for that value.
/// (If the closure returned `true`, then the last value would have been
/// returned instead of the error.)
///
/// Like the [`borrow`] method, the returned borrow holds a read lock on the
/// inner value. This means that long-lived borrows could cause the producer
/// half to block. It is recommended to keep the borrow as short-lived as
/// possible. See the documentation of `borrow` for more information on
/// this.
///
/// [`borrow`]: Receiver::borrow
/// [`RecvError`]: error::RecvError
///
/// # Cancel safety
///
/// This method is cancel safe. If you use it as the event in a
/// [`tokio::select!`](crate::select) statement and some other branch
/// completes first, then it is guaranteed that the last seen value `val`
/// (if any) satisfies `f(val) == false`.
///
/// # Panics
///
/// If and only if the closure `f` panics. In that case, no resource owned
/// or shared by this [`Receiver`] will be poisoned.
///
/// # Examples
///
/// ```
/// use tokio::sync::watch;
/// use tokio::time::{sleep, Duration};
///
/// #[tokio::main(flavor = "current_thread", start_paused = true)]
/// async fn main() {
/// let (tx, mut rx) = watch::channel("hello");
///
/// tokio::spawn(async move {
/// sleep(Duration::from_secs(1)).await;
/// tx.send("goodbye").unwrap();
/// });
///
/// assert!(rx.wait_for(|val| *val == "goodbye").await.is_ok());
/// assert_eq!(*rx.borrow(), "goodbye");
/// }
/// ```
pub async fn wait_for(
&mut self,
f: impl FnMut(&T) -> bool,
) -> Result<Ref<'_, T>, error::RecvError> {
cooperative(self.wait_for_inner(f)).await
}
async fn wait_for_inner(
&mut self,
mut f: impl FnMut(&T) -> bool,
) -> Result<Ref<'_, T>, error::RecvError> {
let mut closed = false;
loop {
{
let inner = self.shared.value.read();
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | true |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/sync/batch_semaphore.rs | tokio/src/sync/batch_semaphore.rs | #![cfg_attr(not(feature = "sync"), allow(unreachable_pub, dead_code))]
//! # Implementation Details.
//!
//! The semaphore is implemented using an intrusive linked list of waiters. An
//! atomic counter tracks the number of available permits. If the semaphore does
//! not contain the required number of permits, the task attempting to acquire
//! permits places its waker at the end of a queue. When new permits are made
//! available (such as by releasing an initial acquisition), they are assigned
//! to the task at the front of the queue, waking that task if its requested
//! number of permits is met.
//!
//! Because waiters are enqueued at the back of the linked list and dequeued
//! from the front, the semaphore is fair. Tasks trying to acquire large numbers
//! of permits at a time will always be woken eventually, even if many other
//! tasks are acquiring smaller numbers of permits. This means that in a
//! use-case like tokio's read-write lock, writers will not be starved by
//! readers.
use crate::loom::cell::UnsafeCell;
use crate::loom::sync::atomic::AtomicUsize;
use crate::loom::sync::{Mutex, MutexGuard};
use crate::util::linked_list::{self, LinkedList};
#[cfg(all(tokio_unstable, feature = "tracing"))]
use crate::util::trace;
use crate::util::WakeList;
use std::future::Future;
use std::marker::PhantomPinned;
use std::pin::Pin;
use std::ptr::NonNull;
use std::sync::atomic::Ordering::*;
use std::task::{ready, Context, Poll, Waker};
use std::{cmp, fmt};
/// An asynchronous counting semaphore which permits waiting on multiple permits at once.
pub(crate) struct Semaphore {
    /// The queue of tasks waiting for permits, protected by a mutex.
    waiters: Mutex<Waitlist>,
    /// The current number of available permits in the semaphore.
    ///
    /// The count is stored shifted left by `PERMIT_SHIFT`; the
    /// least-significant bit is the `CLOSED` flag.
    permits: AtomicUsize,
    /// Span used to emit `runtime.resource` instrumentation for this
    /// semaphore when unstable tracing is enabled.
    #[cfg(all(tokio_unstable, feature = "tracing"))]
    resource_span: tracing::Span,
}
/// The semaphore's wait queue plus its closed flag; always accessed under
/// the `waiters` mutex.
struct Waitlist {
    /// Intrusive list of waiters. New waiters are pushed onto the front and
    /// permits are handed out from the back, so service order is FIFO.
    queue: LinkedList<Waiter, <Waiter as linked_list::Link>::Target>,
    /// Set once the semaphore has been closed.
    closed: bool,
}
/// Error returned from the [`Semaphore::try_acquire`] function.
///
/// A `Closed` error is permanent (nothing in this module reopens a closed
/// semaphore), while `NoPermits` may succeed if retried later.
///
/// [`Semaphore::try_acquire`]: crate::sync::Semaphore::try_acquire
#[derive(Debug, PartialEq, Eq)]
pub enum TryAcquireError {
    /// The semaphore has been [closed] and cannot issue new permits.
    ///
    /// [closed]: crate::sync::Semaphore::close
    Closed,
    /// The semaphore has no available permits.
    NoPermits,
}
/// Error returned from the [`Semaphore::acquire`] function.
///
/// An `acquire` operation can only fail if the semaphore has been
/// [closed].
///
/// [closed]: crate::sync::Semaphore::close
/// [`Semaphore::acquire`]: crate::sync::Semaphore::acquire
#[derive(Debug)]
// The private unit field prevents construction outside this module.
pub struct AcquireError(());
/// Future returned by [`Semaphore::acquire`].
pub(crate) struct Acquire<'a> {
    /// The waiter entry that is linked into the semaphore's wait queue while
    /// this future is pending.
    node: Waiter,
    /// The semaphore permits are being acquired from.
    semaphore: &'a Semaphore,
    /// Total number of permits this future requests.
    num_permits: usize,
    /// Set to `true` once `node` has been pushed onto the wait queue and
    /// cleared when the future completes; `Drop` consults it to decide
    /// whether the node must be unlinked.
    queued: bool,
}
/// An entry in the wait queue.
struct Waiter {
    /// The current state of the waiter.
    ///
    /// This is either the number of remaining permits required by
    /// the waiter, or a flag indicating that the waiter is not yet queued.
    state: AtomicUsize,
    /// The waker to notify the task awaiting permits.
    ///
    /// # Safety
    ///
    /// This may only be accessed while the wait queue is locked.
    waker: UnsafeCell<Option<Waker>>,
    /// Intrusive linked-list pointers.
    ///
    /// # Safety
    ///
    /// This may only be accessed while the wait queue is locked.
    ///
    /// TODO: Ideally, we would be able to use loom to enforce that
    /// this isn't accessed concurrently. However, it is difficult to
    /// use a `UnsafeCell` here, since the `Link` trait requires _returning_
    /// references to `Pointers`, and `UnsafeCell` requires that checked access
    /// take place inside a closure. We should consider changing `Pointers` to
    /// use `UnsafeCell` internally.
    pointers: linked_list::Pointers<Waiter>,
    /// Tracing context for async-op instrumentation (unstable tracing only).
    #[cfg(all(tokio_unstable, feature = "tracing"))]
    ctx: trace::AsyncOpTracingCtx,
    /// Should not be `Unpin`.
    _p: PhantomPinned,
}
// Generates `Waiter::addr_of_pointers`, which derives a pointer to the
// `pointers` field from a raw `NonNull<Waiter>` handle — presumably to avoid
// materializing a `&Waiter` reference; see the macro's definition to confirm.
generate_addr_of_methods! {
    impl<> Waiter {
        unsafe fn addr_of_pointers(self: NonNull<Self>) -> NonNull<linked_list::Pointers<Waiter>> {
            &self.pointers
        }
    }
}
impl Semaphore {
    /// The maximum number of permits which a semaphore can hold.
    ///
    /// Note that this reserves three bits of flags in the permit counter, but
    /// we only actually use one of them. However, the previous semaphore
    /// implementation used three bits, so we will continue to reserve them to
    /// avoid a breaking change if additional flags need to be added in the
    /// future.
    pub(crate) const MAX_PERMITS: usize = usize::MAX >> 3;
    /// Flag bit set in the permit counter once the semaphore is closed.
    const CLOSED: usize = 1;
    // The least-significant bit in the number of permits is reserved to use
    // as a flag indicating that the semaphore has been closed. Consequently
    // PERMIT_SHIFT is used to leave that bit for that purpose.
    const PERMIT_SHIFT: usize = 1;
    /// Creates a new semaphore with the initial number of permits.
    ///
    /// Maximum number of permits on 32-bit platforms is `(1 << 29) - 1`
    /// (i.e. `usize::MAX >> 3`).
    pub(crate) fn new(permits: usize) -> Self {
        assert!(
            permits <= Self::MAX_PERMITS,
            "a semaphore may not have more than MAX_PERMITS permits ({})",
            Self::MAX_PERMITS
        );
        #[cfg(all(tokio_unstable, feature = "tracing"))]
        let resource_span = {
            let resource_span = tracing::trace_span!(
                parent: None,
                "runtime.resource",
                concrete_type = "Semaphore",
                kind = "Sync",
                is_internal = true
            );
            resource_span.in_scope(|| {
                tracing::trace!(
                    target: "runtime::resource::state_update",
                    permits = permits,
                    permits.op = "override",
                )
            });
            resource_span
        };
        Self {
            // Store the count shifted to leave room for the CLOSED bit.
            permits: AtomicUsize::new(permits << Self::PERMIT_SHIFT),
            waiters: Mutex::new(Waitlist {
                queue: LinkedList::new(),
                closed: false,
            }),
            #[cfg(all(tokio_unstable, feature = "tracing"))]
            resource_span,
        }
    }
    /// Creates a new semaphore with the initial number of permits.
    ///
    /// Maximum number of permits on 32-bit platforms is `(1 << 29) - 1`
    /// (i.e. `usize::MAX >> 3`).
    #[cfg(not(all(loom, test)))]
    pub(crate) const fn const_new(permits: usize) -> Self {
        assert!(permits <= Self::MAX_PERMITS);
        Self {
            permits: AtomicUsize::new(permits << Self::PERMIT_SHIFT),
            waiters: Mutex::const_new(Waitlist {
                queue: LinkedList::new(),
                closed: false,
            }),
            #[cfg(all(tokio_unstable, feature = "tracing"))]
            resource_span: tracing::Span::none(),
        }
    }
    /// Creates a new closed semaphore with 0 permits.
    pub(crate) fn new_closed() -> Self {
        Self {
            // Only the CLOSED bit is set: zero permits, closed.
            permits: AtomicUsize::new(Self::CLOSED),
            waiters: Mutex::new(Waitlist {
                queue: LinkedList::new(),
                closed: true,
            }),
            #[cfg(all(tokio_unstable, feature = "tracing"))]
            resource_span: tracing::Span::none(),
        }
    }
    /// Creates a new closed semaphore with 0 permits.
    #[cfg(not(all(loom, test)))]
    pub(crate) const fn const_new_closed() -> Self {
        Self {
            permits: AtomicUsize::new(Self::CLOSED),
            waiters: Mutex::const_new(Waitlist {
                queue: LinkedList::new(),
                closed: true,
            }),
            #[cfg(all(tokio_unstable, feature = "tracing"))]
            resource_span: tracing::Span::none(),
        }
    }
    /// Returns the current number of available permits.
    pub(crate) fn available_permits(&self) -> usize {
        // Shift out the flag bit to recover the actual permit count.
        self.permits.load(Acquire) >> Self::PERMIT_SHIFT
    }
    /// Adds `added` new permits to the semaphore.
    ///
    /// The maximum number of permits is `usize::MAX >> 3`, and this function will panic if the limit is exceeded.
    pub(crate) fn release(&self, added: usize) {
        if added == 0 {
            return;
        }
        // Assign permits to the wait queue
        self.add_permits_locked(added, self.waiters.lock());
    }
    /// Closes the semaphore. This prevents the semaphore from issuing new
    /// permits and notifies all pending waiters.
    pub(crate) fn close(&self) {
        let mut waiters = self.waiters.lock();
        // If the semaphore's permits counter has enough permits for an
        // unqueued waiter to acquire all the permits it needs immediately,
        // it won't touch the wait list. Therefore, we have to set a bit on
        // the permit counter as well. However, we must do this while
        // holding the lock --- otherwise, if we set the bit and then wait
        // to acquire the lock we'll enter an inconsistent state where the
        // permit counter is closed, but the wait list is not.
        self.permits.fetch_or(Self::CLOSED, Release);
        waiters.closed = true;
        // Drain the queue, waking every pending waiter so its `acquire`
        // future can observe the closed state and fail.
        while let Some(mut waiter) = waiters.queue.pop_back() {
            let waker = unsafe { waiter.as_mut().waker.with_mut(|waker| (*waker).take()) };
            if let Some(waker) = waker {
                waker.wake();
            }
        }
    }
    /// Returns true if the semaphore is closed.
    pub(crate) fn is_closed(&self) -> bool {
        self.permits.load(Acquire) & Self::CLOSED == Self::CLOSED
    }
    /// Attempts to acquire `num_permits` from the semaphore without waiting.
    ///
    /// Returns `Err(TryAcquireError::Closed)` if the semaphore has been
    /// closed, or `Err(TryAcquireError::NoPermits)` if fewer than
    /// `num_permits` permits are currently available.
    pub(crate) fn try_acquire(&self, num_permits: usize) -> Result<(), TryAcquireError> {
        assert!(
            num_permits <= Self::MAX_PERMITS,
            "a semaphore may not have more than MAX_PERMITS permits ({})",
            Self::MAX_PERMITS
        );
        // Work in the shifted representation used by the `permits` counter.
        let num_permits = num_permits << Self::PERMIT_SHIFT;
        let mut curr = self.permits.load(Acquire);
        loop {
            // Has the semaphore closed?
            if curr & Self::CLOSED == Self::CLOSED {
                return Err(TryAcquireError::Closed);
            }
            // Are there enough permits remaining?
            if curr < num_permits {
                return Err(TryAcquireError::NoPermits);
            }
            let next = curr - num_permits;
            match self.permits.compare_exchange(curr, next, AcqRel, Acquire) {
                Ok(_) => {
                    // TODO: Instrument once issue has been solved
                    return Ok(());
                }
                Err(actual) => curr = actual,
            }
        }
    }
    /// Returns a future that completes once `num_permits` permits have been
    /// acquired, or fails with [`AcquireError`] if the semaphore is closed.
    pub(crate) fn acquire(&self, num_permits: usize) -> Acquire<'_> {
        Acquire::new(self, num_permits)
    }
    /// Release `rem` permits to the semaphore's wait list, starting from the
    /// end of the queue.
    ///
    /// If `rem` exceeds the number of permits needed by the wait list, the
    /// remainder are assigned back to the semaphore.
    fn add_permits_locked(&self, mut rem: usize, waiters: MutexGuard<'_, Waitlist>) {
        let mut wakers = WakeList::new();
        let mut lock = Some(waiters);
        let mut is_empty = false;
        // Waiters are woken in batches bounded by `WakeList` capacity: the
        // lock is released while invoking the wakers at the bottom of this
        // loop and re-acquired for the next batch.
        while rem > 0 {
            let mut waiters = lock.take().unwrap_or_else(|| self.waiters.lock());
            'inner: while wakers.can_push() {
                // Was the waiter assigned enough permits to wake it?
                match waiters.queue.last() {
                    Some(waiter) => {
                        if !waiter.assign_permits(&mut rem) {
                            break 'inner;
                        }
                    }
                    None => {
                        is_empty = true;
                        // If we assigned permits to all the waiters in the queue, and there are
                        // still permits left over, assign them back to the semaphore.
                        break 'inner;
                    }
                };
                // The waiter was fully satisfied above: unlink it and stash
                // its waker to be invoked after the lock is released.
                let mut waiter = waiters.queue.pop_back().unwrap();
                if let Some(waker) =
                    unsafe { waiter.as_mut().waker.with_mut(|waker| (*waker).take()) }
                {
                    wakers.push(waker);
                }
            }
            if rem > 0 && is_empty {
                let permits = rem;
                assert!(
                    permits <= Self::MAX_PERMITS,
                    "cannot add more than MAX_PERMITS permits ({})",
                    Self::MAX_PERMITS
                );
                let prev = self.permits.fetch_add(rem << Self::PERMIT_SHIFT, Release);
                let prev = prev >> Self::PERMIT_SHIFT;
                assert!(
                    prev + permits <= Self::MAX_PERMITS,
                    "number of added permits ({}) would overflow MAX_PERMITS ({})",
                    rem,
                    Self::MAX_PERMITS
                );
                // add remaining permits back
                #[cfg(all(tokio_unstable, feature = "tracing"))]
                self.resource_span.in_scope(|| {
                    tracing::trace!(
                        target: "runtime::resource::state_update",
                        permits = rem,
                        permits.op = "add",
                    )
                });
                rem = 0;
            }
            drop(waiters); // release the lock
            wakers.wake_all();
        }
        assert_eq!(rem, 0);
    }
    /// Decrease a semaphore's permits by a maximum of `n`.
    ///
    /// If there are insufficient permits and it's not possible to reduce by `n`,
    /// return the number of permits that were actually reduced.
    pub(crate) fn forget_permits(&self, n: usize) -> usize {
        if n == 0 {
            return 0;
        }
        let mut curr_bits = self.permits.load(Acquire);
        loop {
            let curr = curr_bits >> Self::PERMIT_SHIFT;
            let new = curr.saturating_sub(n);
            match self.permits.compare_exchange_weak(
                curr_bits,
                new << Self::PERMIT_SHIFT,
                AcqRel,
                Acquire,
            ) {
                Ok(_) => return std::cmp::min(curr, n),
                Err(actual) => curr_bits = actual,
            };
        }
    }
    /// Polls a single acquire operation.
    ///
    /// `node` is the waiter entry owned by the `Acquire` future; `queued`
    /// indicates whether a previous poll already pushed it onto the wait
    /// queue. When `queued` is true, the number of permits still needed is
    /// read from the node's state rather than from `num_permits`.
    fn poll_acquire(
        &self,
        cx: &mut Context<'_>,
        num_permits: usize,
        node: Pin<&mut Waiter>,
        queued: bool,
    ) -> Poll<Result<(), AcquireError>> {
        let mut acquired = 0;
        let needed = if queued {
            node.state.load(Acquire) << Self::PERMIT_SHIFT
        } else {
            num_permits << Self::PERMIT_SHIFT
        };
        let mut lock = None;
        // First, try to take the requested number of permits from the
        // semaphore.
        let mut curr = self.permits.load(Acquire);
        let mut waiters = loop {
            // Has the semaphore closed?
            if curr & Self::CLOSED > 0 {
                return Poll::Ready(Err(AcquireError::closed()));
            }
            let mut remaining = 0;
            let total = curr
                .checked_add(acquired)
                .expect("number of permits must not overflow");
            let (next, acq) = if total >= needed {
                let next = curr - (needed - acquired);
                (next, needed >> Self::PERMIT_SHIFT)
            } else {
                remaining = (needed - acquired) - curr;
                (0, curr >> Self::PERMIT_SHIFT)
            };
            if remaining > 0 && lock.is_none() {
                // No permits were immediately available, so this permit will
                // (probably) need to wait. We'll need to acquire a lock on the
                // wait queue before continuing. We need to do this _before_ the
                // CAS that sets the new value of the semaphore's `permits`
                // counter. Otherwise, if we subtract the permits and then
                // acquire the lock, we might miss additional permits being
                // added while waiting for the lock.
                lock = Some(self.waiters.lock());
            }
            match self.permits.compare_exchange(curr, next, AcqRel, Acquire) {
                Ok(_) => {
                    acquired += acq;
                    if remaining == 0 {
                        if !queued {
                            #[cfg(all(tokio_unstable, feature = "tracing"))]
                            self.resource_span.in_scope(|| {
                                tracing::trace!(
                                    target: "runtime::resource::state_update",
                                    permits = acquired,
                                    permits.op = "sub",
                                );
                                tracing::trace!(
                                    target: "runtime::resource::async_op::state_update",
                                    permits_obtained = acquired,
                                    permits.op = "add",
                                )
                            });
                            return Poll::Ready(Ok(()));
                        } else if lock.is_none() {
                            break self.waiters.lock();
                        }
                    }
                    // Permits are still outstanding (or the node is already
                    // queued); fall through with the lock held to wait.
                    break lock.expect("lock must be acquired before waiting");
                }
                Err(actual) => curr = actual,
            }
        };
        if waiters.closed {
            return Poll::Ready(Err(AcquireError::closed()));
        }
        #[cfg(all(tokio_unstable, feature = "tracing"))]
        self.resource_span.in_scope(|| {
            tracing::trace!(
                target: "runtime::resource::state_update",
                permits = acquired,
                permits.op = "sub",
            )
        });
        // Hand any permits taken from the counter to this waiter; if that
        // fully satisfies it, redistribute the surplus and complete.
        if node.assign_permits(&mut acquired) {
            self.add_permits_locked(acquired, waiters);
            return Poll::Ready(Ok(()));
        }
        // The waiter still needs permits, so everything we took from the
        // counter was consumed by `assign_permits` above.
        assert_eq!(acquired, 0);
        let mut old_waker = None;
        // Otherwise, register the waker & enqueue the node.
        node.waker.with_mut(|waker| {
            // Safety: the wait list is locked, so we may modify the waker.
            let waker = unsafe { &mut *waker };
            // Do we need to register the new waker?
            if waker
                .as_ref()
                .map_or(true, |waker| !waker.will_wake(cx.waker()))
            {
                old_waker = waker.replace(cx.waker().clone());
            }
        });
        // If the waiter is not already in the wait queue, enqueue it.
        if !queued {
            let node = unsafe {
                let node = Pin::into_inner_unchecked(node) as *mut _;
                NonNull::new_unchecked(node)
            };
            waiters.queue.push_front(node);
        }
        drop(waiters);
        // Dropped after the lock is released — presumably so arbitrary waker
        // drop code does not run while holding the wait-queue lock.
        drop(old_waker);
        Poll::Pending
    }
}
impl fmt::Debug for Semaphore {
    /// Formats the semaphore as `Semaphore { permits: N }`, exposing only
    /// the currently available permit count.
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut dbg = fmt.debug_struct("Semaphore");
        dbg.field("permits", &self.available_permits());
        dbg.finish()
    }
}
impl Waiter {
    /// Creates a new waiter entry requesting `num_permits` permits.
    ///
    /// The waiter starts with no registered waker and unlinked queue
    /// pointers; it is pushed onto the wait queue only when permits are not
    /// immediately available.
    fn new(
        num_permits: usize,
        #[cfg(all(tokio_unstable, feature = "tracing"))] ctx: trace::AsyncOpTracingCtx,
    ) -> Self {
        Waiter {
            waker: UnsafeCell::new(None),
            state: AtomicUsize::new(num_permits),
            pointers: linked_list::Pointers::new(),
            #[cfg(all(tokio_unstable, feature = "tracing"))]
            ctx,
            _p: PhantomPinned,
        }
    }
    /// Assign permits to the waiter.
    ///
    /// Returns `true` if the waiter should be removed from the queue.
    fn assign_permits(&self, n: &mut usize) -> bool {
        let mut curr = self.state.load(Acquire);
        loop {
            // Give the waiter at most as many permits as it still needs.
            let assign = cmp::min(curr, *n);
            let next = curr - assign;
            match self.state.compare_exchange(curr, next, AcqRel, Acquire) {
                Ok(_) => {
                    // Deduct the assigned permits from the caller's budget.
                    *n -= assign;
                    #[cfg(all(tokio_unstable, feature = "tracing"))]
                    self.ctx.async_op_span.in_scope(|| {
                        tracing::trace!(
                            target: "runtime::resource::async_op::state_update",
                            permits_obtained = assign,
                            permits.op = "add",
                        );
                    });
                    // The waiter is satisfied once it needs zero more permits.
                    return next == 0;
                }
                Err(actual) => curr = actual,
            }
        }
    }
}
impl Future for Acquire<'_> {
    type Output = Result<(), AcquireError>;
    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        ready!(crate::trace::trace_leaf(cx));
        #[cfg(all(tokio_unstable, feature = "tracing"))]
        let _resource_span = self.node.ctx.resource_span.clone().entered();
        #[cfg(all(tokio_unstable, feature = "tracing"))]
        let _async_op_span = self.node.ctx.async_op_span.clone().entered();
        #[cfg(all(tokio_unstable, feature = "tracing"))]
        let _async_op_poll_span = self.node.ctx.async_op_poll_span.clone().entered();
        let (node, semaphore, needed, queued) = self.project();
        // First, ensure the current task has enough budget to proceed.
        #[cfg(all(tokio_unstable, feature = "tracing"))]
        let coop = ready!(trace_poll_op!(
            "poll_acquire",
            crate::task::coop::poll_proceed(cx),
        ));
        #[cfg(not(all(tokio_unstable, feature = "tracing")))]
        let coop = ready!(crate::task::coop::poll_proceed(cx));
        let result = match semaphore.poll_acquire(cx, needed, node, *queued) {
            Poll::Pending => {
                // The node is now linked into the wait queue; record this so
                // `Drop` knows it must unlink the node.
                *queued = true;
                Poll::Pending
            }
            Poll::Ready(r) => {
                coop.made_progress();
                r?;
                // On success the node is no longer in the wait queue (the
                // release path pops waiters before waking them — see
                // `add_permits_locked`), so `Drop` may skip the lock.
                *queued = false;
                Poll::Ready(Ok(()))
            }
        };
        #[cfg(all(tokio_unstable, feature = "tracing"))]
        return trace_poll_op!("poll_acquire", result);
        #[cfg(not(all(tokio_unstable, feature = "tracing")))]
        return result;
    }
}
impl<'a> Acquire<'a> {
    /// Creates a new acquire future requesting `num_permits` permits from
    /// `semaphore`. No work happens until the future is polled.
    fn new(semaphore: &'a Semaphore, num_permits: usize) -> Self {
        #[cfg(any(not(tokio_unstable), not(feature = "tracing")))]
        return Self {
            node: Waiter::new(num_permits),
            semaphore,
            num_permits,
            queued: false,
        };
        #[cfg(all(tokio_unstable, feature = "tracing"))]
        return semaphore.resource_span.in_scope(|| {
            let async_op_span =
                tracing::trace_span!("runtime.resource.async_op", source = "Acquire::new");
            let async_op_poll_span = async_op_span.in_scope(|| {
                tracing::trace!(
                    target: "runtime::resource::async_op::state_update",
                    permits_requested = num_permits,
                    permits.op = "override",
                );
                tracing::trace!(
                    target: "runtime::resource::async_op::state_update",
                    permits_obtained = 0usize,
                    permits.op = "override",
                );
                tracing::trace_span!("runtime.resource.async_op.poll")
            });
            let ctx = trace::AsyncOpTracingCtx {
                async_op_span,
                async_op_poll_span,
                resource_span: semaphore.resource_span.clone(),
            };
            Self {
                node: Waiter::new(num_permits, ctx),
                semaphore,
                num_permits,
                queued: false,
            }
        });
    }
    /// Pin-projects into the pinned waiter node plus the remaining fields,
    /// which are all `Unpin` and may be handed out as plain references.
    fn project(self: Pin<&mut Self>) -> (Pin<&mut Waiter>, &Semaphore, usize, &mut bool) {
        fn is_unpin<T: Unpin>() {}
        unsafe {
            // Safety: all fields other than `node` are `Unpin`
            is_unpin::<&Semaphore>();
            is_unpin::<&mut bool>();
            is_unpin::<usize>();
            let this = self.get_unchecked_mut();
            (
                Pin::new_unchecked(&mut this.node),
                this.semaphore,
                this.num_permits,
                &mut this.queued,
            )
        }
    }
}
impl Drop for Acquire<'_> {
    fn drop(&mut self) {
        // If the future is completed, there is no node in the wait list, so we
        // can skip acquiring the lock.
        if !self.queued {
            return;
        }
        // This is where we ensure safety. The future is being dropped,
        // which means we must ensure that the waiter entry is no longer stored
        // in the linked list.
        let mut waiters = self.semaphore.waiters.lock();
        // remove the entry from the list
        let node = NonNull::from(&mut self.node);
        // Safety: we have locked the wait list.
        unsafe { waiters.queue.remove(node) };
        // Any permits that were already assigned to this waiter will never be
        // consumed by it, so return them to the semaphore.
        let acquired_permits = self.num_permits - self.node.state.load(Acquire);
        if acquired_permits > 0 {
            self.semaphore.add_permits_locked(acquired_permits, waiters);
        }
    }
}
// Safety: the `Acquire` future is not `Sync` automatically because it contains
// a `Waiter`, which, in turn, contains an `UnsafeCell`. However, the
// `UnsafeCell` is only accessed when the future is borrowed mutably (either in
// `poll` or in `drop`). Therefore, it is safe (although not particularly
// _useful_) for the future to be borrowed immutably across threads.
// A shared `&Acquire` exposes no method that touches the cell.
unsafe impl Sync for Acquire<'_> {}
// ===== impl AcquireError ====
impl AcquireError {
fn closed() -> AcquireError {
AcquireError(())
}
}
impl fmt::Display for AcquireError {
    /// The only failure mode for `acquire` is a closed semaphore, so the
    /// message is a fixed string.
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt.write_str("semaphore closed")
    }
}
// `AcquireError` wraps no underlying error, so the default trait methods suffice.
impl std::error::Error for AcquireError {}
// ===== impl TryAcquireError =====
impl TryAcquireError {
    /// Returns `true` if the error was caused by a closed semaphore.
    #[allow(dead_code)] // may be used later!
    pub(crate) fn is_closed(&self) -> bool {
        match self {
            TryAcquireError::Closed => true,
            TryAcquireError::NoPermits => false,
        }
    }
    /// Returns `true` if the error was caused by calling `try_acquire` on a
    /// semaphore with no available permits.
    #[allow(dead_code)] // may be used later!
    pub(crate) fn is_no_permits(&self) -> bool {
        match self {
            TryAcquireError::NoPermits => true,
            TryAcquireError::Closed => false,
        }
    }
}
impl fmt::Display for TryAcquireError {
    /// Renders a short human-readable description of the failure.
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        let msg = match self {
            TryAcquireError::Closed => "semaphore closed",
            TryAcquireError::NoPermits => "no permits available",
        };
        fmt.write_str(msg)
    }
}
// `TryAcquireError` wraps no underlying error, so the default trait methods suffice.
impl std::error::Error for TryAcquireError {}
/// # Safety
///
/// `Waiter` is forced to be !Unpin.
unsafe impl linked_list::Link for Waiter {
    type Handle = NonNull<Waiter>;
    type Target = Waiter;
    fn as_raw(handle: &Self::Handle) -> NonNull<Waiter> {
        *handle
    }
    unsafe fn from_raw(ptr: NonNull<Waiter>) -> NonNull<Waiter> {
        ptr
    }
    unsafe fn pointers(target: NonNull<Waiter>) -> NonNull<linked_list::Pointers<Waiter>> {
        // Delegates to the generated raw-pointer accessor for the
        // `pointers` field (see `generate_addr_of_methods!` above).
        unsafe { Waiter::addr_of_pointers(target) }
    }
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/sync/mod.rs | tokio/src/sync/mod.rs | #![cfg_attr(loom, allow(dead_code, unreachable_pub, unused_imports))]
//! Synchronization primitives for use in asynchronous contexts.
//!
//! Tokio programs tend to be organized as a set of [tasks] where each task
//! operates independently and may be executed on separate physical threads. The
//! synchronization primitives provided in this module permit these independent
//! tasks to communicate together.
//!
//! [tasks]: crate::task
//!
//! # Message passing
//!
//! The most common form of synchronization in a Tokio program is message
//! passing. Two tasks operate independently and send messages to each other to
//! synchronize. Doing so has the advantage of avoiding shared state.
//!
//! Message passing is implemented using channels. A channel supports sending a
//! message from one producer task to one or more consumer tasks. There are a
//! few flavors of channels provided by Tokio. Each channel flavor supports
//! different message passing patterns. When a channel supports multiple
//! producers, many separate tasks may **send** messages. When a channel
//! supports multiple consumers, many different separate tasks may **receive**
//! messages.
//!
//! Tokio provides many different channel flavors as different message passing
//! patterns are best handled with different implementations.
//!
//! ## `oneshot` channel
//!
//! The [`oneshot` channel][oneshot] supports sending a **single** value from a
//! single producer to a single consumer. This channel is usually used to send
//! the result of a computation to a waiter.
//!
//! **Example:** using a [`oneshot` channel][oneshot] to receive the result of a
//! computation.
//!
//! ```
//! use tokio::sync::oneshot;
//!
//! async fn some_computation() -> String {
//! "represents the result of the computation".to_string()
//! }
//!
//! # #[tokio::main(flavor = "current_thread")]
//! # async fn main() {
//! let (tx, rx) = oneshot::channel();
//!
//! tokio::spawn(async move {
//! let res = some_computation().await;
//! tx.send(res).unwrap();
//! });
//!
//! // Do other work while the computation is happening in the background
//!
//! // Wait for the computation result
//! let res = rx.await.unwrap();
//! # }
//! ```
//!
//! Note, if the task produces a computation result as its final
//! action before terminating, the [`JoinHandle`] can be used to
//! receive that value instead of allocating resources for the
//! `oneshot` channel. Awaiting on [`JoinHandle`] returns `Result`. If
//! the task panics, the `JoinHandle` yields `Err` with the panic
//! cause.
//!
//! **Example:**
//!
//! ```
//! async fn some_computation() -> String {
//! "the result of the computation".to_string()
//! }
//!
//! # #[tokio::main(flavor = "current_thread")]
//! # async fn main() {
//! let join_handle = tokio::spawn(async move {
//! some_computation().await
//! });
//!
//! // Do other work while the computation is happening in the background
//!
//! // Wait for the computation result
//! let res = join_handle.await.unwrap();
//! # }
//! ```
//!
//! [`JoinHandle`]: crate::task::JoinHandle
//!
//! ## `mpsc` channel
//!
//! The [`mpsc` channel][mpsc] supports sending **many** values from **many**
//! producers to a single consumer. This channel is often used to send work to a
//! task or to receive the result of many computations.
//!
//! This is also the channel you should use if you want to send many messages
//! from a single producer to a single consumer. There is no dedicated spsc
//! channel.
//!
//! **Example:** using an mpsc to incrementally stream the results of a series
//! of computations.
//!
//! ```
//! use tokio::sync::mpsc;
//!
//! async fn some_computation(input: u32) -> String {
//! format!("the result of computation {}", input)
//! }
//!
//! # #[tokio::main(flavor = "current_thread")]
//! # async fn main() {
//! let (tx, mut rx) = mpsc::channel(100);
//!
//! tokio::spawn(async move {
//! for i in 0..10 {
//! let res = some_computation(i).await;
//! tx.send(res).await.unwrap();
//! }
//! });
//!
//! while let Some(res) = rx.recv().await {
//! println!("got = {}", res);
//! }
//! # }
//! ```
//!
//! The argument to `mpsc::channel` is the channel capacity. This is the maximum
//! number of values that can be stored in the channel pending receipt at any
//! given time. Properly setting this value is key in implementing robust
//! programs as the channel capacity plays a critical part in handling back
//! pressure.
//!
//! A common concurrency pattern for resource management is to spawn a task
//! dedicated to managing that resource and using message passing between other
//! tasks to interact with the resource. The resource may be anything that may
//! not be concurrently used. Some examples include a socket and program state.
//! For example, if multiple tasks need to send data over a single socket, spawn
//! a task to manage the socket and use a channel to synchronize.
//!
//! **Example:** sending data from many tasks over a single socket using message
//! passing.
//!
//! ```no_run
//! # #[cfg(not(target_family = "wasm"))]
//! # {
//! use tokio::io::{self, AsyncWriteExt};
//! use tokio::net::TcpStream;
//! use tokio::sync::mpsc;
//!
//! #[tokio::main]
//! async fn main() -> io::Result<()> {
//! let mut socket = TcpStream::connect("www.example.com:1234").await?;
//! let (tx, mut rx) = mpsc::channel(100);
//!
//! for _ in 0..10 {
//! // Each task needs its own `tx` handle. This is done by cloning the
//! // original handle.
//! let tx = tx.clone();
//!
//! tokio::spawn(async move {
//! tx.send(&b"data to write"[..]).await.unwrap();
//! });
//! }
//!
//! // The `rx` half of the channel returns `None` once **all** `tx` clones
//! // drop. To ensure `None` is returned, drop the handle owned by the
//! // current task. If this `tx` handle is not dropped, there will always
//! // be a single outstanding `tx` handle.
//! drop(tx);
//!
//! while let Some(res) = rx.recv().await {
//! socket.write_all(res).await?;
//! }
//!
//! Ok(())
//! }
//! # }
//! ```
//!
//! The [`mpsc`] and [`oneshot`] channels can be combined to provide a request /
//! response type synchronization pattern with a shared resource. A task is
//! spawned to synchronize a resource and waits on commands received on a
//! [`mpsc`] channel. Each command includes a [`oneshot`] `Sender` on which the
//! result of the command is sent.
//!
//! **Example:** use a task to synchronize a `u64` counter. Each task sends a
//! "fetch and increment" command. The counter value **before** the increment is
//! sent over the provided `oneshot` channel.
//!
//! ```
//! use tokio::sync::{oneshot, mpsc};
//! use Command::Increment;
//!
//! enum Command {
//! Increment,
//! // Other commands can be added here
//! }
//!
//! # #[tokio::main(flavor = "current_thread")]
//! # async fn main() {
//! let (cmd_tx, mut cmd_rx) = mpsc::channel::<(Command, oneshot::Sender<u64>)>(100);
//!
//! // Spawn a task to manage the counter
//! tokio::spawn(async move {
//! let mut counter: u64 = 0;
//!
//! while let Some((cmd, response)) = cmd_rx.recv().await {
//! match cmd {
//! Increment => {
//! let prev = counter;
//! counter += 1;
//! response.send(prev).unwrap();
//! }
//! }
//! }
//! });
//!
//! let mut join_handles = vec![];
//!
//! // Spawn tasks that will send the increment command.
//! for _ in 0..10 {
//! let cmd_tx = cmd_tx.clone();
//!
//! join_handles.push(tokio::spawn(async move {
//! let (resp_tx, resp_rx) = oneshot::channel();
//!
//! cmd_tx.send((Increment, resp_tx)).await.ok().unwrap();
//! let res = resp_rx.await.unwrap();
//!
//! println!("previous value = {}", res);
//! }));
//! }
//!
//! // Wait for all tasks to complete
//! for join_handle in join_handles.drain(..) {
//! join_handle.await.unwrap();
//! }
//! # }
//! ```
//!
//! ## `broadcast` channel
//!
//! The [`broadcast` channel] supports sending **many** values from
//! **many** producers to **many** consumers. Each consumer will receive
//! **each** value. This channel can be used to implement "fan out" style
//! patterns common with pub / sub or "chat" systems.
//!
//! This channel tends to be used less often than `oneshot` and `mpsc` but still
//! has its use cases.
//!
//! This is also the channel you should use if you want to broadcast values from
//! a single producer to many consumers. There is no dedicated spmc broadcast
//! channel.
//!
//! Basic usage
//!
//! ```
//! use tokio::sync::broadcast;
//!
//! # #[tokio::main(flavor = "current_thread")]
//! # async fn main() {
//! let (tx, mut rx1) = broadcast::channel(16);
//! let mut rx2 = tx.subscribe();
//!
//! tokio::spawn(async move {
//! assert_eq!(rx1.recv().await.unwrap(), 10);
//! assert_eq!(rx1.recv().await.unwrap(), 20);
//! });
//!
//! tokio::spawn(async move {
//! assert_eq!(rx2.recv().await.unwrap(), 10);
//! assert_eq!(rx2.recv().await.unwrap(), 20);
//! });
//!
//! tx.send(10).unwrap();
//! tx.send(20).unwrap();
//! # }
//! ```
//!
//! [`broadcast` channel]: crate::sync::broadcast
//!
//! ## `watch` channel
//!
//! The [`watch` channel] supports sending **many** values from **many**
//! producers to **many** consumers. However, only the **most recent** value is
//! stored in the channel. Consumers are notified when a new value is sent, but
//! there is no guarantee that consumers will see **all** values.
//!
//! The [`watch` channel] is similar to a [`broadcast` channel] with capacity 1.
//!
//! Use cases for the [`watch` channel] include broadcasting configuration
//! changes or signalling program state changes, such as transitioning to
//! shutdown.
//!
//! **Example:** use a [`watch` channel] to notify tasks of configuration
//! changes. In this example, a configuration file is checked periodically. When
//! the file changes, the configuration changes are signalled to consumers.
//!
//! ```
//! use tokio::sync::watch;
//! use tokio::time::{self, Duration, Instant};
//!
//! use std::io;
//!
//! #[derive(Debug, Clone, Eq, PartialEq)]
//! struct Config {
//! timeout: Duration,
//! }
//!
//! impl Config {
//! async fn load_from_file() -> io::Result<Config> {
//! // file loading and deserialization logic here
//! # Ok(Config { timeout: Duration::from_secs(1) })
//! }
//! }
//!
//! async fn my_async_operation() {
//! // Do something here
//! }
//!
//! # #[tokio::main(flavor = "current_thread")]
//! # async fn main() {
//! // Load initial configuration value
//! let mut config = Config::load_from_file().await.unwrap();
//!
//! // Create the watch channel, initialized with the loaded configuration
//! let (tx, rx) = watch::channel(config.clone());
//!
//! // Spawn a task to monitor the file.
//! tokio::spawn(async move {
//! loop {
//! // Wait 10 seconds between checks
//! time::sleep(Duration::from_secs(10)).await;
//!
//! // Load the configuration file
//! let new_config = Config::load_from_file().await.unwrap();
//!
//! // If the configuration changed, send the new config value
//! // on the watch channel.
//! if new_config != config {
//! tx.send(new_config.clone()).unwrap();
//! config = new_config;
//! }
//! }
//! });
//!
//! let mut handles = vec![];
//!
//! // Spawn tasks that runs the async operation for at most `timeout`. If
//! // the timeout elapses, restart the operation.
//! //
//! // The task simultaneously watches the `Config` for changes. When the
//! // timeout duration changes, the timeout is updated without restarting
//! // the in-flight operation.
//! for _ in 0..5 {
//! // Clone a config watch handle for use in this task
//! let mut rx = rx.clone();
//!
//! let handle = tokio::spawn(async move {
//! // Start the initial operation and pin the future to the stack.
//! // Pinning to the stack is required to resume the operation
//! // across multiple calls to `select!`
//! let op = my_async_operation();
//! tokio::pin!(op);
//!
//! // Get the initial config value
//! let mut conf = rx.borrow().clone();
//!
//! let mut op_start = Instant::now();
//! let sleep = time::sleep_until(op_start + conf.timeout);
//! tokio::pin!(sleep);
//!
//! loop {
//! tokio::select! {
//! _ = &mut sleep => {
//! // The operation elapsed. Restart it
//! op.set(my_async_operation());
//!
//! // Track the new start time
//! op_start = Instant::now();
//!
//! // Restart the timeout
//! sleep.set(time::sleep_until(op_start + conf.timeout));
//! }
//! _ = rx.changed() => {
//! conf = rx.borrow_and_update().clone();
//!
//! // The configuration has been updated. Update the
//! // `sleep` using the new `timeout` value.
//! sleep.as_mut().reset(op_start + conf.timeout);
//! }
//! _ = &mut op => {
//! // The operation completed!
//! return
//! }
//! }
//! }
//! });
//!
//! handles.push(handle);
//! }
//!
//! for handle in handles.drain(..) {
//! handle.await.unwrap();
//! }
//! # }
//! ```
//!
//! [`watch` channel]: mod@crate::sync::watch
//! [`broadcast` channel]: mod@crate::sync::broadcast
//!
//! # State synchronization
//!
//! The remaining synchronization primitives focus on synchronizing state.
//! These are asynchronous equivalents to versions provided by `std`. They
//! operate in a similar way as their `std` counterparts but will wait
//! asynchronously instead of blocking the thread.
//!
//! * [`Barrier`] Ensures multiple tasks will wait for each other to reach a
//! point in the program, before continuing execution all together.
//!
//! * [`Mutex`] Mutual Exclusion mechanism, which ensures that at most one
//! thread at a time is able to access some data.
//!
//! * [`Notify`] Basic task notification. `Notify` supports notifying a
//! receiving task without sending data. In this case, the task wakes up and
//! resumes processing.
//!
//! * [`RwLock`] Provides a mutual exclusion mechanism which allows multiple
//! readers at the same time, while allowing only one writer at a time. In
//! some cases, this can be more efficient than a mutex.
//!
//! * [`Semaphore`] Limits the amount of concurrency. A semaphore holds a
//! number of permits, which tasks may request in order to enter a critical
//! section. Semaphores are useful for implementing limiting or bounding of
//! any kind.
//!
//! # Runtime compatibility
//!
//! All synchronization primitives provided in this module are runtime agnostic.
//! You can freely move them between different instances of the Tokio runtime
//! or even use them from non-Tokio runtimes.
//!
//! When used in a Tokio runtime, the synchronization primitives participate in
//! [cooperative scheduling](crate::task::coop#cooperative-scheduling) to avoid
//! starvation. This feature does not apply when used from non-Tokio runtimes.
//!
//! As an exception, methods ending in `_timeout` are not runtime agnostic
//! because they require access to the Tokio timer. See the documentation of
//! each `*_timeout` method for more information on its use.
// Full public API, compiled when the `sync` feature is enabled: every
// synchronization primitive is built and re-exported from this module.
cfg_sync! {
    /// Named future types.
    pub mod futures {
        pub use super::notify::{Notified, OwnedNotified};
    }

    mod barrier;
    pub use barrier::{Barrier, BarrierWaitResult};

    pub mod broadcast;

    pub mod mpsc;

    mod mutex;
    pub use mutex::{Mutex, MutexGuard, TryLockError, OwnedMutexGuard, MappedMutexGuard, OwnedMappedMutexGuard};

    // `pub(crate)` module: `Notify` itself is public, but the module is also
    // used internally elsewhere in the crate (see `cfg_not_sync!` below).
    pub(crate) mod notify;
    pub use notify::Notify;

    pub mod oneshot;

    // Low-level semaphore implementation shared with other primitives; only
    // its error types are part of the public API.
    pub(crate) mod batch_semaphore;
    pub use batch_semaphore::{AcquireError, TryAcquireError};

    mod semaphore;
    pub use semaphore::{Semaphore, SemaphorePermit, OwnedSemaphorePermit};

    mod rwlock;
    pub use rwlock::RwLock;
    pub use rwlock::owned_read_guard::OwnedRwLockReadGuard;
    pub use rwlock::owned_write_guard::OwnedRwLockWriteGuard;
    pub use rwlock::owned_write_guard_mapped::OwnedRwLockMappedWriteGuard;
    pub use rwlock::read_guard::RwLockReadGuard;
    pub use rwlock::write_guard::RwLockWriteGuard;
    pub use rwlock::write_guard_mapped::RwLockMappedWriteGuard;

    mod task;
    pub(crate) use task::AtomicWaker;

    mod once_cell;
    pub use self::once_cell::{OnceCell, SetError};

    mod set_once;
    pub use self::set_once::{SetOnce, SetOnceError};

    pub mod watch;
}
// When the `sync` feature is disabled, a subset of the primitives is still
// compiled crate-internally because other features (fs, rt, signal, process)
// depend on them. Nothing here is exported publicly.
cfg_not_sync! {
    cfg_fs! {
        pub(crate) mod batch_semaphore;
        mod mutex;
        pub(crate) use mutex::Mutex;
    }

    #[cfg(any(feature = "rt", feature = "signal", all(unix, feature = "process")))]
    pub(crate) mod notify;

    #[cfg(any(feature = "rt", all(windows, feature = "process")))]
    pub(crate) mod oneshot;

    cfg_atomic_waker_impl! {
        mod task;
        pub(crate) use task::AtomicWaker;
    }

    #[cfg(any(feature = "signal", all(unix, feature = "process")))]
    pub(crate) mod watch;
}
/// Unit tests for the sync primitives; compiled only for test builds.
#[cfg(test)]
mod tests;
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/sync/semaphore.rs | tokio/src/sync/semaphore.rs | use super::batch_semaphore as ll; // low level implementation
use super::{AcquireError, TryAcquireError};
#[cfg(all(tokio_unstable, feature = "tracing"))]
use crate::util::trace;
use std::sync::Arc;
/// Counting semaphore performing asynchronous permit acquisition.
///
/// A semaphore maintains a set of permits. Permits are used to synchronize
/// access to a shared resource. A semaphore differs from a mutex in that it
/// can allow more than one concurrent caller to access the shared resource at a
/// time.
///
/// When `acquire` is called and the semaphore has remaining permits, the
/// function immediately returns a permit. However, if no remaining permits are
/// available, `acquire` (asynchronously) waits until an outstanding permit is
/// dropped. At this point, the freed permit is assigned to the caller.
///
/// This `Semaphore` is fair, which means that permits are given out in the order
/// they were requested. This fairness is also applied when `acquire_many` gets
/// involved, so if a call to `acquire_many` at the front of the queue requests
/// more permits than currently available, this can prevent a call to `acquire`
/// from completing, even if the semaphore has enough permits to complete the call
/// to `acquire`.
///
/// To use the `Semaphore` in a poll function, you can use the [`PollSemaphore`]
/// utility.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use tokio::sync::{Semaphore, TryAcquireError};
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let semaphore = Semaphore::new(3);
///
/// let a_permit = semaphore.acquire().await.unwrap();
/// let two_permits = semaphore.acquire_many(2).await.unwrap();
///
/// assert_eq!(semaphore.available_permits(), 0);
///
/// let permit_attempt = semaphore.try_acquire();
/// assert_eq!(permit_attempt.err(), Some(TryAcquireError::NoPermits));
/// # }
/// ```
///
/// ## Limit the number of simultaneously opened files in your program
///
/// Most operating systems have limits on the number of open file
/// handles. Even in systems without explicit limits, resource constraints
/// implicitly set an upper bound on the number of open files. If your
/// program attempts to open a large number of files and exceeds this
/// limit, it will result in an error.
///
/// This example uses a Semaphore with 100 permits. By acquiring a permit from
/// the Semaphore before accessing a file, you ensure that your program opens
/// no more than 100 files at a time. When trying to open the 101st
/// file, the program will wait until a permit becomes available before
/// proceeding to open another file.
/// ```
/// # #[cfg(not(target_family = "wasm"))]
/// # {
/// use std::io::Result;
/// use tokio::fs::File;
/// use tokio::sync::Semaphore;
/// use tokio::io::AsyncWriteExt;
///
/// static PERMITS: Semaphore = Semaphore::const_new(100);
///
/// async fn write_to_file(message: &[u8]) -> Result<()> {
/// let _permit = PERMITS.acquire().await.unwrap();
/// let mut buffer = File::create("example.txt").await?;
/// buffer.write_all(message).await?;
/// Ok(()) // Permit goes out of scope here, and is available again for acquisition
/// }
/// # }
/// ```
///
/// ## Limit the number of outgoing requests being sent at the same time
///
/// In some scenarios, it might be required to limit the number of outgoing
/// requests being sent in parallel. This could be due to limits of a consumed
/// API or the network resources of the system the application is running on.
///
/// This example uses an `Arc<Semaphore>` with 10 permits. Each task spawned is
/// given a reference to the semaphore by cloning the `Arc<Semaphore>`. Before
/// a task sends a request, it must acquire a permit from the semaphore by
/// calling [`Semaphore::acquire`]. This ensures that at most 10 requests are
/// sent in parallel at any given time. After a task has sent a request, it
/// drops the permit to allow other tasks to send requests.
///
/// ```
/// use std::sync::Arc;
/// use tokio::sync::Semaphore;
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// // Define maximum number of parallel requests.
/// let semaphore = Arc::new(Semaphore::new(5));
/// // Spawn many tasks that will send requests.
/// let mut jhs = Vec::new();
/// for task_id in 0..50 {
/// let semaphore = semaphore.clone();
/// let jh = tokio::spawn(async move {
/// // Acquire permit before sending request.
/// let _permit = semaphore.acquire().await.unwrap();
/// // Send the request.
/// let response = send_request(task_id).await;
/// // Drop the permit after the request has been sent.
/// drop(_permit);
/// // Handle response.
/// // ...
///
/// response
/// });
/// jhs.push(jh);
/// }
/// // Collect responses from tasks.
/// let mut responses = Vec::new();
/// for jh in jhs {
/// let response = jh.await.unwrap();
/// responses.push(response);
/// }
/// // Process responses.
/// // ...
/// # }
/// # async fn send_request(task_id: usize) {
/// # // Send request.
/// # }
/// ```
///
/// ## Limit the number of incoming requests being handled at the same time
///
/// Similar to limiting the number of simultaneously opened files, network handles
/// are a limited resource. Allowing an unbounded amount of requests to be processed
/// could result in a denial-of-service, among many other issues.
///
/// This example uses an `Arc<Semaphore>` instead of a global variable.
/// To limit the number of requests that can be processed at the time,
/// we acquire a permit for each task before spawning it. Once acquired,
/// a new task is spawned; and once finished, the permit is dropped inside
/// of the task to allow others to spawn. Permits must be acquired via
/// [`Semaphore::acquire_owned`] to be movable across the task boundary.
/// (Since our semaphore is not a global variable — if it was, then `acquire` would be enough.)
///
/// ```no_run
/// # #[cfg(not(target_family = "wasm"))]
/// # {
/// use std::sync::Arc;
/// use tokio::sync::Semaphore;
/// use tokio::net::TcpListener;
///
/// #[tokio::main]
/// async fn main() -> std::io::Result<()> {
/// let semaphore = Arc::new(Semaphore::new(3));
/// let listener = TcpListener::bind("127.0.0.1:8080").await?;
///
/// loop {
/// // Acquire permit before accepting the next socket.
/// //
/// // We use `acquire_owned` so that we can move `permit` into
/// // other tasks.
/// let permit = semaphore.clone().acquire_owned().await.unwrap();
/// let (mut socket, _) = listener.accept().await?;
///
/// tokio::spawn(async move {
/// // Do work using the socket.
/// handle_connection(&mut socket).await;
/// // Drop socket while the permit is still live.
/// drop(socket);
/// // Drop the permit, so more tasks can be created.
/// drop(permit);
/// });
/// }
/// }
/// # async fn handle_connection(_socket: &mut tokio::net::TcpStream) {
/// # // Do work
/// # }
/// # }
/// ```
///
/// ## Prevent tests from running in parallel
///
/// By default, Rust runs tests in the same file in parallel. However, in some
/// cases, running two tests in parallel may lead to problems. For example, this
/// can happen when tests use the same database.
///
/// Consider the following scenario:
/// 1. `test_insert`: Inserts a key-value pair into the database, then retrieves
/// the value using the same key to verify the insertion.
/// 2. `test_update`: Inserts a key, then updates the key to a new value and
/// verifies that the value has been accurately updated.
/// 3. `test_others`: A third test that doesn't modify the database state. It
/// can run in parallel with the other tests.
///
/// In this example, `test_insert` and `test_update` need to run in sequence to
/// work, but it doesn't matter which test runs first. We can leverage a
/// semaphore with a single permit to address this challenge.
///
/// ```
/// # use tokio::sync::Mutex;
/// # use std::collections::BTreeMap;
/// # struct Database {
/// # map: Mutex<BTreeMap<String, i32>>,
/// # }
/// # impl Database {
/// # pub const fn setup() -> Database {
/// # Database {
/// # map: Mutex::const_new(BTreeMap::new()),
/// # }
/// # }
/// # pub async fn insert(&self, key: &str, value: i32) {
/// # self.map.lock().await.insert(key.to_string(), value);
/// # }
/// # pub async fn update(&self, key: &str, value: i32) {
/// # self.map.lock().await
/// # .entry(key.to_string())
/// # .and_modify(|origin| *origin = value);
/// # }
/// # pub async fn delete(&self, key: &str) {
/// # self.map.lock().await.remove(key);
/// # }
/// # pub async fn get(&self, key: &str) -> i32 {
/// # *self.map.lock().await.get(key).unwrap()
/// # }
/// # }
/// use tokio::sync::Semaphore;
///
/// // Initialize a static semaphore with only one permit, which is used to
/// // prevent test_insert and test_update from running in parallel.
/// static PERMIT: Semaphore = Semaphore::const_new(1);
///
/// // Initialize the database that will be used by the subsequent tests.
/// static DB: Database = Database::setup();
///
/// #[tokio::test]
/// # async fn fake_test_insert() {}
/// async fn test_insert() {
/// // Acquire permit before proceeding. Since the semaphore has only one permit,
/// // the test will wait if the permit is already acquired by other tests.
/// let permit = PERMIT.acquire().await.unwrap();
///
/// // Do the actual test stuff with database
///
/// // Insert a key-value pair to database
/// let (key, value) = ("name", 0);
/// DB.insert(key, value).await;
///
/// // Verify that the value has been inserted correctly.
/// assert_eq!(DB.get(key).await, value);
///
/// // Undo the insertion, so the database is empty at the end of the test.
/// DB.delete(key).await;
///
/// // Drop permit. This allows the other test to start running.
/// drop(permit);
/// }
///
/// #[tokio::test]
/// # async fn fake_test_update() {}
/// async fn test_update() {
/// // Acquire permit before proceeding. Since the semaphore has only one permit,
/// // the test will wait if the permit is already acquired by other tests.
/// let permit = PERMIT.acquire().await.unwrap();
///
/// // Do the same insert.
/// let (key, value) = ("name", 0);
/// DB.insert(key, value).await;
///
/// // Update the existing value with a new one.
/// let new_value = 1;
/// DB.update(key, new_value).await;
///
/// // Verify that the value has been updated correctly.
/// assert_eq!(DB.get(key).await, new_value);
///
///     // Undo any modification.
/// DB.delete(key).await;
///
/// // Drop permit. This allows the other test to start running.
/// drop(permit);
/// }
///
/// #[tokio::test]
/// # async fn fake_test_others() {}
/// async fn test_others() {
/// // This test can run in parallel with test_insert and test_update,
/// // so it does not use PERMIT.
/// }
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// # test_insert().await;
/// # test_update().await;
/// # test_others().await;
/// # }
/// ```
///
/// ## Rate limiting using a token bucket
///
/// This example showcases the [`add_permits`] and [`SemaphorePermit::forget`] methods.
///
/// Many applications and systems have constraints on the rate at which certain
/// operations should occur. Exceeding this rate can result in suboptimal
/// performance or even errors.
///
/// This example implements rate limiting using a [token bucket]. A token bucket is a form of rate
/// limiting that doesn't kick in immediately, to allow for short bursts of incoming requests that
/// arrive at the same time.
///
/// With a token bucket, each incoming request consumes a token, and the tokens are refilled at a
/// certain rate that defines the rate limit. When a burst of requests arrives, tokens are
/// immediately given out until the bucket is empty. Once the bucket is empty, requests will have to
/// wait for new tokens to be added.
///
/// Unlike the example that limits how many requests can be handled at the same time, we do not add
/// tokens back when we finish handling a request. Instead, tokens are added only by a timer task.
///
/// Note that this implementation is suboptimal when the duration is small, because it consumes a
/// lot of CPU constantly looping and sleeping.
///
/// [token bucket]: https://en.wikipedia.org/wiki/Token_bucket
/// [`add_permits`]: crate::sync::Semaphore::add_permits
/// [`SemaphorePermit::forget`]: crate::sync::SemaphorePermit::forget
/// ```
/// use std::sync::Arc;
/// use tokio::sync::Semaphore;
/// use tokio::time::{interval, Duration};
///
/// struct TokenBucket {
/// sem: Arc<Semaphore>,
/// jh: tokio::task::JoinHandle<()>,
/// }
///
/// impl TokenBucket {
/// fn new(duration: Duration, capacity: usize) -> Self {
/// let sem = Arc::new(Semaphore::new(capacity));
///
/// // refills the tokens at the end of each interval
/// let jh = tokio::spawn({
/// let sem = sem.clone();
/// let mut interval = interval(duration);
/// interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip);
///
/// async move {
/// loop {
/// interval.tick().await;
///
/// if sem.available_permits() < capacity {
/// sem.add_permits(1);
/// }
/// }
/// }
/// });
///
/// Self { jh, sem }
/// }
///
/// async fn acquire(&self) {
/// // This can return an error if the semaphore is closed, but we
/// // never close it, so this error can never happen.
/// let permit = self.sem.acquire().await.unwrap();
/// // To avoid releasing the permit back to the semaphore, we use
/// // the `SemaphorePermit::forget` method.
/// permit.forget();
/// }
/// }
///
/// impl Drop for TokenBucket {
/// fn drop(&mut self) {
/// // Kill the background task so it stops taking up resources when we
/// // don't need it anymore.
/// self.jh.abort();
/// }
/// }
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn _hidden() {}
/// # #[tokio::main(flavor = "current_thread", start_paused = true)]
/// # async fn main() {
/// let capacity = 5;
/// let update_interval = Duration::from_secs_f32(1.0 / capacity as f32);
/// let bucket = TokenBucket::new(update_interval, capacity);
///
/// for _ in 0..5 {
/// bucket.acquire().await;
///
/// // do the operation
/// }
/// # }
/// ```
///
/// [`PollSemaphore`]: https://docs.rs/tokio-util/latest/tokio_util/sync/struct.PollSemaphore.html
/// [`Semaphore::acquire_owned`]: crate::sync::Semaphore::acquire_owned
#[derive(Debug)]
pub struct Semaphore {
    /// The low level semaphore
    ll_sem: ll::Semaphore,
    /// Tracing span identifying this resource for instrumentation; only
    /// present with the unstable `tracing` feature, where it makes the
    /// semaphore visible to tools such as `tokio-console`.
    #[cfg(all(tokio_unstable, feature = "tracing"))]
    resource_span: tracing::Span,
}
/// A permit from the semaphore.
///
/// This type is created by the [`acquire`] method.
///
/// [`acquire`]: crate::sync::Semaphore::acquire()
#[must_use]
#[clippy::has_significant_drop]
#[derive(Debug)]
pub struct SemaphorePermit<'a> {
    // Semaphore this permit was acquired from; permits are returned to it
    // when the guard is dropped (unless `forget` is called).
    sem: &'a Semaphore,
    // Number of permits held by this guard: 1 for `acquire`/`try_acquire`,
    // `n` for the `_many` variants.
    permits: u32,
}
/// An owned permit from the semaphore.
///
/// This type is created by the [`acquire_owned`] method.
///
/// [`acquire_owned`]: crate::sync::Semaphore::acquire_owned()
#[must_use]
#[clippy::has_significant_drop]
#[derive(Debug)]
pub struct OwnedSemaphorePermit {
    // Owned handle to the semaphore, allowing the permit to move across
    // task boundaries (e.g. into `tokio::spawn`).
    sem: Arc<Semaphore>,
    // Number of permits held by this guard: 1 for `acquire_owned`, `n` for
    // `acquire_many_owned`.
    permits: u32,
}
#[test]
#[cfg(not(loom))]
fn bounds() {
    // Compile-time auto-trait assertions: each call only type-checks if the
    // bound holds for the monomorphized type.
    fn assert_unpin<T: Unpin>() {}
    fn assert_send_sync<T: Send + Sync>() {}
    // Value-taking variant: the future returned by the async `acquire`
    // method has an unnameable type, so it must be passed in by value.
    fn assert_send_sync_val<T: Send + Sync>(_t: T) {}

    assert_unpin::<Semaphore>();
    assert_unpin::<SemaphorePermit<'_>>();
    assert_send_sync::<Semaphore>();

    let sem = Semaphore::new(0);
    assert_send_sync_val(sem.acquire());
}
impl Semaphore {
/// The maximum number of permits which a semaphore can hold. It is `usize::MAX >> 3`.
///
/// Exceeding this limit typically results in a panic.
// Re-exported from the low-level implementation so the two values can never
// disagree.
pub const MAX_PERMITS: usize = super::batch_semaphore::Semaphore::MAX_PERMITS;
/// Creates a new semaphore with the initial number of permits.
///
/// Panics if `permits` exceeds [`Semaphore::MAX_PERMITS`].
// `#[track_caller]` lets the tracing span below record the caller's source
// location rather than this function's.
#[track_caller]
pub fn new(permits: usize) -> Self {
    // With console instrumentation enabled, create a `runtime.resource`
    // span describing this semaphore and where it was constructed.
    #[cfg(all(tokio_unstable, feature = "tracing"))]
    let resource_span = {
        let location = std::panic::Location::caller();

        tracing::trace_span!(
            parent: None,
            "runtime.resource",
            concrete_type = "Semaphore",
            kind = "Sync",
            loc.file = location.file(),
            loc.line = location.line(),
            loc.col = location.column(),
            inherits_child_attrs = true,
        )
    };

    // Build the low-level semaphore inside the span so its construction is
    // attributed to this resource.
    #[cfg(all(tokio_unstable, feature = "tracing"))]
    let ll_sem = resource_span.in_scope(|| ll::Semaphore::new(permits));

    // Exactly one of the two `ll_sem` bindings above/below is compiled in.
    #[cfg(any(not(tokio_unstable), not(feature = "tracing")))]
    let ll_sem = ll::Semaphore::new(permits);

    Self {
        ll_sem,
        #[cfg(all(tokio_unstable, feature = "tracing"))]
        resource_span,
    }
}
/// Creates a new semaphore with the initial number of permits.
///
/// When using the `tracing` [unstable feature], a `Semaphore` created with
/// `const_new` will not be instrumented. As such, it will not be visible
/// in [`tokio-console`]. Instead, [`Semaphore::new`] should be used to
/// create an instrumented object if that is needed.
///
/// # Examples
///
/// ```
/// use tokio::sync::Semaphore;
///
/// static SEM: Semaphore = Semaphore::const_new(10);
/// ```
///
/// [`tokio-console`]: https://github.com/tokio-rs/console
/// [unstable feature]: crate#unstable-features
// NOTE(review): gated off for loom model tests — presumably loom's mocked
// primitives cannot be constructed in a `const fn`; confirm against the
// batch_semaphore implementation.
#[cfg(not(all(loom, test)))]
pub const fn const_new(permits: usize) -> Self {
    Self {
        ll_sem: ll::Semaphore::const_new(permits),
        // A disabled span: `const_new` semaphores are deliberately not
        // instrumented (see the doc comment above).
        #[cfg(all(tokio_unstable, feature = "tracing"))]
        resource_span: tracing::Span::none(),
    }
}
/// Creates a new closed semaphore with 0 permits.
///
/// On a closed semaphore, `acquire` returns an [`AcquireError`] and
/// `try_acquire` returns [`TryAcquireError::Closed`].
pub(crate) fn new_closed() -> Self {
    Self {
        ll_sem: ll::Semaphore::new_closed(),
        // Crate-internal semaphores are not instrumented.
        #[cfg(all(tokio_unstable, feature = "tracing"))]
        resource_span: tracing::Span::none(),
    }
}
/// Creates a new closed semaphore with 0 permits.
///
/// `const` counterpart of [`Semaphore::new_closed`]; gated off for loom
/// model tests like [`Semaphore::const_new`].
#[cfg(not(all(loom, test)))]
pub(crate) const fn const_new_closed() -> Self {
    Self {
        ll_sem: ll::Semaphore::const_new_closed(),
        // Crate-internal semaphores are not instrumented.
        #[cfg(all(tokio_unstable, feature = "tracing"))]
        resource_span: tracing::Span::none(),
    }
}
/// Returns the current number of available permits.
///
/// Permits currently held by a [`SemaphorePermit`] or
/// [`OwnedSemaphorePermit`] are not counted; they become available again
/// when the permit is dropped.
pub fn available_permits(&self) -> usize {
    self.ll_sem.available_permits()
}
/// Adds `n` new permits to the semaphore.
///
/// The maximum number of permits is [`Semaphore::MAX_PERMITS`], and this function will panic if the limit is exceeded.
pub fn add_permits(&self, n: usize) {
    // NOTE(review): `release` presumably also wakes queued waiters that the
    // new permits can now satisfy — confirm in `batch_semaphore`.
    self.ll_sem.release(n);
}
/// Decreases the semaphore's available permits by a maximum of `n`.
///
/// If fewer than `n` permits are available, every available permit is
/// forgotten instead. Returns the number of permits that were actually
/// reduced.
pub fn forget_permits(&self, n: usize) -> usize {
    self.ll_sem.forget_permits(n)
}
/// Acquires a permit from the semaphore.
///
/// If the semaphore has been closed, this returns an [`AcquireError`].
/// Otherwise, this returns a [`SemaphorePermit`] representing the
/// acquired permit.
///
/// # Cancel safety
///
/// This method uses a queue to fairly distribute permits in the order they
/// were requested. Cancelling a call to `acquire` makes you lose your place
/// in the queue.
///
/// # Examples
///
/// ```
/// use tokio::sync::Semaphore;
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let semaphore = Semaphore::new(2);
///
/// let permit_1 = semaphore.acquire().await.unwrap();
/// assert_eq!(semaphore.available_permits(), 1);
///
/// let permit_2 = semaphore.acquire().await.unwrap();
/// assert_eq!(semaphore.available_permits(), 0);
///
/// drop(permit_1);
/// assert_eq!(semaphore.available_permits(), 1);
/// # }
/// ```
///
/// [`AcquireError`]: crate::sync::AcquireError
/// [`SemaphorePermit`]: crate::sync::SemaphorePermit
pub async fn acquire(&self) -> Result<SemaphorePermit<'_>, AcquireError> {
    // With console instrumentation, wrap the acquire future in an async-op
    // span; otherwise use the low-level future directly. Exactly one of the
    // two `inner` bindings is compiled in.
    #[cfg(all(tokio_unstable, feature = "tracing"))]
    let inner = trace::async_op(
        || self.ll_sem.acquire(1),
        self.resource_span.clone(),
        "Semaphore::acquire",
        "poll",
        true,
    );
    #[cfg(not(all(tokio_unstable, feature = "tracing")))]
    let inner = self.ll_sem.acquire(1);

    // Fails only if the semaphore has been closed.
    inner.await?;

    // Hand back an RAII guard; the permit is returned on drop.
    Ok(SemaphorePermit {
        sem: self,
        permits: 1,
    })
}
/// Acquires `n` permits from the semaphore.
///
/// If the semaphore has been closed, this returns an [`AcquireError`].
/// Otherwise, this returns a [`SemaphorePermit`] representing the
/// acquired permits.
///
/// # Cancel safety
///
/// This method uses a queue to fairly distribute permits in the order they
/// were requested. Cancelling a call to `acquire_many` makes you lose your
/// place in the queue.
///
/// # Examples
///
/// ```
/// use tokio::sync::Semaphore;
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let semaphore = Semaphore::new(5);
///
/// let permit = semaphore.acquire_many(3).await.unwrap();
/// assert_eq!(semaphore.available_permits(), 2);
/// # }
/// ```
///
/// [`AcquireError`]: crate::sync::AcquireError
/// [`SemaphorePermit`]: crate::sync::SemaphorePermit
pub async fn acquire_many(&self, n: u32) -> Result<SemaphorePermit<'_>, AcquireError> {
    // Bind the acquire future first and await it exactly once below. This
    // mirrors the structure of `acquire`, `acquire_owned` and
    // `acquire_many_owned`; previously this method duplicated the
    // `.await?` inside each cfg branch. Exactly one of the two `inner`
    // bindings is compiled in.
    #[cfg(all(tokio_unstable, feature = "tracing"))]
    let inner = trace::async_op(
        || self.ll_sem.acquire(n as usize),
        self.resource_span.clone(),
        "Semaphore::acquire_many",
        "poll",
        true,
    );
    #[cfg(not(all(tokio_unstable, feature = "tracing")))]
    let inner = self.ll_sem.acquire(n as usize);

    // Fails only if the semaphore has been closed.
    inner.await?;

    // Hand back an RAII guard holding all `n` permits; they are returned
    // on drop.
    Ok(SemaphorePermit {
        sem: self,
        permits: n,
    })
}
/// Tries to acquire a permit from the semaphore.
///
/// If the semaphore has been closed, this returns a [`TryAcquireError::Closed`]
/// and a [`TryAcquireError::NoPermits`] if there are no permits left. Otherwise,
/// this returns a [`SemaphorePermit`] representing the acquired permits.
///
/// # Examples
///
/// ```
/// use tokio::sync::{Semaphore, TryAcquireError};
///
/// # fn main() {
/// let semaphore = Semaphore::new(2);
///
/// let permit_1 = semaphore.try_acquire().unwrap();
/// assert_eq!(semaphore.available_permits(), 1);
///
/// let permit_2 = semaphore.try_acquire().unwrap();
/// assert_eq!(semaphore.available_permits(), 0);
///
/// let permit_3 = semaphore.try_acquire();
/// assert_eq!(permit_3.err(), Some(TryAcquireError::NoPermits));
/// # }
/// ```
///
/// [`TryAcquireError::Closed`]: crate::sync::TryAcquireError::Closed
/// [`TryAcquireError::NoPermits`]: crate::sync::TryAcquireError::NoPermits
/// [`SemaphorePermit`]: crate::sync::SemaphorePermit
pub fn try_acquire(&self) -> Result<SemaphorePermit<'_>, TryAcquireError> {
    // `Result::map` replaces the manual `match` that re-wrapped both arms.
    // On success, wrap the permit in an RAII guard that returns it on drop.
    self.ll_sem.try_acquire(1).map(|()| SemaphorePermit {
        sem: self,
        permits: 1,
    })
}
/// Tries to acquire `n` permits from the semaphore.
///
/// If the semaphore has been closed, this returns a [`TryAcquireError::Closed`]
/// and a [`TryAcquireError::NoPermits`] if there are not enough permits left.
/// Otherwise, this returns a [`SemaphorePermit`] representing the acquired permits.
///
/// # Examples
///
/// ```
/// use tokio::sync::{Semaphore, TryAcquireError};
///
/// # fn main() {
/// let semaphore = Semaphore::new(4);
///
/// let permit_1 = semaphore.try_acquire_many(3).unwrap();
/// assert_eq!(semaphore.available_permits(), 1);
///
/// let permit_2 = semaphore.try_acquire_many(2);
/// assert_eq!(permit_2.err(), Some(TryAcquireError::NoPermits));
/// # }
/// ```
///
/// [`TryAcquireError::Closed`]: crate::sync::TryAcquireError::Closed
/// [`TryAcquireError::NoPermits`]: crate::sync::TryAcquireError::NoPermits
/// [`SemaphorePermit`]: crate::sync::SemaphorePermit
pub fn try_acquire_many(&self, n: u32) -> Result<SemaphorePermit<'_>, TryAcquireError> {
    // `Result::map` replaces the manual `match` that re-wrapped both arms.
    // On success, wrap all `n` permits in an RAII guard that returns them
    // on drop.
    self.ll_sem.try_acquire(n as usize).map(|()| SemaphorePermit {
        sem: self,
        permits: n,
    })
}
/// Acquires a permit from the semaphore.
///
/// The semaphore must be wrapped in an [`Arc`] to call this method.
/// If the semaphore has been closed, this returns an [`AcquireError`].
/// Otherwise, this returns a [`OwnedSemaphorePermit`] representing the
/// acquired permit.
///
/// # Cancel safety
///
/// This method uses a queue to fairly distribute permits in the order they
/// were requested. Cancelling a call to `acquire_owned` makes you lose your
/// place in the queue.
///
/// # Examples
///
/// ```
/// use std::sync::Arc;
/// use tokio::sync::Semaphore;
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let semaphore = Arc::new(Semaphore::new(3));
/// let mut join_handles = Vec::new();
///
/// for _ in 0..5 {
/// let permit = semaphore.clone().acquire_owned().await.unwrap();
/// join_handles.push(tokio::spawn(async move {
/// // perform task...
/// // explicitly own `permit` in the task
/// drop(permit);
/// }));
/// }
///
/// for handle in join_handles {
/// handle.await.unwrap();
/// }
/// # }
/// ```
///
/// [`Arc`]: std::sync::Arc
/// [`AcquireError`]: crate::sync::AcquireError
/// [`OwnedSemaphorePermit`]: crate::sync::OwnedSemaphorePermit
pub async fn acquire_owned(self: Arc<Self>) -> Result<OwnedSemaphorePermit, AcquireError> {
    // With console instrumentation, wrap the acquire future in an async-op
    // span; otherwise use the low-level future directly. Exactly one of the
    // two `inner` bindings is compiled in.
    #[cfg(all(tokio_unstable, feature = "tracing"))]
    let inner = trace::async_op(
        || self.ll_sem.acquire(1),
        self.resource_span.clone(),
        "Semaphore::acquire_owned",
        "poll",
        true,
    );
    #[cfg(not(all(tokio_unstable, feature = "tracing")))]
    let inner = self.ll_sem.acquire(1);

    // Fails only if the semaphore has been closed.
    inner.await?;

    // The guard takes ownership of the `Arc`, so it can be moved into
    // spawned tasks; the permit is returned on drop.
    Ok(OwnedSemaphorePermit {
        sem: self,
        permits: 1,
    })
}
/// Acquires `n` permits from the semaphore.
///
/// The semaphore must be wrapped in an [`Arc`] to call this method.
/// If the semaphore has been closed, this returns an [`AcquireError`].
/// Otherwise, this returns a [`OwnedSemaphorePermit`] representing the
/// acquired permit.
///
/// # Cancel safety
///
/// This method uses a queue to fairly distribute permits in the order they
/// were requested. Cancelling a call to `acquire_many_owned` makes you lose
/// your place in the queue.
///
/// # Examples
///
/// ```
/// use std::sync::Arc;
/// use tokio::sync::Semaphore;
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let semaphore = Arc::new(Semaphore::new(10));
/// let mut join_handles = Vec::new();
///
/// for _ in 0..5 {
/// let permit = semaphore.clone().acquire_many_owned(2).await.unwrap();
/// join_handles.push(tokio::spawn(async move {
/// // perform task...
/// // explicitly own `permit` in the task
/// drop(permit);
/// }));
/// }
///
/// for handle in join_handles {
/// handle.await.unwrap();
/// }
/// # }
/// ```
///
/// [`Arc`]: std::sync::Arc
/// [`AcquireError`]: crate::sync::AcquireError
/// [`OwnedSemaphorePermit`]: crate::sync::OwnedSemaphorePermit
    pub async fn acquire_many_owned(
        self: Arc<Self>,
        n: u32,
    ) -> Result<OwnedSemaphorePermit, AcquireError> {
        // With instrumentation enabled, wrap the low-level acquire future so
        // its polling is reported against this semaphore's resource span.
        #[cfg(all(tokio_unstable, feature = "tracing"))]
        let inner = trace::async_op(
            || self.ll_sem.acquire(n as usize),
            self.resource_span.clone(),
            "Semaphore::acquire_many_owned",
            "poll",
            true,
        );
        // Otherwise poll the low-level semaphore directly.
        #[cfg(not(all(tokio_unstable, feature = "tracing")))]
        let inner = self.ll_sem.acquire(n as usize);
        inner.await?;
        // All `n` permits are now held; the guard records the count so they
        // can all be returned together.
        Ok(OwnedSemaphorePermit {
            sem: self,
            permits: n,
        })
    }
/// Tries to acquire a permit from the semaphore.
///
/// The semaphore must be wrapped in an [`Arc`] to call this method. If
/// the semaphore has been closed, this returns a [`TryAcquireError::Closed`]
/// and a [`TryAcquireError::NoPermits`] if there are no permits left.
/// Otherwise, this returns a [`OwnedSemaphorePermit`] representing the
/// acquired permit.
///
/// # Examples
///
/// ```
/// use std::sync::Arc;
/// use tokio::sync::{Semaphore, TryAcquireError};
///
/// # fn main() {
/// let semaphore = Arc::new(Semaphore::new(2));
///
/// let permit_1 = Arc::clone(&semaphore).try_acquire_owned().unwrap();
/// assert_eq!(semaphore.available_permits(), 1);
///
/// let permit_2 = Arc::clone(&semaphore).try_acquire_owned().unwrap();
/// assert_eq!(semaphore.available_permits(), 0);
///
/// let permit_3 = semaphore.try_acquire_owned();
/// assert_eq!(permit_3.err(), Some(TryAcquireError::NoPermits));
/// # }
/// ```
///
/// [`Arc`]: std::sync::Arc
/// [`TryAcquireError::Closed`]: crate::sync::TryAcquireError::Closed
/// [`TryAcquireError::NoPermits`]: crate::sync::TryAcquireError::NoPermits
/// [`OwnedSemaphorePermit`]: crate::sync::OwnedSemaphorePermit
pub fn try_acquire_owned(self: Arc<Self>) -> Result<OwnedSemaphorePermit, TryAcquireError> {
match self.ll_sem.try_acquire(1) {
Ok(()) => Ok(OwnedSemaphorePermit {
sem: self,
permits: 1,
}),
Err(e) => Err(e),
}
}
/// Tries to acquire `n` permits from the semaphore.
///
/// The semaphore must be wrapped in an [`Arc`] to call this method. If
/// the semaphore has been closed, this returns a [`TryAcquireError::Closed`]
/// and a [`TryAcquireError::NoPermits`] if there are no permits left.
/// Otherwise, this returns a [`OwnedSemaphorePermit`] representing the
/// acquired permit.
///
/// # Examples
///
/// ```
/// use std::sync::Arc;
/// use tokio::sync::{Semaphore, TryAcquireError};
///
/// # fn main() {
/// let semaphore = Arc::new(Semaphore::new(4));
///
/// let permit_1 = Arc::clone(&semaphore).try_acquire_many_owned(3).unwrap();
/// assert_eq!(semaphore.available_permits(), 1);
///
/// let permit_2 = semaphore.try_acquire_many_owned(2);
/// assert_eq!(permit_2.err(), Some(TryAcquireError::NoPermits));
/// # }
/// ```
///
/// [`Arc`]: std::sync::Arc
/// [`TryAcquireError::Closed`]: crate::sync::TryAcquireError::Closed
/// [`TryAcquireError::NoPermits`]: crate::sync::TryAcquireError::NoPermits
/// [`OwnedSemaphorePermit`]: crate::sync::OwnedSemaphorePermit
pub fn try_acquire_many_owned(
self: Arc<Self>,
n: u32,
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | true |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/sync/rwlock/owned_read_guard.rs | tokio/src/sync/rwlock/owned_read_guard.rs | use crate::sync::rwlock::RwLock;
use std::marker::PhantomData;
use std::sync::Arc;
use std::{fmt, mem, ops, ptr};
/// Owned RAII structure used to release the shared read access of a lock when
/// dropped.
///
/// This structure is created by the [`read_owned`] method on
/// [`RwLock`].
///
/// [`read_owned`]: method@crate::sync::RwLock::read_owned
/// [`RwLock`]: struct@crate::sync::RwLock
#[clippy::has_significant_drop]
pub struct OwnedRwLockReadGuard<T: ?Sized, U: ?Sized = T> {
    // When changing the fields in this struct, make sure to update the
    // `skip_drop` method.
    #[cfg(all(tokio_unstable, feature = "tracing"))]
    pub(super) resource_span: tracing::Span,
    // Keeps the `RwLock` (and the data it guards) alive while the guard
    // exists; the read permit is returned to `lock.s` in `Drop`.
    pub(super) lock: Arc<RwLock<T>>,
    // Points at the guarded value — possibly a sub-field of `T` when the
    // guard was produced by `map`/`try_map` (hence the separate `U`).
    pub(super) data: *const U,
    pub(super) _p: PhantomData<T>,
}
#[allow(dead_code)] // Unused fields are still used in Drop.
struct Inner<T: ?Sized, U: ?Sized> {
    // Mirror of the guard's fields, produced by `skip_drop` so the fields can
    // be moved out of the guard without running its `Drop` (which would
    // release the read permit).
    #[cfg(all(tokio_unstable, feature = "tracing"))]
    resource_span: tracing::Span,
    lock: Arc<RwLock<T>>,
    data: *const U,
}
impl<T: ?Sized, U: ?Sized> OwnedRwLockReadGuard<T, U> {
    /// Moves the fields out of the guard without running its `Drop` impl
    /// (which would release the read permit), so `map`/`try_map` can hand
    /// the permit over to a newly constructed guard.
    fn skip_drop(self) -> Inner<T, U> {
        let me = mem::ManuallyDrop::new(self);
        // SAFETY: This duplicates the values in every field of the guard, then
        // forgets the originals, so in the end no value is duplicated.
        unsafe {
            Inner {
                #[cfg(all(tokio_unstable, feature = "tracing"))]
                resource_span: ptr::read(&me.resource_span),
                lock: ptr::read(&me.lock),
                data: me.data,
            }
        }
    }
    /// Makes a new `OwnedRwLockReadGuard` for a component of the locked data.
    /// This operation cannot fail as the `OwnedRwLockReadGuard` passed in
    /// already locked the data.
    ///
    /// This is an associated function that needs to be
    /// used as `OwnedRwLockReadGuard::map(...)`. A method would interfere with
    /// methods of the same name on the contents of the locked data.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    /// use tokio::sync::{RwLock, OwnedRwLockReadGuard};
    ///
    /// #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    /// struct Foo(u32);
    ///
    /// # #[tokio::main(flavor = "current_thread")]
    /// # async fn main() {
    /// let lock = Arc::new(RwLock::new(Foo(1)));
    ///
    /// let guard = lock.read_owned().await;
    /// let guard = OwnedRwLockReadGuard::map(guard, |f| &f.0);
    ///
    /// assert_eq!(1, *guard);
    /// # }
    /// ```
    #[inline]
    pub fn map<F, V: ?Sized>(this: Self, f: F) -> OwnedRwLockReadGuard<T, V>
    where
        F: FnOnce(&U) -> &V,
    {
        // Compute the mapped pointer while the old guard is still intact,
        // then dismantle it without releasing the permit; the new guard
        // inherits that permit.
        let data = f(&*this) as *const V;
        let this = this.skip_drop();
        OwnedRwLockReadGuard {
            lock: this.lock,
            data,
            _p: PhantomData,
            #[cfg(all(tokio_unstable, feature = "tracing"))]
            resource_span: this.resource_span,
        }
    }
    /// Attempts to make a new [`OwnedRwLockReadGuard`] for a component of the
    /// locked data. The original guard is returned if the closure returns
    /// `None`.
    ///
    /// This operation cannot fail as the `OwnedRwLockReadGuard` passed in
    /// already locked the data.
    ///
    /// This is an associated function that needs to be used as
    /// `OwnedRwLockReadGuard::try_map(..)`. A method would interfere with
    /// methods of the same name on the contents of the locked data.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    /// use tokio::sync::{RwLock, OwnedRwLockReadGuard};
    ///
    /// #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    /// struct Foo(u32);
    ///
    /// # #[tokio::main(flavor = "current_thread")]
    /// # async fn main() {
    /// let lock = Arc::new(RwLock::new(Foo(1)));
    ///
    /// let guard = lock.read_owned().await;
    /// let guard = OwnedRwLockReadGuard::try_map(guard, |f| Some(&f.0)).expect("should not fail");
    ///
    /// assert_eq!(1, *guard);
    /// # }
    /// ```
    #[inline]
    pub fn try_map<F, V: ?Sized>(this: Self, f: F) -> Result<OwnedRwLockReadGuard<T, V>, Self>
    where
        F: FnOnce(&U) -> Option<&V>,
    {
        let data = match f(&*this) {
            Some(data) => data as *const V,
            // The closure declined; hand the original guard back untouched.
            None => return Err(this),
        };
        let this = this.skip_drop();
        Ok(OwnedRwLockReadGuard {
            lock: this.lock,
            data,
            _p: PhantomData,
            #[cfg(all(tokio_unstable, feature = "tracing"))]
            resource_span: this.resource_span,
        })
    }
    /// Returns a reference to the original `Arc<RwLock>`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    /// use tokio::sync::{RwLock, OwnedRwLockReadGuard};
    ///
    /// #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    /// struct Foo(u32);
    ///
    /// # #[tokio::main(flavor = "current_thread")]
    /// # async fn main() {
    /// let lock = Arc::new(RwLock::new(Foo(1)));
    ///
    /// let guard = lock.clone().read_owned().await;
    /// assert!(Arc::ptr_eq(&lock, OwnedRwLockReadGuard::rwlock(&guard)));
    ///
    /// let guard = OwnedRwLockReadGuard::map(guard, |f| &f.0);
    /// assert!(Arc::ptr_eq(&lock, OwnedRwLockReadGuard::rwlock(&guard)));
    /// # }
    /// ```
    pub fn rwlock(this: &Self) -> &Arc<RwLock<T>> {
        &this.lock
    }
}
impl<T: ?Sized, U: ?Sized> ops::Deref for OwnedRwLockReadGuard<T, U> {
    type Target = U;
    fn deref(&self) -> &U {
        // SAFETY: `data` was derived from a shared borrow of the guarded
        // value, and the read permit held until `Drop` keeps it valid.
        unsafe { &*self.data }
    }
}
impl<T: ?Sized, U: ?Sized> fmt::Debug for OwnedRwLockReadGuard<T, U>
where
    U: fmt::Debug,
{
    // The guard is transparent: formatting delegates to the guarded value.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Debug::fmt(&**self, f)
    }
}
impl<T: ?Sized, U: ?Sized> fmt::Display for OwnedRwLockReadGuard<T, U>
where
    U: fmt::Display,
{
    // The guard is transparent: formatting delegates to the guarded value.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Display::fmt(&**self, f)
    }
}
impl<T: ?Sized, U: ?Sized> Drop for OwnedRwLockReadGuard<T, U> {
    fn drop(&mut self) {
        // Return the single read permit this guard holds to the semaphore
        // backing the `RwLock`.
        self.lock.s.release(1);
        // Record the reader-count decrement for runtime instrumentation.
        #[cfg(all(tokio_unstable, feature = "tracing"))]
        self.resource_span.in_scope(|| {
            tracing::trace!(
                target: "runtime::resource::state_update",
                current_readers = 1,
                current_readers.op = "sub",
            )
        });
    }
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/sync/rwlock/owned_write_guard.rs | tokio/src/sync/rwlock/owned_write_guard.rs | use crate::sync::rwlock::owned_read_guard::OwnedRwLockReadGuard;
use crate::sync::rwlock::owned_write_guard_mapped::OwnedRwLockMappedWriteGuard;
use crate::sync::rwlock::RwLock;
use std::marker::PhantomData;
use std::sync::Arc;
use std::{fmt, mem, ops, ptr};
/// Owned RAII structure used to release the exclusive write access of a lock when
/// dropped.
///
/// This structure is created by the [`write_owned`] method
/// on [`RwLock`].
///
/// [`write_owned`]: method@crate::sync::RwLock::write_owned
/// [`RwLock`]: struct@crate::sync::RwLock
#[clippy::has_significant_drop]
pub struct OwnedRwLockWriteGuard<T: ?Sized> {
    // When changing the fields in this struct, make sure to update the
    // `skip_drop` method.
    #[cfg(all(tokio_unstable, feature = "tracing"))]
    pub(super) resource_span: tracing::Span,
    // Number of semaphore permits this writer holds; all of them are released
    // in `Drop`, or all but one when downgrading to a read guard.
    pub(super) permits_acquired: u32,
    // Keeps the `RwLock` (and the data it guards) alive while the guard exists.
    pub(super) lock: Arc<RwLock<T>>,
    // Points at the guarded value; mutable because the writer has exclusive
    // access until the guard is dropped or downgraded.
    pub(super) data: *mut T,
    pub(super) _p: PhantomData<T>,
}
#[allow(dead_code)] // Unused fields are still used in Drop.
struct Inner<T: ?Sized> {
    // Mirror of the guard's fields, produced by `skip_drop` so the fields can
    // be moved out of the guard without running its `Drop` (which would
    // release the write permits).
    #[cfg(all(tokio_unstable, feature = "tracing"))]
    resource_span: tracing::Span,
    permits_acquired: u32,
    lock: Arc<RwLock<T>>,
    data: *const T,
}
impl<T: ?Sized> OwnedRwLockWriteGuard<T> {
    /// Moves the fields out of the guard without running its `Drop` impl
    /// (which would release the write permits), so the mapping/downgrading
    /// helpers below can transfer those permits to a new guard.
    fn skip_drop(self) -> Inner<T> {
        let me = mem::ManuallyDrop::new(self);
        // SAFETY: This duplicates the values in every field of the guard, then
        // forgets the originals, so in the end no value is duplicated.
        unsafe {
            Inner {
                #[cfg(all(tokio_unstable, feature = "tracing"))]
                resource_span: ptr::read(&me.resource_span),
                permits_acquired: me.permits_acquired,
                lock: ptr::read(&me.lock),
                data: me.data,
            }
        }
    }
    /// Makes a new [`OwnedRwLockMappedWriteGuard`] for a component of the locked
    /// data.
    ///
    /// This operation cannot fail as the `OwnedRwLockWriteGuard` passed in
    /// already locked the data.
    ///
    /// This is an associated function that needs to be used as
    /// `OwnedRwLockWriteGuard::map(..)`. A method would interfere with methods
    /// of the same name on the contents of the locked data.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    /// use tokio::sync::{RwLock, OwnedRwLockWriteGuard};
    ///
    /// #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    /// struct Foo(u32);
    ///
    /// # #[tokio::main(flavor = "current_thread")]
    /// # async fn main() {
    /// let lock = Arc::new(RwLock::new(Foo(1)));
    ///
    /// {
    ///     let lock = Arc::clone(&lock);
    ///     let mut mapped = OwnedRwLockWriteGuard::map(lock.write_owned().await, |f| &mut f.0);
    ///     *mapped = 2;
    /// }
    ///
    /// assert_eq!(Foo(2), *lock.read().await);
    /// # }
    /// ```
    #[inline]
    pub fn map<F, U: ?Sized>(mut this: Self, f: F) -> OwnedRwLockMappedWriteGuard<T, U>
    where
        F: FnOnce(&mut T) -> &mut U,
    {
        // Compute the mapped pointer while the old guard is still intact,
        // then dismantle it without releasing the permits; the new guard
        // inherits all of them.
        let data = f(&mut *this) as *mut U;
        let this = this.skip_drop();
        OwnedRwLockMappedWriteGuard {
            permits_acquired: this.permits_acquired,
            lock: this.lock,
            data,
            _p: PhantomData,
            #[cfg(all(tokio_unstable, feature = "tracing"))]
            resource_span: this.resource_span,
        }
    }
    /// Makes a new [`OwnedRwLockReadGuard`] for a component of the locked data.
    ///
    /// This operation cannot fail as the `OwnedRwLockWriteGuard` passed in already
    /// locked the data.
    ///
    /// This is an associated function that needs to be used as
    /// `OwnedRwLockWriteGuard::downgrade_map(..)`. A method would interfere with methods of
    /// the same name on the contents of the locked data.
    ///
    /// Inside of `f`, you retain exclusive access to the data, despite only being given a `&T`. Handing out a
    /// `&mut T` would result in unsoundness, as you could use interior mutability.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    /// use tokio::sync::{RwLock, OwnedRwLockWriteGuard};
    ///
    /// #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    /// struct Foo(u32);
    ///
    /// # #[tokio::main(flavor = "current_thread")]
    /// # async fn main() {
    /// let lock = Arc::new(RwLock::new(Foo(1)));
    ///
    /// let guard = Arc::clone(&lock).write_owned().await;
    /// let mapped = OwnedRwLockWriteGuard::downgrade_map(guard, |f| &f.0);
    /// let foo = lock.read_owned().await;
    /// assert_eq!(foo.0, *mapped);
    /// # }
    /// ```
    #[inline]
    pub fn downgrade_map<F, U: ?Sized>(this: Self, f: F) -> OwnedRwLockReadGuard<T, U>
    where
        F: FnOnce(&T) -> &U,
    {
        let data = f(&*this) as *const U;
        let this = this.skip_drop();
        let guard = OwnedRwLockReadGuard {
            lock: this.lock,
            data,
            _p: PhantomData,
            #[cfg(all(tokio_unstable, feature = "tracing"))]
            resource_span: this.resource_span,
        };
        // Release all but one of the permits held by the write guard
        let to_release = (this.permits_acquired - 1) as usize;
        guard.lock.s.release(to_release);
        // Record the downgrade (writer gone, one reader added) for runtime
        // instrumentation.
        #[cfg(all(tokio_unstable, feature = "tracing"))]
        guard.resource_span.in_scope(|| {
            tracing::trace!(
                target: "runtime::resource::state_update",
                write_locked = false,
                write_locked.op = "override",
            )
        });
        #[cfg(all(tokio_unstable, feature = "tracing"))]
        guard.resource_span.in_scope(|| {
            tracing::trace!(
                target: "runtime::resource::state_update",
                current_readers = 1,
                current_readers.op = "add",
            )
        });
        guard
    }
    /// Attempts to make a new [`OwnedRwLockMappedWriteGuard`] for a component
    /// of the locked data. The original guard is returned if the closure
    /// returns `None`.
    ///
    /// This operation cannot fail as the `OwnedRwLockWriteGuard` passed in
    /// already locked the data.
    ///
    /// This is an associated function that needs to be
    /// used as `OwnedRwLockWriteGuard::try_map(...)`. A method would interfere
    /// with methods of the same name on the contents of the locked data.
    ///
    /// [`RwLockMappedWriteGuard`]: struct@crate::sync::RwLockMappedWriteGuard
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    /// use tokio::sync::{RwLock, OwnedRwLockWriteGuard};
    ///
    /// #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    /// struct Foo(u32);
    ///
    /// # #[tokio::main(flavor = "current_thread")]
    /// # async fn main() {
    /// let lock = Arc::new(RwLock::new(Foo(1)));
    ///
    /// {
    ///     let guard = Arc::clone(&lock).write_owned().await;
    ///     let mut guard = OwnedRwLockWriteGuard::try_map(guard, |f| Some(&mut f.0)).expect("should not fail");
    ///     *guard = 2;
    /// }
    ///
    /// assert_eq!(Foo(2), *lock.read().await);
    /// # }
    /// ```
    #[inline]
    pub fn try_map<F, U: ?Sized>(
        mut this: Self,
        f: F,
    ) -> Result<OwnedRwLockMappedWriteGuard<T, U>, Self>
    where
        F: FnOnce(&mut T) -> Option<&mut U>,
    {
        let data = match f(&mut *this) {
            Some(data) => data as *mut U,
            // The closure declined; hand the original guard back untouched.
            None => return Err(this),
        };
        let this = this.skip_drop();
        Ok(OwnedRwLockMappedWriteGuard {
            permits_acquired: this.permits_acquired,
            lock: this.lock,
            data,
            _p: PhantomData,
            #[cfg(all(tokio_unstable, feature = "tracing"))]
            resource_span: this.resource_span,
        })
    }
    /// Attempts to make a new [`OwnedRwLockReadGuard`] for a component of
    /// the locked data. The original guard is returned if the closure returns
    /// `None`.
    ///
    /// This operation cannot fail as the `OwnedRwLockWriteGuard` passed in already
    /// locked the data.
    ///
    /// This is an associated function that needs to be
    /// used as `OwnedRwLockWriteGuard::try_downgrade_map(...)`. A method would interfere with
    /// methods of the same name on the contents of the locked data.
    ///
    /// Inside of `f`, you retain exclusive access to the data, despite only being given a `&T`. Handing out a
    /// `&mut T` would result in unsoundness, as you could use interior mutability.
    ///
    /// If this function returns `Err(...)`, the lock is never unlocked nor downgraded.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    /// use tokio::sync::{RwLock, OwnedRwLockWriteGuard};
    ///
    /// #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    /// struct Foo(u32);
    ///
    /// # #[tokio::main(flavor = "current_thread")]
    /// # async fn main() {
    /// let lock = Arc::new(RwLock::new(Foo(1)));
    ///
    /// let guard = Arc::clone(&lock).write_owned().await;
    /// let guard = OwnedRwLockWriteGuard::try_downgrade_map(guard, |f| Some(&f.0)).expect("should not fail");
    /// let foo = lock.read_owned().await;
    /// assert_eq!(foo.0, *guard);
    /// # }
    /// ```
    #[inline]
    pub fn try_downgrade_map<F, U: ?Sized>(
        this: Self,
        f: F,
    ) -> Result<OwnedRwLockReadGuard<T, U>, Self>
    where
        F: FnOnce(&T) -> Option<&U>,
    {
        let data = match f(&*this) {
            Some(data) => data as *const U,
            // The closure declined; the write lock stays fully held.
            None => return Err(this),
        };
        let this = this.skip_drop();
        let guard = OwnedRwLockReadGuard {
            lock: this.lock,
            data,
            _p: PhantomData,
            #[cfg(all(tokio_unstable, feature = "tracing"))]
            resource_span: this.resource_span,
        };
        // Release all but one of the permits held by the write guard
        let to_release = (this.permits_acquired - 1) as usize;
        guard.lock.s.release(to_release);
        // Record the downgrade (writer gone, one reader added) for runtime
        // instrumentation.
        #[cfg(all(tokio_unstable, feature = "tracing"))]
        guard.resource_span.in_scope(|| {
            tracing::trace!(
                target: "runtime::resource::state_update",
                write_locked = false,
                write_locked.op = "override",
            )
        });
        #[cfg(all(tokio_unstable, feature = "tracing"))]
        guard.resource_span.in_scope(|| {
            tracing::trace!(
                target: "runtime::resource::state_update",
                current_readers = 1,
                current_readers.op = "add",
            )
        });
        Ok(guard)
    }
    /// Converts this `OwnedRwLockWriteGuard` into an
    /// `OwnedRwLockMappedWriteGuard`. This method can be used to store a
    /// non-mapped guard in a struct field that expects a mapped guard.
    ///
    /// This is equivalent to calling `OwnedRwLockWriteGuard::map(guard, |me| me)`.
    #[inline]
    pub fn into_mapped(this: Self) -> OwnedRwLockMappedWriteGuard<T> {
        Self::map(this, |me| me)
    }
    /// Atomically downgrades a write lock into a read lock without allowing
    /// any writers to take exclusive access of the lock in the meantime.
    ///
    /// **Note:** This won't *necessarily* allow any additional readers to acquire
    /// locks, since [`RwLock`] is fair and it is possible that a writer is next
    /// in line.
    ///
    /// Returns an RAII guard which will drop this read access of the `RwLock`
    /// when dropped.
    ///
    /// # Examples
    ///
    /// ```
    /// # use tokio::sync::RwLock;
    /// # use std::sync::Arc;
    /// #
    /// # #[tokio::main(flavor = "current_thread")]
    /// # async fn main() {
    /// let lock = Arc::new(RwLock::new(1));
    ///
    /// let n = lock.clone().write_owned().await;
    ///
    /// let cloned_lock = lock.clone();
    /// let handle = tokio::spawn(async move {
    ///     *cloned_lock.write_owned().await = 2;
    /// });
    ///
    /// let n = n.downgrade();
    /// assert_eq!(*n, 1, "downgrade is atomic");
    ///
    /// drop(n);
    /// handle.await.unwrap();
    /// assert_eq!(*lock.read().await, 2, "second writer obtained write lock");
    /// # }
    /// ```
    pub fn downgrade(self) -> OwnedRwLockReadGuard<T> {
        let this = self.skip_drop();
        let guard = OwnedRwLockReadGuard {
            lock: this.lock,
            data: this.data,
            _p: PhantomData,
            #[cfg(all(tokio_unstable, feature = "tracing"))]
            resource_span: this.resource_span,
        };
        // Release all but one of the permits held by the write guard
        let to_release = (this.permits_acquired - 1) as usize;
        guard.lock.s.release(to_release);
        // Record the downgrade (writer gone, one reader added) for runtime
        // instrumentation.
        #[cfg(all(tokio_unstable, feature = "tracing"))]
        guard.resource_span.in_scope(|| {
            tracing::trace!(
                target: "runtime::resource::state_update",
                write_locked = false,
                write_locked.op = "override",
            )
        });
        #[cfg(all(tokio_unstable, feature = "tracing"))]
        guard.resource_span.in_scope(|| {
            tracing::trace!(
                target: "runtime::resource::state_update",
                current_readers = 1,
                current_readers.op = "add",
            )
        });
        guard
    }
    /// Returns a reference to the original `Arc<RwLock>`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    /// use tokio::sync::{RwLock, OwnedRwLockWriteGuard};
    ///
    /// # #[tokio::main(flavor = "current_thread")]
    /// # async fn main() {
    /// let lock = Arc::new(RwLock::new(1));
    ///
    /// let guard = lock.clone().write_owned().await;
    /// assert!(Arc::ptr_eq(&lock, OwnedRwLockWriteGuard::rwlock(&guard)));
    /// # }
    /// ```
    pub fn rwlock(this: &Self) -> &Arc<RwLock<T>> {
        &this.lock
    }
}
impl<T: ?Sized> ops::Deref for OwnedRwLockWriteGuard<T> {
    type Target = T;
    fn deref(&self) -> &T {
        // SAFETY: the write permits held until `Drop` give this guard
        // exclusive access to the value `data` points at.
        unsafe { &*self.data }
    }
}
impl<T: ?Sized> ops::DerefMut for OwnedRwLockWriteGuard<T> {
    fn deref_mut(&mut self) -> &mut T {
        // SAFETY: exclusive access is guaranteed by the held write permits,
        // and `&mut self` prevents aliased borrows through this guard.
        unsafe { &mut *self.data }
    }
}
impl<T: ?Sized> fmt::Debug for OwnedRwLockWriteGuard<T>
where
    T: fmt::Debug,
{
    // The guard is transparent: formatting delegates to the guarded value.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Debug::fmt(&**self, f)
    }
}
impl<T: ?Sized> fmt::Display for OwnedRwLockWriteGuard<T>
where
    T: fmt::Display,
{
    // The guard is transparent: formatting delegates to the guarded value.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Display::fmt(&**self, f)
    }
}
impl<T: ?Sized> Drop for OwnedRwLockWriteGuard<T> {
    fn drop(&mut self) {
        // Return every permit the writer acquired, fully unlocking the
        // `RwLock` for the next waiter.
        self.lock.s.release(self.permits_acquired as usize);
        // Record the unlock for runtime instrumentation.
        #[cfg(all(tokio_unstable, feature = "tracing"))]
        self.resource_span.in_scope(|| {
            tracing::trace!(
                target: "runtime::resource::state_update",
                write_locked = false,
                write_locked.op = "override",
            )
        });
    }
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/sync/rwlock/read_guard.rs | tokio/src/sync/rwlock/read_guard.rs | use crate::sync::batch_semaphore::Semaphore;
use std::marker::PhantomData;
use std::{fmt, mem, ops};
/// RAII structure used to release the shared read access of a lock when
/// dropped.
///
/// This structure is created by the [`read`] method on
/// [`RwLock`].
///
/// [`read`]: method@crate::sync::RwLock::read
/// [`RwLock`]: struct@crate::sync::RwLock
#[clippy::has_significant_drop]
#[must_use = "if unused the RwLock will immediately unlock"]
pub struct RwLockReadGuard<'a, T: ?Sized> {
    // When changing the fields in this struct, make sure to update the
    // `skip_drop` method.
    #[cfg(all(tokio_unstable, feature = "tracing"))]
    pub(super) resource_span: tracing::Span,
    // The semaphore backing the `RwLock`; one permit is returned to it in
    // `Drop`.
    pub(super) s: &'a Semaphore,
    // Points at the guarded value — possibly a sub-field of `T` when produced
    // by `map`/`try_map`.
    pub(super) data: *const T,
    pub(super) marker: PhantomData<&'a T>,
}
#[allow(dead_code)] // Unused fields are still used in Drop.
struct Inner<'a, T: ?Sized> {
    // Mirror of the guard's fields, produced by `skip_drop` so the fields can
    // be moved out of the guard without running its `Drop` (which would
    // release the read permit).
    #[cfg(all(tokio_unstable, feature = "tracing"))]
    resource_span: tracing::Span,
    s: &'a Semaphore,
    data: *const T,
}
impl<'a, T: ?Sized> RwLockReadGuard<'a, T> {
    /// Moves the fields out of the guard without running its `Drop` impl
    /// (which would release the read permit), so `map`/`try_map` can hand
    /// the permit over to a newly constructed guard.
    fn skip_drop(self) -> Inner<'a, T> {
        let me = mem::ManuallyDrop::new(self);
        // SAFETY: This duplicates the values in every field of the guard, then
        // forgets the originals, so in the end no value is duplicated.
        Inner {
            #[cfg(all(tokio_unstable, feature = "tracing"))]
            resource_span: unsafe { std::ptr::read(&me.resource_span) },
            s: me.s,
            data: me.data,
        }
    }
    /// Makes a new `RwLockReadGuard` for a component of the locked data.
    ///
    /// This operation cannot fail as the `RwLockReadGuard` passed in already
    /// locked the data.
    ///
    /// This is an associated function that needs to be
    /// used as `RwLockReadGuard::map(...)`. A method would interfere with
    /// methods of the same name on the contents of the locked data.
    ///
    /// This is an asynchronous version of [`RwLockReadGuard::map`] from the
    /// [`parking_lot` crate].
    ///
    /// [`RwLockReadGuard::map`]: https://docs.rs/lock_api/latest/lock_api/struct.RwLockReadGuard.html#method.map
    /// [`parking_lot` crate]: https://crates.io/crates/parking_lot
    ///
    /// # Examples
    ///
    /// ```
    /// use tokio::sync::{RwLock, RwLockReadGuard};
    ///
    /// #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    /// struct Foo(u32);
    ///
    /// # #[tokio::main(flavor = "current_thread")]
    /// # async fn main() {
    /// let lock = RwLock::new(Foo(1));
    ///
    /// let guard = lock.read().await;
    /// let guard = RwLockReadGuard::map(guard, |f| &f.0);
    ///
    /// assert_eq!(1, *guard);
    /// # }
    /// ```
    #[inline]
    pub fn map<F, U: ?Sized>(this: Self, f: F) -> RwLockReadGuard<'a, U>
    where
        F: FnOnce(&T) -> &U,
    {
        // Compute the mapped pointer while the old guard is still intact,
        // then dismantle it without releasing the permit; the new guard
        // inherits that permit.
        let data = f(&*this) as *const U;
        let this = this.skip_drop();
        RwLockReadGuard {
            s: this.s,
            data,
            marker: PhantomData,
            #[cfg(all(tokio_unstable, feature = "tracing"))]
            resource_span: this.resource_span,
        }
    }
    /// Attempts to make a new [`RwLockReadGuard`] for a component of the
    /// locked data. The original guard is returned if the closure returns
    /// `None`.
    ///
    /// This operation cannot fail as the `RwLockReadGuard` passed in already
    /// locked the data.
    ///
    /// This is an associated function that needs to be used as
    /// `RwLockReadGuard::try_map(..)`. A method would interfere with methods of the
    /// same name on the contents of the locked data.
    ///
    /// This is an asynchronous version of [`RwLockReadGuard::try_map`] from the
    /// [`parking_lot` crate].
    ///
    /// [`RwLockReadGuard::try_map`]: https://docs.rs/lock_api/latest/lock_api/struct.RwLockReadGuard.html#method.try_map
    /// [`parking_lot` crate]: https://crates.io/crates/parking_lot
    ///
    /// # Examples
    ///
    /// ```
    /// use tokio::sync::{RwLock, RwLockReadGuard};
    ///
    /// #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    /// struct Foo(u32);
    ///
    /// # #[tokio::main(flavor = "current_thread")]
    /// # async fn main() {
    /// let lock = RwLock::new(Foo(1));
    ///
    /// let guard = lock.read().await;
    /// let guard = RwLockReadGuard::try_map(guard, |f| Some(&f.0)).expect("should not fail");
    ///
    /// assert_eq!(1, *guard);
    /// # }
    /// ```
    #[inline]
    pub fn try_map<F, U: ?Sized>(this: Self, f: F) -> Result<RwLockReadGuard<'a, U>, Self>
    where
        F: FnOnce(&T) -> Option<&U>,
    {
        let data = match f(&*this) {
            Some(data) => data as *const U,
            // The closure declined; hand the original guard back untouched.
            None => return Err(this),
        };
        let this = this.skip_drop();
        Ok(RwLockReadGuard {
            s: this.s,
            data,
            marker: PhantomData,
            #[cfg(all(tokio_unstable, feature = "tracing"))]
            resource_span: this.resource_span,
        })
    }
}
impl<T: ?Sized> ops::Deref for RwLockReadGuard<'_, T> {
    type Target = T;
    fn deref(&self) -> &T {
        // SAFETY: `data` was derived from a shared borrow of the guarded
        // value, and the read permit held until `Drop` keeps it valid.
        unsafe { &*self.data }
    }
}
impl<'a, T: ?Sized> fmt::Debug for RwLockReadGuard<'a, T>
where
    T: fmt::Debug,
{
    // The guard is transparent: formatting delegates to the guarded value.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Debug::fmt(&**self, f)
    }
}
impl<'a, T: ?Sized> fmt::Display for RwLockReadGuard<'a, T>
where
    T: fmt::Display,
{
    // The guard is transparent: formatting delegates to the guarded value.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Display::fmt(&**self, f)
    }
}
impl<'a, T: ?Sized> Drop for RwLockReadGuard<'a, T> {
    fn drop(&mut self) {
        // Return the single read permit this guard holds to the semaphore
        // backing the `RwLock`.
        self.s.release(1);
        // Record the reader-count decrement for runtime instrumentation.
        #[cfg(all(tokio_unstable, feature = "tracing"))]
        self.resource_span.in_scope(|| {
            tracing::trace!(
                target: "runtime::resource::state_update",
                current_readers = 1,
                current_readers.op = "sub",
            )
        });
    }
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/sync/rwlock/write_guard.rs | tokio/src/sync/rwlock/write_guard.rs | use crate::sync::batch_semaphore::Semaphore;
use crate::sync::rwlock::read_guard::RwLockReadGuard;
use crate::sync::rwlock::write_guard_mapped::RwLockMappedWriteGuard;
use std::marker::PhantomData;
use std::{fmt, mem, ops};
/// RAII structure used to release the exclusive write access of a lock when
/// dropped.
///
/// This structure is created by the [`write`] method
/// on [`RwLock`].
///
/// [`write`]: method@crate::sync::RwLock::write
/// [`RwLock`]: struct@crate::sync::RwLock
#[clippy::has_significant_drop]
#[must_use = "if unused the RwLock will immediately unlock"]
pub struct RwLockWriteGuard<'a, T: ?Sized> {
    // When changing the fields in this struct, make sure to update the
    // `skip_drop` method.
    #[cfg(all(tokio_unstable, feature = "tracing"))]
    pub(super) resource_span: tracing::Span,
    // Number of semaphore permits this writer holds; they are released when
    // the guard is dropped or downgraded.
    pub(super) permits_acquired: u32,
    // The semaphore backing the `RwLock` this guard was created from.
    pub(super) s: &'a Semaphore,
    // Points at the guarded value; mutable because the writer has exclusive
    // access while the guard exists.
    pub(super) data: *mut T,
    pub(super) marker: PhantomData<&'a mut T>,
}
#[allow(dead_code)] // Unused fields are still used in Drop.
struct Inner<'a, T: ?Sized> {
    // Mirror of the guard's fields, produced by `skip_drop` so the fields can
    // be moved out of the guard without running its `Drop` (which would
    // release the write permits).
    #[cfg(all(tokio_unstable, feature = "tracing"))]
    resource_span: tracing::Span,
    permits_acquired: u32,
    s: &'a Semaphore,
    data: *mut T,
}
impl<'a, T: ?Sized> RwLockWriteGuard<'a, T> {
    /// Moves the fields out of the guard without running its `Drop` impl
    /// (which would release the write permits), so the mapping/downgrading
    /// helpers can transfer those permits to a new guard.
    fn skip_drop(self) -> Inner<'a, T> {
        let me = mem::ManuallyDrop::new(self);
        // SAFETY: This duplicates the values in every field of the guard, then
        // forgets the originals, so in the end no value is duplicated.
        Inner {
            #[cfg(all(tokio_unstable, feature = "tracing"))]
            resource_span: unsafe { std::ptr::read(&me.resource_span) },
            permits_acquired: me.permits_acquired,
            s: me.s,
            data: me.data,
        }
    }
/// Makes a new [`RwLockMappedWriteGuard`] for a component of the locked data.
///
/// This operation cannot fail as the `RwLockWriteGuard` passed in already
/// locked the data.
///
/// This is an associated function that needs to be used as
/// `RwLockWriteGuard::map(..)`. A method would interfere with methods of
/// the same name on the contents of the locked data.
///
/// This is an asynchronous version of [`RwLockWriteGuard::map`] from the
/// [`parking_lot` crate].
///
/// [`RwLockMappedWriteGuard`]: struct@crate::sync::RwLockMappedWriteGuard
/// [`RwLockWriteGuard::map`]: https://docs.rs/lock_api/latest/lock_api/struct.RwLockWriteGuard.html#method.map
/// [`parking_lot` crate]: https://crates.io/crates/parking_lot
///
/// # Examples
///
/// ```
/// use tokio::sync::{RwLock, RwLockWriteGuard};
///
/// #[derive(Debug, Clone, Copy, PartialEq, Eq)]
/// struct Foo(u32);
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let lock = RwLock::new(Foo(1));
///
/// {
/// let mut mapped = RwLockWriteGuard::map(lock.write().await, |f| &mut f.0);
/// *mapped = 2;
/// }
///
/// assert_eq!(Foo(2), *lock.read().await);
/// # }
/// ```
    #[inline]
    pub fn map<F, U: ?Sized>(mut this: Self, f: F) -> RwLockMappedWriteGuard<'a, U>
    where
        F: FnOnce(&mut T) -> &mut U,
    {
        // Compute the mapped pointer while the old guard is still intact,
        // then dismantle it without releasing the permits; the mapped guard
        // inherits all of them.
        let data = f(&mut *this) as *mut U;
        let this = this.skip_drop();
        RwLockMappedWriteGuard {
            permits_acquired: this.permits_acquired,
            s: this.s,
            data,
            marker: PhantomData,
            #[cfg(all(tokio_unstable, feature = "tracing"))]
            resource_span: this.resource_span,
        }
    }
    /// Makes a new [`RwLockReadGuard`] for a component of the locked data.
    ///
    /// This operation cannot fail as the `RwLockWriteGuard` passed in already
    /// locked the data.
    ///
    /// This is an associated function that needs to be used as
    /// `RwLockWriteGuard::downgrade_map(..)`. A method would interfere with methods of
    /// the same name on the contents of the locked data.
    ///
    /// This is equivalent to a combination of asynchronous [`RwLockWriteGuard::map`] and [`RwLockWriteGuard::downgrade`]
    /// from the [`parking_lot` crate].
    ///
    /// Inside of `f`, you retain exclusive access to the data, despite only being given a `&T`. Handing out a
    /// `&mut T` would result in unsoundness, as you could use interior mutability.
    ///
    /// [`RwLockMappedWriteGuard`]: struct@crate::sync::RwLockMappedWriteGuard
    /// [`RwLockWriteGuard::map`]: https://docs.rs/lock_api/latest/lock_api/struct.RwLockWriteGuard.html#method.map
    /// [`RwLockWriteGuard::downgrade`]: https://docs.rs/lock_api/latest/lock_api/struct.RwLockWriteGuard.html#method.downgrade
    /// [`parking_lot` crate]: https://crates.io/crates/parking_lot
    ///
    /// # Examples
    ///
    /// ```
    /// use tokio::sync::{RwLock, RwLockWriteGuard};
    ///
    /// #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    /// struct Foo(u32);
    ///
    /// # #[tokio::main(flavor = "current_thread")]
    /// # async fn main() {
    /// let lock = RwLock::new(Foo(1));
    ///
    /// let mapped = RwLockWriteGuard::downgrade_map(lock.write().await, |f| &f.0);
    /// let foo = lock.read().await;
    /// assert_eq!(foo.0, *mapped);
    /// # }
    /// ```
    #[inline]
    pub fn downgrade_map<F, U: ?Sized>(this: Self, f: F) -> RwLockReadGuard<'a, U>
    where
        F: FnOnce(&T) -> &U,
    {
        // Compute the component pointer while exclusive access is still held.
        let data = f(&*this) as *const U;
        // Defuse the write guard's destructor; its permits are handed over
        // manually below.
        let this = this.skip_drop();
        let guard = RwLockReadGuard {
            s: this.s,
            data,
            marker: PhantomData,
            #[cfg(all(tokio_unstable, feature = "tracing"))]
            resource_span: this.resource_span,
        };
        // Release all but one of the permits held by the write guard. The
        // retained permit now backs `guard`'s read access, which makes the
        // downgrade atomic with respect to other writers.
        let to_release = (this.permits_acquired - 1) as usize;
        this.s.release(to_release);
        #[cfg(all(tokio_unstable, feature = "tracing"))]
        guard.resource_span.in_scope(|| {
            tracing::trace!(
                target: "runtime::resource::state_update",
                write_locked = false,
                write_locked.op = "override",
            )
        });
        #[cfg(all(tokio_unstable, feature = "tracing"))]
        guard.resource_span.in_scope(|| {
            tracing::trace!(
                target: "runtime::resource::state_update",
                current_readers = 1,
                current_readers.op = "add",
            )
        });
        guard
    }
    /// Attempts to make a new [`RwLockMappedWriteGuard`] for a component of
    /// the locked data. The original guard is returned if the closure returns
    /// `None`.
    ///
    /// This operation cannot fail as the `RwLockWriteGuard` passed in already
    /// locked the data.
    ///
    /// This is an associated function that needs to be
    /// used as `RwLockWriteGuard::try_map(...)`. A method would interfere with
    /// methods of the same name on the contents of the locked data.
    ///
    /// This is an asynchronous version of [`RwLockWriteGuard::try_map`] from
    /// the [`parking_lot` crate].
    ///
    /// [`RwLockMappedWriteGuard`]: struct@crate::sync::RwLockMappedWriteGuard
    /// [`RwLockWriteGuard::try_map`]: https://docs.rs/lock_api/latest/lock_api/struct.RwLockWriteGuard.html#method.try_map
    /// [`parking_lot` crate]: https://crates.io/crates/parking_lot
    ///
    /// # Examples
    ///
    /// ```
    /// use tokio::sync::{RwLock, RwLockWriteGuard};
    ///
    /// #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    /// struct Foo(u32);
    ///
    /// # #[tokio::main(flavor = "current_thread")]
    /// # async fn main() {
    /// let lock = RwLock::new(Foo(1));
    ///
    /// {
    ///     let guard = lock.write().await;
    ///     let mut guard = RwLockWriteGuard::try_map(guard, |f| Some(&mut f.0)).expect("should not fail");
    ///     *guard = 2;
    /// }
    ///
    /// assert_eq!(Foo(2), *lock.read().await);
    /// # }
    /// ```
    #[inline]
    pub fn try_map<F, U: ?Sized>(
        mut this: Self,
        f: F,
    ) -> Result<RwLockMappedWriteGuard<'a, U>, Self>
    where
        F: FnOnce(&mut T) -> Option<&mut U>,
    {
        // If the closure declines, hand the intact guard back to the caller;
        // its destructor has not been defused yet, so nothing leaks.
        let data = match f(&mut *this) {
            Some(data) => data as *mut U,
            None => return Err(this),
        };
        // Defuse the destructor; the write permits move to the mapped guard.
        let this = this.skip_drop();
        Ok(RwLockMappedWriteGuard {
            permits_acquired: this.permits_acquired,
            s: this.s,
            data,
            marker: PhantomData,
            #[cfg(all(tokio_unstable, feature = "tracing"))]
            resource_span: this.resource_span,
        })
    }
    /// Attempts to make a new [`RwLockReadGuard`] for a component of
    /// the locked data. The original guard is returned if the closure returns
    /// `None`.
    ///
    /// This operation cannot fail as the `RwLockWriteGuard` passed in already
    /// locked the data.
    ///
    /// This is an associated function that needs to be
    /// used as `RwLockWriteGuard::try_downgrade_map(...)`. A method would interfere with
    /// methods of the same name on the contents of the locked data.
    ///
    /// This is equivalent to a combination of asynchronous [`RwLockWriteGuard::try_map`] and [`RwLockWriteGuard::downgrade`]
    /// from the [`parking_lot` crate].
    ///
    /// Inside of `f`, you retain exclusive access to the data, despite only being given a `&T`. Handing out a
    /// `&mut T` would result in unsoundness, as you could use interior mutability.
    ///
    /// If this function returns `Err(...)`, the lock is never unlocked nor downgraded.
    ///
    /// [`RwLockMappedWriteGuard`]: struct@crate::sync::RwLockMappedWriteGuard
    /// [`RwLockWriteGuard::try_map`]: https://docs.rs/lock_api/latest/lock_api/struct.RwLockWriteGuard.html#method.try_map
    /// [`RwLockWriteGuard::downgrade`]: https://docs.rs/lock_api/latest/lock_api/struct.RwLockWriteGuard.html#method.downgrade
    /// [`parking_lot` crate]: https://crates.io/crates/parking_lot
    ///
    /// # Examples
    ///
    /// ```
    /// use tokio::sync::{RwLock, RwLockWriteGuard};
    ///
    /// #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    /// struct Foo(u32);
    ///
    /// # #[tokio::main(flavor = "current_thread")]
    /// # async fn main() {
    /// let lock = RwLock::new(Foo(1));
    ///
    /// let guard = RwLockWriteGuard::try_downgrade_map(lock.write().await, |f| Some(&f.0)).expect("should not fail");
    /// let foo = lock.read().await;
    /// assert_eq!(foo.0, *guard);
    /// # }
    /// ```
    #[inline]
    pub fn try_downgrade_map<F, U: ?Sized>(this: Self, f: F) -> Result<RwLockReadGuard<'a, U>, Self>
    where
        F: FnOnce(&T) -> Option<&U>,
    {
        // If the closure declines, return the untouched write guard: the lock
        // stays write-locked, as documented above.
        let data = match f(&*this) {
            Some(data) => data as *const U,
            None => return Err(this),
        };
        // Defuse the write guard's destructor; permits are handed over below.
        let this = this.skip_drop();
        let guard = RwLockReadGuard {
            s: this.s,
            data,
            marker: PhantomData,
            #[cfg(all(tokio_unstable, feature = "tracing"))]
            resource_span: this.resource_span,
        };
        // Release all but one of the permits held by the write guard. The
        // retained permit now backs `guard`'s read access, which makes the
        // downgrade atomic with respect to other writers.
        let to_release = (this.permits_acquired - 1) as usize;
        this.s.release(to_release);
        #[cfg(all(tokio_unstable, feature = "tracing"))]
        guard.resource_span.in_scope(|| {
            tracing::trace!(
                target: "runtime::resource::state_update",
                write_locked = false,
                write_locked.op = "override",
            )
        });
        #[cfg(all(tokio_unstable, feature = "tracing"))]
        guard.resource_span.in_scope(|| {
            tracing::trace!(
                target: "runtime::resource::state_update",
                current_readers = 1,
                current_readers.op = "add",
            )
        });
        Ok(guard)
    }
/// Converts this `RwLockWriteGuard` into an `RwLockMappedWriteGuard`. This
/// method can be used to store a non-mapped guard in a struct field that
/// expects a mapped guard.
///
/// This is equivalent to calling `RwLockWriteGuard::map(guard, |me| me)`.
#[inline]
pub fn into_mapped(this: Self) -> RwLockMappedWriteGuard<'a, T> {
RwLockWriteGuard::map(this, |me| me)
}
    /// Atomically downgrades a write lock into a read lock without allowing
    /// any writers to take exclusive access of the lock in the meantime.
    ///
    /// **Note:** This won't *necessarily* allow any additional readers to acquire
    /// locks, since [`RwLock`] is fair and it is possible that a writer is next
    /// in line.
    ///
    /// Returns an RAII guard which will drop this read access of the `RwLock`
    /// when dropped.
    ///
    /// # Examples
    ///
    /// ```
    /// # use tokio::sync::RwLock;
    /// # use std::sync::Arc;
    /// #
    /// # #[tokio::main(flavor = "current_thread")]
    /// # async fn main() {
    /// let lock = Arc::new(RwLock::new(1));
    ///
    /// let n = lock.write().await;
    ///
    /// let cloned_lock = lock.clone();
    /// let handle = tokio::spawn(async move {
    ///     *cloned_lock.write().await = 2;
    /// });
    ///
    /// let n = n.downgrade();
    /// assert_eq!(*n, 1, "downgrade is atomic");
    ///
    /// drop(n);
    /// handle.await.unwrap();
    /// assert_eq!(*lock.read().await, 2, "second writer obtained write lock");
    /// # }
    /// ```
    ///
    /// [`RwLock`]: struct@crate::sync::RwLock
    pub fn downgrade(self) -> RwLockReadGuard<'a, T> {
        // Defuse the write guard's destructor; its permits are handed over
        // manually below.
        let this = self.skip_drop();
        let guard = RwLockReadGuard {
            s: this.s,
            data: this.data,
            marker: PhantomData,
            #[cfg(all(tokio_unstable, feature = "tracing"))]
            resource_span: this.resource_span,
        };
        // Release all but one of the permits held by the write guard. The
        // retained permit now backs `guard`'s read access, so no writer can
        // acquire the lock in between.
        let to_release = (this.permits_acquired - 1) as usize;
        this.s.release(to_release);
        #[cfg(all(tokio_unstable, feature = "tracing"))]
        guard.resource_span.in_scope(|| {
            tracing::trace!(
                target: "runtime::resource::state_update",
                write_locked = false,
                write_locked.op = "override",
            )
        });
        #[cfg(all(tokio_unstable, feature = "tracing"))]
        guard.resource_span.in_scope(|| {
            tracing::trace!(
                target: "runtime::resource::state_update",
                current_readers = 1,
                current_readers.op = "add",
            )
        });
        guard
    }
}
impl<T: ?Sized> ops::Deref for RwLockWriteGuard<'_, T> {
    type Target = T;
    fn deref(&self) -> &T {
        // SAFETY: the guard holds the write lock for its entire lifetime, so
        // `data` is valid and no conflicting mutable access can exist.
        unsafe { &*self.data }
    }
}
impl<T: ?Sized> ops::DerefMut for RwLockWriteGuard<'_, T> {
    fn deref_mut(&mut self) -> &mut T {
        // SAFETY: the guard holds exclusive (write) access for its entire
        // lifetime, so handing out `&mut T` cannot alias another reference.
        unsafe { &mut *self.data }
    }
}
// Debug output delegates to the protected value.
impl<'a, T: ?Sized> fmt::Debug for RwLockWriteGuard<'a, T>
where
    T: fmt::Debug,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Debug::fmt(&**self, f)
    }
}
// Display output delegates to the protected value.
impl<'a, T: ?Sized> fmt::Display for RwLockWriteGuard<'a, T>
where
    T: fmt::Display,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Display::fmt(&**self, f)
    }
}
impl<'a, T: ?Sized> Drop for RwLockWriteGuard<'a, T> {
    fn drop(&mut self) {
        // Return every permit taken when the write lock was acquired,
        // releasing exclusive access.
        self.s.release(self.permits_acquired as usize);
        #[cfg(all(tokio_unstable, feature = "tracing"))]
        self.resource_span.in_scope(|| {
            tracing::trace!(
                target: "runtime::resource::state_update",
                write_locked = false,
                write_locked.op = "override",
            )
        });
    }
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/sync/rwlock/write_guard_mapped.rs | tokio/src/sync/rwlock/write_guard_mapped.rs | use crate::sync::batch_semaphore::Semaphore;
use std::marker::PhantomData;
use std::{fmt, mem, ops};
/// RAII structure used to release the exclusive write access of a lock when
/// dropped.
///
/// This structure is created by [mapping] an [`RwLockWriteGuard`]. It is a
/// separate type from `RwLockWriteGuard` to disallow downgrading a mapped
/// guard, since doing so can cause undefined behavior.
///
/// [mapping]: method@crate::sync::RwLockWriteGuard::map
/// [`RwLockWriteGuard`]: struct@crate::sync::RwLockWriteGuard
#[clippy::has_significant_drop]
pub struct RwLockMappedWriteGuard<'a, T: ?Sized> {
    // When changing the fields in this struct, make sure to update the
    // `skip_drop` method.
    #[cfg(all(tokio_unstable, feature = "tracing"))]
    pub(super) resource_span: tracing::Span,
    // Number of semaphore permits held; released in `Drop`.
    pub(super) permits_acquired: u32,
    pub(super) s: &'a Semaphore,
    // Raw pointer to the mapped component of the protected data.
    pub(super) data: *mut T,
    pub(super) marker: PhantomData<&'a mut T>,
}
// Destructor-free mirror of the guard's fields, produced by `skip_drop` when
// ownership of the permits is transferred to another guard.
#[allow(dead_code)] // Unused fields are still used in Drop.
struct Inner<'a, T: ?Sized> {
    #[cfg(all(tokio_unstable, feature = "tracing"))]
    resource_span: tracing::Span,
    permits_acquired: u32,
    s: &'a Semaphore,
    data: *mut T,
}
impl<'a, T: ?Sized> RwLockMappedWriteGuard<'a, T> {
    // Moves the guard's fields out without running its `Drop` impl, so the
    // semaphore permits are *not* released; the caller takes responsibility
    // for them.
    fn skip_drop(self) -> Inner<'a, T> {
        let me = mem::ManuallyDrop::new(self);
        // SAFETY: This duplicates the values in every field of the guard, then
        // forgets the originals, so in the end no value is duplicated.
        Inner {
            #[cfg(all(tokio_unstable, feature = "tracing"))]
            resource_span: unsafe { std::ptr::read(&me.resource_span) },
            permits_acquired: me.permits_acquired,
            s: me.s,
            data: me.data,
        }
    }
    /// Makes a new `RwLockMappedWriteGuard` for a component of the locked data.
    ///
    /// This operation cannot fail as the `RwLockMappedWriteGuard` passed in already
    /// locked the data.
    ///
    /// This is an associated function that needs to be used as
    /// `RwLockMappedWriteGuard::map(..)`. A method would interfere with methods
    /// of the same name on the contents of the locked data.
    ///
    /// This is an asynchronous version of [`RwLockWriteGuard::map`] from the
    /// [`parking_lot` crate].
    ///
    /// [`RwLockWriteGuard::map`]: https://docs.rs/lock_api/latest/lock_api/struct.RwLockWriteGuard.html#method.map
    /// [`parking_lot` crate]: https://crates.io/crates/parking_lot
    ///
    /// # Examples
    ///
    /// ```
    /// use tokio::sync::{RwLock, RwLockWriteGuard};
    ///
    /// #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    /// struct Foo(u32);
    ///
    /// # #[tokio::main(flavor = "current_thread")]
    /// # async fn main() {
    /// let lock = RwLock::new(Foo(1));
    ///
    /// {
    ///     let mut mapped = RwLockWriteGuard::map(lock.write().await, |f| &mut f.0);
    ///     *mapped = 2;
    /// }
    ///
    /// assert_eq!(Foo(2), *lock.read().await);
    /// # }
    /// ```
    #[inline]
    pub fn map<F, U: ?Sized>(mut this: Self, f: F) -> RwLockMappedWriteGuard<'a, U>
    where
        F: FnOnce(&mut T) -> &mut U,
    {
        // Compute the component pointer while exclusive access is still held.
        let data = f(&mut *this) as *mut U;
        // Defuse `this`'s destructor so the permits transfer to the new guard.
        let this = this.skip_drop();
        RwLockMappedWriteGuard {
            permits_acquired: this.permits_acquired,
            s: this.s,
            data,
            marker: PhantomData,
            #[cfg(all(tokio_unstable, feature = "tracing"))]
            resource_span: this.resource_span,
        }
    }
    /// Attempts to make a new [`RwLockMappedWriteGuard`] for a component of
    /// the locked data. The original guard is returned if the closure returns
    /// `None`.
    ///
    /// This operation cannot fail as the `RwLockMappedWriteGuard` passed in already
    /// locked the data.
    ///
    /// This is an associated function that needs to be
    /// used as `RwLockMappedWriteGuard::try_map(...)`. A method would interfere
    /// with methods of the same name on the contents of the locked data.
    ///
    /// This is an asynchronous version of [`RwLockWriteGuard::try_map`] from
    /// the [`parking_lot` crate].
    ///
    /// [`RwLockWriteGuard::try_map`]: https://docs.rs/lock_api/latest/lock_api/struct.RwLockWriteGuard.html#method.try_map
    /// [`parking_lot` crate]: https://crates.io/crates/parking_lot
    ///
    /// # Examples
    ///
    /// ```
    /// use tokio::sync::{RwLock, RwLockWriteGuard};
    ///
    /// #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    /// struct Foo(u32);
    ///
    /// # #[tokio::main(flavor = "current_thread")]
    /// # async fn main() {
    /// let lock = RwLock::new(Foo(1));
    ///
    /// {
    ///     let guard = lock.write().await;
    ///     let mut guard = RwLockWriteGuard::try_map(guard, |f| Some(&mut f.0)).expect("should not fail");
    ///     *guard = 2;
    /// }
    ///
    /// assert_eq!(Foo(2), *lock.read().await);
    /// # }
    /// ```
    #[inline]
    pub fn try_map<F, U: ?Sized>(
        mut this: Self,
        f: F,
    ) -> Result<RwLockMappedWriteGuard<'a, U>, Self>
    where
        F: FnOnce(&mut T) -> Option<&mut U>,
    {
        // If the closure declines, hand the intact guard back to the caller.
        let data = match f(&mut *this) {
            Some(data) => data as *mut U,
            None => return Err(this),
        };
        // Defuse the destructor; the permits move to the new mapped guard.
        let this = this.skip_drop();
        Ok(RwLockMappedWriteGuard {
            permits_acquired: this.permits_acquired,
            s: this.s,
            data,
            marker: PhantomData,
            #[cfg(all(tokio_unstable, feature = "tracing"))]
            resource_span: this.resource_span,
        })
    }
    // Note: No `downgrade`, `downgrade_map` nor `try_downgrade_map` because they would be unsound,
    // as the guard may already have been mapped through interior mutability.
}
impl<T: ?Sized> ops::Deref for RwLockMappedWriteGuard<'_, T> {
    type Target = T;
    fn deref(&self) -> &T {
        // SAFETY: the guard holds the write lock for its entire lifetime, so
        // `data` is valid and no conflicting mutable access can exist.
        unsafe { &*self.data }
    }
}
impl<T: ?Sized> ops::DerefMut for RwLockMappedWriteGuard<'_, T> {
    fn deref_mut(&mut self) -> &mut T {
        // SAFETY: the guard holds exclusive (write) access for its entire
        // lifetime, so handing out `&mut T` cannot alias another reference.
        unsafe { &mut *self.data }
    }
}
// Debug output delegates to the protected value.
impl<'a, T: ?Sized> fmt::Debug for RwLockMappedWriteGuard<'a, T>
where
    T: fmt::Debug,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Debug::fmt(&**self, f)
    }
}
// Display output delegates to the protected value.
impl<'a, T: ?Sized> fmt::Display for RwLockMappedWriteGuard<'a, T>
where
    T: fmt::Display,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Display::fmt(&**self, f)
    }
}
impl<'a, T: ?Sized> Drop for RwLockMappedWriteGuard<'a, T> {
    fn drop(&mut self) {
        // Return every permit taken when the write lock was acquired,
        // releasing exclusive access.
        self.s.release(self.permits_acquired as usize);
        #[cfg(all(tokio_unstable, feature = "tracing"))]
        self.resource_span.in_scope(|| {
            tracing::trace!(
                target: "runtime::resource::state_update",
                write_locked = false,
                write_locked.op = "override",
            )
        });
    }
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/sync/rwlock/owned_write_guard_mapped.rs | tokio/src/sync/rwlock/owned_write_guard_mapped.rs | use crate::sync::rwlock::RwLock;
use std::marker::PhantomData;
use std::sync::Arc;
use std::{fmt, mem, ops, ptr};
/// Owned RAII structure used to release the exclusive write access of a lock when
/// dropped.
///
/// This structure is created by [mapping] an [`OwnedRwLockWriteGuard`]. It is a
/// separate type from `OwnedRwLockWriteGuard` to disallow downgrading a mapped
/// guard, since doing so can cause undefined behavior.
///
/// [mapping]: method@crate::sync::OwnedRwLockWriteGuard::map
/// [`OwnedRwLockWriteGuard`]: struct@crate::sync::OwnedRwLockWriteGuard
#[clippy::has_significant_drop]
pub struct OwnedRwLockMappedWriteGuard<T: ?Sized, U: ?Sized = T> {
    // When changing the fields in this struct, make sure to update the
    // `skip_drop` method.
    #[cfg(all(tokio_unstable, feature = "tracing"))]
    pub(super) resource_span: tracing::Span,
    // Number of semaphore permits held; released in `Drop`.
    pub(super) permits_acquired: u32,
    // Keeps the lock (and the data `data` points into) alive.
    pub(super) lock: Arc<RwLock<T>>,
    // Raw pointer to the mapped component of the protected data.
    pub(super) data: *mut U,
    pub(super) _p: PhantomData<T>,
}
// Destructor-free mirror of the guard's fields, produced by `skip_drop` when
// ownership of the permits is transferred to another guard.
#[allow(dead_code)] // Unused fields are still used in Drop.
struct Inner<T: ?Sized, U: ?Sized> {
    #[cfg(all(tokio_unstable, feature = "tracing"))]
    resource_span: tracing::Span,
    permits_acquired: u32,
    lock: Arc<RwLock<T>>,
    data: *const U,
}
impl<T: ?Sized, U: ?Sized> OwnedRwLockMappedWriteGuard<T, U> {
    // Moves the guard's fields out without running its `Drop` impl, so the
    // semaphore permits are *not* released; the caller takes responsibility
    // for them.
    fn skip_drop(self) -> Inner<T, U> {
        let me = mem::ManuallyDrop::new(self);
        // SAFETY: This duplicates the values in every field of the guard, then
        // forgets the originals, so in the end no value is duplicated.
        unsafe {
            Inner {
                #[cfg(all(tokio_unstable, feature = "tracing"))]
                resource_span: ptr::read(&me.resource_span),
                permits_acquired: me.permits_acquired,
                lock: ptr::read(&me.lock),
                data: me.data,
            }
        }
    }
    /// Makes a new `OwnedRwLockMappedWriteGuard` for a component of the locked
    /// data.
    ///
    /// This operation cannot fail as the `OwnedRwLockMappedWriteGuard` passed
    /// in already locked the data.
    ///
    /// This is an associated function that needs to be used as
    /// `OwnedRwLockWriteGuard::map(..)`. A method would interfere with methods
    /// of the same name on the contents of the locked data.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    /// use tokio::sync::{RwLock, OwnedRwLockWriteGuard};
    ///
    /// #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    /// struct Foo(u32);
    ///
    /// # #[tokio::main(flavor = "current_thread")]
    /// # async fn main() {
    /// let lock = Arc::new(RwLock::new(Foo(1)));
    ///
    /// {
    ///     let lock = Arc::clone(&lock);
    ///     let mut mapped = OwnedRwLockWriteGuard::map(lock.write_owned().await, |f| &mut f.0);
    ///     *mapped = 2;
    /// }
    ///
    /// assert_eq!(Foo(2), *lock.read().await);
    /// # }
    /// ```
    #[inline]
    pub fn map<F, V: ?Sized>(mut this: Self, f: F) -> OwnedRwLockMappedWriteGuard<T, V>
    where
        F: FnOnce(&mut U) -> &mut V,
    {
        // Compute the component pointer while exclusive access is still held.
        let data = f(&mut *this) as *mut V;
        // Defuse `this`'s destructor so the permits (and the `Arc`) transfer
        // to the new guard.
        let this = this.skip_drop();
        OwnedRwLockMappedWriteGuard {
            permits_acquired: this.permits_acquired,
            lock: this.lock,
            data,
            _p: PhantomData,
            #[cfg(all(tokio_unstable, feature = "tracing"))]
            resource_span: this.resource_span,
        }
    }
    /// Attempts to make a new `OwnedRwLockMappedWriteGuard` for a component
    /// of the locked data. The original guard is returned if the closure
    /// returns `None`.
    ///
    /// This operation cannot fail as the `OwnedRwLockMappedWriteGuard` passed
    /// in already locked the data.
    ///
    /// This is an associated function that needs to be
    /// used as `OwnedRwLockMappedWriteGuard::try_map(...)`. A method would interfere with
    /// methods of the same name on the contents of the locked data.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    /// use tokio::sync::{RwLock, OwnedRwLockWriteGuard};
    ///
    /// #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    /// struct Foo(u32);
    ///
    /// # #[tokio::main(flavor = "current_thread")]
    /// # async fn main() {
    /// let lock = Arc::new(RwLock::new(Foo(1)));
    ///
    /// {
    ///     let guard = Arc::clone(&lock).write_owned().await;
    ///     let mut guard = OwnedRwLockWriteGuard::try_map(guard, |f| Some(&mut f.0)).expect("should not fail");
    ///     *guard = 2;
    /// }
    ///
    /// assert_eq!(Foo(2), *lock.read().await);
    /// # }
    /// ```
    #[inline]
    pub fn try_map<F, V: ?Sized>(
        mut this: Self,
        f: F,
    ) -> Result<OwnedRwLockMappedWriteGuard<T, V>, Self>
    where
        F: FnOnce(&mut U) -> Option<&mut V>,
    {
        // If the closure declines, hand the intact guard back to the caller.
        let data = match f(&mut *this) {
            Some(data) => data as *mut V,
            None => return Err(this),
        };
        // Defuse the destructor; permits and the `Arc` move to the new guard.
        let this = this.skip_drop();
        Ok(OwnedRwLockMappedWriteGuard {
            permits_acquired: this.permits_acquired,
            lock: this.lock,
            data,
            _p: PhantomData,
            #[cfg(all(tokio_unstable, feature = "tracing"))]
            resource_span: this.resource_span,
        })
    }
    /// Returns a reference to the original `Arc<RwLock>`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::Arc;
    /// use tokio::sync::{
    ///     RwLock,
    ///     OwnedRwLockWriteGuard,
    ///     OwnedRwLockMappedWriteGuard,
    /// };
    ///
    /// # #[tokio::main(flavor = "current_thread")]
    /// # async fn main() {
    /// let lock = Arc::new(RwLock::new(1));
    ///
    /// let guard = lock.clone().write_owned().await;
    /// let guard = OwnedRwLockWriteGuard::map(guard, |x| x);
    /// assert!(Arc::ptr_eq(&lock, OwnedRwLockMappedWriteGuard::rwlock(&guard)));
    /// # }
    /// ```
    pub fn rwlock(this: &Self) -> &Arc<RwLock<T>> {
        // Simple accessor; the guard keeps this `Arc` alive for its lifetime.
        &this.lock
    }
impl<T: ?Sized, U: ?Sized> ops::Deref for OwnedRwLockMappedWriteGuard<T, U> {
    type Target = U;
    fn deref(&self) -> &U {
        // SAFETY: the guard holds the write lock and an `Arc` to the lock for
        // its entire lifetime, so `data` is valid and unaliased.
        unsafe { &*self.data }
    }
}
impl<T: ?Sized, U: ?Sized> ops::DerefMut for OwnedRwLockMappedWriteGuard<T, U> {
    fn deref_mut(&mut self) -> &mut U {
        // SAFETY: the guard holds exclusive (write) access for its entire
        // lifetime, so handing out `&mut U` cannot alias another reference.
        unsafe { &mut *self.data }
    }
}
// Debug output delegates to the protected value.
impl<T: ?Sized, U: ?Sized> fmt::Debug for OwnedRwLockMappedWriteGuard<T, U>
where
    U: fmt::Debug,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Debug::fmt(&**self, f)
    }
}
// Display output delegates to the protected value.
impl<T: ?Sized, U: ?Sized> fmt::Display for OwnedRwLockMappedWriteGuard<T, U>
where
    U: fmt::Display,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Display::fmt(&**self, f)
    }
}
impl<T: ?Sized, U: ?Sized> Drop for OwnedRwLockMappedWriteGuard<T, U> {
    fn drop(&mut self) {
        // Return every permit taken when the write lock was acquired,
        // releasing exclusive access. The `Arc` is dropped afterwards.
        self.lock.s.release(self.permits_acquired as usize);
        #[cfg(all(tokio_unstable, feature = "tracing"))]
        self.resource_span.in_scope(|| {
            tracing::trace!(
                target: "runtime::resource::state_update",
                write_locked = false,
                write_locked.op = "override",
            )
        });
    }
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/sync/tests/notify.rs | tokio/src/sync/tests/notify.rs | use crate::sync::Notify;
use std::future::Future;
use std::sync::Arc;
use std::task::{Context, RawWaker, RawWakerVTable, Waker};
#[cfg(all(target_family = "wasm", not(target_os = "wasi")))]
use wasm_bindgen_test::wasm_bindgen_test as test;
#[test]
fn notify_clones_waker_before_lock() {
    // Regression test: polling `Notified` must clone the caller's waker
    // *before* taking the `Notify` wait-list lock. The custom waker below
    // re-enters the same `Notify` from its `clone` hook, which would deadlock
    // if cloning happened while the lock was held.
    const VTABLE: &RawWakerVTable = &RawWakerVTable::new(clone_w, wake, wake_by_ref, drop_w);
    unsafe fn clone_w(data: *const ()) -> RawWaker {
        let ptr = data as *const Notify;
        // Keep the Arc's refcount in sync with the new RawWaker handle.
        unsafe {
            Arc::<Notify>::increment_strong_count(ptr);
        }
        // Or some other arbitrary code that shouldn't be executed while the
        // Notify wait list is locked.
        unsafe { (*ptr).notify_one() };
        RawWaker::new(data, VTABLE)
    }
    unsafe fn drop_w(data: *const ()) {
        // Reconstitute the Arc from the raw pointer and drop one reference.
        drop(unsafe { Arc::<Notify>::from_raw(data as *const Notify) });
    }
    unsafe fn wake(_data: *const ()) {
        unreachable!()
    }
    unsafe fn wake_by_ref(_data: *const ()) {
        unreachable!()
    }
    let notify = Arc::new(Notify::new());
    let notify2 = notify.clone();
    let waker =
        unsafe { Waker::from_raw(RawWaker::new(Arc::into_raw(notify2) as *const _, VTABLE)) };
    let mut cx = Context::from_waker(&waker);
    let future = notify.notified();
    pin!(future);
    // The result doesn't matter, we're just testing that we don't deadlock.
    let _ = future.poll(&mut cx);
}
#[cfg(panic = "unwind")]
#[test]
fn notify_waiters_handles_panicking_waker() {
    // A waker that panics inside `notify_waiters` must not corrupt the wait
    // list: the remaining registered waiters must still be woken afterwards.
    use futures::task::ArcWake;
    let notify = Arc::new(Notify::new());
    struct PanickingWaker(#[allow(dead_code)] Arc<Notify>);
    impl ArcWake for PanickingWaker {
        fn wake_by_ref(_arc_self: &Arc<Self>) {
            panic!("waker panicked");
        }
    }
    // Register one waiter whose waker panics when invoked.
    let bad_fut = notify.notified();
    pin!(bad_fut);
    let waker = futures::task::waker(Arc::new(PanickingWaker(notify.clone())));
    let mut cx = Context::from_waker(&waker);
    let _ = bad_fut.poll(&mut cx);
    // Register plenty of well-behaved waiters behind it.
    let mut futs = Vec::new();
    for _ in 0..32 {
        let mut fut = tokio_test::task::spawn(notify.notified());
        assert!(fut.poll().is_pending());
        futs.push(fut);
    }
    // The panic from the bad waker propagates out of `notify_waiters`...
    assert!(std::panic::catch_unwind(|| {
        notify.notify_waiters();
    })
    .is_err());
    // ...but every other waiter must still have been notified.
    for mut fut in futs {
        assert!(fut.poll().is_ready());
    }
}
#[test]
fn notify_simple() {
    // A single `notify_waiters` call must wake every pending waiter.
    let notify = Notify::new();

    let mut first = tokio_test::task::spawn(notify.notified());
    let mut second = tokio_test::task::spawn(notify.notified());

    assert!(first.poll().is_pending());
    assert!(second.poll().is_pending());

    notify.notify_waiters();

    assert!(first.poll().is_ready());
    assert!(second.poll().is_ready());
}
#[test]
#[cfg(not(target_family = "wasm"))]
fn watch_test() {
    // Smoke test: a `watch` send from a spawned task must complete a
    // receiver's `changed()` await on a current-thread runtime.
    let rt = crate::runtime::Builder::new_current_thread()
        .build()
        .unwrap();
    rt.block_on(async {
        let (tx, mut rx) = crate::sync::watch::channel(());
        crate::spawn(async move {
            let _ = tx.send(());
        });
        let _ = rx.changed().await;
    });
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/sync/tests/loom_list.rs | tokio/src/sync/tests/loom_list.rs | use crate::sync::mpsc::list;
use loom::thread;
use std::sync::Arc;
#[test]
// Loom model: multiple producers pushing into the mpsc list; the single
// consumer must observe every message, in per-producer FIFO order.
fn smoke() {
    use crate::sync::mpsc::block::Read;
    const NUM_TX: usize = 2;
    const NUM_MSG: usize = 2;
    loom::model(|| {
        let (tx, mut rx) = list::channel();
        let tx = Arc::new(tx);
        // Each producer thread pushes (thread id, sequence number) pairs.
        for th in 0..NUM_TX {
            let tx = tx.clone();
            thread::spawn(move || {
                for i in 0..NUM_MSG {
                    tx.push((th, i));
                }
            });
        }
        // `next[th]` is the sequence number expected next from thread `th`.
        let mut next = vec![0; NUM_TX];
        loop {
            match rx.pop(&tx) {
                Some(Read::Value((th, v))) => {
                    // Messages from one producer must arrive in push order.
                    assert_eq!(v, next[th]);
                    next[th] += 1;
                    if next.iter().all(|&i| i == NUM_MSG) {
                        break;
                    }
                }
                Some(Read::Closed) => {
                    // The channel is never closed in this model.
                    panic!();
                }
                None => {
                    // Nothing available yet; let the producers make progress.
                    thread::yield_now();
                }
            }
        }
    });
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/sync/tests/loom_atomic_waker.rs | tokio/src/sync/tests/loom_atomic_waker.rs | use crate::sync::task::AtomicWaker;
use loom::future::block_on;
use loom::sync::atomic::AtomicUsize;
use loom::thread;
use std::future::poll_fn;
use std::sync::atomic::Ordering::Relaxed;
use std::sync::Arc;
use std::task::Poll::{Pending, Ready};
// Shared state for the tests below: a count of notifications received and
// the `AtomicWaker` slot under test.
struct Chan {
    num: AtomicUsize,
    task: AtomicWaker,
}
#[test]
fn basic_notification() {
    // Loom model: concurrent `wake()` calls racing with `register_by_ref`
    // must never lose a wakeup — the future below must eventually observe
    // all increments and complete.
    const NUM_NOTIFY: usize = 2;
    loom::model(|| {
        let chan = Arc::new(Chan {
            num: AtomicUsize::new(0),
            task: AtomicWaker::new(),
        });
        for _ in 0..NUM_NOTIFY {
            let chan = chan.clone();
            thread::spawn(move || {
                chan.num.fetch_add(1, Relaxed);
                chan.task.wake();
            });
        }
        block_on(poll_fn(move |cx| {
            // Register first, then re-check the condition, so a wake that
            // raced with the check is not missed.
            chan.task.register_by_ref(cx.waker());
            if NUM_NOTIFY == chan.num.load(Relaxed) {
                return Ready(());
            }
            Pending
        }));
    });
}
#[test]
fn test_panicky_waker() {
    // Loom model: a waker whose `clone` panics while being registered must
    // leave the `AtomicWaker` in a usable state — later registrations and
    // wakeups must still work.
    use std::panic;
    use std::ptr;
    use std::task::{RawWaker, RawWakerVTable, Waker};
    static PANICKING_VTABLE: RawWakerVTable =
        RawWakerVTable::new(|_| panic!("clone"), |_| (), |_| (), |_| ());
    let panicking = unsafe { Waker::from_raw(RawWaker::new(ptr::null(), &PANICKING_VTABLE)) };
    // If you're working with this test (and I sure hope you never have to!),
    // uncomment the following section because there will be a lot of panics
    // which would otherwise log.
    //
    // We can't, however, leave it uncommented, because it's global.
    // panic::set_hook(Box::new(|_| ()));
    const NUM_NOTIFY: usize = 2;
    loom::model(move || {
        let chan = Arc::new(Chan {
            num: AtomicUsize::new(0),
            task: AtomicWaker::new(),
        });
        for _ in 0..NUM_NOTIFY {
            let chan = chan.clone();
            thread::spawn(move || {
                chan.num.fetch_add(1, Relaxed);
                chan.task.wake();
            });
        }
        // Note: this panic should have no effect on the overall state of the
        // waker and it should proceed as normal.
        //
        // A thread above might race to flag a wakeup, and a WAKING state will
        // be preserved if this expected panic races with that so the below
        // procedure should be allowed to continue uninterrupted.
        let _ = panic::catch_unwind(|| chan.task.register_by_ref(&panicking));
        block_on(poll_fn(move |cx| {
            chan.task.register_by_ref(cx.waker());
            if NUM_NOTIFY == chan.num.load(Relaxed) {
                return Ready(());
            }
            Pending
        }));
    });
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/sync/tests/loom_broadcast.rs | tokio/src/sync/tests/loom_broadcast.rs | use crate::sync::broadcast;
use crate::sync::broadcast::error::RecvError::{Closed, Lagged};
use loom::future::block_on;
use loom::sync::Arc;
use loom::thread;
use tokio_test::{assert_err, assert_ok};
#[test]
fn broadcast_send() {
    // Loom model: two senders racing on a capacity-2 channel. The receiver
    // must account for all 6 messages, either received or reported via
    // `Lagged(n)`.
    loom::model(|| {
        let (tx1, mut rx) = broadcast::channel(2);
        let tx1 = Arc::new(tx1);
        let tx2 = tx1.clone();
        let th1 = thread::spawn(move || {
            block_on(async {
                assert_ok!(tx1.send("one"));
                assert_ok!(tx1.send("two"));
                assert_ok!(tx1.send("three"));
            });
        });
        let th2 = thread::spawn(move || {
            block_on(async {
                assert_ok!(tx2.send("eins"));
                assert_ok!(tx2.send("zwei"));
                assert_ok!(tx2.send("drei"));
            });
        });
        block_on(async {
            // Count both delivered messages and those skipped due to lag.
            let mut num = 0;
            loop {
                match rx.recv().await {
                    Ok(_) => num += 1,
                    Err(Closed) => break,
                    Err(Lagged(n)) => num += n as usize,
                }
            }
            assert_eq!(num, 6);
        });
        assert_ok!(th1.join());
        assert_ok!(th2.join());
    });
}
// An `Arc` is used as the value in order to detect memory leaks.
#[test]
fn broadcast_two() {
    // Loom model: both subscribers must see both messages in order, then
    // observe `Closed` once the sender is dropped.
    loom::model(|| {
        let (tx, mut rx1) = broadcast::channel::<Arc<&'static str>>(16);
        let mut rx2 = tx.subscribe();
        let th1 = thread::spawn(move || {
            block_on(async {
                let v = assert_ok!(rx1.recv().await);
                assert_eq!(*v, "hello");
                let v = assert_ok!(rx1.recv().await);
                assert_eq!(*v, "world");
                match assert_err!(rx1.recv().await) {
                    Closed => {}
                    _ => panic!(),
                }
            });
        });
        let th2 = thread::spawn(move || {
            block_on(async {
                let v = assert_ok!(rx2.recv().await);
                assert_eq!(*v, "hello");
                let v = assert_ok!(rx2.recv().await);
                assert_eq!(*v, "world");
                match assert_err!(rx2.recv().await) {
                    Closed => {}
                    _ => panic!(),
                }
            });
        });
        assert_ok!(tx.send(Arc::new("hello")));
        assert_ok!(tx.send(Arc::new("world")));
        // Dropping the sender closes the channel for both receivers.
        drop(tx);
        assert_ok!(th1.join());
        assert_ok!(th2.join());
    });
}
#[test]
fn broadcast_wrap() {
    // Loom model: three sends into a capacity-2 channel force the ring
    // buffer to wrap; each receiver must account for all 3 messages, whether
    // received or skipped via `Lagged(n)`.
    loom::model(|| {
        let (tx, mut rx1) = broadcast::channel(2);
        let mut rx2 = tx.subscribe();
        let th1 = thread::spawn(move || {
            block_on(async {
                let mut num = 0;
                loop {
                    match rx1.recv().await {
                        Ok(_) => num += 1,
                        Err(Closed) => break,
                        Err(Lagged(n)) => num += n as usize,
                    }
                }
                assert_eq!(num, 3);
            });
        });
        let th2 = thread::spawn(move || {
            block_on(async {
                let mut num = 0;
                loop {
                    match rx2.recv().await {
                        Ok(_) => num += 1,
                        Err(Closed) => break,
                        Err(Lagged(n)) => num += n as usize,
                    }
                }
                assert_eq!(num, 3);
            });
        });
        assert_ok!(tx.send("one"));
        assert_ok!(tx.send("two"));
        assert_ok!(tx.send("three"));
        drop(tx);
        assert_ok!(th1.join());
        assert_ok!(th2.join());
    });
}
#[test]
fn drop_rx() {
    // Loom model: dropping one receiver concurrently with sends must not
    // disturb the other receiver, which still sees every message and then
    // `Closed`.
    loom::model(|| {
        let (tx, mut rx1) = broadcast::channel(16);
        let rx2 = tx.subscribe();
        let th1 = thread::spawn(move || {
            block_on(async {
                let v = assert_ok!(rx1.recv().await);
                assert_eq!(v, "one");
                let v = assert_ok!(rx1.recv().await);
                assert_eq!(v, "two");
                let v = assert_ok!(rx1.recv().await);
                assert_eq!(v, "three");
                match assert_err!(rx1.recv().await) {
                    Closed => {}
                    _ => panic!(),
                }
            });
        });
        let th2 = thread::spawn(move || {
            drop(rx2);
        });
        assert_ok!(tx.send("one"));
        assert_ok!(tx.send("two"));
        assert_ok!(tx.send("three"));
        drop(tx);
        assert_ok!(th1.join());
        assert_ok!(th2.join());
    });
}
#[test]
fn drop_multiple_rx_with_overflow() {
    // Loom model: overflow a tiny channel from racing senders while one
    // receiver drains and another is silently dropped; neither thread may
    // hang or panic.
    loom::model(move || {
        // It is essential to have multiple senders and receivers in this test case.
        let (tx, mut rx) = broadcast::channel(1);
        let _rx2 = tx.subscribe();
        let _ = tx.send(());
        let tx2 = tx.clone();
        let th1 = thread::spawn(move || {
            block_on(async {
                for _ in 0..100 {
                    let _ = tx2.send(());
                }
            });
        });
        let _ = tx.send(());
        let th2 = thread::spawn(move || {
            // Drain until the channel closes. `is_ok()` replaces the
            // `while let Ok(_) = ...` form flagged by
            // `clippy::redundant_pattern_matching`.
            block_on(async { while rx.recv().await.is_ok() {} });
        });
        assert_ok!(th1.join());
        assert_ok!(th2.join());
    });
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/sync/tests/loom_notify.rs | tokio/src/sync/tests/loom_notify.rs | use crate::sync::Notify;
use loom::future::block_on;
use loom::sync::Arc;
use loom::thread;
use tokio_test::{assert_pending, assert_ready};
/// `util::wake_list::NUM_WAKERS`
const WAKE_LIST_SIZE: usize = 32;
/// A `notify_one` racing with a waiting `notified()` must always wake it.
#[test]
fn notify_one() {
    loom::model(|| {
        let tx = Arc::new(Notify::new());
        let rx = tx.clone();
        let th = thread::spawn(move || {
            block_on(async {
                rx.notified().await;
            });
        });
        tx.notify_one();
        th.join().unwrap();
    });
}
/// Two `Notified` futures created before a concurrent `notify_waiters`
/// call must both complete.
#[test]
fn notify_waiters() {
    loom::model(|| {
        let notify = Arc::new(Notify::new());
        let tx = notify.clone();
        let notified1 = notify.notified();
        let notified2 = notify.notified();
        let th = thread::spawn(move || {
            tx.notify_waiters();
        });
        block_on(async {
            notified1.await;
            notified2.await;
        });
        th.join().unwrap();
    });
}
/// `notify_waiters`, `notify_one`, and a waiter all racing on separate
/// threads: the waiter must be woken by one of the two notifications.
#[test]
fn notify_waiters_and_one() {
    loom::model(|| {
        let notify = Arc::new(Notify::new());
        let tx1 = notify.clone();
        let tx2 = notify.clone();
        let th1 = thread::spawn(move || {
            tx1.notify_waiters();
        });
        let th2 = thread::spawn(move || {
            tx2.notify_one();
        });
        let th3 = thread::spawn(move || {
            let notified = notify.notified();
            block_on(async {
                notified.await;
            });
        });
        th1.join().unwrap();
        th2.join().unwrap();
        th3.join().unwrap();
    });
}
/// A single stored permit handed from thread to thread: each waiter
/// re-notifies after waking, so the final `notified()` must also complete.
#[test]
fn notify_multi() {
    loom::model(|| {
        let notify = Arc::new(Notify::new());
        let mut threads = vec![];
        for _ in 0..2 {
            let notify = notify.clone();
            threads.push(thread::spawn(move || {
                block_on(async {
                    notify.notified().await;
                    notify.notify_one();
                })
            }));
        }
        notify.notify_one();
        for thread in threads.drain(..) {
            thread.join().unwrap();
        }
        block_on(async {
            notify.notified().await;
        });
    });
}
/// Dropping a `Notified` future right after polling it (as a timeout
/// would) must not lose the notification: if the poll consumed the
/// permit, the waiter re-notifies so the second thread still progresses.
#[test]
fn notify_drop() {
    use std::future::{poll_fn, Future};
    use std::task::Poll;
    loom::model(|| {
        let notify = Arc::new(Notify::new());
        let rx1 = notify.clone();
        let rx2 = notify.clone();
        let th1 = thread::spawn(move || {
            let mut recv = Box::pin(rx1.notified());
            block_on(poll_fn(|cx| {
                // Poll exactly once, then drop `recv` regardless of the
                // result; pass the permit on if this poll consumed it.
                if recv.as_mut().poll(cx).is_ready() {
                    rx1.notify_one();
                }
                Poll::Ready(())
            }));
        });
        let th2 = thread::spawn(move || {
            block_on(async {
                rx2.notified().await;
                // Trigger second notification
                rx2.notify_one();
                rx2.notified().await;
            });
        });
        notify.notify_one();
        th1.join().unwrap();
        th2.join().unwrap();
    });
}
/// Polls two `Notified` futures and checks if poll results are consistent
/// with each other. If the first future is notified by a `notify_waiters`
/// call, then the second one must be notified as well.
#[test]
fn notify_waiters_poll_consistency() {
    fn notify_waiters_poll_consistency_variant(poll_setting: [bool; 2]) {
        let notify = Arc::new(Notify::new());
        let mut notified = [
            tokio_test::task::spawn(notify.notified()),
            tokio_test::task::spawn(notify.notified()),
        ];
        for i in 0..2 {
            if poll_setting[i] {
                assert_pending!(notified[i].poll());
            }
        }
        let tx = notify.clone();
        let th = thread::spawn(move || {
            tx.notify_waiters();
        });
        let res1 = notified[0].poll();
        let res2 = notified[1].poll();
        // If res1 is ready, then res2 must also be ready.
        assert!(res1.is_pending() || res2.is_ready());
        th.join().unwrap();
    }
    // We test different scenarios in which pending futures had or had not
    // been polled before the call to `notify_waiters`.
    loom::model(|| notify_waiters_poll_consistency_variant([false, false]));
    loom::model(|| notify_waiters_poll_consistency_variant([true, false]));
    loom::model(|| notify_waiters_poll_consistency_variant([false, true]));
    loom::model(|| notify_waiters_poll_consistency_variant([true, true]));
}
/// Polls two `Notified` futures and checks if poll results are consistent
/// with each other. If the first future is notified by a `notify_waiters`
/// call, then the second one must be notified as well.
///
/// Here we also add other `Notified` futures in between to force the two
/// tested futures to end up in different chunks.
#[test]
fn notify_waiters_poll_consistency_many() {
    fn notify_waiters_poll_consistency_many_variant(order: [usize; 2]) {
        let notify = Arc::new(Notify::new());
        // WAKE_LIST_SIZE + 1 waiters guarantee the queue spans more than
        // one wake-list chunk.
        let mut futs = (0..WAKE_LIST_SIZE + 1)
            .map(|_| tokio_test::task::spawn(notify.notified()))
            .collect::<Vec<_>>();
        assert_pending!(futs[order[0]].poll());
        for i in 2..futs.len() {
            assert_pending!(futs[i].poll());
        }
        assert_pending!(futs[order[1]].poll());
        let tx = notify.clone();
        let th = thread::spawn(move || {
            tx.notify_waiters();
        });
        let res1 = futs[0].poll();
        let res2 = futs[1].poll();
        // If res1 is ready, then res2 must also be ready.
        assert!(res1.is_pending() || res2.is_ready());
        th.join().unwrap();
    }
    // We test different scenarios in which futures are polled in different order.
    loom::model(|| notify_waiters_poll_consistency_many_variant([0, 1]));
    loom::model(|| notify_waiters_poll_consistency_many_variant([1, 0]));
}
/// Checks if a call to `notify_waiters` is observed as atomic when combined
/// with a concurrent call to `notify_one`.
#[test]
fn notify_waiters_is_atomic() {
    fn notify_waiters_is_atomic_variant(tested_fut_index: usize) {
        let notify = Arc::new(Notify::new());
        let mut futs = (0..WAKE_LIST_SIZE + 1)
            .map(|_| tokio_test::task::spawn(notify.notified()))
            .collect::<Vec<_>>();
        for fut in &mut futs {
            assert_pending!(fut.poll());
        }
        let tx = notify.clone();
        let th = thread::spawn(move || {
            tx.notify_waiters();
        });
        block_on(async {
            // If awaiting one of the futures completes, then we should be
            // able to assume that all pending futures are notified. Therefore
            // a notification from a subsequent `notify_one` call should not
            // be consumed by an old future.
            futs.remove(tested_fut_index).await;
            let mut new_fut = tokio_test::task::spawn(notify.notified());
            assert_pending!(new_fut.poll());
            notify.notify_one();
            // `new_fut` must consume the notification from `notify_one`.
            assert_ready!(new_fut.poll());
        });
        th.join().unwrap();
    }
    // We test different scenarios in which the tested future is at the beginning
    // or at the end of the waiters queue used by `Notify`.
    loom::model(|| notify_waiters_is_atomic_variant(0));
    loom::model(|| notify_waiters_is_atomic_variant(32));
}
/// Checks if a single call to `notify_waiters` does not get through two `Notified`
/// futures created and awaited sequentially like this:
/// ```ignore
/// notify.notified().await;
/// notify.notified().await;
/// ```
#[test]
fn notify_waiters_sequential_notified_await() {
    use crate::sync::oneshot;
    loom::model(|| {
        let notify = Arc::new(Notify::new());
        let (tx_fst, rx_fst) = oneshot::channel();
        let (tx_snd, rx_snd) = oneshot::channel();
        let receiver = thread::spawn({
            let notify = notify.clone();
            move || {
                block_on(async {
                    // Poll the first `Notified` to put it as the first waiter
                    // in the queue.
                    let mut first_notified = tokio_test::task::spawn(notify.notified());
                    assert_pending!(first_notified.poll());
                    // Create additional waiters to force `notify_waiters` to
                    // release the lock at least once.
                    let _task_pile = (0..WAKE_LIST_SIZE + 1)
                        .map(|_| {
                            let mut fut = tokio_test::task::spawn(notify.notified());
                            assert_pending!(fut.poll());
                            fut
                        })
                        .collect::<Vec<_>>();
                    // We are ready for the notify_waiters call.
                    tx_fst.send(()).unwrap();
                    first_notified.await;
                    // Poll the second `Notified` future to try to insert
                    // it to the waiters queue.
                    let mut second_notified = tokio_test::task::spawn(notify.notified());
                    assert_pending!(second_notified.poll());
                    // Wait for the `notify_waiters` to end and check if we
                    // are woken up.
                    rx_snd.await.unwrap();
                    assert_pending!(second_notified.poll());
                });
            }
        });
        // Wait for the signal and call `notify_waiters`.
        block_on(rx_fst).unwrap();
        notify.notify_waiters();
        tx_snd.send(()).unwrap();
        receiver.join().unwrap();
    });
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/sync/tests/loom_mpsc.rs | tokio/src/sync/tests/loom_mpsc.rs | use crate::sync::mpsc;
use loom::future::block_on;
use loom::sync::Arc;
use loom::thread;
use std::future::poll_fn;
use tokio_test::assert_ok;
/// A value sent on a bounded channel whose sender is then dropped is
/// still delivered; the next `recv` yields `None`.
#[test]
fn closing_tx() {
    loom::model(|| {
        let (tx, mut rx) = mpsc::channel(16);
        thread::spawn(move || {
            tx.try_send(()).unwrap();
            drop(tx);
        });
        let v = block_on(rx.recv());
        assert!(v.is_some());
        let v = block_on(rx.recv());
        assert!(v.is_none());
    });
}
/// Same as `closing_tx`, for the unbounded channel.
#[test]
fn closing_unbounded_tx() {
    loom::model(|| {
        let (tx, mut rx) = mpsc::unbounded_channel();
        thread::spawn(move || {
            tx.send(()).unwrap();
            drop(tx);
        });
        let v = block_on(rx.recv());
        assert!(v.is_some());
        let v = block_on(rx.recv());
        assert!(v.is_none());
    });
}
/// Dropping the receiver must complete `closed()` on every bounded sender.
#[test]
fn closing_bounded_rx() {
    loom::model(|| {
        let (tx1, rx) = mpsc::channel::<()>(16);
        let tx2 = tx1.clone();
        thread::spawn(move || {
            drop(rx);
        });
        block_on(tx1.closed());
        block_on(tx2.closed());
    });
}
/// A send, a `closed()` wait, and a receive+drop racing on three
/// threads: the value is delivered and `closed()` still resolves.
#[test]
fn closing_and_sending() {
    loom::model(|| {
        let (tx1, mut rx) = mpsc::channel::<()>(16);
        let tx1 = Arc::new(tx1);
        let tx2 = tx1.clone();
        let th1 = thread::spawn(move || {
            tx1.try_send(()).unwrap();
        });
        let th2 = thread::spawn(move || {
            block_on(tx2.closed());
        });
        let th3 = thread::spawn(move || {
            let v = block_on(rx.recv());
            assert!(v.is_some());
            drop(rx);
        });
        assert_ok!(th1.join());
        assert_ok!(th2.join());
        assert_ok!(th3.join());
    });
}
/// Same as `closing_bounded_rx`, for the unbounded channel.
#[test]
fn closing_unbounded_rx() {
    loom::model(|| {
        let (tx1, rx) = mpsc::unbounded_channel::<()>();
        let tx2 = tx1.clone();
        thread::spawn(move || {
            drop(rx);
        });
        block_on(tx1.closed());
        block_on(tx2.closed());
    });
}
/// Concurrently dropping all clones of a bounded sender closes the
/// channel: `recv` yields `None`.
#[test]
fn dropping_tx() {
    loom::model(|| {
        let (tx, mut rx) = mpsc::channel::<()>(16);
        for _ in 0..2 {
            let tx = tx.clone();
            thread::spawn(move || {
                drop(tx);
            });
        }
        drop(tx);
        let v = block_on(rx.recv());
        assert!(v.is_none());
    });
}
/// Same as `dropping_tx`, for the unbounded channel.
#[test]
fn dropping_unbounded_tx() {
    loom::model(|| {
        let (tx, mut rx) = mpsc::unbounded_channel::<()>();
        for _ in 0..2 {
            let tx = tx.clone();
            thread::spawn(move || {
                drop(tx);
            });
        }
        drop(tx);
        let v = block_on(rx.recv());
        assert!(v.is_none());
    });
}
/// Three workers gated by a semaphore take one message with `try_recv`
/// and put one back with `try_send`; the channel is pre-filled with
/// `PERMITS` messages, so every `try_recv`/`try_send` must succeed.
#[test]
fn try_recv() {
    loom::model(|| {
        use crate::sync::{mpsc, Semaphore};
        use loom::sync::{Arc, Mutex};
        const PERMITS: usize = 2;
        const TASKS: usize = 2;
        const CYCLES: usize = 1;
        // Shared state: the semaphore bounds concurrent workers to the
        // number of pre-filled messages; the receiver is mutex-guarded
        // because it is shared across threads.
        struct Context {
            sem: Arc<Semaphore>,
            tx: mpsc::Sender<()>,
            rx: Mutex<mpsc::Receiver<()>>,
        }
        fn run(ctx: &Context) {
            block_on(async {
                let permit = ctx.sem.acquire().await;
                assert_ok!(ctx.rx.lock().unwrap().try_recv());
                crate::task::yield_now().await;
                assert_ok!(ctx.tx.clone().try_send(()));
                drop(permit);
            });
        }
        let (tx, rx) = mpsc::channel(PERMITS);
        let sem = Arc::new(Semaphore::new(PERMITS));
        let ctx = Arc::new(Context {
            sem,
            tx,
            rx: Mutex::new(rx),
        });
        for _ in 0..PERMITS {
            assert_ok!(ctx.tx.clone().try_send(()));
        }
        let mut threads = Vec::new();
        for _ in 0..TASKS {
            let ctx = ctx.clone();
            threads.push(thread::spawn(move || {
                run(&ctx);
            }));
        }
        run(&ctx);
        for thread in threads {
            thread.join().unwrap();
        }
    });
}
/// After a completed `send`, `len()` must be non-zero even while a
/// second send races on another thread.
#[test]
fn len_nonzero_after_send() {
    loom::model(|| {
        let (send, recv) = mpsc::channel(10);
        let send2 = send.clone();
        let join = thread::spawn(move || {
            block_on(send2.send("message2")).unwrap();
        });
        block_on(send.send("message1")).unwrap();
        assert!(recv.len() != 0);
        join.join().unwrap();
    });
}
/// After a completed `send`, `is_empty()` must be false even while a
/// second send races on another thread.
#[test]
fn nonempty_after_send() {
    loom::model(|| {
        let (send, recv) = mpsc::channel(10);
        let send2 = send.clone();
        let join = thread::spawn(move || {
            block_on(send2.send("message2")).unwrap();
        });
        block_on(send.send("message1")).unwrap();
        assert!(!recv.is_empty());
        join.join().unwrap();
    });
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/sync/tests/loom_oneshot.rs | tokio/src/sync/tests/loom_oneshot.rs | use crate::sync::oneshot;
use loom::future::block_on;
use loom::thread;
use std::future::poll_fn;
use std::task::Poll::{Pending, Ready};
/// A send racing with a blocking receive delivers the value.
#[test]
fn smoke() {
    loom::model(|| {
        let (tx, rx) = oneshot::channel();
        thread::spawn(move || {
            tx.send(1).unwrap();
        });
        let value = block_on(rx).unwrap();
        assert_eq!(1, value);
    });
}
/// Polling the receiver once on one thread and, if it was still pending,
/// finishing the await on another thread (i.e. with a different waker)
/// must still deliver the value.
#[test]
fn changing_rx_task() {
    loom::model(|| {
        let (tx, mut rx) = oneshot::channel();
        thread::spawn(move || {
            tx.send(1).unwrap();
        });
        let rx = thread::spawn(move || {
            // Poll exactly once; report whether the value arrived and,
            // if not, hand the receiver back for a second task.
            let ready = block_on(poll_fn(|cx| match Pin::new(&mut rx).poll(cx) {
                Ready(Ok(value)) => {
                    assert_eq!(1, value);
                    Ready(true)
                }
                Ready(Err(_)) => unimplemented!(),
                Pending => Ready(false),
            }));
            if ready {
                None
            } else {
                Some(rx)
            }
        })
        .join()
        .unwrap();
        if let Some(rx) = rx {
            // Previous task parked, use a new task...
            let value = block_on(rx).unwrap();
            assert_eq!(1, value);
        }
    });
}
/// `close` followed by `try_recv` racing with a send must not panic.
#[test]
fn try_recv_close() {
    // reproduces https://github.com/tokio-rs/tokio/issues/4225
    loom::model(|| {
        let (tx, mut rx) = oneshot::channel();
        thread::spawn(move || {
            let _ = tx.send(());
        });
        rx.close();
        let _ = rx.try_recv();
    })
}
/// `close` followed by awaiting the receiver racing with a send must not
/// panic.
#[test]
fn recv_closed() {
    // reproduces https://github.com/tokio-rs/tokio/issues/4225
    loom::model(|| {
        let (tx, mut rx) = oneshot::channel();
        thread::spawn(move || {
            let _ = tx.send(1);
        });
        rx.close();
        let _ = block_on(rx);
    });
}
// TODO: Move this into `oneshot` proper.
use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};
/// Helper future: polls `tx.closed()` exactly once and resolves to
/// whether it was ready, without ever parking.
struct OnClose<'a> {
    tx: &'a mut oneshot::Sender<i32>,
}
impl<'a> OnClose<'a> {
    fn new(tx: &'a mut oneshot::Sender<i32>) -> Self {
        OnClose { tx }
    }
}
impl Future for OnClose<'_> {
    type Output = bool;
    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<bool> {
        let fut = self.get_mut().tx.closed();
        crate::pin!(fut);
        // Always completes after a single poll of `closed()`.
        Ready(fut.poll(cx).is_ready())
    }
}
/// Mirror of `changing_rx_task` for the sender side: if the first
/// single-poll of `closed()` stayed pending, a second task's poll must
/// still observe the receiver drop.
#[test]
fn changing_tx_task() {
    loom::model(|| {
        let (mut tx, rx) = oneshot::channel::<i32>();
        thread::spawn(move || {
            drop(rx);
        });
        let tx = thread::spawn(move || {
            let t1 = block_on(OnClose::new(&mut tx));
            if t1 {
                None
            } else {
                Some(tx)
            }
        })
        .join()
        .unwrap();
        if let Some(mut tx) = tx {
            // Previous task parked, use a new task...
            block_on(OnClose::new(&mut tx));
        }
    });
}
/// When `send` returns `Err(msg)`, the message was handed back to the
/// sender, so any drop of it must happen on the receiver side only.
/// The tx thread forgets the returned message to enforce this.
#[test]
fn checking_tx_send_ok_not_drop() {
    use std::borrow::Borrow;
    use std::cell::Cell;
    loom::thread_local! {
        static IS_RX: Cell<bool> = Cell::new(true);
    }
    struct Msg;
    impl Drop for Msg {
        fn drop(&mut self) {
            IS_RX.with(|is_rx: &Cell<_>| {
                // On `tx.send(msg)` returning `Err(msg)`,
                // we call `std::mem::forget(msg)`, so that
                // `drop` is not expected to be called in the
                // tx thread.
                assert!(is_rx.get());
            });
        }
    }
    let mut builder = loom::model::Builder::new();
    // Bound the exploration; an exhaustive model would be too large.
    builder.preemption_bound = Some(2);
    builder.check(|| {
        let (tx, rx) = oneshot::channel();
        // tx thread
        let tx_thread_join_handle = thread::spawn(move || {
            // Ensure that `Msg::drop` in this thread will see is_rx == false
            IS_RX.with(|is_rx: &Cell<_>| {
                is_rx.set(false);
            });
            if let Err(msg) = tx.send(Msg) {
                std::mem::forget(msg);
            }
        });
        // main thread is the rx thread
        drop(rx);
        tx_thread_join_handle.join().unwrap();
    });
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/sync/tests/atomic_waker.rs | tokio/src/sync/tests/atomic_waker.rs | use crate::sync::AtomicWaker;
use tokio_test::task;
use std::task::Waker;
// Compile-time assertions that `AtomicWaker` and `Waker` are Send + Sync.
#[allow(unused)]
trait AssertSend: Send {}
#[allow(unused)]
trait AssertSync: Sync {}
impl AssertSend for AtomicWaker {}
impl AssertSync for AtomicWaker {}
impl AssertSend for Waker {}
impl AssertSync for Waker {}
#[cfg(all(target_family = "wasm", not(target_os = "wasi")))]
use wasm_bindgen_test::wasm_bindgen_test as test;
/// Register, then wake: the registered task must be woken.
#[test]
fn basic_usage() {
    let mut waker = task::spawn(AtomicWaker::new());
    waker.enter(|cx, waker| waker.register_by_ref(cx.waker()));
    waker.wake();
    assert!(waker.is_woken());
}
/// A wake with no registered waker is a no-op; registering afterwards
/// must not observe the earlier wake.
#[test]
fn wake_without_register() {
    let mut waker = task::spawn(AtomicWaker::new());
    waker.wake();
    // Registering should not result in a notification
    waker.enter(|cx, waker| waker.register_by_ref(cx.waker()));
    assert!(!waker.is_woken());
}
/// Repeatedly runs the synchronization race below; 1000 iterations to
/// give the race a chance to manifest on real hardware.
#[test]
#[cfg_attr(target_family = "wasm", ignore)] // threads not supported
fn failed_wake_synchronizes() {
    for _ in 0..1000 {
        failed_wake_synchronizes_inner();
    }
}
/// One thread stores a flag and calls `take_waker` while the main thread
/// takes/re-registers concurrently. The assertion requires that either
/// the spawned thread's `take_waker` won the race (got `Some`), or its
/// preceding relaxed store became visible — i.e. a *failed* take still
/// synchronizes with the register on the other thread.
fn failed_wake_synchronizes_inner() {
    use futures::task::noop_waker_ref;
    use std::sync::atomic::{AtomicBool, Ordering};
    static DID_SYNCHRONIZE: AtomicBool = AtomicBool::new(false);
    // Reset because the static persists across the 1000 iterations.
    DID_SYNCHRONIZE.store(false, Ordering::Relaxed);
    let waker = AtomicWaker::new();
    waker.register_by_ref(noop_waker_ref());
    std::thread::scope(|s| {
        let jh = s.spawn(|| {
            DID_SYNCHRONIZE.store(true, Ordering::Relaxed);
            waker.take_waker()
        });
        waker.take_waker();
        waker.register_by_ref(noop_waker_ref());
        let did_synchronize = DID_SYNCHRONIZE.load(Ordering::Relaxed);
        let did_take = jh.join().unwrap().is_some();
        assert!(did_synchronize || did_take);
    });
}
/// If cloning the waker panics during `register_by_ref`, the
/// `AtomicWaker` must be left empty (not poisoned) and remain usable
/// with a well-behaved waker afterwards.
#[cfg(panic = "unwind")]
#[test]
#[cfg(not(target_family = "wasm"))] // wasm currently doesn't support unwinding
fn atomic_waker_panic_safe() {
    use std::panic;
    use std::ptr;
    use std::task::{RawWaker, RawWakerVTable, Waker};
    // A waker whose `clone` panics; `wake` paths are unreachable here.
    static PANICKING_VTABLE: RawWakerVTable = RawWakerVTable::new(
        |_| panic!("clone"),
        |_| unimplemented!("wake"),
        |_| unimplemented!("wake_by_ref"),
        |_| (),
    );
    static NONPANICKING_VTABLE: RawWakerVTable = RawWakerVTable::new(
        |_| RawWaker::new(ptr::null(), &NONPANICKING_VTABLE),
        |_| unimplemented!("wake"),
        |_| unimplemented!("wake_by_ref"),
        |_| (),
    );
    let panicking = unsafe { Waker::from_raw(RawWaker::new(ptr::null(), &PANICKING_VTABLE)) };
    let nonpanicking = unsafe { Waker::from_raw(RawWaker::new(ptr::null(), &NONPANICKING_VTABLE)) };
    let atomic_waker = AtomicWaker::new();
    let panicking = panic::AssertUnwindSafe(&panicking);
    let result = panic::catch_unwind(|| {
        let panic::AssertUnwindSafe(panicking) = panicking;
        atomic_waker.register_by_ref(panicking);
    });
    assert!(result.is_err());
    assert!(atomic_waker.take_waker().is_none());
    atomic_waker.register_by_ref(&nonpanicking);
    assert!(atomic_waker.take_waker().is_some());
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/sync/tests/mod.rs | tokio/src/sync/tests/mod.rs | cfg_not_loom! {
mod atomic_waker;
mod notify;
mod semaphore_batch;
}
cfg_loom! {
mod loom_atomic_waker;
mod loom_broadcast;
mod loom_list;
mod loom_mpsc;
mod loom_notify;
mod loom_oneshot;
mod loom_semaphore_batch;
mod loom_watch;
mod loom_rwlock;
mod loom_set_once;
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/sync/tests/semaphore_batch.rs | tokio/src/sync/tests/semaphore_batch.rs | use crate::sync::batch_semaphore::Semaphore;
use tokio_test::*;
// Reuse the bound declared on the public `Semaphore` type so the batch
// semaphore is exercised at exactly the same limit.
const MAX_PERMITS: usize = crate::sync::Semaphore::MAX_PERMITS;
#[cfg(all(target_family = "wasm", not(target_os = "wasi")))]
use wasm_bindgen_test::wasm_bindgen_test as test;
/// `acquire(1)` completes on first poll while permits are available.
#[test]
fn poll_acquire_one_available() {
    let s = Semaphore::new(100);
    assert_eq!(s.available_permits(), 100);
    // Polling for a permit succeeds immediately
    assert_ready_ok!(task::spawn(s.acquire(1)).poll());
    assert_eq!(s.available_permits(), 99);
}
/// Batched `acquire(5)` completes on first poll while permits suffice.
#[test]
fn poll_acquire_many_available() {
    let s = Semaphore::new(100);
    assert_eq!(s.available_permits(), 100);
    // Polling for a permit succeeds immediately
    assert_ready_ok!(task::spawn(s.acquire(5)).poll());
    assert_eq!(s.available_permits(), 95);
    assert_ready_ok!(task::spawn(s.acquire(5)).poll());
    assert_eq!(s.available_permits(), 90);
}
/// Single-permit `try_acquire` keeps succeeding while permits remain,
/// decrementing the available count by one each time.
#[test]
fn try_acquire_one_available() {
    let sem = Semaphore::new(100);
    assert_eq!(sem.available_permits(), 100);
    for remaining in [99, 98] {
        assert_ok!(sem.try_acquire(1));
        assert_eq!(sem.available_permits(), remaining);
    }
}
/// Batched `try_acquire(5)` keeps succeeding while enough permits
/// remain, decrementing the available count by the batch size.
#[test]
fn try_acquire_many_available() {
    let sem = Semaphore::new(100);
    assert_eq!(sem.available_permits(), 100);
    for remaining in [95, 90] {
        assert_ok!(sem.try_acquire(5));
        assert_eq!(sem.available_permits(), remaining);
    }
}
/// With zero permits left, a second `acquire` stays pending until a
/// `release`, and the released permit goes to the waiter (the available
/// count stays 0 until the waiter is gone).
#[test]
fn poll_acquire_one_unavailable() {
    let s = Semaphore::new(1);
    // Acquire the first permit
    assert_ready_ok!(task::spawn(s.acquire(1)).poll());
    assert_eq!(s.available_permits(), 0);
    let mut acquire_2 = task::spawn(s.acquire(1));
    // Try to acquire the second permit
    assert_pending!(acquire_2.poll());
    assert_eq!(s.available_permits(), 0);
    s.release(1);
    // Released permit is assigned to the waiter, not the free pool.
    assert_eq!(s.available_permits(), 0);
    assert!(acquire_2.is_woken());
    assert_ready_ok!(acquire_2.poll());
    assert_eq!(s.available_permits(), 0);
    s.release(1);
    assert_eq!(s.available_permits(), 1);
}
/// Pending batched acquires are served in FIFO order: releases
/// accumulate on the front waiter and do not wake later waiters early.
#[test]
fn poll_acquire_many_unavailable() {
    let s = Semaphore::new(5);
    // Acquire the first permit
    assert_ready_ok!(task::spawn(s.acquire(1)).poll());
    assert_eq!(s.available_permits(), 4);
    // Try to acquire the second permit
    let mut acquire_2 = task::spawn(s.acquire(5));
    assert_pending!(acquire_2.poll());
    // The pending acquire reserves the 4 remaining permits.
    assert_eq!(s.available_permits(), 0);
    // Try to acquire the third permit
    let mut acquire_3 = task::spawn(s.acquire(3));
    assert_pending!(acquire_3.poll());
    assert_eq!(s.available_permits(), 0);
    s.release(1);
    assert_eq!(s.available_permits(), 0);
    assert!(acquire_2.is_woken());
    assert_ready_ok!(acquire_2.poll());
    assert!(!acquire_3.is_woken());
    assert_eq!(s.available_permits(), 0);
    s.release(1);
    assert!(!acquire_3.is_woken());
    assert_eq!(s.available_permits(), 0);
    s.release(2);
    assert!(acquire_3.is_woken());
    assert_ready_ok!(acquire_3.poll());
}
/// `try_acquire` fails without blocking when no permit is available and
/// succeeds again after a release.
#[test]
fn try_acquire_one_unavailable() {
    let s = Semaphore::new(1);
    // Acquire the first permit
    assert_ok!(s.try_acquire(1));
    assert_eq!(s.available_permits(), 0);
    assert_err!(s.try_acquire(1));
    s.release(1);
    assert_eq!(s.available_permits(), 1);
    assert_ok!(s.try_acquire(1));
    s.release(1);
    assert_eq!(s.available_permits(), 1);
}
/// Batched `try_acquire` fails when fewer permits than requested remain.
#[test]
fn try_acquire_many_unavailable() {
    let s = Semaphore::new(5);
    // Acquire the first permit
    assert_ok!(s.try_acquire(1));
    assert_eq!(s.available_permits(), 4);
    assert_err!(s.try_acquire(5));
    s.release(1);
    assert_eq!(s.available_permits(), 5);
    assert_ok!(s.try_acquire(5));
    s.release(1);
    assert_eq!(s.available_permits(), 1);
    s.release(1);
    assert_eq!(s.available_permits(), 2);
}
/// An acquire on a zero-permit semaphore parks and is woken by `release`.
#[test]
fn poll_acquire_one_zero_permits() {
    let s = Semaphore::new(0);
    assert_eq!(s.available_permits(), 0);
    // Try to acquire the permit
    let mut acquire = task::spawn(s.acquire(1));
    assert_pending!(acquire.poll());
    s.release(1);
    assert!(acquire.is_woken());
    assert_ready_ok!(acquire.poll());
}
/// Constructing with exactly `MAX_PERMITS` is allowed.
#[test]
fn max_permits_doesnt_panic() {
    Semaphore::new(MAX_PERMITS);
}
/// Constructing with more than `MAX_PERMITS` panics.
#[test]
#[should_panic]
#[cfg(not(target_family = "wasm"))] // wasm currently doesn't support unwinding
fn validates_max_permits() {
    Semaphore::new(MAX_PERMITS + 1);
}
/// After `close`, every `acquire` fails immediately and the available
/// count is untouched.
#[test]
fn close_semaphore_prevents_acquire() {
    let s = Semaphore::new(5);
    s.close();
    assert_eq!(5, s.available_permits());
    assert_ready_err!(task::spawn(s.acquire(1)).poll());
    assert_eq!(5, s.available_permits());
    assert_ready_err!(task::spawn(s.acquire(1)).poll());
    assert_eq!(5, s.available_permits());
}
/// `close` wakes a pending waiter, which then observes the error.
#[test]
fn close_semaphore_notifies_permit1() {
    let s = Semaphore::new(0);
    let mut acquire = task::spawn(s.acquire(1));
    assert_pending!(acquire.poll());
    s.close();
    assert!(acquire.is_woken());
    assert_ready_err!(acquire.poll());
}
/// `close` wakes all pending waiters; permits released afterwards
/// return to the pool but acquires still fail.
#[test]
fn close_semaphore_notifies_permit2() {
    let s = Semaphore::new(2);
    // Acquire a couple of permits
    assert_ready_ok!(task::spawn(s.acquire(1)).poll());
    assert_ready_ok!(task::spawn(s.acquire(1)).poll());
    let mut acquire3 = task::spawn(s.acquire(1));
    let mut acquire4 = task::spawn(s.acquire(1));
    assert_pending!(acquire3.poll());
    assert_pending!(acquire4.poll());
    s.close();
    assert!(acquire3.is_woken());
    assert!(acquire4.is_woken());
    assert_ready_err!(acquire3.poll());
    assert_ready_err!(acquire4.poll());
    assert_eq!(0, s.available_permits());
    s.release(1);
    assert_eq!(1, s.available_permits());
    assert_ready_err!(task::spawn(s.acquire(1)).poll());
    s.release(1);
    assert_eq!(2, s.available_permits());
}
/// Dropping a pending `acquire` returns its partially-assigned permits
/// to the pool.
#[test]
fn cancel_acquire_releases_permits() {
    let s = Semaphore::new(10);
    s.try_acquire(4).expect("uncontended try_acquire succeeds");
    assert_eq!(6, s.available_permits());
    let mut acquire = task::spawn(s.acquire(8));
    assert_pending!(acquire.poll());
    // The pending acquire has reserved the remaining 6 permits.
    assert_eq!(0, s.available_permits());
    drop(acquire);
    assert_eq!(6, s.available_permits());
    assert_ok!(s.try_acquire(6));
}
/// A permit released from inside a waker drop (while the semaphore is
/// being polled) must still reach the pending acquire — the second loop
/// iteration must not deadlock.
#[test]
fn release_permits_at_drop() {
    use crate::sync::semaphore::*;
    use futures::task::ArcWake;
    use std::future::Future;
    use std::sync::Arc;
    let sem = Arc::new(Semaphore::new(1));
    // Waker that holds an owned permit and releases it when dropped.
    struct ReleaseOnDrop(#[allow(dead_code)] Option<OwnedSemaphorePermit>);
    impl ArcWake for ReleaseOnDrop {
        fn wake_by_ref(_arc_self: &Arc<Self>) {}
    }
    let mut fut = Box::pin(async {
        let _permit = sem.acquire().await.unwrap();
    });
    // Second iteration shouldn't deadlock.
    for _ in 0..=1 {
        let waker = futures::task::waker(Arc::new(ReleaseOnDrop(
            sem.clone().try_acquire_owned().ok(),
        )));
        let mut cx = std::task::Context::from_waker(&waker);
        assert!(fut.as_mut().poll(&mut cx).is_pending());
    }
}
/// `forget_permits` removes at most the currently available permits and
/// returns how many were actually forgotten.
#[test]
fn forget_permits_basic() {
    let s = Semaphore::new(10);
    assert_eq!(s.forget_permits(4), 4);
    assert_eq!(s.available_permits(), 6);
    assert_eq!(s.forget_permits(10), 6);
    assert_eq!(s.available_permits(), 0);
}
/// Mixing a pending acquire, a release, and `forget_permits` keeps the
/// permit accounting consistent.
#[test]
fn update_permits_many_times() {
    let s = Semaphore::new(5);
    let mut acquire = task::spawn(s.acquire(7));
    assert_pending!(acquire.poll());
    s.release(5);
    // 5 + 5 = 10 permits total; the waiter takes 7, leaving 3.
    assert_ready_ok!(acquire.poll());
    assert_eq!(s.available_permits(), 3);
    assert_eq!(s.forget_permits(3), 3);
    assert_eq!(s.available_permits(), 0);
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/sync/tests/loom_semaphore_batch.rs | tokio/src/sync/tests/loom_semaphore_batch.rs | use crate::sync::batch_semaphore::*;
use loom::future::block_on;
use loom::sync::atomic::AtomicUsize;
use loom::thread;
use std::future::{poll_fn, Future};
use std::pin::Pin;
use std::sync::atomic::Ordering::SeqCst;
use std::sync::Arc;
use std::task::Poll::Ready;
use std::task::{Context, Poll};
/// NUM actors share NUM permits; the `active` counter must never exceed
/// NUM while a permit is held.
#[test]
fn basic_usage() {
    const NUM: usize = 2;
    struct Shared {
        semaphore: Semaphore,
        active: AtomicUsize,
    }
    async fn actor(shared: Arc<Shared>) {
        shared.semaphore.acquire(1).await.unwrap();
        let actual = shared.active.fetch_add(1, SeqCst);
        assert!(actual <= NUM - 1);
        let actual = shared.active.fetch_sub(1, SeqCst);
        assert!(actual <= NUM);
        shared.semaphore.release(1);
    }
    loom::model(|| {
        let shared = Arc::new(Shared {
            semaphore: Semaphore::new(NUM),
            active: AtomicUsize::new(0),
        });
        for _ in 0..NUM {
            let shared = shared.clone();
            thread::spawn(move || {
                block_on(actor(shared));
            });
        }
        block_on(actor(shared));
    });
}
/// Two threads hand a single permit back and forth; both must complete.
#[test]
fn release() {
    loom::model(|| {
        let semaphore = Arc::new(Semaphore::new(1));
        {
            let semaphore = semaphore.clone();
            thread::spawn(move || {
                block_on(semaphore.acquire(1)).unwrap();
                semaphore.release(1);
            });
        }
        block_on(semaphore.acquire(1)).unwrap();
        semaphore.release(1);
    });
}
/// Closing the semaphore while workers acquire/release: an acquire may
/// fail after the close, which each worker surfaces via `?` and ignores.
#[test]
fn basic_closing() {
    const NUM: usize = 2;
    loom::model(|| {
        let semaphore = Arc::new(Semaphore::new(1));
        for _ in 0..NUM {
            let semaphore = semaphore.clone();
            thread::spawn(move || {
                for _ in 0..2 {
                    block_on(semaphore.acquire(1)).map_err(|_| ())?;
                    semaphore.release(1);
                }
                Ok::<(), ()>(())
            });
        }
        semaphore.close();
    });
}
/// Every thread closes the semaphore after its own acquire/release;
/// concurrent closes must be safe.
#[test]
fn concurrent_close() {
    const NUM: usize = 3;
    loom::model(|| {
        let semaphore = Arc::new(Semaphore::new(1));
        for _ in 0..NUM {
            let semaphore = semaphore.clone();
            thread::spawn(move || {
                block_on(semaphore.acquire(1)).map_err(|_| ())?;
                semaphore.release(1);
                semaphore.close();
                Ok::<(), ()>(())
            });
        }
    });
}
/// Acquire futures that are polled once and immediately dropped
/// (simulating a timeout-style cancellation) must not leak permits while
/// other threads race a bulk `release`.
#[test]
fn concurrent_cancel() {
    async fn poll_and_cancel(semaphore: Arc<Semaphore>) {
        let mut acquire1 = Some(semaphore.acquire(1));
        let mut acquire2 = Some(semaphore.acquire(1));
        poll_fn(|cx| {
            // poll the acquire future once, and then immediately throw
            // it away. this simulates a situation where a future is
            // polled and then cancelled, such as by a timeout.
            if let Some(acquire) = acquire1.take() {
                pin!(acquire);
                let _ = acquire.poll(cx);
            }
            if let Some(acquire) = acquire2.take() {
                pin!(acquire);
                let _ = acquire.poll(cx);
            }
            Poll::Ready(())
        })
        .await
    }
    loom::model(|| {
        let semaphore = Arc::new(Semaphore::new(0));
        let t1 = {
            let semaphore = semaphore.clone();
            thread::spawn(move || block_on(poll_and_cancel(semaphore)))
        };
        let t2 = {
            let semaphore = semaphore.clone();
            thread::spawn(move || block_on(poll_and_cancel(semaphore)))
        };
        let t3 = {
            let semaphore = semaphore.clone();
            thread::spawn(move || block_on(poll_and_cancel(semaphore)))
        };
        t1.join().unwrap();
        semaphore.release(10);
        t2.join().unwrap();
        t3.join().unwrap();
    });
}
/// Two threads acquire/release varying batch sizes; the tracked number
/// of in-flight permits must never exceed the semaphore's capacity, and
/// all 10 permits must be back at the end.
#[test]
fn batch() {
    let mut b = loom::model::Builder::new();
    // Bound preemptions to keep the state space tractable.
    b.preemption_bound = Some(1);
    b.check(|| {
        let semaphore = Arc::new(Semaphore::new(10));
        let active = Arc::new(AtomicUsize::new(0));
        let mut threads = vec![];
        for _ in 0..2 {
            let semaphore = semaphore.clone();
            let active = active.clone();
            threads.push(thread::spawn(move || {
                for n in &[4, 10, 8] {
                    block_on(semaphore.acquire(*n)).unwrap();
                    active.fetch_add(*n as usize, SeqCst);
                    let num_active = active.load(SeqCst);
                    assert!(num_active <= 10);
                    thread::yield_now();
                    active.fetch_sub(*n as usize, SeqCst);
                    semaphore.release(*n as usize);
                }
            }));
        }
        for thread in threads.into_iter() {
            thread.join().unwrap();
        }
        assert_eq!(10, semaphore.available_permits());
    });
}
/// A `release` racing with a pending batched `acquire` must complete the
/// waiter; final accounting returns to the full 10 permits.
#[test]
fn release_during_acquire() {
    loom::model(|| {
        let semaphore = Arc::new(Semaphore::new(10));
        semaphore
            .try_acquire(8)
            .expect("try_acquire should succeed; semaphore uncontended");
        let semaphore2 = semaphore.clone();
        let thread = thread::spawn(move || block_on(semaphore2.acquire(4)).unwrap());
        semaphore.release(8);
        thread.join().unwrap();
        semaphore.release(4);
        assert_eq!(10, semaphore.available_permits());
    })
}
/// `release(3)`, `try_acquire(1)`, and `forget_permits(2)` racing on
/// three threads must net out to the original 5 available permits
/// (+3 released, -1 still... the acquired permit is never returned, and
/// 2 are forgotten: 5 + 3 - 1 - 2 = 5).
#[test]
fn concurrent_permit_updates() {
    loom::model(move || {
        let semaphore = Arc::new(Semaphore::new(5));
        let t1 = {
            let semaphore = semaphore.clone();
            thread::spawn(move || semaphore.release(3))
        };
        let t2 = {
            let semaphore = semaphore.clone();
            thread::spawn(move || {
                semaphore
                    .try_acquire(1)
                    .expect("try_acquire should succeed")
            })
        };
        let t3 = {
            let semaphore = semaphore.clone();
            thread::spawn(move || semaphore.forget_permits(2))
        };
        t1.join().unwrap();
        t2.join().unwrap();
        t3.join().unwrap();
        assert_eq!(semaphore.available_permits(), 5);
    })
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/sync/tests/loom_set_once.rs | tokio/src/sync/tests/loom_set_once.rs | use crate::sync::SetOnce;
use loom::future::block_on;
use loom::sync::atomic::AtomicU32;
use loom::thread;
use std::sync::atomic::Ordering;
use std::sync::Arc;
#[derive(Clone)]
struct DropCounter {
pub drops: Arc<AtomicU32>,
}
impl DropCounter {
pub fn new() -> Self {
Self {
drops: Arc::new(AtomicU32::new(0)),
}
}
fn assert_num_drops(&self, value: u32) {
assert_eq!(value, self.drops.load(Ordering::Relaxed));
}
}
impl Drop for DropCounter {
fn drop(&mut self) {
self.drops.fetch_add(1, Ordering::Relaxed);
}
}
// Two threads race to `set` the same `SetOnce`; exactly one must win, and
// both payload clones must be dropped by the end (loser's immediately,
// winner's when the `SetOnce` is dropped).
#[test]
fn set_once_drop_test() {
    loom::model(|| {
        let set_once = Arc::new(SetOnce::new());
        let set_once_clone = Arc::clone(&set_once);
        let drop_counter = DropCounter::new();
        let counter_cl = drop_counter.clone();
        // Racing `set` from a second thread; returns whether it won.
        let thread = thread::spawn(move || set_once_clone.set(counter_cl).is_ok());
        let foo = drop_counter.clone();
        let set = set_once.set(foo).is_ok();
        let res = thread.join().unwrap();
        drop(set_once);
        // Both clones handed to `set` are gone now.
        drop_counter.assert_num_drops(2);
        // Exactly one of the two `set` calls succeeded.
        assert!(res != set);
    });
}
// `wait` must observe the value regardless of whether the setter thread
// runs before or after the waiter starts.
#[test]
fn set_once_wait_test() {
    loom::model(|| {
        let tx = Arc::new(SetOnce::new());
        let rx_one = tx.clone();
        let rx_two = tx.clone();
        let thread = thread::spawn(move || {
            assert!(rx_one.set(2).is_ok());
        });
        block_on(async {
            // Resolves once the setter thread has stored the value.
            assert_eq!(*rx_two.wait().await, 2);
        });
        thread.join().unwrap();
    });
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/sync/tests/loom_watch.rs | tokio/src/sync/tests/loom_watch.rs | use crate::sync::watch;
use loom::future::block_on;
use loom::thread;
use std::sync::Arc;
/// Loom model: one send must wake and be observed by every one of five
/// cloned watch receivers.
#[test]
fn smoke() {
    loom::model(|| {
        let (tx, first_rx) = watch::channel(1);
        let mut receivers = Vec::with_capacity(5);
        for _ in 0..4 {
            receivers.push(first_rx.clone());
        }
        receivers.push(first_rx);

        let sender = thread::spawn(move || {
            tx.send(2).unwrap();
        });

        // Each receiver independently observes the change notification
        // and the new value.
        for rx in &mut receivers {
            block_on(rx.changed()).unwrap();
            assert_eq!(*rx.borrow(), 2);
        }

        sender.join().unwrap();
    })
}
// `wait_for` must eventually resolve when `true` is sent, even while other
// senders concurrently fire no-op modifications that wake the waiter.
#[test]
fn wait_for_test() {
    loom::model(move || {
        let (tx, mut rx) = watch::channel(false);
        let tx_arc = Arc::new(tx);
        let tx1 = tx_arc.clone();
        let tx2 = tx_arc.clone();
        // Spurious wakeups: notify without changing the value.
        let th1 = thread::spawn(move || {
            for _ in 0..2 {
                tx1.send_modify(|_x| {});
            }
        });
        let th2 = thread::spawn(move || {
            tx2.send(true).unwrap();
        });
        assert_eq!(*block_on(rx.wait_for(|x| *x)).unwrap(), true);
        th1.join().unwrap();
        th2.join().unwrap();
    });
}
// `wait_for` must return exactly the value its predicate accepted, even if
// further sends land while the future completes.
#[test]
fn wait_for_returns_correct_value() {
    loom::model(move || {
        let (tx, mut rx) = watch::channel(0);
        let jh = thread::spawn(move || {
            tx.send(1).unwrap();
            tx.send(2).unwrap();
            tx.send(3).unwrap();
        });
        // Stop at the first value we are called at.
        let mut stopped_at = usize::MAX;
        let returned = *block_on(rx.wait_for(|x| {
            stopped_at = *x;
            true
        }))
        .unwrap();
        // Check that it returned the same value as the one we returned
        // `true` for.
        assert_eq!(stopped_at, returned);
        jh.join().unwrap();
    });
}
// Dropping one of two senders concurrently must not mark the channel
// closed; only after the last sender is gone does `has_changed` error.
#[test]
fn multiple_sender_drop_concurrently() {
    loom::model(move || {
        let (tx1, rx) = watch::channel(0);
        let tx2 = tx1.clone();
        let jh = thread::spawn(move || {
            drop(tx2);
        });
        // One sender still alive, so the channel is open.
        assert!(rx.has_changed().is_ok());
        drop(tx1);
        jh.join().unwrap();
        // Check if all sender are dropped and closed flag is set.
        assert!(rx.has_changed().is_err());
    });
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/sync/tests/loom_rwlock.rs | tokio/src/sync/tests/loom_rwlock.rs | use crate::sync::rwlock::*;
use loom::future::block_on;
use loom::thread;
use std::sync::Arc;
// Two concurrent writers (one via `write`, one via `write_owned`) must both
// apply their increments exactly once.
#[test]
fn concurrent_write() {
    let b = loom::model::Builder::new();
    b.check(|| {
        let rwlock = Arc::new(RwLock::<u32>::new(0));
        let rwclone = rwlock.clone();
        let t1 = thread::spawn(move || {
            block_on(async {
                let mut guard = rwclone.write().await;
                *guard += 5;
            });
        });
        let rwclone = rwlock.clone();
        let t2 = thread::spawn(move || {
            block_on(async {
                // Owned variant of the write guard; same exclusion semantics.
                let mut guard = rwclone.write_owned().await;
                *guard += 5;
            });
        });
        t1.join().expect("thread 1 write should not panic");
        t2.join().expect("thread 2 write should not panic");
        //when all threads have finished the value on the lock should be 10
        let guard = block_on(rwlock.read());
        assert_eq!(10, *guard);
    });
}
// Mixes two writers with concurrent readers. Readers may observe any of the
// intermediate totals (0, 5, or 10); the final value must be 10.
#[test]
fn concurrent_read_write() {
    let b = loom::model::Builder::new();
    b.check(|| {
        let rwlock = Arc::new(RwLock::<u32>::new(0));
        let rwclone = rwlock.clone();
        let t1 = thread::spawn(move || {
            block_on(async {
                let mut guard = rwclone.write().await;
                *guard += 5;
            });
        });
        let rwclone = rwlock.clone();
        let t2 = thread::spawn(move || {
            block_on(async {
                let mut guard = rwclone.write_owned().await;
                *guard += 5;
            });
        });
        let rwclone = rwlock.clone();
        let t3 = thread::spawn(move || {
            block_on(async {
                let guard = rwclone.read().await;
                //at this state the value on the lock may either be 0, 5, or 10
                assert!(*guard == 0 || *guard == 5 || *guard == 10);
            });
        });
        {
            // Reader on the main thread via the owned read guard.
            let guard = block_on(rwlock.clone().read_owned());
            //at this state the value on the lock may either be 0, 5, or 10
            assert!(*guard == 0 || *guard == 5 || *guard == 10);
        }
        t1.join().expect("thread 1 write should not panic");
        t2.join().expect("thread 2 write should not panic");
        t3.join().expect("thread 3 read should not panic");
        let guard = block_on(rwlock.read());
        //when all threads have finished the value on the lock should be 10
        assert_eq!(10, *guard);
    });
}
// A write guard downgraded to a read guard must keep excluding writers: the
// spawned writer cannot set the value to 2 until the read guard is dropped.
#[test]
fn downgrade() {
    loom::model(|| {
        let lock = Arc::new(RwLock::new(1));
        let n = block_on(lock.write());
        let cloned_lock = lock.clone();
        let handle = thread::spawn(move || {
            let mut guard = block_on(cloned_lock.write());
            *guard = 2;
        });
        // Downgrade while the other writer is (possibly) waiting.
        let n = n.downgrade();
        // The writer still cannot have run, so we must see the old value.
        assert_eq!(*n, 1);
        drop(n);
        handle.join().unwrap();
        assert_eq!(*block_on(lock.read()), 2);
    });
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/sync/mpsc/chan.rs | tokio/src/sync/mpsc/chan.rs | use crate::loom::cell::UnsafeCell;
use crate::loom::future::AtomicWaker;
use crate::loom::sync::atomic::AtomicUsize;
use crate::loom::sync::Arc;
use crate::runtime::park::CachedParkThread;
use crate::sync::mpsc::error::TryRecvError;
use crate::sync::mpsc::{bounded, list, unbounded};
use crate::sync::notify::Notify;
use crate::util::cacheline::CachePadded;
use std::fmt;
use std::panic;
use std::process;
use std::sync::atomic::Ordering::{AcqRel, Acquire, Relaxed, Release};
use std::task::Poll::{Pending, Ready};
use std::task::{ready, Context, Poll};
/// Channel sender.
pub(crate) struct Tx<T, S> {
    // Shared channel state. `S` is the flow-control strategy (bounded
    // semaphore or unbounded counter).
    inner: Arc<Chan<T, S>>,
}
impl<T, S: fmt::Debug> fmt::Debug for Tx<T, S> {
    /// Formats the sender by delegating to the shared channel state.
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut dbg = fmt.debug_struct("Tx");
        dbg.field("inner", &self.inner);
        dbg.finish()
    }
}
/// Channel receiver.
pub(crate) struct Rx<T, S: Semaphore> {
    // Shared channel state; the `rx_fields` inside are only touched
    // through this handle.
    inner: Arc<Chan<T, S>>,
}
impl<T, S: Semaphore + fmt::Debug> fmt::Debug for Rx<T, S> {
    /// Formats the receiver by delegating to the shared channel state.
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut dbg = fmt.debug_struct("Rx");
        dbg.field("inner", &self.inner);
        dbg.finish()
    }
}
/// Flow-control strategy shared by the bounded and unbounded channels.
pub(crate) trait Semaphore {
    /// Returns `true` when every permit is back, i.e. no messages in flight.
    fn is_idle(&self) -> bool;
    /// Returns one permit (called when a value is popped).
    fn add_permit(&self);
    /// Returns `n` permits at once.
    fn add_permits(&self, n: usize);
    /// Closes the semaphore, preventing further sends.
    fn close(&self);
    /// Returns `true` if `close` has been called.
    fn is_closed(&self) -> bool;
}
/// Channel state shared by all sender and receiver handles.
pub(super) struct Chan<T, S> {
    /// Handle to the push half of the lock-free list.
    ///
    /// Cache-padded, presumably to avoid false sharing with the fields the
    /// receiver touches — TODO confirm.
    tx: CachePadded<list::Tx<T>>,
    /// Receiver waker. Notified when a value is pushed into the channel.
    rx_waker: CachePadded<AtomicWaker>,
    /// Notifies all tasks listening for the receiver being dropped.
    notify_rx_closed: Notify,
    /// Coordinates access to channel's capacity.
    semaphore: S,
    /// Tracks the number of outstanding sender handles.
    ///
    /// When this drops to zero, the send half of the channel is closed.
    tx_count: AtomicUsize,
    /// Tracks the number of outstanding weak sender handles.
    tx_weak_count: AtomicUsize,
    /// Only accessed by `Rx` handle.
    rx_fields: UnsafeCell<RxFields<T>>,
}
impl<T, S> fmt::Debug for Chan<T, S>
where
    S: fmt::Debug,
{
    /// Formats the shared state; `rx_fields` is elided because it can only
    /// be accessed safely through the receiver handle.
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut dbg = fmt.debug_struct("Chan");
        dbg.field("tx", &*self.tx);
        dbg.field("semaphore", &self.semaphore);
        dbg.field("rx_waker", &*self.rx_waker);
        dbg.field("tx_count", &self.tx_count);
        dbg.field("rx_fields", &"...");
        dbg.finish()
    }
}
/// Fields only accessed by `Rx` handle.
struct RxFields<T> {
    /// Channel receiver. This field is only accessed by the `Receiver` type.
    list: list::Rx<T>,
    /// `true` if `Rx::close` is called.
    rx_closed: bool,
}
impl<T> fmt::Debug for RxFields<T> {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt.debug_struct("RxFields")
.field("list", &self.list)
.field("rx_closed", &self.rx_closed)
.finish()
}
}
// SAFETY(review): manual impls are needed because of the `UnsafeCell`.
// `rx_fields` is documented as only accessed by the single `Rx` handle,
// which presumably makes cross-thread sharing sound — TODO confirm the
// exact invariant in list.rs.
unsafe impl<T: Send, S: Send> Send for Chan<T, S> {}
unsafe impl<T: Send, S: Sync> Sync for Chan<T, S> {}
impl<T, S> panic::RefUnwindSafe for Chan<T, S> {}
impl<T, S> panic::UnwindSafe for Chan<T, S> {}
/// Creates a channel backed by the given flow-control strategy, returning
/// the paired sender and receiver handles.
pub(crate) fn channel<T, S: Semaphore>(semaphore: S) -> (Tx<T, S>, Rx<T, S>) {
    let (list_tx, list_rx) = list::channel();
    let chan = Arc::new(Chan {
        notify_rx_closed: Notify::new(),
        tx: CachePadded::new(list_tx),
        semaphore,
        rx_waker: CachePadded::new(AtomicWaker::new()),
        // One strong sender exists as soon as this function returns.
        tx_count: AtomicUsize::new(1),
        tx_weak_count: AtomicUsize::new(0),
        rx_fields: UnsafeCell::new(RxFields {
            list: list_rx,
            rx_closed: false,
        }),
    });
    let rx = Rx::new(chan.clone());
    (Tx::new(chan), rx)
}
// ===== impl Tx =====
impl<T, S> Tx<T, S> {
    fn new(chan: Arc<Chan<T, S>>) -> Tx<T, S> {
        Tx { inner: chan }
    }
    /// Number of outstanding strong sender handles.
    pub(super) fn strong_count(&self) -> usize {
        self.inner.tx_count.load(Acquire)
    }
    /// Number of outstanding weak sender handles (advisory; `Relaxed` read).
    pub(super) fn weak_count(&self) -> usize {
        self.inner.tx_weak_count.load(Relaxed)
    }
    /// Creates a weak reference: bumps the weak count and hands out the
    /// shared state without incrementing the strong count.
    pub(super) fn downgrade(&self) -> Arc<Chan<T, S>> {
        self.inner.increment_weak_count();
        self.inner.clone()
    }
    // Returns the upgraded channel or None if the upgrade failed.
    pub(super) fn upgrade(chan: Arc<Chan<T, S>>) -> Option<Self> {
        let mut tx_count = chan.tx_count.load(Acquire);
        // CAS loop: only increment the strong count if it is still nonzero,
        // so an upgrade can never resurrect a closed channel.
        loop {
            if tx_count == 0 {
                // channel is closed
                return None;
            }
            match chan
                .tx_count
                .compare_exchange_weak(tx_count, tx_count + 1, AcqRel, Acquire)
            {
                Ok(_) => return Some(Tx { inner: chan }),
                // `_weak` may fail spuriously; retry with the observed count.
                Err(prev_count) => tx_count = prev_count,
            }
        }
    }
    pub(super) fn semaphore(&self) -> &S {
        &self.inner.semaphore
    }
    /// Send a message and notify the receiver.
    pub(crate) fn send(&self, value: T) {
        self.inner.send(value);
    }
    /// Wake the receive half
    pub(crate) fn wake_rx(&self) {
        self.inner.rx_waker.wake();
    }
    /// Returns `true` if senders belong to the same channel.
    pub(crate) fn same_channel(&self, other: &Self) -> bool {
        Arc::ptr_eq(&self.inner, &other.inner)
    }
}
impl<T, S: Semaphore> Tx<T, S> {
    /// Returns `true` once the receive half has closed the channel (the
    /// semaphore is closed by `Rx::close`, including on receiver drop).
    pub(crate) fn is_closed(&self) -> bool {
        self.inner.semaphore.is_closed()
    }
    /// Completes when the receive half closes the channel.
    pub(crate) async fn closed(&self) {
        // In order to avoid a race condition, we first request a notification,
        // **then** check whether the semaphore is closed. If the semaphore is
        // closed the notification request is dropped.
        let notified = self.inner.notify_rx_closed.notified();
        if self.inner.semaphore.is_closed() {
            return;
        }
        notified.await;
    }
}
impl<T, S> Clone for Tx<T, S> {
    /// Cloning a sender increments the strong sender count.
    fn clone(&self) -> Tx<T, S> {
        // Using a Relaxed ordering here is sufficient as the caller holds a
        // strong ref to `self`, preventing a concurrent decrement to zero.
        self.inner.tx_count.fetch_add(1, Relaxed);
        Tx {
            inner: self.inner.clone(),
        }
    }
}
impl<T, S> Drop for Tx<T, S> {
    fn drop(&mut self) {
        // Only the thread that decrements the count to zero performs the
        // shutdown; `AcqRel` makes prior sends visible to that thread.
        if self.inner.tx_count.fetch_sub(1, AcqRel) != 1 {
            return;
        }
        // Close the list, which sends a `Close` message
        self.inner.tx.close();
        // Notify the receiver
        self.wake_rx();
    }
}
// ===== impl Rx =====
impl<T, S: Semaphore> Rx<T, S> {
    fn new(chan: Arc<Chan<T, S>>) -> Rx<T, S> {
        Rx { inner: chan }
    }
    /// Closes the receive half without dropping it; already-buffered values
    /// can still be drained afterwards.
    pub(crate) fn close(&mut self) {
        self.inner.rx_fields.with_mut(|rx_fields_ptr| {
            let rx_fields = unsafe { &mut *rx_fields_ptr };
            if rx_fields.rx_closed {
                return;
            }
            rx_fields.rx_closed = true;
        });
        // Fail pending/future sends, then wake tasks parked in `Tx::closed`.
        self.inner.semaphore.close();
        self.inner.notify_rx_closed.notify_waiters();
    }
    pub(crate) fn is_closed(&self) -> bool {
        // There are two internal states that can represent a closed channel
        //
        // 1. When `close` is called.
        // In this case, the inner semaphore will be closed.
        //
        // 2. When all senders are dropped.
        // In this case, the semaphore remains unclosed, and the `index` in the list won't
        // reach the tail position. It is necessary to check the list if the last block is
        // `closed`.
        self.inner.semaphore.is_closed() || self.inner.tx_count.load(Acquire) == 0
    }
    pub(crate) fn is_empty(&self) -> bool {
        self.inner.rx_fields.with(|rx_fields_ptr| {
            let rx_fields = unsafe { &*rx_fields_ptr };
            rx_fields.list.is_empty(&self.inner.tx)
        })
    }
    pub(crate) fn len(&self) -> usize {
        self.inner.rx_fields.with(|rx_fields_ptr| {
            let rx_fields = unsafe { &*rx_fields_ptr };
            rx_fields.list.len(&self.inner.tx)
        })
    }
    /// Receive the next value
    pub(crate) fn recv(&mut self, cx: &mut Context<'_>) -> Poll<Option<T>> {
        use super::block::Read;
        ready!(crate::trace::trace_leaf(cx));
        // Keep track of task budget
        let coop = ready!(crate::task::coop::poll_proceed(cx));
        self.inner.rx_fields.with_mut(|rx_fields_ptr| {
            let rx_fields = unsafe { &mut *rx_fields_ptr };
            // Popping a value returns its permit to the semaphore so a
            // blocked sender can proceed.
            macro_rules! try_recv {
                () => {
                    match rx_fields.list.pop(&self.inner.tx) {
                        Some(Read::Value(value)) => {
                            self.inner.semaphore.add_permit();
                            coop.made_progress();
                            return Ready(Some(value));
                        }
                        Some(Read::Closed) => {
                            // TODO: This check may not be required as it most
                            // likely can only return `true` at this point. A
                            // channel is closed when all tx handles are
                            // dropped. Dropping a tx handle releases memory,
                            // which ensures that if dropping the tx handle is
                            // visible, then all messages sent are also visible.
                            assert!(self.inner.semaphore.is_idle());
                            coop.made_progress();
                            return Ready(None);
                        }
                        None => {} // fall through
                    }
                };
            }
            try_recv!();
            self.inner.rx_waker.register_by_ref(cx.waker());
            // It is possible that a value was pushed between attempting to read
            // and registering the task, so we have to check the channel a
            // second time here.
            try_recv!();
            if rx_fields.rx_closed && self.inner.semaphore.is_idle() {
                coop.made_progress();
                Ready(None)
            } else {
                Pending
            }
        })
    }
    /// Receives up to `limit` values into `buffer`
    ///
    /// For `limit > 0`, receives up to limit values into `buffer`.
    /// For `limit == 0`, immediately returns Ready(0).
    pub(crate) fn recv_many(
        &mut self,
        cx: &mut Context<'_>,
        buffer: &mut Vec<T>,
        limit: usize,
    ) -> Poll<usize> {
        use super::block::Read;
        ready!(crate::trace::trace_leaf(cx));
        // Keep track of task budget
        let coop = ready!(crate::task::coop::poll_proceed(cx));
        if limit == 0 {
            coop.made_progress();
            return Ready(0usize);
        }
        let mut remaining = limit;
        let initial_length = buffer.len();
        self.inner.rx_fields.with_mut(|rx_fields_ptr| {
            let rx_fields = unsafe { &mut *rx_fields_ptr };
            // Pops values in a batch; permits are returned in bulk once the
            // batch size is known.
            macro_rules! try_recv {
                () => {
                    while remaining > 0 {
                        match rx_fields.list.pop(&self.inner.tx) {
                            Some(Read::Value(value)) => {
                                remaining -= 1;
                                buffer.push(value);
                            }
                            Some(Read::Closed) => {
                                let number_added = buffer.len() - initial_length;
                                if number_added > 0 {
                                    self.inner.semaphore.add_permits(number_added);
                                }
                                // TODO: This check may not be required as it most
                                // likely can only return `true` at this point. A
                                // channel is closed when all tx handles are
                                // dropped. Dropping a tx handle releases memory,
                                // which ensures that if dropping the tx handle is
                                // visible, then all messages sent are also visible.
                                assert!(self.inner.semaphore.is_idle());
                                coop.made_progress();
                                return Ready(number_added);
                            }
                            None => {
                                break; // fall through
                            }
                        }
                    }
                    let number_added = buffer.len() - initial_length;
                    if number_added > 0 {
                        self.inner.semaphore.add_permits(number_added);
                        coop.made_progress();
                        return Ready(number_added);
                    }
                };
            }
            try_recv!();
            self.inner.rx_waker.register_by_ref(cx.waker());
            // It is possible that a value was pushed between attempting to read
            // and registering the task, so we have to check the channel a
            // second time here.
            try_recv!();
            if rx_fields.rx_closed && self.inner.semaphore.is_idle() {
                assert!(buffer.is_empty());
                coop.made_progress();
                Ready(0usize)
            } else {
                Pending
            }
        })
    }
    /// Try to receive the next value.
    ///
    /// Note: may briefly block (park) the calling thread while an in-flight
    /// `send` finishes, rather than report a spurious `Empty`.
    pub(crate) fn try_recv(&mut self) -> Result<T, TryRecvError> {
        use super::list::TryPopResult;
        self.inner.rx_fields.with_mut(|rx_fields_ptr| {
            let rx_fields = unsafe { &mut *rx_fields_ptr };
            macro_rules! try_recv {
                () => {
                    match rx_fields.list.try_pop(&self.inner.tx) {
                        TryPopResult::Ok(value) => {
                            self.inner.semaphore.add_permit();
                            return Ok(value);
                        }
                        TryPopResult::Closed => return Err(TryRecvError::Disconnected),
                        // If close() was called, an empty queue should report Disconnected.
                        TryPopResult::Empty if rx_fields.rx_closed => {
                            return Err(TryRecvError::Disconnected)
                        }
                        TryPopResult::Empty => return Err(TryRecvError::Empty),
                        TryPopResult::Busy => {} // fall through
                    }
                };
            }
            try_recv!();
            // If a previous `poll_recv` call has set a waker, we wake it here.
            // This allows us to put our own CachedParkThread waker in the
            // AtomicWaker slot instead.
            //
            // This is not a spurious wakeup to `poll_recv` since we just got a
            // Busy from `try_pop`, which only happens if there are messages in
            // the queue.
            self.inner.rx_waker.wake();
            // Park the thread until the problematic send has completed.
            let mut park = CachedParkThread::new();
            let waker = park.waker().unwrap();
            loop {
                self.inner.rx_waker.register_by_ref(&waker);
                // It is possible that the problematic send has now completed,
                // so we have to check for messages again.
                try_recv!();
                park.park();
            }
        })
    }
    pub(super) fn semaphore(&self) -> &S {
        &self.inner.semaphore
    }
    /// Number of outstanding strong sender handles.
    pub(super) fn sender_strong_count(&self) -> usize {
        self.inner.tx_count.load(Acquire)
    }
    /// Number of outstanding weak sender handles (advisory; `Relaxed` read).
    pub(super) fn sender_weak_count(&self) -> usize {
        self.inner.tx_weak_count.load(Relaxed)
    }
}
impl<T, S: Semaphore> Drop for Rx<T, S> {
    fn drop(&mut self) {
        use super::block::Read::Value;
        self.close();
        self.inner.rx_fields.with_mut(|rx_fields_ptr| {
            let rx_fields = unsafe { &mut *rx_fields_ptr };
            // Guard re-runs the drain from its own `Drop`, which appears
            // intended to keep releasing permits even if popping a value
            // panics (e.g. in `T`'s destructor) — TODO confirm.
            struct Guard<'a, T, S: Semaphore> {
                list: &'a mut list::Rx<T>,
                tx: &'a list::Tx<T>,
                sem: &'a S,
            }
            impl<'a, T, S: Semaphore> Guard<'a, T, S> {
                fn drain(&mut self) {
                    // call T's destructor.
                    while let Some(Value(_)) = self.list.pop(self.tx) {
                        self.sem.add_permit();
                    }
                }
            }
            impl<'a, T, S: Semaphore> Drop for Guard<'a, T, S> {
                fn drop(&mut self) {
                    self.drain();
                }
            }
            let mut guard = Guard {
                list: &mut rx_fields.list,
                tx: &self.inner.tx,
                sem: &self.inner.semaphore,
            };
            guard.drain();
        });
    }
}
// ===== impl Chan =====
impl<T, S> Chan<T, S> {
    /// Pushes a value and then wakes the receiver; the push must happen
    /// first so the woken task finds the value.
    fn send(&self, value: T) {
        // Push the value
        self.tx.push(value);
        // Notify the rx task
        self.rx_waker.wake();
    }
    /// Decrements the weak sender count (advisory counter, `Relaxed`).
    pub(super) fn decrement_weak_count(&self) {
        self.tx_weak_count.fetch_sub(1, Relaxed);
    }
    /// Increments the weak sender count (advisory counter, `Relaxed`).
    pub(super) fn increment_weak_count(&self) {
        self.tx_weak_count.fetch_add(1, Relaxed);
    }
    pub(super) fn strong_count(&self) -> usize {
        self.tx_count.load(Acquire)
    }
    pub(super) fn weak_count(&self) -> usize {
        self.tx_weak_count.load(Relaxed)
    }
}
impl<T, S> Drop for Chan<T, S> {
    fn drop(&mut self) {
        use super::block::Read::Value;
        // Safety: the only owner of the rx fields is Chan, and being
        // inside its own Drop means we're the last ones to touch it.
        self.rx_fields.with_mut(|rx_fields_ptr| {
            let rx_fields = unsafe { &mut *rx_fields_ptr };
            // Drop any values still queued, then free the list's blocks.
            while let Some(Value(_)) = rx_fields.list.pop(&self.tx) {}
            unsafe { rx_fields.list.free_blocks() };
        });
    }
}
// ===== impl Semaphore for (::Semaphore, capacity) =====
// Bounded channels delegate flow control to a real async semaphore whose
// permit count equals the channel capacity (`bound`).
impl Semaphore for bounded::Semaphore {
    fn add_permit(&self) {
        self.semaphore.release(1);
    }
    fn add_permits(&self, n: usize) {
        self.semaphore.release(n)
    }
    fn is_idle(&self) -> bool {
        // All permits returned means no messages are in flight.
        self.semaphore.available_permits() == self.bound
    }
    fn close(&self) {
        self.semaphore.close();
    }
    fn is_closed(&self) -> bool {
        self.semaphore.is_closed()
    }
}
// ===== impl Semaphore for AtomicUsize =====
// The unbounded "semaphore" is a single atomic: bit 0 is the closed flag,
// and the remaining bits (value >> 1) count queued messages. Popping a
// value subtracts 2; the matching add presumably happens on send — TODO
// confirm in the unbounded sender path.
impl Semaphore for unbounded::Semaphore {
    fn add_permit(&self) {
        let prev = self.0.fetch_sub(2, Release);
        if prev >> 1 == 0 {
            // Something went wrong: more values popped than were ever
            // pushed. Abort rather than corrupt the counter.
            process::abort();
        }
    }
    fn add_permits(&self, n: usize) {
        let prev = self.0.fetch_sub(n << 1, Release);
        if (prev >> 1) < n {
            // Something went wrong
            process::abort();
        }
    }
    fn is_idle(&self) -> bool {
        // No queued messages (ignores the closed bit).
        self.0.load(Acquire) >> 1 == 0
    }
    fn close(&self) {
        // Set the closed bit without disturbing the message count.
        self.0.fetch_or(1, Release);
    }
    fn is_closed(&self) -> bool {
        self.0.load(Acquire) & 1 == 1
    }
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/sync/mpsc/unbounded.rs | tokio/src/sync/mpsc/unbounded.rs | use crate::loom::sync::{atomic::AtomicUsize, Arc};
use crate::sync::mpsc::chan;
use crate::sync::mpsc::error::{SendError, TryRecvError};
use std::fmt;
use std::task::{Context, Poll};
/// Send values to the associated `UnboundedReceiver`.
///
/// Instances are created by the [`unbounded_channel`] function.
pub struct UnboundedSender<T> {
    // Generic channel sender specialized with the unbounded counter.
    chan: chan::Tx<T, Semaphore>,
}
/// An unbounded sender that does not prevent the channel from being closed.
///
/// If all [`UnboundedSender`] instances of a channel were dropped and only
/// `WeakUnboundedSender` instances remain, the channel is closed.
///
/// In order to send messages, the `WeakUnboundedSender` needs to be upgraded using
/// [`WeakUnboundedSender::upgrade`], which returns `Option<UnboundedSender>`. It returns `None`
/// if all `UnboundedSender`s have been dropped, and otherwise it returns an `UnboundedSender`.
///
/// [`UnboundedSender`]: UnboundedSender
/// [`WeakUnboundedSender::upgrade`]: WeakUnboundedSender::upgrade
///
/// # Examples
///
/// ```
/// use tokio::sync::mpsc::unbounded_channel;
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let (tx, _rx) = unbounded_channel::<i32>();
/// let tx_weak = tx.downgrade();
///
/// // Upgrading will succeed because `tx` still exists.
/// assert!(tx_weak.upgrade().is_some());
///
/// // If we drop `tx`, then it will fail.
/// drop(tx);
/// assert!(tx_weak.clone().upgrade().is_none());
/// # }
/// ```
pub struct WeakUnboundedSender<T> {
    // Keeps the channel allocation alive without counting as a strong
    // sender, so it does not keep the channel open.
    chan: Arc<chan::Chan<T, Semaphore>>,
}
impl<T> Clone for UnboundedSender<T> {
    /// Produces another sender for the same channel.
    fn clone(&self) -> Self {
        let chan = self.chan.clone();
        UnboundedSender { chan }
    }
}
impl<T> fmt::Debug for UnboundedSender<T> {
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut dbg = fmt.debug_struct("UnboundedSender");
        dbg.field("chan", &self.chan);
        dbg.finish()
    }
}
/// Receive values from the associated `UnboundedSender`.
///
/// Instances are created by the [`unbounded_channel`] function.
///
/// This receiver can be turned into a `Stream` using [`UnboundedReceiverStream`].
///
/// [`UnboundedReceiverStream`]: https://docs.rs/tokio-stream/0.1/tokio_stream/wrappers/struct.UnboundedReceiverStream.html
pub struct UnboundedReceiver<T> {
    /// The channel receiver, specialized with the unbounded counter.
    chan: chan::Rx<T, Semaphore>,
}
impl<T> fmt::Debug for UnboundedReceiver<T> {
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut dbg = fmt.debug_struct("UnboundedReceiver");
        dbg.field("chan", &self.chan);
        dbg.finish()
    }
}
/// Creates an unbounded mpsc channel for communicating between asynchronous
/// tasks without backpressure.
///
/// A `send` on this channel will always succeed as long as the receive half
/// has not been closed. If the receiver falls behind, messages will be
/// arbitrarily buffered.
///
/// **Note** that the amount of available system memory is an implicit bound to
/// the channel. Using an `unbounded` channel has the ability of causing the
/// process to run out of memory. In this case, the process will be aborted.
pub fn unbounded_channel<T>() -> (UnboundedSender<T>, UnboundedReceiver<T>) {
    // Counter starts at zero: no queued messages, closed bit clear.
    let (chan_tx, chan_rx) = chan::channel(Semaphore(AtomicUsize::new(0)));
    (
        UnboundedSender::new(chan_tx),
        UnboundedReceiver::new(chan_rx),
    )
}
/// No capacity: a single atomic stands in for a semaphore. Bit 0 is the
/// closed flag; the remaining bits (`value >> 1`) count queued messages
/// (see the `Semaphore` impl in `chan.rs`).
#[derive(Debug)]
pub(crate) struct Semaphore(pub(crate) AtomicUsize);
impl<T> UnboundedReceiver<T> {
pub(crate) fn new(chan: chan::Rx<T, Semaphore>) -> UnboundedReceiver<T> {
UnboundedReceiver { chan }
}
/// Receives the next value for this receiver.
///
/// This method returns `None` if the channel has been closed and there are
/// no remaining messages in the channel's buffer. This indicates that no
/// further values can ever be received from this `Receiver`. The channel is
/// closed when all senders have been dropped, or when [`close`] is called.
///
/// If there are no messages in the channel's buffer, but the channel has
/// not yet been closed, this method will sleep until a message is sent or
/// the channel is closed.
///
/// # Cancel safety
///
/// This method is cancel safe. If `recv` is used as the event in a
/// [`tokio::select!`](crate::select) statement and some other branch
/// completes first, it is guaranteed that no messages were received on this
/// channel.
///
/// [`close`]: Self::close
///
/// # Examples
///
/// ```
/// use tokio::sync::mpsc;
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let (tx, mut rx) = mpsc::unbounded_channel();
///
/// tokio::spawn(async move {
/// tx.send("hello").unwrap();
/// });
///
/// assert_eq!(Some("hello"), rx.recv().await);
/// assert_eq!(None, rx.recv().await);
/// # }
/// ```
///
/// Values are buffered:
///
/// ```
/// use tokio::sync::mpsc;
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let (tx, mut rx) = mpsc::unbounded_channel();
///
/// tx.send("hello").unwrap();
/// tx.send("world").unwrap();
///
/// assert_eq!(Some("hello"), rx.recv().await);
/// assert_eq!(Some("world"), rx.recv().await);
/// # }
/// ```
pub async fn recv(&mut self) -> Option<T> {
use std::future::poll_fn;
poll_fn(|cx| self.poll_recv(cx)).await
}
/// Receives the next values for this receiver and extends `buffer`.
///
/// This method extends `buffer` by no more than a fixed number of values
/// as specified by `limit`. If `limit` is zero, the function returns
/// immediately with `0`. The return value is the number of values added to
/// `buffer`.
///
/// For `limit > 0`, if there are no messages in the channel's queue,
/// but the channel has not yet been closed, this method will sleep
/// until a message is sent or the channel is closed.
///
/// For non-zero values of `limit`, this method will never return `0` unless
/// the channel has been closed and there are no remaining messages in the
/// channel's queue. This indicates that no further values can ever be
/// received from this `Receiver`. The channel is closed when all senders
/// have been dropped, or when [`close`] is called.
///
/// The capacity of `buffer` is increased as needed.
///
/// # Cancel safety
///
/// This method is cancel safe. If `recv_many` is used as the event in a
/// [`tokio::select!`](crate::select) statement and some other branch
/// completes first, it is guaranteed that no messages were received on this
/// channel.
///
/// [`close`]: Self::close
///
/// # Examples
///
/// ```
/// use tokio::sync::mpsc;
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let mut buffer: Vec<&str> = Vec::with_capacity(2);
/// let limit = 2;
/// let (tx, mut rx) = mpsc::unbounded_channel();
/// let tx2 = tx.clone();
/// tx2.send("first").unwrap();
/// tx2.send("second").unwrap();
/// tx2.send("third").unwrap();
///
/// // Call `recv_many` to receive up to `limit` (2) values.
/// assert_eq!(2, rx.recv_many(&mut buffer, limit).await);
/// assert_eq!(vec!["first", "second"], buffer);
///
/// // If the buffer is full, the next call to `recv_many`
/// // reserves additional capacity.
/// assert_eq!(1, rx.recv_many(&mut buffer, limit).await);
///
/// tokio::spawn(async move {
/// tx.send("fourth").unwrap();
/// });
///
/// // 'tx' is dropped, but `recv_many`
/// // is guaranteed not to return 0 as the channel
/// // is not yet closed.
/// assert_eq!(1, rx.recv_many(&mut buffer, limit).await);
/// assert_eq!(vec!["first", "second", "third", "fourth"], buffer);
///
/// // Once the last sender is dropped, the channel is
/// // closed and `recv_many` returns 0, capacity unchanged.
/// drop(tx2);
/// assert_eq!(0, rx.recv_many(&mut buffer, limit).await);
/// assert_eq!(vec!["first", "second", "third", "fourth"], buffer);
/// # }
/// ```
pub async fn recv_many(&mut self, buffer: &mut Vec<T>, limit: usize) -> usize {
use std::future::poll_fn;
poll_fn(|cx| self.chan.recv_many(cx, buffer, limit)).await
}
/// Tries to receive the next value for this receiver.
///
/// This method returns the [`Empty`] error if the channel is currently
/// empty, but there are still outstanding [senders] or [permits].
///
/// This method returns the [`Disconnected`] error if the channel is
/// currently empty, and there are no outstanding [senders] or [permits].
///
/// Unlike the [`poll_recv`] method, this method will never return an
/// [`Empty`] error spuriously.
///
/// [`Empty`]: crate::sync::mpsc::error::TryRecvError::Empty
/// [`Disconnected`]: crate::sync::mpsc::error::TryRecvError::Disconnected
/// [`poll_recv`]: Self::poll_recv
/// [senders]: crate::sync::mpsc::Sender
/// [permits]: crate::sync::mpsc::Permit
///
/// # Examples
///
/// ```
/// use tokio::sync::mpsc;
/// use tokio::sync::mpsc::error::TryRecvError;
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let (tx, mut rx) = mpsc::unbounded_channel();
///
/// tx.send("hello").unwrap();
///
/// assert_eq!(Ok("hello"), rx.try_recv());
/// assert_eq!(Err(TryRecvError::Empty), rx.try_recv());
///
/// tx.send("hello").unwrap();
/// // Drop the last sender, closing the channel.
/// drop(tx);
///
/// assert_eq!(Ok("hello"), rx.try_recv());
/// assert_eq!(Err(TryRecvError::Disconnected), rx.try_recv());
/// # }
/// ```
pub fn try_recv(&mut self) -> Result<T, TryRecvError> {
self.chan.try_recv()
}
/// Blocking receive to call outside of asynchronous contexts.
///
/// # Panics
///
/// This function panics if called within an asynchronous execution
/// context.
///
/// # Examples
///
/// ```
/// # #[cfg(not(target_family = "wasm"))]
/// # {
/// use std::thread;
/// use tokio::sync::mpsc;
///
/// #[tokio::main]
/// async fn main() {
/// let (tx, mut rx) = mpsc::unbounded_channel::<u8>();
///
/// let sync_code = thread::spawn(move || {
/// assert_eq!(Some(10), rx.blocking_recv());
/// });
///
/// let _ = tx.send(10);
/// sync_code.join().unwrap();
/// }
/// # }
/// ```
#[track_caller]
#[cfg(feature = "sync")]
#[cfg_attr(docsrs, doc(alias = "recv_blocking"))]
pub fn blocking_recv(&mut self) -> Option<T> {
crate::future::block_on(self.recv())
}
/// Variant of [`Self::recv_many`] for blocking contexts.
///
/// The same conditions as in [`Self::blocking_recv`] apply.
#[track_caller]
#[cfg(feature = "sync")]
#[cfg_attr(docsrs, doc(alias = "recv_many_blocking"))]
pub fn blocking_recv_many(&mut self, buffer: &mut Vec<T>, limit: usize) -> usize {
crate::future::block_on(self.recv_many(buffer, limit))
}
/// Closes the receiving half of a channel, without dropping it.
///
/// This prevents any further messages from being sent on the channel while
/// still enabling the receiver to drain messages that are buffered.
///
/// To guarantee that no messages are dropped, after calling `close()`,
/// `recv()` must be called until `None` is returned.
pub fn close(&mut self) {
self.chan.close();
}
/// Checks if a channel is closed.
///
/// This method returns `true` if the channel has been closed. The channel is closed
/// when all [`UnboundedSender`] have been dropped, or when [`UnboundedReceiver::close`] is called.
///
/// [`UnboundedSender`]: crate::sync::mpsc::UnboundedSender
/// [`UnboundedReceiver::close`]: crate::sync::mpsc::UnboundedReceiver::close
///
/// # Examples
/// ```
/// use tokio::sync::mpsc;
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let (_tx, mut rx) = mpsc::unbounded_channel::<()>();
/// assert!(!rx.is_closed());
///
/// rx.close();
///
/// assert!(rx.is_closed());
/// # }
/// ```
pub fn is_closed(&self) -> bool {
self.chan.is_closed()
}
/// Checks if a channel is empty.
///
/// This method returns `true` if the channel has no messages.
///
/// # Examples
/// ```
/// use tokio::sync::mpsc;
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let (tx, rx) = mpsc::unbounded_channel();
/// assert!(rx.is_empty());
///
/// tx.send(0).unwrap();
/// assert!(!rx.is_empty());
/// # }
///
/// ```
pub fn is_empty(&self) -> bool {
self.chan.is_empty()
}
/// Returns the number of messages in the channel.
///
/// # Examples
/// ```
/// use tokio::sync::mpsc;
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let (tx, rx) = mpsc::unbounded_channel();
/// assert_eq!(0, rx.len());
///
/// tx.send(0).unwrap();
/// assert_eq!(1, rx.len());
/// # }
/// ```
pub fn len(&self) -> usize {
self.chan.len()
}
/// Polls to receive the next message on this channel.
///
/// This method returns:
///
/// * `Poll::Pending` if no messages are available but the channel is not
///   closed, or if a spurious failure happens.
/// * `Poll::Ready(Some(message))` if a message is available.
/// * `Poll::Ready(None)` if the channel has been closed and all messages
///   sent before it was closed have been received.
///
/// When the method returns `Poll::Pending`, the `Waker` in the provided
/// `Context` is scheduled to receive a wakeup when a message is sent on any
/// receiver, or when the channel is closed. Note that on multiple calls to
/// `poll_recv` or `poll_recv_many`, only the `Waker` from the `Context`
/// passed to the most recent call is scheduled to receive a wakeup.
///
/// If this method returns `Poll::Pending` due to a spurious failure, then
/// the `Waker` will be notified when the situation causing the spurious
/// failure has been resolved. Note that receiving such a wakeup does not
/// guarantee that the next call will succeed — it could fail with another
/// spurious failure.
pub fn poll_recv(&mut self, cx: &mut Context<'_>) -> Poll<Option<T>> {
    // Waker registration and wakeup semantics are handled by the channel.
    self.chan.recv(cx)
}
/// Polls to receive multiple messages on this channel, extending the provided buffer.
///
/// This method returns:
/// * `Poll::Pending` if no messages are available but the channel is not closed, or if a
///   spurious failure happens.
/// * `Poll::Ready(count)` where `count` is the number of messages successfully received and
///   stored in `buffer`. This can be less than, or equal to, `limit`.
/// * `Poll::Ready(0)` if `limit` is set to zero or when the channel is closed.
///
/// When the method returns `Poll::Pending`, the `Waker` in the provided
/// `Context` is scheduled to receive a wakeup when a message is sent on any
/// receiver, or when the channel is closed. Note that on multiple calls to
/// `poll_recv` or `poll_recv_many`, only the `Waker` from the `Context`
/// passed to the most recent call is scheduled to receive a wakeup.
///
/// Note that this method does not guarantee that exactly `limit` messages
/// are received. Rather, if at least one message is available, it returns
/// as many messages as it can up to the given limit. This method returns
/// zero only if the channel is closed (or if `limit` is zero).
///
/// # Examples
///
/// ```
/// # #[cfg(not(target_family = "wasm"))]
/// # {
/// use std::task::{Context, Poll};
/// use std::pin::Pin;
/// use tokio::sync::mpsc;
/// use futures::Future;
///
/// struct MyReceiverFuture<'a> {
///     receiver: mpsc::UnboundedReceiver<i32>,
///     buffer: &'a mut Vec<i32>,
///     limit: usize,
/// }
///
/// impl<'a> Future for MyReceiverFuture<'a> {
///     type Output = usize; // Number of messages received
///
///     fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
///         let MyReceiverFuture { receiver, buffer, limit } = &mut *self;
///
///         // Now `receiver` and `buffer` are mutable references, and `limit` is copied
///         match receiver.poll_recv_many(cx, *buffer, *limit) {
///             Poll::Pending => Poll::Pending,
///             Poll::Ready(count) => Poll::Ready(count),
///         }
///     }
/// }
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let (tx, rx) = mpsc::unbounded_channel::<i32>();
/// let mut buffer = Vec::new();
///
/// let my_receiver_future = MyReceiverFuture {
///     receiver: rx,
///     buffer: &mut buffer,
///     limit: 3,
/// };
///
/// for i in 0..10 {
///     tx.send(i).expect("Unable to send integer");
/// }
///
/// let count = my_receiver_future.await;
/// assert_eq!(count, 3);
/// assert_eq!(buffer, vec![0,1,2])
/// # }
/// # }
/// ```
pub fn poll_recv_many(
    &mut self,
    cx: &mut Context<'_>,
    buffer: &mut Vec<T>,
    limit: usize,
) -> Poll<usize> {
    // Batch receive; the channel appends up to `limit` messages to `buffer`.
    self.chan.recv_many(cx, buffer, limit)
}
/// Returns the number of [`UnboundedSender`] handles.
pub fn sender_strong_count(&self) -> usize {
    // Delegates to the shared channel's strong-count bookkeeping.
    self.chan.sender_strong_count()
}
/// Returns the number of [`WeakUnboundedSender`] handles.
pub fn sender_weak_count(&self) -> usize {
    // Delegates to the shared channel's weak-count bookkeeping.
    self.chan.sender_weak_count()
}
}
impl<T> UnboundedSender<T> {
    /// Creates a new sender handle from the shared channel state.
    pub(crate) fn new(chan: chan::Tx<T, Semaphore>) -> UnboundedSender<T> {
        UnboundedSender { chan }
    }

    /// Attempts to send a message on this `UnboundedSender` without blocking.
    ///
    /// This method is not marked as `async` because sending a message to an unbounded channel
    /// never requires any form of waiting. This is due to the channel's infinite capacity,
    /// allowing the `send` operation to complete immediately. As a result, the `send` method can be
    /// used in both synchronous and asynchronous code without issues.
    ///
    /// If the receive half of the channel is closed, either due to [`close`]
    /// being called or the [`UnboundedReceiver`] having been dropped, this
    /// function returns an error. The error includes the value passed to `send`.
    ///
    /// [`close`]: UnboundedReceiver::close
    /// [`UnboundedReceiver`]: UnboundedReceiver
    pub fn send(&self, message: T) -> Result<(), SendError<T>> {
        // `inc_num_messages` returns `false` when the channel is closed;
        // hand the message back to the caller in that case.
        if !self.inc_num_messages() {
            return Err(SendError(message));
        }

        self.chan.send(message);
        Ok(())
    }

    /// Increments the message count in the unbounded "semaphore", returning
    /// `false` if the channel is closed.
    ///
    /// The semaphore packs two things into one `usize`: bit 0 is the closed
    /// flag, and the remaining bits hold the message count, so the counter
    /// advances in steps of 2.
    fn inc_num_messages(&self) -> bool {
        use std::process;
        use std::sync::atomic::Ordering::{AcqRel, Acquire};

        let mut curr = self.chan.semaphore().0.load(Acquire);

        loop {
            // Bit 0 set means the channel has been closed.
            if curr & 1 == 1 {
                return false;
            }

            // `usize::MAX ^ 1` is the largest value with bit 0 clear, i.e.
            // the maximum representable message count.
            if curr == usize::MAX ^ 1 {
                // Overflowed the ref count. There is no safe way to recover, so
                // abort the process. In practice, this should never happen.
                process::abort()
            }

            match self
                .chan
                .semaphore()
                .0
                .compare_exchange(curr, curr + 2, AcqRel, Acquire)
            {
                Ok(_) => return true,
                Err(actual) => {
                    // Lost the race; retry with the freshly observed value.
                    curr = actual;
                }
            }
        }
    }

    /// Completes when the receiver has dropped.
    ///
    /// This allows the producers to get notified when interest in the produced
    /// values is canceled and immediately stop doing work.
    ///
    /// # Cancel safety
    ///
    /// This method is cancel safe. Once the channel is closed, it stays closed
    /// forever and all future calls to `closed` will return immediately.
    ///
    /// # Examples
    ///
    /// ```
    /// use tokio::sync::mpsc;
    ///
    /// # #[tokio::main(flavor = "current_thread")]
    /// # async fn main() {
    /// let (tx1, rx) = mpsc::unbounded_channel::<()>();
    /// let tx2 = tx1.clone();
    /// let tx3 = tx1.clone();
    /// let tx4 = tx1.clone();
    /// let tx5 = tx1.clone();
    /// tokio::spawn(async move {
    ///     drop(rx);
    /// });
    ///
    /// futures::join!(
    ///     tx1.closed(),
    ///     tx2.closed(),
    ///     tx3.closed(),
    ///     tx4.closed(),
    ///     tx5.closed()
    /// );
    /// println!("Receiver dropped");
    /// # }
    /// ```
    pub async fn closed(&self) {
        self.chan.closed().await;
    }

    /// Checks if the channel has been closed. This happens when the
    /// [`UnboundedReceiver`] is dropped, or when the
    /// [`UnboundedReceiver::close`] method is called.
    ///
    /// [`UnboundedReceiver`]: crate::sync::mpsc::UnboundedReceiver
    /// [`UnboundedReceiver::close`]: crate::sync::mpsc::UnboundedReceiver::close
    ///
    /// ```
    /// let (tx, rx) = tokio::sync::mpsc::unbounded_channel::<()>();
    /// assert!(!tx.is_closed());
    ///
    /// let tx2 = tx.clone();
    /// assert!(!tx2.is_closed());
    ///
    /// drop(rx);
    /// assert!(tx.is_closed());
    /// assert!(tx2.is_closed());
    /// ```
    pub fn is_closed(&self) -> bool {
        self.chan.is_closed()
    }

    /// Returns `true` if senders belong to the same channel.
    ///
    /// # Examples
    ///
    /// ```
    /// let (tx, rx) = tokio::sync::mpsc::unbounded_channel::<()>();
    /// let  tx2 = tx.clone();
    /// assert!(tx.same_channel(&tx2));
    ///
    /// let (tx3, rx3) = tokio::sync::mpsc::unbounded_channel::<()>();
    /// assert!(!tx3.same_channel(&tx2));
    /// ```
    pub fn same_channel(&self, other: &Self) -> bool {
        self.chan.same_channel(&other.chan)
    }

    /// Converts the `UnboundedSender` to a [`WeakUnboundedSender`] that does not count
    /// towards RAII semantics, i.e. if all `UnboundedSender` instances of the
    /// channel were dropped and only `WeakUnboundedSender` instances remain,
    /// the channel is closed.
    #[must_use = "Downgrade creates a WeakSender without destroying the original non-weak sender."]
    pub fn downgrade(&self) -> WeakUnboundedSender<T> {
        WeakUnboundedSender {
            chan: self.chan.downgrade(),
        }
    }

    /// Returns the number of [`UnboundedSender`] handles.
    pub fn strong_count(&self) -> usize {
        self.chan.strong_count()
    }

    /// Returns the number of [`WeakUnboundedSender`] handles.
    pub fn weak_count(&self) -> usize {
        self.chan.weak_count()
    }
}
impl<T> Clone for WeakUnboundedSender<T> {
    fn clone(&self) -> Self {
        // Bump the weak count before handing out another handle so the
        // bookkeeping never undercounts live weak senders.
        self.chan.increment_weak_count();
        WeakUnboundedSender {
            chan: self.chan.clone(),
        }
    }
}
impl<T> Drop for WeakUnboundedSender<T> {
    fn drop(&mut self) {
        // Mirror of `Clone`: release this handle's contribution to the weak count.
        self.chan.decrement_weak_count();
    }
}
impl<T> WeakUnboundedSender<T> {
    /// Tries to convert a `WeakUnboundedSender` into an [`UnboundedSender`].
    /// This will return `Some` if there are other `Sender` instances alive and
    /// the channel wasn't previously dropped, otherwise `None` is returned.
    pub fn upgrade(&self) -> Option<UnboundedSender<T>> {
        // `chan::Tx::upgrade` returns `None` when no strong senders remain.
        chan::Tx::upgrade(self.chan.clone()).map(UnboundedSender::new)
    }

    /// Returns the number of [`UnboundedSender`] handles.
    pub fn strong_count(&self) -> usize {
        self.chan.strong_count()
    }

    /// Returns the number of [`WeakUnboundedSender`] handles.
    pub fn weak_count(&self) -> usize {
        self.chan.weak_count()
    }
}
impl<T> fmt::Debug for WeakUnboundedSender<T> {
    // Intentionally field-less: the inner channel state is not useful to print.
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt.debug_struct("WeakUnboundedSender").finish()
    }
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/sync/mpsc/list.rs | tokio/src/sync/mpsc/list.rs | //! A concurrent, lock-free, FIFO list.
use crate::loom::sync::atomic::{AtomicPtr, AtomicUsize};
use crate::loom::thread;
use crate::sync::mpsc::block::{self, Block};
use std::fmt;
use std::ptr::NonNull;
use std::sync::atomic::Ordering::{AcqRel, Acquire, Relaxed, Release};
/// List queue transmit handle.
pub(crate) struct Tx<T> {
    /// Tail in the `Block` mpmc list.
    block_tail: AtomicPtr<Block<T>>,

    /// Position to push the next message. This references a block and offset
    /// into the block.
    ///
    /// Slots are claimed by `fetch_add`ing this counter (see `Tx::push`).
    tail_position: AtomicUsize,
}
/// List queue receive handle
///
/// There is a single receiver, so these fields need no synchronization.
pub(crate) struct Rx<T> {
    /// Pointer to the block being processed.
    head: NonNull<Block<T>>,

    /// Next slot index to process.
    index: usize,

    /// Pointer to the next block pending release.
    ///
    /// Trails `head`; blocks between `free_head` and `head` are candidates
    /// for reclamation (see `Rx::reclaim_blocks`).
    free_head: NonNull<Block<T>>,
}
/// Return value of `Rx::try_pop`.
pub(crate) enum TryPopResult<T> {
    /// Successfully popped a value.
    Ok(T),
    /// The channel is empty.
    ///
    /// Note that `list.rs` only tracks the close state set by senders. If the
    /// channel is closed by `Rx::close()`, then `TryPopResult::Empty` is still
    /// returned, and the close state needs to be handled by `chan.rs`.
    Empty,
    /// The channel is empty and closed.
    ///
    /// Returned when the send half is closed (all senders dropped).
    Closed,
    /// The channel is not empty, but the first value is being written.
    Busy,
}
/// Creates a new list-based channel, returning the transmit and receive
/// handles. Both halves initially reference the same empty block starting
/// at index 0.
pub(crate) fn channel<T>() -> (Tx<T>, Rx<T>) {
    // Allocate the first block on the heap; ownership of the allocation is
    // shared between the two halves through raw pointers.
    let first = NonNull::new(Box::into_raw(Block::new(0))).unwrap();

    let tx = Tx {
        block_tail: AtomicPtr::new(first.as_ptr()),
        tail_position: AtomicUsize::new(0),
    };

    let rx = Rx {
        head: first,
        index: 0,
        free_head: first,
    };

    (tx, rx)
}
impl<T> Tx<T> {
    /// Pushes a value into the list.
    pub(crate) fn push(&self, value: T) {
        // First, claim a slot for the value. `Acquire` is used here to
        // synchronize with the `fetch_add` in `reclaim_blocks`.
        let slot_index = self.tail_position.fetch_add(1, Acquire);

        // Load the current block and write the value
        let block = self.find_block(slot_index);

        unsafe {
            // Write the value to the block
            block.as_ref().write(slot_index, value);
        }
    }

    /// Closes the send half of the list.
    ///
    /// Similar process as pushing a value, but instead of writing the value &
    /// setting the ready flag, the `TX_CLOSED` flag is set on the block.
    pub(crate) fn close(&self) {
        // First, claim a slot for the value. This is the last slot that will be
        // claimed.
        let slot_index = self.tail_position.fetch_add(1, Acquire);

        let block = self.find_block(slot_index);

        unsafe { block.as_ref().tx_close() }
    }

    /// Returns the block containing the slot at `slot_index`, walking (and
    /// growing) the linked list as needed. While walking, it opportunistically
    /// advances `block_tail` past fully finalized blocks.
    fn find_block(&self, slot_index: usize) -> NonNull<Block<T>> {
        // The start index of the block that contains `index`.
        let start_index = block::start_index(slot_index);

        // The index offset into the block
        let offset = block::offset(slot_index);

        // Load the current head of the block
        let mut block_ptr = self.block_tail.load(Acquire);

        let block = unsafe { &*block_ptr };

        // Calculate the distance between the tail ptr and the target block
        let distance = block.distance(start_index);

        // Decide if this call to `find_block` should attempt to update the
        // `block_tail` pointer.
        //
        // Updating `block_tail` is not always performed in order to reduce
        // contention.
        //
        // When set, as the routine walks the linked list, it attempts to update
        // `block_tail`. If the update cannot be performed, `try_updating_tail`
        // is unset.
        let mut try_updating_tail = distance > offset;

        // Walk the linked list of blocks until the block with `start_index` is
        // found.
        loop {
            let block = unsafe { &(*block_ptr) };

            if block.is_at_index(start_index) {
                return unsafe { NonNull::new_unchecked(block_ptr) };
            }

            let next_block = block
                .load_next(Acquire)
                // There is no allocated next block, grow the linked list.
                .unwrap_or_else(|| block.grow());

            // If the block is **not** final, then the tail pointer cannot be
            // advanced any more.
            try_updating_tail &= block.is_final();

            if try_updating_tail {
                // Advancing `block_tail` must happen when walking the linked
                // list. `block_tail` may not advance passed any blocks that are
                // not "final". At the point a block is finalized, it is unknown
                // if there are any prior blocks that are unfinalized, which
                // makes it impossible to advance `block_tail`.
                //
                // While walking the linked list, `block_tail` can be advanced
                // as long as finalized blocks are traversed.
                //
                // Release ordering is used to ensure that any subsequent reads
                // are able to see the memory pointed to by `block_tail`.
                //
                // Acquire is not needed as any "actual" value is not accessed.
                // At this point, the linked list is walked to acquire blocks.
                if self
                    .block_tail
                    .compare_exchange(block_ptr, next_block.as_ptr(), Release, Relaxed)
                    .is_ok()
                {
                    // Synchronize with any senders
                    let tail_position = self.tail_position.fetch_add(0, Release);

                    unsafe {
                        block.tx_release(tail_position);
                    }
                } else {
                    // A concurrent sender is also working on advancing
                    // `block_tail` and this thread is falling behind.
                    //
                    // Stop trying to advance the tail pointer
                    try_updating_tail = false;
                }
            }

            block_ptr = next_block.as_ptr();

            thread::yield_now();
        }
    }

    /// # Safety
    ///
    /// Behavior is undefined if any of the following conditions are violated:
    ///
    /// - The `block` was created by [`Box::into_raw`].
    /// - The `block` is not currently part of any linked list.
    /// - The `block` is a valid pointer to a [`Block<T>`].
    pub(crate) unsafe fn reclaim_block(&self, mut block: NonNull<Block<T>>) {
        // The block has been removed from the linked list and ownership
        // is reclaimed.
        //
        // Before dropping the block, see if it can be reused by
        // inserting it back at the end of the linked list.
        //
        // First, reset the data
        //
        // Safety: caller guarantees the block is valid and not in any list.
        unsafe {
            block.as_mut().reclaim();
        }

        let mut reused = false;

        // Attempt to insert the block at the end
        //
        // Walk at most three times
        let curr_ptr = self.block_tail.load(Acquire);

        // The pointer can never be null
        debug_assert!(!curr_ptr.is_null());

        // Safety: curr_ptr is never null.
        let mut curr = unsafe { NonNull::new_unchecked(curr_ptr) };

        // TODO: Unify this logic with Block::grow
        for _ in 0..3 {
            match unsafe { curr.as_ref().try_push(&mut block, AcqRel, Acquire) } {
                Ok(()) => {
                    reused = true;
                    break;
                }
                Err(next) => {
                    curr = next;
                }
            }
        }

        if !reused {
            // Could not re-link the block within the walk budget; free it.
            //
            // Safety:
            //
            // 1. Caller guarantees the block is valid and not in any list.
            // 2. The block was created by `Box::into_raw`.
            let _ = unsafe { Box::from_raw(block.as_ptr()) };
        }
    }

    /// Returns `true` if the `TX_CLOSED` flag has been set on the current
    /// tail block (i.e. all senders dropped or the send half closed).
    pub(crate) fn is_closed(&self) -> bool {
        let tail = self.block_tail.load(Acquire);

        unsafe {
            let tail_block = &*tail;
            tail_block.is_closed()
        }
    }
}
impl<T> fmt::Debug for Tx<T> {
    // Relaxed loads are sufficient for diagnostic output; the printed values
    // are only a snapshot.
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt.debug_struct("Tx")
            .field("block_tail", &self.block_tail.load(Relaxed))
            .field("tail_position", &self.tail_position.load(Relaxed))
            .finish()
    }
}
impl<T> Rx<T> {
    /// Returns `true` if the channel currently holds no messages.
    pub(crate) fn is_empty(&self, tx: &Tx<T>) -> bool {
        let block = unsafe { self.head.as_ref() };
        if block.has_value(self.index) {
            return false;
        }

        // It is possible that a block has no value "now" but the list is still not empty.
        // To be sure, it is necessary to check the length of the list.
        self.len(tx) == 0
    }

    /// Returns the number of messages currently in the channel.
    pub(crate) fn len(&self, tx: &Tx<T>) -> usize {
        // When all the senders are dropped, there will be a last block in the tail position,
        // but it will be closed
        let tail_position = tx.tail_position.load(Acquire);
        // The close marker claims one slot (see `Tx::close`), so subtract it
        // when the channel is closed.
        tail_position - self.index - (tx.is_closed() as usize)
    }

    /// Pops the next value off the queue.
    pub(crate) fn pop(&mut self, tx: &Tx<T>) -> Option<block::Read<T>> {
        // Advance `head`, if needed
        if !self.try_advancing_head() {
            return None;
        }

        self.reclaim_blocks(tx);

        unsafe {
            let block = self.head.as_ref();

            let ret = block.read(self.index);

            if let Some(block::Read::Value(..)) = ret {
                // Only consume the slot when an actual value was read;
                // `Closed` does not advance the index.
                self.index = self.index.wrapping_add(1);
            }

            ret
        }
    }

    /// Pops the next value off the queue, detecting whether the block
    /// is busy or empty on failure.
    ///
    /// This function exists because `Rx::pop` can return `None` even if the
    /// channel's queue contains a message that has been completely written.
    /// This can happen if the fully delivered message is behind another message
    /// that is in the middle of being written to the block, since the channel
    /// can't return the messages out of order.
    pub(crate) fn try_pop(&mut self, tx: &Tx<T>) -> TryPopResult<T> {
        let tail_position = tx.tail_position.load(Acquire);
        let result = self.pop(tx);

        match result {
            Some(block::Read::Value(t)) => TryPopResult::Ok(t),
            Some(block::Read::Closed) => TryPopResult::Closed,
            // If the observed tail equals our index, nothing was pending;
            // otherwise a write is in flight ahead of us.
            None if tail_position == self.index => TryPopResult::Empty,
            None => TryPopResult::Busy,
        }
    }

    /// Tries advancing the block pointer to the block referenced by `self.index`.
    ///
    /// Returns `true` if successful, `false` if there is no next block to load.
    fn try_advancing_head(&mut self) -> bool {
        let block_index = block::start_index(self.index);

        loop {
            let next_block = {
                let block = unsafe { self.head.as_ref() };

                if block.is_at_index(block_index) {
                    return true;
                }

                block.load_next(Acquire)
            };

            let next_block = match next_block {
                Some(next_block) => next_block,
                None => {
                    return false;
                }
            };

            self.head = next_block;

            thread::yield_now();
        }
    }

    /// Hands blocks between `free_head` and `head` back to the send half for
    /// reuse (or deallocation), stopping at the first block that senders may
    /// still touch.
    fn reclaim_blocks(&mut self, tx: &Tx<T>) {
        while self.free_head != self.head {
            unsafe {
                // Get a handle to the block that will be freed and update
                // `free_head` to point to the next block.
                let block = self.free_head;

                let observed_tail_position = block.as_ref().observed_tail_position();

                let required_index = match observed_tail_position {
                    Some(i) => i,
                    None => return,
                };

                if required_index > self.index {
                    return;
                }

                // We may read the next pointer with `Relaxed` ordering as it is
                // guaranteed that the `reclaim_blocks` routine trails the `recv`
                // routine. Any memory accessed by `reclaim_blocks` has already
                // been acquired by `recv`.
                let next_block = block.as_ref().load_next(Relaxed);

                // Update the free list head
                self.free_head = next_block.unwrap();

                // Push the emptied block onto the back of the queue, making it
                // available to senders.
                tx.reclaim_block(block);
            }

            thread::yield_now();
        }
    }

    /// Effectively `Drop` all the blocks. Should only be called once, when
    /// the list is dropping.
    pub(super) unsafe fn free_blocks(&mut self) {
        debug_assert_ne!(self.free_head, NonNull::dangling());

        let mut cur = Some(self.free_head);

        #[cfg(debug_assertions)]
        {
            // to trigger the debug assert above so as to catch that we
            // don't call `free_blocks` more than once.
            self.free_head = NonNull::dangling();
            self.head = NonNull::dangling();
        }

        while let Some(block) = cur {
            cur = unsafe { block.as_ref() }.load_next(Relaxed);
            drop(unsafe { Box::from_raw(block.as_ptr()) });
        }
    }
}
impl<T> fmt::Debug for Rx<T> {
    // Pointers and the index are printed as-is; this is diagnostic only.
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt.debug_struct("Rx")
            .field("head", &self.head)
            .field("index", &self.index)
            .field("free_head", &self.free_head)
            .finish()
    }
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/sync/mpsc/block.rs | tokio/src/sync/mpsc/block.rs | use crate::loom::cell::UnsafeCell;
use crate::loom::sync::atomic::{AtomicPtr, AtomicUsize};
use std::alloc::Layout;
use std::mem::MaybeUninit;
use std::ops;
use std::ptr::{self, NonNull};
use std::sync::atomic::Ordering::{self, AcqRel, Acquire, Release};
/// A block in a linked list.
///
/// Each block in the list can hold up to `BLOCK_CAP` messages.
pub(crate) struct Block<T> {
    /// The header fields.
    header: BlockHeader<T>,

    /// Array containing values pushed into the block. Values are stored in a
    /// continuous array in order to improve cache line behavior when reading.
    /// The values must be manually dropped.
    values: Values<T>,
}
/// Extra fields for a `Block<T>`.
struct BlockHeader<T> {
    /// The start index of this block.
    ///
    /// Slots in this block have indices in `start_index .. start_index + BLOCK_CAP`.
    start_index: usize,

    /// The next block in the linked list.
    next: AtomicPtr<Block<T>>,

    /// Bitfield tracking slots that are ready to have their values consumed.
    ///
    /// Also carries the `RELEASED` and `TX_CLOSED` flag bits above the
    /// per-slot ready bits.
    ready_slots: AtomicUsize,

    /// The observed `tail_position` value *after* the block has been passed by
    /// `block_tail`.
    observed_tail_position: UnsafeCell<usize>,
}
/// Outcome of reading a slot: either a stored value, or the observation that
/// the send half has closed.
pub(crate) enum Read<T> {
    Value(T),
    Closed,
}
// Backing storage for a block's slots. `repr(transparent)` keeps the layout
// identical to the inner array so raw-pointer initialization is sound.
#[repr(transparent)]
struct Values<T>([UnsafeCell<MaybeUninit<T>>; BLOCK_CAP]);
use super::BLOCK_CAP;
// NOTE(review): the mask arithmetic below assumes `BLOCK_CAP` is a power of
// two — confirm against the definition in the parent module.

/// Masks an index to get the block identifier.
const BLOCK_MASK: usize = !(BLOCK_CAP - 1);

/// Masks an index to get the value offset in a block.
const SLOT_MASK: usize = BLOCK_CAP - 1;

/// Flag tracking that a block has gone through the sender's release routine.
///
/// When this is set, the receiver may consider freeing the block.
const RELEASED: usize = 1 << BLOCK_CAP;

/// Flag tracking all senders dropped.
///
/// When this flag is set, the send half of the channel has closed.
const TX_CLOSED: usize = RELEASED << 1;

/// Mask covering all bits used to track slot readiness.
const READY_MASK: usize = RELEASED - 1;
/// Returns the index of the first slot in the block that contains the slot
/// at `slot_index`.
#[inline(always)]
pub(crate) fn start_index(slot_index: usize) -> usize {
    // Clearing the low bits rounds the index down to the block boundary.
    slot_index & BLOCK_MASK
}
/// Returns the offset of `slot_index` within its block.
#[inline(always)]
pub(crate) fn offset(slot_index: usize) -> usize {
    // Keeping only the low bits yields the position inside the block.
    slot_index & SLOT_MASK
}
// Generates raw-pointer accessors for the header and values fields without
// creating intermediate references (which would be unsound on an
// uninitialized block). Only `//` comments here: doc attributes inside the
// macro invocation could interfere with its token matching.
generate_addr_of_methods! {
    impl<T> Block<T> {
        unsafe fn addr_of_header(self: NonNull<Self>) -> NonNull<BlockHeader<T>> {
            &self.header
        }

        unsafe fn addr_of_values(self: NonNull<Self>) -> NonNull<Values<T>> {
            &self.values
        }
    }
}
impl<T> Block<T> {
    /// Allocates a block whose first slot has index `start_index`.
    ///
    /// The block is allocated directly on the heap and initialized in place,
    /// avoiding a large stack temporary (see `assert_no_stack_overflow`).
    pub(crate) fn new(start_index: usize) -> Box<Block<T>> {
        unsafe {
            // Allocate the block on the heap.
            // SAFETY: The size of the Block<T> is non-zero, since it is at least the size of the header.
            let block = std::alloc::alloc(Layout::new::<Block<T>>()) as *mut Block<T>;
            let block = match NonNull::new(block) {
                Some(block) => block,
                None => std::alloc::handle_alloc_error(Layout::new::<Block<T>>()),
            };

            // Write the header to the block.
            Block::addr_of_header(block).as_ptr().write(BlockHeader {
                // The absolute index in the channel of the first slot in the block.
                start_index,

                // Pointer to the next block in the linked list.
                next: AtomicPtr::new(ptr::null_mut()),

                ready_slots: AtomicUsize::new(0),

                observed_tail_position: UnsafeCell::new(0),
            });

            // Initialize the values array.
            Values::initialize(Block::addr_of_values(block));

            // Convert the pointer to a `Box`.
            // Safety: The raw pointer was allocated using the global allocator, and with
            // the layout for a `Block<T>`, so it's valid to convert it to box.
            Box::from_raw(block.as_ptr())
        }
    }

    /// Returns `true` if the block matches the given index.
    pub(crate) fn is_at_index(&self, index: usize) -> bool {
        debug_assert!(offset(index) == 0);
        self.header.start_index == index
    }

    /// Returns the number of blocks between `self` and the block at the
    /// specified index.
    ///
    /// `start_index` must represent a block *after* `self`.
    pub(crate) fn distance(&self, other_index: usize) -> usize {
        debug_assert!(offset(other_index) == 0);
        other_index.wrapping_sub(self.header.start_index) / BLOCK_CAP
    }

    /// Reads the value at the given offset.
    ///
    /// Returns `None` if the slot is empty.
    ///
    /// # Safety
    ///
    /// To maintain safety, the caller must ensure:
    ///
    /// * No concurrent access to the slot.
    pub(crate) unsafe fn read(&self, slot_index: usize) -> Option<Read<T>> {
        let offset = offset(slot_index);

        let ready_bits = self.header.ready_slots.load(Acquire);

        if !is_ready(ready_bits, offset) {
            // Slot not written yet; report `Closed` only if the send half
            // has shut down, otherwise "nothing available".
            if is_tx_closed(ready_bits) {
                return Some(Read::Closed);
            }

            return None;
        }

        // Get the value
        //
        // Safety:
        //
        // 1. The caller guarantees that there is no concurrent access to the slot.
        // 2. The `UnsafeCell` always give us a valid pointer to the value.
        let value = self.values[offset].with(|ptr| unsafe { ptr::read(ptr) });

        // Safety: the ready bit is set, so the value has been initialized.
        Some(Read::Value(unsafe { value.assume_init() }))
    }

    /// Returns true if *this* block has a value in the given slot.
    ///
    /// Always returns false when given an index from a different block.
    pub(crate) fn has_value(&self, slot_index: usize) -> bool {
        if slot_index < self.header.start_index {
            return false;
        }
        if slot_index >= self.header.start_index + super::BLOCK_CAP {
            return false;
        }

        let offset = offset(slot_index);
        let ready_bits = self.header.ready_slots.load(Acquire);
        is_ready(ready_bits, offset)
    }

    /// Writes a value to the block at the given offset.
    ///
    /// # Safety
    ///
    /// To maintain safety, the caller must ensure:
    ///
    /// * The slot is empty.
    /// * No concurrent access to the slot.
    pub(crate) unsafe fn write(&self, slot_index: usize, value: T) {
        // Get the offset into the block
        let slot_offset = offset(slot_index);

        self.values[slot_offset].with_mut(|ptr| {
            // Safety: the caller guarantees that there is no concurrent access to the slot
            unsafe {
                ptr::write(ptr, MaybeUninit::new(value));
            }
        });

        // Release the value. After this point, the slot ref may no longer
        // be used. It is possible for the receiver to free the memory at
        // any point.
        self.set_ready(slot_offset);
    }

    /// Signal to the receiver that the sender half of the list is closed.
    pub(crate) unsafe fn tx_close(&self) {
        self.header.ready_slots.fetch_or(TX_CLOSED, Release);
    }

    /// Returns `true` if `TX_CLOSED` has been set on this block.
    pub(crate) unsafe fn is_closed(&self) -> bool {
        let ready_bits = self.header.ready_slots.load(Acquire);
        is_tx_closed(ready_bits)
    }

    /// Resets the block to a blank state. This enables reusing blocks in the
    /// channel.
    ///
    /// # Safety
    ///
    /// To maintain safety, the caller must ensure:
    ///
    /// * All slots are empty.
    /// * The caller holds a unique pointer to the block.
    pub(crate) unsafe fn reclaim(&mut self) {
        self.header.start_index = 0;
        self.header.next = AtomicPtr::new(ptr::null_mut());
        self.header.ready_slots = AtomicUsize::new(0);
    }

    /// Releases the block to the rx half for freeing.
    ///
    /// This function is called by the tx half once it can be guaranteed that no
    /// more senders will attempt to access the block.
    ///
    /// # Safety
    ///
    /// To maintain safety, the caller must ensure:
    ///
    /// * The block will no longer be accessed by any sender.
    pub(crate) unsafe fn tx_release(&self, tail_position: usize) {
        // Track the observed tail_position. Any sender targeting a greater
        // tail_position is guaranteed to not access this block.
        self.header
            .observed_tail_position
            // Safety:
            //
            // 1. The caller guarantees unique access to the block.
            // 2. The `UnsafeCell` always gives us a valid pointer.
            .with_mut(|ptr| unsafe { *ptr = tail_position });

        // Set the released bit, signalling to the receiver that it is safe to
        // free the block's memory as soon as all slots **prior** to
        // `observed_tail_position` have been filled.
        self.header.ready_slots.fetch_or(RELEASED, Release);
    }

    /// Mark a slot as ready
    fn set_ready(&self, slot: usize) {
        let mask = 1 << slot;
        self.header.ready_slots.fetch_or(mask, Release);
    }

    /// Returns `true` when all slots have their `ready` bits set.
    ///
    /// This indicates that the block is in its final state and will no longer
    /// be mutated.
    pub(crate) fn is_final(&self) -> bool {
        self.header.ready_slots.load(Acquire) & READY_MASK == READY_MASK
    }

    /// Returns the `observed_tail_position` value, if set
    ///
    /// The value is only meaningful once `tx_release` has set `RELEASED`;
    /// before that, `None` is returned.
    pub(crate) fn observed_tail_position(&self) -> Option<usize> {
        if 0 == RELEASED & self.header.ready_slots.load(Acquire) {
            None
        } else {
            Some(
                self.header
                    .observed_tail_position
                    .with(|ptr| unsafe { *ptr }),
            )
        }
    }

    /// Loads the next block
    pub(crate) fn load_next(&self, ordering: Ordering) -> Option<NonNull<Block<T>>> {
        let ret = NonNull::new(self.header.next.load(ordering));

        // Invariant check (debug only): a linked successor must start exactly
        // one block's worth of slots after this one.
        debug_assert!(unsafe {
            ret.map_or(true, |block| {
                block.as_ref().header.start_index == self.header.start_index.wrapping_add(BLOCK_CAP)
            })
        });

        ret
    }

    /// Pushes `block` as the next block in the link.
    ///
    /// Returns Ok if successful, otherwise, a pointer to the next block in
    /// the list is returned.
    ///
    /// This requires that the next pointer is null.
    ///
    /// # Ordering
    ///
    /// This performs a compare-and-swap on `next` using `AcqRel` ordering.
    ///
    /// # Safety
    ///
    /// To maintain safety, the caller must ensure:
    ///
    /// * `block` is not freed until it has been removed from the list.
    pub(crate) unsafe fn try_push(
        &self,
        block: &mut NonNull<Block<T>>,
        success: Ordering,
        failure: Ordering,
    ) -> Result<(), NonNull<Block<T>>> {
        // Safety: caller guarantees that `block` is valid.
        unsafe { block.as_mut() }.header.start_index =
            self.header.start_index.wrapping_add(BLOCK_CAP);

        let next_ptr = self
            .header
            .next
            .compare_exchange(ptr::null_mut(), block.as_ptr(), success, failure)
            .unwrap_or_else(|x| x);

        match NonNull::new(next_ptr) {
            Some(next_ptr) => Err(next_ptr),
            None => Ok(()),
        }
    }

    /// Grows the `Block` linked list by allocating and appending a new block.
    ///
    /// The next block in the linked list is returned. This may or may not be
    /// the one allocated by the function call.
    ///
    /// # Implementation
    ///
    /// It is assumed that `self.next` is null. A new block is allocated with
    /// `start_index` set to be the next block. A compare-and-swap is performed
    /// with `AcqRel` memory ordering. If the compare-and-swap is successful, the
    /// newly allocated block is released to other threads walking the block
    /// linked list. If the compare-and-swap fails, the current thread acquires
    /// the next block in the linked list, allowing the current thread to access
    /// the slots.
    pub(crate) fn grow(&self) -> NonNull<Block<T>> {
        // Create the new block. It is assumed that the block will become the
        // next one after `&self`. If this turns out to not be the case,
        // `start_index` is updated accordingly.
        let new_block = Block::new(self.header.start_index + BLOCK_CAP);

        let mut new_block = unsafe { NonNull::new_unchecked(Box::into_raw(new_block)) };

        // Attempt to store the block. The first compare-and-swap attempt is
        // "unrolled" due to minor differences in logic
        //
        // `AcqRel` is used as the ordering **only** when attempting the
        // compare-and-swap on self.next.
        //
        // If the compare-and-swap fails, then the actual value of the cell is
        // returned from this function and accessed by the caller. Given this,
        // the memory must be acquired.
        //
        // `Release` ensures that the newly allocated block is available to
        // other threads acquiring the next pointer.
        let next = NonNull::new(
            self.header
                .next
                .compare_exchange(ptr::null_mut(), new_block.as_ptr(), AcqRel, Acquire)
                .unwrap_or_else(|x| x),
        );

        let next = match next {
            Some(next) => next,
            None => {
                // The compare-and-swap succeeded and the newly allocated block
                // is successfully pushed.
                return new_block;
            }
        };

        // There already is a next block in the linked list. The newly allocated
        // block could be dropped and the discovered next block returned;
        // however, that would be wasteful. Instead, the linked list is walked
        // by repeatedly attempting to compare-and-swap the pointer into the
        // `next` register until the compare-and-swap succeed.
        //
        // Care is taken to update new_block's start_index field as appropriate.
        let mut curr = next;

        // TODO: Should this iteration be capped?
        loop {
            let actual = unsafe { curr.as_ref().try_push(&mut new_block, AcqRel, Acquire) };

            curr = match actual {
                Ok(()) => {
                    return next;
                }
                Err(curr) => curr,
            };

            crate::loom::thread::yield_now();
        }
    }
}
/// Returns `true` if the specified slot has a value ready to be consumed.
fn is_ready(bits: usize, slot: usize) -> bool {
    // A slot's ready flag is the single bit at position `slot`.
    bits & (1 << slot) != 0
}
/// Returns `true` if the closed flag has been set.
fn is_tx_closed(bits: usize) -> bool {
    bits & TX_CLOSED == TX_CLOSED
}
impl<T> Values<T> {
    /// Initialize a `Values` struct from a pointer.
    ///
    /// # Safety
    ///
    /// The raw pointer must be valid for writing a `Values<T>`.
    unsafe fn initialize(_value: NonNull<Values<T>>) {
        // When fuzzing, `UnsafeCell` needs to be initialized.
        // NOTE(review): the `if_loom!` macro (defined elsewhere) presumably
        // gates this body to loom builds, making this a no-op otherwise —
        // confirm against the macro definition.
        if_loom! {
            let p = _value.as_ptr() as *mut UnsafeCell<MaybeUninit<T>>;
            // Write each slot as an explicitly constructed `UnsafeCell` over
            // uninitialized storage.
            for i in 0..BLOCK_CAP {
                unsafe {
                    p.add(i).write(UnsafeCell::new(MaybeUninit::uninit()));
                }
            }
        }
    }
}
impl<T> ops::Index<usize> for Values<T> {
    type Output = UnsafeCell<MaybeUninit<T>>;

    /// Returns a shared reference to the slot cell at `index`.
    /// Panics (like slice indexing) if `index` is out of bounds.
    fn index(&self, index: usize) -> &Self::Output {
        &self.0[index]
    }
}
#[cfg(all(test, not(loom)))]
#[test]
fn assert_no_stack_overflow() {
    // Regression test: allocating a block whose payload is ~2 MB must not
    // overflow the stack.
    // https://github.com/tokio-rs/tokio/issues/5293
    struct Foo {
        _a: [u8; 2_000_000],
    }

    // NOTE(review): the layout equality presumably matters for how
    // `Block::new` allocates its uninitialized storage — confirm against
    // `Block::new` (defined earlier in this file).
    assert_eq!(
        Layout::new::<MaybeUninit<Block<Foo>>>(),
        Layout::new::<Block<Foo>>()
    );

    let _block = Block::<Foo>::new(0);
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/sync/mpsc/error.rs | tokio/src/sync/mpsc/error.rs | //! Channel error types.
use std::error::Error;
use std::fmt;
/// Error returned by [`Sender::send`](super::Sender::send).
///
/// Wraps the value that could not be delivered because the channel closed.
#[derive(PartialEq, Eq, Clone, Copy)]
pub struct SendError<T>(pub T);

impl<T> fmt::Debug for SendError<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Intentionally hides the payload, which may not be `Debug`.
        f.debug_struct("SendError").finish_non_exhaustive()
    }
}

impl<T> fmt::Display for SendError<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str("channel closed")
    }
}

impl<T> Error for SendError<T> {}
// ===== TrySendError =====

/// Error returned by [`Sender::try_send`](super::Sender::try_send).
#[derive(PartialEq, Eq, Clone, Copy)]
pub enum TrySendError<T> {
    /// The data could not be sent on the channel because the channel is
    /// currently full and sending would require blocking.
    Full(T),

    /// The receive half of the channel was explicitly closed or has been
    /// dropped.
    Closed(T),
}

impl<T> TrySendError<T> {
    /// Consume the `TrySendError`, returning the unsent value.
    pub fn into_inner(self) -> T {
        // Both variants carry the rejected value; an or-pattern extracts it.
        match self {
            TrySendError::Full(value) | TrySendError::Closed(value) => value,
        }
    }
}

impl<T> fmt::Debug for TrySendError<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // The payload is elided since `T` need not implement `Debug`.
        match *self {
            TrySendError::Full(..) => f.write_str("Full(..)"),
            TrySendError::Closed(..) => f.write_str("Closed(..)"),
        }
    }
}

impl<T> fmt::Display for TrySendError<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let msg = match self {
            TrySendError::Full(..) => "no available capacity",
            TrySendError::Closed(..) => "channel closed",
        };
        f.write_str(msg)
    }
}

impl<T> Error for TrySendError<T> {}
impl<T> From<SendError<T>> for TrySendError<T> {
fn from(src: SendError<T>) -> TrySendError<T> {
TrySendError::Closed(src.0)
}
}
// ===== TryRecvError =====

/// Error returned by [`Receiver::try_recv`](super::Receiver::try_recv).
#[derive(PartialEq, Eq, Clone, Copy, Debug)]
pub enum TryRecvError {
    /// This **channel** is currently empty, but the **Sender**(s) have not yet
    /// disconnected, so data may yet become available.
    Empty,

    /// The **channel**'s sending half has become disconnected, and there will
    /// never be any more data received on it.
    Disconnected,
}

impl fmt::Display for TryRecvError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str(match self {
            TryRecvError::Empty => "receiving on an empty channel",
            TryRecvError::Disconnected => "receiving on a closed channel",
        })
    }
}

impl Error for TryRecvError {}
// ===== RecvError =====

/// Error returned by `Receiver`.
///
/// Kept only for backwards compatibility; `recv` reports closure via `Option`.
#[derive(Debug, Clone)]
#[doc(hidden)]
#[deprecated(note = "This type is unused because recv returns an Option.")]
pub struct RecvError(());

#[allow(deprecated)]
impl fmt::Display for RecvError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str("channel closed")
    }
}

#[allow(deprecated)]
impl Error for RecvError {}
cfg_time! {
    // ===== SendTimeoutError =====

    #[derive(PartialEq, Eq, Clone, Copy)]
    /// Error returned by [`Sender::send_timeout`](super::Sender::send_timeout).
    pub enum SendTimeoutError<T> {
        /// The data could not be sent on the channel because the channel is
        /// full, and the timeout to send has elapsed.
        Timeout(T),

        /// The receive half of the channel was explicitly closed or has been
        /// dropped.
        Closed(T),
    }

    impl<T> SendTimeoutError<T> {
        /// Consume the `SendTimeoutError`, returning the unsent value.
        pub fn into_inner(self) -> T {
            // Both variants carry the rejected value; an or-pattern extracts it.
            match self {
                SendTimeoutError::Timeout(value) | SendTimeoutError::Closed(value) => value,
            }
        }
    }

    impl<T> fmt::Debug for SendTimeoutError<T> {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            // The payload is elided since `T` need not implement `Debug`.
            match *self {
                SendTimeoutError::Timeout(..) => f.write_str("Timeout(..)"),
                SendTimeoutError::Closed(..) => f.write_str("Closed(..)"),
            }
        }
    }

    impl<T> fmt::Display for SendTimeoutError<T> {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            let msg = match self {
                SendTimeoutError::Timeout(..) => "timed out waiting on send operation",
                SendTimeoutError::Closed(..) => "channel closed",
            };
            f.write_str(msg)
        }
    }

    impl<T> Error for SendTimeoutError<T> {}
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/sync/mpsc/bounded.rs | tokio/src/sync/mpsc/bounded.rs | use crate::loom::sync::Arc;
use crate::sync::batch_semaphore::{self as semaphore, TryAcquireError};
use crate::sync::mpsc::chan;
use crate::sync::mpsc::error::{SendError, TryRecvError, TrySendError};
cfg_time! {
use crate::sync::mpsc::error::SendTimeoutError;
use crate::time::Duration;
}
use std::fmt;
use std::task::{Context, Poll};
/// Sends values to the associated `Receiver`.
///
/// Instances are created by the [`channel`] function.
///
/// To convert the `Sender` into a `Sink` or use it in a poll function, you can
/// use the [`PollSender`] utility.
///
/// [`PollSender`]: https://docs.rs/tokio-util/latest/tokio_util/sync/struct.PollSender.html
pub struct Sender<T> {
    // Send half of the underlying channel implementation.
    chan: chan::Tx<T, Semaphore>,
}

/// A sender that does not prevent the channel from being closed.
///
/// If all [`Sender`] instances of a channel were dropped and only `WeakSender`
/// instances remain, the channel is closed.
///
/// In order to send messages, the `WeakSender` needs to be upgraded using
/// [`WeakSender::upgrade`], which returns `Option<Sender>`. It returns `None`
/// if all `Sender`s have been dropped, and otherwise it returns a `Sender`.
///
/// [`Sender`]: Sender
/// [`WeakSender::upgrade`]: WeakSender::upgrade
///
/// # Examples
///
/// ```
/// use tokio::sync::mpsc::channel;
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let (tx, _rx) = channel::<i32>(15);
/// let tx_weak = tx.downgrade();
///
/// // Upgrading will succeed because `tx` still exists.
/// assert!(tx_weak.upgrade().is_some());
///
/// // If we drop `tx`, then it will fail.
/// drop(tx);
/// assert!(tx_weak.clone().upgrade().is_none());
/// # }
/// ```
pub struct WeakSender<T> {
    // Reference to the shared channel state.
    chan: Arc<chan::Chan<T, Semaphore>>,
}

/// Permits to send one value into the channel.
///
/// `Permit` values are returned by [`Sender::reserve()`] and [`Sender::try_reserve()`]
/// and are used to guarantee channel capacity before generating a message to send.
///
/// [`Sender::reserve()`]: Sender::reserve
/// [`Sender::try_reserve()`]: Sender::try_reserve
pub struct Permit<'a, T> {
    // Borrowed sender handle the permit was reserved from.
    chan: &'a chan::Tx<T, Semaphore>,
}

/// An [`Iterator`] of [`Permit`] that can be used to hold `n` slots in the channel.
///
/// `PermitIterator` values are returned by [`Sender::reserve_many()`] and [`Sender::try_reserve_many()`]
/// and are used to guarantee channel capacity before generating `n` messages to send.
///
/// [`Sender::reserve_many()`]: Sender::reserve_many
/// [`Sender::try_reserve_many()`]: Sender::try_reserve_many
pub struct PermitIterator<'a, T> {
    // Borrowed sender handle the permits were reserved from.
    chan: &'a chan::Tx<T, Semaphore>,
    // Number of reserved slots this iterator hands out (per the doc above).
    n: usize,
}

/// Owned permit to send one value into the channel.
///
/// This is identical to the [`Permit`] type, except that it moves the sender
/// rather than borrowing it.
///
/// `OwnedPermit` values are returned by [`Sender::reserve_owned()`] and
/// [`Sender::try_reserve_owned()`] and are used to guarantee channel capacity
/// before generating a message to send.
///
/// [`Permit`]: Permit
/// [`Sender::reserve_owned()`]: Sender::reserve_owned
/// [`Sender::try_reserve_owned()`]: Sender::try_reserve_owned
pub struct OwnedPermit<T> {
    // NOTE(review): `Option` presumably lets the sender be moved out when the
    // permit is consumed — confirm against the `OwnedPermit` methods elsewhere
    // in this file.
    chan: Option<chan::Tx<T, Semaphore>>,
}

/// Receives values from the associated `Sender`.
///
/// Instances are created by the [`channel`] function.
///
/// This receiver can be turned into a `Stream` using [`ReceiverStream`].
///
/// [`ReceiverStream`]: https://docs.rs/tokio-stream/0.1/tokio_stream/wrappers/struct.ReceiverStream.html
pub struct Receiver<T> {
    /// The channel receiver.
    chan: chan::Rx<T, Semaphore>,
}
/// Creates a bounded mpsc channel for communicating between asynchronous tasks
/// with backpressure.
///
/// Up to `buffer` messages are queued; once the buffer is full, new sends wait
/// until a message is received. Messages arrive on the `Receiver` in the order
/// they were sent. The `Sender` may be cloned to send from multiple places;
/// only one `Receiver` is supported.
///
/// Sending after the `Receiver` is dropped returns a `SendError`; receiving
/// after every `Sender` is dropped returns `None`.
///
/// # Panics
///
/// Panics if the buffer capacity is 0, or too large. Currently the maximum
/// capacity is [`Semaphore::MAX_PERMITS`].
///
/// [`Semaphore::MAX_PERMITS`]: crate::sync::Semaphore::MAX_PERMITS
#[track_caller]
pub fn channel<T>(buffer: usize) -> (Sender<T>, Receiver<T>) {
    assert!(buffer > 0, "mpsc bounded channel requires buffer > 0");

    // The semaphore's permits model the free buffer slots; `bound` remembers
    // the configured maximum for `max_capacity`.
    let (tx, rx) = chan::channel(Semaphore {
        semaphore: semaphore::Semaphore::new(buffer),
        bound: buffer,
    });

    (Sender::new(tx), Receiver::new(rx))
}
/// Channel semaphore is a tuple of the semaphore implementation and a `usize`
/// representing the channel bound.
#[derive(Debug)]
pub(crate) struct Semaphore {
    // Available permits track the channel's free capacity (see
    // `Receiver::capacity`, which reads `available_permits`).
    pub(crate) semaphore: semaphore::Semaphore,
    // The buffer size passed to `channel` (see `Receiver::max_capacity`).
    pub(crate) bound: usize,
}
impl<T> Receiver<T> {
pub(crate) fn new(chan: chan::Rx<T, Semaphore>) -> Receiver<T> {
Receiver { chan }
}
/// Receives the next value for this receiver.
///
/// This method returns `None` if the channel has been closed and there are
/// no remaining messages in the channel's buffer. This indicates that no
/// further values can ever be received from this `Receiver`. The channel is
/// closed when all senders have been dropped, or when [`close`] is called.
///
/// If there are no messages in the channel's buffer, but the channel has
/// not yet been closed, this method will sleep until a message is sent or
/// the channel is closed. Note that if [`close`] is called, but there are
/// still outstanding [`Permits`] from before it was closed, the channel is
/// not considered closed by `recv` until the permits are released.
///
/// # Cancel safety
///
/// This method is cancel safe. If `recv` is used as the event in a
/// [`tokio::select!`](crate::select) statement and some other branch
/// completes first, it is guaranteed that no messages were received on this
/// channel.
///
/// [`close`]: Self::close
/// [`Permits`]: struct@crate::sync::mpsc::Permit
///
/// # Examples
///
/// ```
/// use tokio::sync::mpsc;
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let (tx, mut rx) = mpsc::channel(100);
///
/// tokio::spawn(async move {
/// tx.send("hello").await.unwrap();
/// });
///
/// assert_eq!(Some("hello"), rx.recv().await);
/// assert_eq!(None, rx.recv().await);
/// # }
/// ```
///
/// Values are buffered:
///
/// ```
/// use tokio::sync::mpsc;
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let (tx, mut rx) = mpsc::channel(100);
///
/// tx.send("hello").await.unwrap();
/// tx.send("world").await.unwrap();
///
/// assert_eq!(Some("hello"), rx.recv().await);
/// assert_eq!(Some("world"), rx.recv().await);
/// # }
/// ```
pub async fn recv(&mut self) -> Option<T> {
use std::future::poll_fn;
poll_fn(|cx| self.chan.recv(cx)).await
}
/// Receives the next values for this receiver and extends `buffer`.
///
/// This method extends `buffer` by no more than a fixed number of values
/// as specified by `limit`. If `limit` is zero, the function immediately
/// returns `0`. The return value is the number of values added to `buffer`.
///
/// For `limit > 0`, if there are no messages in the channel's queue, but
/// the channel has not yet been closed, this method will sleep until a
/// message is sent or the channel is closed. Note that if [`close`] is
/// called, but there are still outstanding [`Permits`] from before it was
/// closed, the channel is not considered closed by `recv_many` until the
/// permits are released.
///
/// For non-zero values of `limit`, this method will never return `0` unless
/// the channel has been closed and there are no remaining messages in the
/// channel's queue. This indicates that no further values can ever be
/// received from this `Receiver`. The channel is closed when all senders
/// have been dropped, or when [`close`] is called.
///
/// The capacity of `buffer` is increased as needed.
///
/// # Cancel safety
///
/// This method is cancel safe. If `recv_many` is used as the event in a
/// [`tokio::select!`](crate::select) statement and some other branch
/// completes first, it is guaranteed that no messages were received on this
/// channel.
///
/// [`close`]: Self::close
/// [`Permits`]: struct@crate::sync::mpsc::Permit
///
/// # Examples
///
/// ```
/// use tokio::sync::mpsc;
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let mut buffer: Vec<&str> = Vec::with_capacity(2);
/// let limit = 2;
/// let (tx, mut rx) = mpsc::channel(100);
/// let tx2 = tx.clone();
/// tx2.send("first").await.unwrap();
/// tx2.send("second").await.unwrap();
/// tx2.send("third").await.unwrap();
///
/// // Call `recv_many` to receive up to `limit` (2) values.
/// assert_eq!(2, rx.recv_many(&mut buffer, limit).await);
/// assert_eq!(vec!["first", "second"], buffer);
///
/// // If the buffer is full, the next call to `recv_many`
/// // reserves additional capacity.
/// assert_eq!(1, rx.recv_many(&mut buffer, 1).await);
///
/// tokio::spawn(async move {
/// tx.send("fourth").await.unwrap();
/// });
///
/// // 'tx' is dropped, but `recv_many`
/// // is guaranteed not to return 0 as the channel
/// // is not yet closed.
/// assert_eq!(1, rx.recv_many(&mut buffer, 1).await);
/// assert_eq!(vec!["first", "second", "third", "fourth"], buffer);
///
/// // Once the last sender is dropped, the channel is
/// // closed and `recv_many` returns 0, capacity unchanged.
/// drop(tx2);
/// assert_eq!(0, rx.recv_many(&mut buffer, limit).await);
/// assert_eq!(vec!["first", "second", "third", "fourth"], buffer);
/// # }
/// ```
pub async fn recv_many(&mut self, buffer: &mut Vec<T>, limit: usize) -> usize {
use std::future::poll_fn;
poll_fn(|cx| self.chan.recv_many(cx, buffer, limit)).await
}
/// Tries to receive the next value for this receiver.
///
/// This method returns the [`Empty`] error if the channel is currently
/// empty, but there are still outstanding [senders] or [permits].
///
/// This method returns the [`Disconnected`] error if the channel is
/// currently empty, and there are no outstanding [senders] or [permits].
///
/// Unlike the [`poll_recv`] method, this method will never return an
/// [`Empty`] error spuriously.
///
/// [`Empty`]: crate::sync::mpsc::error::TryRecvError::Empty
/// [`Disconnected`]: crate::sync::mpsc::error::TryRecvError::Disconnected
/// [`poll_recv`]: Self::poll_recv
/// [senders]: crate::sync::mpsc::Sender
/// [permits]: crate::sync::mpsc::Permit
///
/// # Examples
///
/// ```
/// use tokio::sync::mpsc;
/// use tokio::sync::mpsc::error::TryRecvError;
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let (tx, mut rx) = mpsc::channel(100);
///
/// tx.send("hello").await.unwrap();
///
/// assert_eq!(Ok("hello"), rx.try_recv());
/// assert_eq!(Err(TryRecvError::Empty), rx.try_recv());
///
/// tx.send("hello").await.unwrap();
/// // Drop the last sender, closing the channel.
/// drop(tx);
///
/// assert_eq!(Ok("hello"), rx.try_recv());
/// assert_eq!(Err(TryRecvError::Disconnected), rx.try_recv());
/// # }
/// ```
pub fn try_recv(&mut self) -> Result<T, TryRecvError> {
self.chan.try_recv()
}
/// Blocking receive to call outside of asynchronous contexts.
///
/// This method returns `None` if the channel has been closed and there are
/// no remaining messages in the channel's buffer. This indicates that no
/// further values can ever be received from this `Receiver`. The channel is
/// closed when all senders have been dropped, or when [`close`] is called.
///
/// If there are no messages in the channel's buffer, but the channel has
/// not yet been closed, this method will block until a message is sent or
/// the channel is closed.
///
/// This method is intended for use cases where you are sending from
/// asynchronous code to synchronous code, and will work even if the sender
/// is not using [`blocking_send`] to send the message.
///
/// Note that if [`close`] is called, but there are still outstanding
/// [`Permits`] from before it was closed, the channel is not considered
/// closed by `blocking_recv` until the permits are released.
///
/// [`close`]: Self::close
/// [`Permits`]: struct@crate::sync::mpsc::Permit
/// [`blocking_send`]: fn@crate::sync::mpsc::Sender::blocking_send
///
/// # Panics
///
/// This function panics if called within an asynchronous execution
/// context.
///
/// # Examples
///
/// ```
/// # #[cfg(not(target_family = "wasm"))]
/// # {
/// use std::thread;
/// use tokio::runtime::Runtime;
/// use tokio::sync::mpsc;
///
/// fn main() {
/// let (tx, mut rx) = mpsc::channel::<u8>(10);
///
/// let sync_code = thread::spawn(move || {
/// assert_eq!(Some(10), rx.blocking_recv());
/// });
///
/// Runtime::new()
/// .unwrap()
/// .block_on(async move {
/// let _ = tx.send(10).await;
/// });
/// sync_code.join().unwrap()
/// }
/// # }
/// ```
#[track_caller]
#[cfg(feature = "sync")]
#[cfg_attr(docsrs, doc(alias = "recv_blocking"))]
pub fn blocking_recv(&mut self) -> Option<T> {
crate::future::block_on(self.recv())
}
/// Variant of [`Self::recv_many`] for blocking contexts.
///
/// The same conditions as in [`Self::blocking_recv`] apply.
#[track_caller]
#[cfg(feature = "sync")]
#[cfg_attr(docsrs, doc(alias = "recv_many_blocking"))]
pub fn blocking_recv_many(&mut self, buffer: &mut Vec<T>, limit: usize) -> usize {
crate::future::block_on(self.recv_many(buffer, limit))
}
/// Closes the receiving half of a channel without dropping it.
///
/// This prevents any further messages from being sent on the channel while
/// still enabling the receiver to drain messages that are buffered. Any
/// outstanding [`Permit`] values will still be able to send messages.
///
/// To guarantee that no messages are dropped, after calling `close()`,
/// `recv()` must be called until `None` is returned. If there are
/// outstanding [`Permit`] or [`OwnedPermit`] values, the `recv` method will
/// not return `None` until those are released.
///
/// [`Permit`]: Permit
/// [`OwnedPermit`]: OwnedPermit
///
/// # Examples
///
/// ```
/// use tokio::sync::mpsc;
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let (tx, mut rx) = mpsc::channel(20);
///
/// tokio::spawn(async move {
/// let mut i = 0;
/// while let Ok(permit) = tx.reserve().await {
/// permit.send(i);
/// i += 1;
/// }
/// });
///
/// rx.close();
///
/// while let Some(msg) = rx.recv().await {
/// println!("got {}", msg);
/// }
///
/// // Channel closed and no messages are lost.
/// # }
/// ```
pub fn close(&mut self) {
self.chan.close();
}
/// Checks if a channel is closed.
///
/// This method returns `true` if the channel has been closed. The channel is closed
/// when all [`Sender`] have been dropped, or when [`Receiver::close`] is called.
///
/// [`Sender`]: crate::sync::mpsc::Sender
/// [`Receiver::close`]: crate::sync::mpsc::Receiver::close
///
/// # Examples
/// ```
/// use tokio::sync::mpsc;
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let (_tx, mut rx) = mpsc::channel::<()>(10);
/// assert!(!rx.is_closed());
///
/// rx.close();
///
/// assert!(rx.is_closed());
/// # }
/// ```
pub fn is_closed(&self) -> bool {
self.chan.is_closed()
}
/// Checks if a channel is empty.
///
/// This method returns `true` if the channel has no messages.
///
/// # Examples
/// ```
/// use tokio::sync::mpsc;
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let (tx, rx) = mpsc::channel(10);
/// assert!(rx.is_empty());
///
/// tx.send(0).await.unwrap();
/// assert!(!rx.is_empty());
/// # }
///
/// ```
pub fn is_empty(&self) -> bool {
self.chan.is_empty()
}
/// Returns the number of messages in the channel.
///
/// # Examples
/// ```
/// use tokio::sync::mpsc;
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let (tx, rx) = mpsc::channel(10);
/// assert_eq!(0, rx.len());
///
/// tx.send(0).await.unwrap();
/// assert_eq!(1, rx.len());
/// # }
/// ```
pub fn len(&self) -> usize {
self.chan.len()
}
/// Returns the current capacity of the channel.
///
/// The capacity goes down when the sender sends a value by calling [`Sender::send`] or by reserving
/// capacity with [`Sender::reserve`]. The capacity goes up when values are received.
/// This is distinct from [`max_capacity`], which always returns buffer capacity initially
/// specified when calling [`channel`].
///
/// # Examples
///
/// ```
/// use tokio::sync::mpsc;
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let (tx, mut rx) = mpsc::channel::<()>(5);
///
/// assert_eq!(rx.capacity(), 5);
///
/// // Making a reservation drops the capacity by one.
/// let permit = tx.reserve().await.unwrap();
/// assert_eq!(rx.capacity(), 4);
/// assert_eq!(rx.len(), 0);
///
/// // Sending and receiving a value increases the capacity by one.
/// permit.send(());
/// assert_eq!(rx.len(), 1);
/// rx.recv().await.unwrap();
/// assert_eq!(rx.capacity(), 5);
///
/// // Directly sending a message drops the capacity by one.
/// tx.send(()).await.unwrap();
/// assert_eq!(rx.capacity(), 4);
/// assert_eq!(rx.len(), 1);
///
/// // Receiving the message increases the capacity by one.
/// rx.recv().await.unwrap();
/// assert_eq!(rx.capacity(), 5);
/// assert_eq!(rx.len(), 0);
/// # }
/// ```
/// [`capacity`]: Receiver::capacity
/// [`max_capacity`]: Receiver::max_capacity
pub fn capacity(&self) -> usize {
self.chan.semaphore().semaphore.available_permits()
}
/// Returns the maximum buffer capacity of the channel.
///
/// The maximum capacity is the buffer capacity initially specified when calling
/// [`channel`]. This is distinct from [`capacity`], which returns the *current*
/// available buffer capacity: as messages are sent and received, the value
/// returned by [`capacity`] will go up or down, whereas the value
/// returned by [`max_capacity`] will remain constant.
///
/// # Examples
///
/// ```
/// use tokio::sync::mpsc;
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let (tx, rx) = mpsc::channel::<()>(5);
///
/// // both max capacity and capacity are the same at first
/// assert_eq!(rx.max_capacity(), 5);
/// assert_eq!(rx.capacity(), 5);
///
/// // Making a reservation doesn't change the max capacity.
/// let permit = tx.reserve().await.unwrap();
/// assert_eq!(rx.max_capacity(), 5);
/// // but drops the capacity by one
/// assert_eq!(rx.capacity(), 4);
/// # }
/// ```
/// [`capacity`]: Receiver::capacity
/// [`max_capacity`]: Receiver::max_capacity
pub fn max_capacity(&self) -> usize {
self.chan.semaphore().bound
}
/// Polls to receive the next message on this channel.
///
/// This method returns:
///
/// * `Poll::Pending` if no messages are available but the channel is not
/// closed, or if a spurious failure happens.
/// * `Poll::Ready(Some(message))` if a message is available.
/// * `Poll::Ready(None)` if the channel has been closed and all messages
/// sent before it was closed have been received.
///
/// When the method returns `Poll::Pending`, the `Waker` in the provided
/// `Context` is scheduled to receive a wakeup when a message is sent on any
/// receiver, or when the channel is closed. Note that on multiple calls to
/// `poll_recv` or `poll_recv_many`, only the `Waker` from the `Context`
/// passed to the most recent call is scheduled to receive a wakeup.
///
/// If this method returns `Poll::Pending` due to a spurious failure, then
/// the `Waker` will be notified when the situation causing the spurious
/// failure has been resolved. Note that receiving such a wakeup does not
/// guarantee that the next call will succeed — it could fail with another
/// spurious failure.
pub fn poll_recv(&mut self, cx: &mut Context<'_>) -> Poll<Option<T>> {
self.chan.recv(cx)
}
/// Polls to receive multiple messages on this channel, extending the provided buffer.
///
/// This method returns:
/// * `Poll::Pending` if no messages are available but the channel is not closed, or if a
/// spurious failure happens.
/// * `Poll::Ready(count)` where `count` is the number of messages successfully received and
/// stored in `buffer`. This can be less than, or equal to, `limit`.
/// * `Poll::Ready(0)` if `limit` is set to zero or when the channel is closed.
///
/// When the method returns `Poll::Pending`, the `Waker` in the provided
/// `Context` is scheduled to receive a wakeup when a message is sent on any
/// receiver, or when the channel is closed. Note that on multiple calls to
/// `poll_recv` or `poll_recv_many`, only the `Waker` from the `Context`
/// passed to the most recent call is scheduled to receive a wakeup.
///
/// Note that this method does not guarantee that exactly `limit` messages
/// are received. Rather, if at least one message is available, it returns
/// as many messages as it can up to the given limit. This method returns
/// zero only if the channel is closed (or if `limit` is zero).
///
/// # Examples
///
/// ```
/// use std::task::{Context, Poll};
/// use std::pin::Pin;
/// use tokio::sync::mpsc;
/// use futures::Future;
///
/// struct MyReceiverFuture<'a> {
/// receiver: mpsc::Receiver<i32>,
/// buffer: &'a mut Vec<i32>,
/// limit: usize,
/// }
///
/// impl<'a> Future for MyReceiverFuture<'a> {
/// type Output = usize; // Number of messages received
///
/// fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
/// let MyReceiverFuture { receiver, buffer, limit } = &mut *self;
///
/// // Now `receiver` and `buffer` are mutable references, and `limit` is copied
/// match receiver.poll_recv_many(cx, *buffer, *limit) {
/// Poll::Pending => Poll::Pending,
/// Poll::Ready(count) => Poll::Ready(count),
/// }
/// }
/// }
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let (tx, rx) = mpsc::channel(32);
/// let mut buffer = Vec::new();
///
/// let my_receiver_future = MyReceiverFuture {
/// receiver: rx,
/// buffer: &mut buffer,
/// limit: 3,
/// };
///
/// for i in 0..10 {
/// tx.send(i).await.unwrap();
/// }
///
/// let count = my_receiver_future.await;
/// assert_eq!(count, 3);
/// assert_eq!(buffer, vec![0,1,2])
/// # }
/// ```
pub fn poll_recv_many(
&mut self,
cx: &mut Context<'_>,
buffer: &mut Vec<T>,
limit: usize,
) -> Poll<usize> {
self.chan.recv_many(cx, buffer, limit)
}
/// Returns the number of [`Sender`] handles.
pub fn sender_strong_count(&self) -> usize {
self.chan.sender_strong_count()
}
/// Returns the number of [`WeakSender`] handles.
pub fn sender_weak_count(&self) -> usize {
self.chan.sender_weak_count()
}
}
impl<T> fmt::Debug for Receiver<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Receiver").field("chan", &self.chan).finish()
    }
}

impl<T> Unpin for Receiver<T> {}
impl<T> Sender<T> {
pub(crate) fn new(chan: chan::Tx<T, Semaphore>) -> Sender<T> {
Sender { chan }
}
/// Sends a value, waiting until there is capacity.
///
/// A successful send occurs when it is determined that the other end of the
/// channel has not hung up already. An unsuccessful send would be one where
/// the corresponding receiver has already been closed. Note that a return
/// value of `Err` means that the data will never be received, but a return
/// value of `Ok` does not mean that the data will be received. It is
/// possible for the corresponding receiver to hang up immediately after
/// this function returns `Ok`.
///
/// # Errors
///
/// If the receive half of the channel is closed, either due to [`close`]
/// being called or the [`Receiver`] handle dropping, the function returns
/// an error. The error includes the value passed to `send`.
///
/// [`close`]: Receiver::close
/// [`Receiver`]: Receiver
///
/// # Cancel safety
///
/// If `send` is used as the event in a [`tokio::select!`](crate::select)
/// statement and some other branch completes first, then it is guaranteed
/// that the message was not sent. **However, in that case, the message
/// is dropped and will be lost.**
///
/// To avoid losing messages, use [`reserve`](Self::reserve) to reserve
/// capacity, then use the returned [`Permit`] to send the message.
///
/// This channel uses a queue to ensure that calls to `send` and `reserve`
/// complete in the order they were requested. Cancelling a call to
/// `send` makes you lose your place in the queue.
///
/// # Examples
///
/// In the following example, each call to `send` will block until the
/// previously sent value was received.
///
/// ```rust
/// use tokio::sync::mpsc;
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let (tx, mut rx) = mpsc::channel(1);
///
/// tokio::spawn(async move {
/// for i in 0..10 {
/// if let Err(_) = tx.send(i).await {
/// println!("receiver dropped");
/// return;
/// }
/// }
/// });
///
/// while let Some(i) = rx.recv().await {
/// println!("got = {}", i);
/// }
/// # }
/// ```
pub async fn send(&self, value: T) -> Result<(), SendError<T>> {
match self.reserve().await {
Ok(permit) => {
permit.send(value);
Ok(())
}
Err(_) => Err(SendError(value)),
}
}
/// Completes when the receiver has dropped.
///
/// This allows the producers to get notified when interest in the produced
/// values is canceled and immediately stop doing work.
///
/// # Cancel safety
///
/// This method is cancel safe. Once the channel is closed, it stays closed
/// forever and all future calls to `closed` will return immediately.
///
/// # Examples
///
/// ```
/// use tokio::sync::mpsc;
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// let (tx1, rx) = mpsc::channel::<()>(1);
/// let tx2 = tx1.clone();
/// let tx3 = tx1.clone();
/// let tx4 = tx1.clone();
/// let tx5 = tx1.clone();
/// tokio::spawn(async move {
/// drop(rx);
/// });
///
/// futures::join!(
/// tx1.closed(),
/// tx2.closed(),
/// tx3.closed(),
/// tx4.closed(),
/// tx5.closed()
/// );
/// println!("Receiver dropped");
/// # }
/// ```
    pub async fn closed(&self) {
        // Delegates to the shared channel state's `closed` future.
        self.chan.closed().await;
    }
/// Attempts to immediately send a message on this `Sender`.
///
/// This method differs from [`send`] by returning immediately if the channel's
/// buffer is full or no receiver is waiting to acquire some data. Compared
/// with [`send`], this function has two failure cases instead of one (one for
/// disconnection, one for a full buffer).
///
/// # Errors
///
/// If the channel capacity has been reached, i.e., the channel has `n`
/// buffered values where `n` is the argument passed to [`channel`], then an
/// error is returned.
///
/// If the receive half of the channel is closed, either due to [`close`]
/// being called or the [`Receiver`] handle dropping, the function returns
/// an error. The error includes the value passed to `send`.
///
/// [`send`]: Sender::send
/// [`channel`]: channel
/// [`close`]: Receiver::close
///
/// # Examples
///
/// ```
/// use tokio::sync::mpsc;
///
/// # #[tokio::main(flavor = "current_thread")]
/// # async fn main() {
/// // Create a channel with buffer size 1
/// let (tx1, mut rx) = mpsc::channel(1);
/// let tx2 = tx1.clone();
///
/// tokio::spawn(async move {
/// tx1.send(1).await.unwrap();
/// tx1.send(2).await.unwrap();
/// // task waits until the receiver receives a value.
/// });
///
/// tokio::spawn(async move {
/// // This will return an error and send
/// // no message if the buffer is full
/// let _ = tx2.try_send(3);
/// });
///
/// let mut msg;
/// msg = rx.recv().await.unwrap();
/// println!("message {} received", msg);
///
/// msg = rx.recv().await.unwrap();
/// println!("message {} received", msg);
///
/// // Third message may have never been sent
/// match rx.recv().await {
/// Some(msg) => println!("message {} received", msg),
/// None => println!("the third message was never sent"),
/// }
/// # }
/// ```
pub fn try_send(&self, message: T) -> Result<(), TrySendError<T>> {
match self.chan.semaphore().semaphore.try_acquire(1) {
Ok(()) => {}
Err(TryAcquireError::Closed) => return Err(TrySendError::Closed(message)),
Err(TryAcquireError::NoPermits) => return Err(TrySendError::Full(message)),
}
// Send the message
self.chan.send(message);
Ok(())
}
/// Sends a value, waiting until there is capacity, but only for a limited time.
///
/// Shares the same success and error conditions as [`send`], adding one more
/// condition for an unsuccessful send, which is when the provided timeout has
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | true |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/sync/mpsc/mod.rs | tokio/src/sync/mpsc/mod.rs | #![cfg_attr(not(feature = "sync"), allow(dead_code, unreachable_pub))]
//! A multi-producer, single-consumer queue for sending values between
//! asynchronous tasks.
//!
//! This module provides two variants of the channel: bounded and unbounded. The
//! bounded variant has a limit on the number of messages that the channel can
//! store, and if this limit is reached, trying to send another message will
//! wait until a message is received from the channel. An unbounded channel has
//! an infinite capacity, so the `send` method will always complete immediately.
//! This makes the [`UnboundedSender`] usable from both synchronous and
//! asynchronous code.
//!
//! Similar to the `mpsc` channels provided by `std`, the channel constructor
//! functions provide separate send and receive handles, [`Sender`] and
//! [`Receiver`] for the bounded channel, [`UnboundedSender`] and
//! [`UnboundedReceiver`] for the unbounded channel. If there is no message to read,
//! the current task will be notified when a new value is sent. [`Sender`] and
//! [`UnboundedSender`] allow sending values into the channel. If the bounded
//! channel is at capacity, the send is rejected and the task will be notified
//! when additional capacity is available. In other words, the channel provides
//! backpressure.
//!
//! This channel is also suitable for the single-producer single-consumer
//! use-case. (Unless you only need to send one message, in which case you
//! should use the [oneshot] channel.)
//!
//! # Disconnection
//!
//! When all [`Sender`] handles have been dropped, it is no longer
//! possible to send values into the channel. This is considered the termination
//! event of the stream. As such, `Receiver::poll` returns `Ok(Ready(None))`.
//!
//! If the [`Receiver`] handle is dropped, then messages can no longer
//! be read out of the channel. In this case, all further attempts to send will
//! result in an error. Additionally, all unread messages will be drained from the
//! channel and dropped.
//!
//! # Clean Shutdown
//!
//! When the [`Receiver`] is dropped, it is possible for unprocessed messages to
//! remain in the channel. Instead, it is usually desirable to perform a "clean"
//! shutdown. To do this, the receiver first calls `close`, which will prevent
//! any further messages to be sent into the channel. Then, the receiver
//! consumes the channel to completion, at which point the receiver can be
//! dropped.
//!
//! # Communicating between sync and async code
//!
//! When you want to communicate between synchronous and asynchronous code, there
//! are two situations to consider:
//!
//! **Bounded channel**: If you need a bounded channel, you should use a bounded
//! Tokio `mpsc` channel for both directions of communication. Instead of calling
//! the async [`send`][bounded-send] or [`recv`][bounded-recv] methods, in
//! synchronous code you will need to use the [`blocking_send`][blocking-send] or
//! [`blocking_recv`][blocking-recv] methods.
//!
//! **Unbounded channel**: You should use the kind of channel that matches where
//! the receiver is. So for sending a message _from async to sync_, you should
//! use [the standard library unbounded channel][std-unbounded] or
//! [crossbeam][crossbeam-unbounded]. Similarly, for sending a message _from sync
//! to async_, you should use an unbounded Tokio `mpsc` channel.
//!
//! Please be aware that the above remarks were written with the `mpsc` channel
//! in mind, but they can also be generalized to other kinds of channels. In
//! general, any channel method that isn't marked async can be called anywhere,
//! including outside of the runtime. For example, sending a message on a
//! [oneshot] channel from outside the runtime is perfectly fine.
//!
//! # Multiple runtimes
//!
//! The `mpsc` channel is runtime agnostic. You can freely move it between
//! different instances of the Tokio runtime or even use it from non-Tokio
//! runtimes.
//!
//! When used in a Tokio runtime, it participates in
//! [cooperative scheduling](crate::task::coop#cooperative-scheduling) to avoid
//! starvation. This feature does not apply when used from non-Tokio runtimes.
//!
//! As an exception, methods ending in `_timeout` are not runtime agnostic
//! because they require access to the Tokio timer. See the documentation of
//! each `*_timeout` method for more information on its use.
//!
//! # Allocation behavior
//!
//! <div class="warning">The implementation details described in this section may change in future
//! Tokio releases.</div>
//!
//! The mpsc channel stores elements in blocks. Blocks are organized in a linked list. Sending
//! pushes new elements onto the block at the front of the list, and receiving pops them off the
//! one at the back. A block can hold 32 messages on a 64-bit target and 16 messages on a 32-bit
//! target. This number is independent of channel and message size. Each block also stores 4
//! pointer-sized values for bookkeeping (so on a 64-bit machine, each message has 1 byte of
//! overhead).
//!
//! When all values in a block have been received, it becomes empty. It will then be freed, unless
//! the channel's first block (where newly-sent elements are being stored) has no next block. In
//! that case, the empty block is reused as the next block.
//!
//! [`Sender`]: crate::sync::mpsc::Sender
//! [`Receiver`]: crate::sync::mpsc::Receiver
//! [bounded-send]: crate::sync::mpsc::Sender::send()
//! [bounded-recv]: crate::sync::mpsc::Receiver::recv()
//! [blocking-send]: crate::sync::mpsc::Sender::blocking_send()
//! [blocking-recv]: crate::sync::mpsc::Receiver::blocking_recv()
//! [`UnboundedSender`]: crate::sync::mpsc::UnboundedSender
//! [`UnboundedReceiver`]: crate::sync::mpsc::UnboundedReceiver
//! [oneshot]: crate::sync::oneshot
//! [`Handle::block_on`]: crate::runtime::Handle::block_on()
//! [std-unbounded]: std::sync::mpsc::channel
//! [crossbeam-unbounded]: https://docs.rs/crossbeam/*/crossbeam/channel/fn.unbounded.html
//! [`send_timeout`]: crate::sync::mpsc::Sender::send_timeout
// Storage blocks for the message queue (see "Allocation behavior" in the
// module docs above).
pub(super) mod block;
// Bounded (backpressured) channel: `channel`, `Sender`, `Receiver`, permits.
mod bounded;
pub use self::bounded::{
    channel, OwnedPermit, Permit, PermitIterator, Receiver, Sender, WeakSender,
};
// Core channel state shared by the bounded and unbounded variants.
mod chan;
pub(super) mod list;
// Unbounded channel: `unbounded_channel`, `UnboundedSender`, `UnboundedReceiver`.
mod unbounded;
pub use self::unbounded::{
    unbounded_channel, UnboundedReceiver, UnboundedSender, WeakUnboundedSender,
};
// Error types returned by the send/receive operations.
pub mod error;
/// The number of values a block can contain.
///
/// This value must be a power of 2. It also must be smaller than the number of
/// bits in `usize`.
#[cfg(all(target_pointer_width = "64", not(loom)))]
const BLOCK_CAP: usize = 32;
#[cfg(all(not(target_pointer_width = "64"), not(loom)))]
const BLOCK_CAP: usize = 16;
// NOTE(review): loom builds use a tiny capacity, presumably so model-checked
// tests cross block boundaries cheaply — confirm before relying on this.
#[cfg(loom)]
const BLOCK_CAP: usize = 2;
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/sync/task/atomic_waker.rs | tokio/src/sync/task/atomic_waker.rs | #![cfg_attr(any(loom, not(feature = "sync")), allow(dead_code, unreachable_pub))]
use crate::loom::cell::UnsafeCell;
use crate::loom::hint;
use crate::loom::sync::atomic::AtomicUsize;
use std::fmt;
use std::panic::{resume_unwind, AssertUnwindSafe, RefUnwindSafe, UnwindSafe};
use std::sync::atomic::Ordering::{AcqRel, Acquire, Release};
use std::task::Waker;
/// A synchronization primitive for task waking.
///
/// `AtomicWaker` will coordinate concurrent wakes with the consumer
/// potentially "waking" the underlying task. This is useful in scenarios
/// where a computation completes in another thread and wants to wake the
/// consumer, but the consumer is in the process of being migrated to a new
/// logical task.
///
/// Consumers should call `register` before checking the result of a computation
/// and producers should call `wake` after producing the computation (this
/// differs from the usual `thread::park` pattern). It is also permitted for
/// `wake` to be called **before** `register`. This results in a no-op.
///
/// A single `AtomicWaker` may be reused for any number of calls to `register` or
/// `wake`.
pub(crate) struct AtomicWaker {
    // Coordination bits: WAITING / REGISTERING / WAKING (see the state
    // machine description below).
    state: AtomicUsize,
    // The registered waker; access is serialized by the `state` lock bits.
    waker: UnsafeCell<Option<Waker>>,
}
// `do_register` catches panics from foreign waker code and restores the
// state machine, so observing an `AtomicWaker` after an unwind is fine.
impl RefUnwindSafe for AtomicWaker {}
impl UnwindSafe for AtomicWaker {}
// `AtomicWaker` is a multi-consumer, single-producer transfer cell. The cell
// stores a `Waker` value produced by calls to `register` and many threads can
// race to take the waker by calling `wake`.
//
// If a new `Waker` instance is produced by calling `register` before an existing
// one is consumed, then the existing one is overwritten.
//
// While `AtomicWaker` is single-producer, the implementation ensures memory
// safety. In the event of concurrent calls to `register`, there will be a
// single winner whose waker will get stored in the cell. The losers will not
// have their tasks woken. As such, callers should ensure to add synchronization
// to calls to `register`.
//
// The implementation uses a single `AtomicUsize` value to coordinate access to
// the `Waker` cell. There are two bits that are operated on independently. These
// are represented by `REGISTERING` and `WAKING`.
//
// The `REGISTERING` bit is set when a producer enters the critical section. The
// `WAKING` bit is set when a consumer enters the critical section. Neither
// bit being set is represented by `WAITING`.
//
// A thread obtains an exclusive lock on the waker cell by transitioning the
// state from `WAITING` to `REGISTERING` or `WAKING`, depending on the
// operation the thread wishes to perform. When this transition is made, it is
// guaranteed that no other thread will access the waker cell.
//
// # Registering
//
// On a call to `register`, an attempt to transition the state from WAITING to
// REGISTERING is made. On success, the caller obtains a lock on the waker cell.
//
// If the lock is obtained, then the thread sets the waker cell to the waker
// provided as an argument. Then it attempts to transition the state back from
// `REGISTERING` -> `WAITING`.
//
// If this transition is successful, then the registering process is complete
// and the next call to `wake` will observe the waker.
//
// If the transition fails, then there was a concurrent call to `wake` that
// was unable to access the waker cell (due to the registering thread holding the
// lock). To handle this, the registering thread removes the waker it just set
// from the cell and calls `wake` on it. This call to wake represents the
// attempt to wake by the other thread (that set the `WAKING` bit). The
// state is then transitioned from `REGISTERING | WAKING` back to `WAITING`.
// This transition must succeed because, at this point, the state cannot be
// transitioned by another thread.
//
// # Waking
//
// On a call to `wake`, an attempt to transition the state from `WAITING` to
// `WAKING` is made. On success, the caller obtains a lock on the waker cell.
//
// If the lock is obtained, then the thread takes ownership of the current value
// in the waker cell, and calls `wake` on it. The state is then transitioned
// back to `WAITING`. This transition must succeed as, at this point, the state
// cannot be transitioned by another thread.
//
// If the thread is unable to obtain the lock, the `WAKING` bit is still set.
// This is because it has either been set by the current thread but the previous
// value included the `REGISTERING` bit **or** a concurrent thread is in the
// `WAKING` critical section. Either way, no action must be taken.
//
// If the current thread is the only concurrent call to `wake` and another
// thread is in the `register` critical section, when the other thread **exits**
// the `register` critical section, it will observe the `WAKING` bit and
// handle the waker itself.
//
// If another thread is in the `waker` critical section, then it will handle
// waking the caller task.
//
// # A potential race (is safely handled).
//
// Imagine the following situation:
//
// * Thread A obtains the `wake` lock and wakes a task.
//
// * Before thread A releases the `wake` lock, the woken task is scheduled.
//
// * Thread B attempts to wake the task. In theory this should result in the
// task being woken, but it cannot because thread A still holds the wake
// lock.
//
// This case is handled by requiring users of `AtomicWaker` to call `register`
// **before** attempting to observe the application state change that resulted
// in the task being woken. The wakers also change the application state
// before calling wake.
//
// Because of this, the task will do one of two things.
//
// 1) Observe the application state change that Thread B is waking on. In
// this case, it is OK for Thread B's wake to be lost.
//
// 2) Call register before attempting to observe the application state. Since
// Thread A still holds the `wake` lock, the call to `register` will result
// in the task waking itself and get scheduled again.
/// Idle state.
const WAITING: usize = 0;
/// A new waker value is being registered with the `AtomicWaker` cell.
const REGISTERING: usize = 0b01;
/// The task currently registered with the `AtomicWaker` cell is being woken.
const WAKING: usize = 0b10;
// The two bits are independent: `REGISTERING | WAKING` is a valid transient
// state (a wake raced a registration in progress).
impl AtomicWaker {
    /// Create an `AtomicWaker`
    pub(crate) fn new() -> AtomicWaker {
        AtomicWaker {
            state: AtomicUsize::new(WAITING),
            waker: UnsafeCell::new(None),
        }
    }
    /*
    /// Registers the current waker to be notified on calls to `wake`.
    pub(crate) fn register(&self, waker: Waker) {
        self.do_register(waker);
    }
    */
    /// Registers the provided waker to be notified on calls to `wake`.
    ///
    /// The new waker will take place of any previous wakers that were registered
    /// by previous calls to `register`. Any calls to `wake` that happen after
    /// a call to `register` (as defined by the memory ordering rules), will
    /// wake the `register` caller's task.
    ///
    /// It is safe to call `register` with multiple other threads concurrently
    /// calling `wake`. This will result in the `register` caller's current
    /// task being woken once.
    ///
    /// This function is safe to call concurrently, but this is generally a bad
    /// idea. Concurrent calls to `register` will attempt to register different
    /// tasks to be woken. One of the callers will win and have its task set,
    /// but there is no guarantee as to which caller will succeed.
    pub(crate) fn register_by_ref(&self, waker: &Waker) {
        self.do_register(waker);
    }
    /// Shared registration logic for owned (`Waker`) and borrowed (`&Waker`)
    /// wakers; see the state-machine description above for the protocol.
    fn do_register<W>(&self, waker: W)
    where
        W: WakerRef,
    {
        fn catch_unwind<F: FnOnce() -> R, R>(f: F) -> std::thread::Result<R> {
            std::panic::catch_unwind(AssertUnwindSafe(f))
        }
        match self
            .state
            .compare_exchange(WAITING, REGISTERING, Acquire, Acquire)
            .unwrap_or_else(|x| x)
        {
            WAITING => {
                unsafe {
                    // If `into_waker` panics (because it's code outside of
                    // AtomicWaker) we need to prime a guard that is called on
                    // unwind to restore the waker to a WAITING state. Otherwise
                    // any future calls to register will incorrectly be stuck
                    // believing it's being updated by someone else.
                    let new_waker_or_panic = catch_unwind(move || waker.into_waker());
                    // Set the field to contain the new waker, or if
                    // `into_waker` panicked, leave the old value.
                    let mut maybe_panic = None;
                    let mut old_waker = None;
                    match new_waker_or_panic {
                        Ok(new_waker) => {
                            old_waker = self.waker.with_mut(|t| (*t).take());
                            self.waker.with_mut(|t| *t = Some(new_waker));
                        }
                        Err(panic) => maybe_panic = Some(panic),
                    }
                    // Release the lock. If the state transitioned to include
                    // the `WAKING` bit, this means that a wake has been
                    // called concurrently, so we have to remove the waker and
                    // wake it.
                    //
                    // Start by assuming that the state is `REGISTERING` as this
                    // is what we just set it to.
                    let res = self
                        .state
                        .compare_exchange(REGISTERING, WAITING, AcqRel, Acquire);
                    match res {
                        Ok(_) => {
                            // We don't want to give the caller the panic if it
                            // was someone else who put in that waker.
                            let _ = catch_unwind(move || {
                                drop(old_waker);
                            });
                        }
                        Err(actual) => {
                            // This branch can only be reached if a
                            // concurrent thread called `wake`. In this
                            // case, `actual` **must** be `REGISTERING |
                            // WAKING`.
                            debug_assert_eq!(actual, REGISTERING | WAKING);
                            // Take the waker to wake once the atomic operation has
                            // completed.
                            let mut waker = self.waker.with_mut(|t| (*t).take());
                            // Just swap, because no one could change state
                            // while state == `REGISTERING | WAKING`.
                            self.state.swap(WAITING, AcqRel);
                            // If `into_waker` panicked, then the waker in the
                            // waker slot is actually the old waker.
                            if maybe_panic.is_some() {
                                old_waker = waker.take();
                            }
                            // We don't want to give the caller the panic if it
                            // was someone else who put in that waker.
                            if let Some(old_waker) = old_waker {
                                let _ = catch_unwind(move || {
                                    old_waker.wake();
                                });
                            }
                            // The atomic swap was complete, now wake the waker
                            // and return.
                            //
                            // If this panics, we end up in a consumed state and
                            // return the panic to the caller.
                            if let Some(waker) = waker {
                                debug_assert!(maybe_panic.is_none());
                                waker.wake();
                            }
                        }
                    }
                    if let Some(panic) = maybe_panic {
                        // If `into_waker` panicked, return the panic to the caller.
                        resume_unwind(panic);
                    }
                }
            }
            WAKING => {
                // Currently in the process of waking the task, i.e.,
                // `wake` is currently being called on the old waker.
                // So, we call wake on the new waker.
                //
                // If this panics, someone else is responsible for restoring the
                // state of the waker.
                waker.wake();
                // This is equivalent to a spin lock, so use a spin hint.
                hint::spin_loop();
            }
            state => {
                // In this case, a concurrent thread is holding the
                // "registering" lock. This probably indicates a bug in the
                // caller's code as racing to call `register` doesn't make much
                // sense.
                //
                // We just want to maintain memory safety. It is ok to drop the
                // call to `register`.
                debug_assert!(state == REGISTERING || state == REGISTERING | WAKING);
            }
        }
    }
    /// Wakes the task that last called `register`.
    ///
    /// If `register` has not been called yet, then this does nothing.
    pub(crate) fn wake(&self) {
        if let Some(waker) = self.take_waker() {
            // If wake panics, we've consumed the waker which is a legitimate
            // outcome.
            waker.wake();
        }
    }
    /// Attempts to take the `Waker` value out of the `AtomicWaker` with the
    /// intention that the caller will wake the task later.
    pub(crate) fn take_waker(&self) -> Option<Waker> {
        // AcqRel ordering is used in order to acquire the value of the `waker`
        // cell as well as to establish a `release` ordering with whatever
        // memory the `AtomicWaker` is associated with.
        match self.state.fetch_or(WAKING, AcqRel) {
            WAITING => {
                // SAFETY: the waking lock has been acquired.
                let waker = unsafe { self.waker.with_mut(|t| (*t).take()) };
                // Release the lock.
                let old_state = self.state.swap(WAITING, Release);
                debug_assert!(old_state == WAKING);
                waker
            }
            state => {
                // There is a concurrent thread currently updating the
                // associated waker.
                //
                // Nothing more to do as the `WAKING` bit has been set. It
                // doesn't matter if there are concurrent registering threads or
                // not.
                //
                debug_assert!(
                    state == REGISTERING || state == REGISTERING | WAKING || state == WAKING
                );
                None
            }
        }
    }
}
impl Default for AtomicWaker {
fn default() -> Self {
AtomicWaker::new()
}
}
impl fmt::Debug for AtomicWaker {
    // Intentionally opaque: only the type name is printed.
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt.write_str("AtomicWaker")
    }
}
// SAFETY: access to the `waker` cell is serialized by the atomic `state`
// machine above (only the thread holding the REGISTERING/WAKING "lock" bit
// touches the cell), so sharing and sending across threads is sound.
unsafe impl Send for AtomicWaker {}
unsafe impl Sync for AtomicWaker {}
/// Abstracts over owned (`Waker`) and borrowed (`&Waker`) wakers so that
/// `do_register` can defer cloning until a waker is actually stored.
trait WakerRef {
    /// Wake the task, consuming `self` (by-ref wake for borrowed wakers).
    fn wake(self);
    /// Convert into an owned `Waker` suitable for storing in the cell.
    fn into_waker(self) -> Waker;
}
impl WakerRef for Waker {
    fn wake(self) {
        // Resolves to the inherent `Waker::wake` (inherent methods take
        // precedence over trait methods), so this does not recurse.
        self.wake();
    }
    fn into_waker(self) -> Waker {
        // Already owned; no clone needed.
        self
    }
}
impl WakerRef for &Waker {
    fn wake(self) {
        // Borrowed waker: wake by reference, avoiding a clone.
        self.wake_by_ref();
    }
    fn into_waker(self) -> Waker {
        // The clone is only paid when the waker is actually stored.
        self.clone()
    }
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/src/sync/task/mod.rs | tokio/src/sync/task/mod.rs | //! Thread-safe task notification primitives.
mod atomic_waker;
pub(crate) use self::atomic_waker::AtomicWaker;
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/tests/tcp_shutdown.rs | tokio/tests/tcp_shutdown.rs | #![warn(rust_2018_idioms)]
#![cfg(all(feature = "full", not(target_os = "wasi"), not(miri)))] // Wasi doesn't support bind
// No `socket` on miri.
use std::time::Duration;
use tokio::io::{self, AsyncReadExt, AsyncWriteExt};
use tokio::net::{TcpListener, TcpStream};
use tokio::sync::oneshot::channel;
use tokio_test::assert_ok;
#[tokio::test]
async fn shutdown() {
    let listener = assert_ok!(TcpListener::bind("127.0.0.1:0").await);
    let addr = assert_ok!(listener.local_addr());
    // Client: shut down its write half immediately, then expect EOF once the
    // server has echoed the (empty) stream and shut down as well.
    let client = tokio::spawn(async move {
        let mut stream = assert_ok!(TcpStream::connect(&addr).await);
        assert_ok!(stream.shutdown().await);
        let mut buf = [0u8; 1];
        assert_eq!(assert_ok!(stream.read(&mut buf).await), 0);
    });
    // Server: echo until EOF (zero bytes, since the client wrote nothing),
    // then shut down its write half.
    let (mut stream, _) = assert_ok!(listener.accept().await);
    let (mut rd, mut wr) = stream.split();
    assert_eq!(assert_ok!(io::copy(&mut rd, &mut wr).await), 0);
    assert_ok!(stream.shutdown().await);
    client.await.unwrap()
}
#[tokio::test]
#[expect(deprecated)] // set_linger is deprecated
async fn shutdown_after_tcp_reset() {
    let srv = assert_ok!(TcpListener::bind("127.0.0.1:0").await);
    let addr = assert_ok!(srv.local_addr());
    let (connected_tx, connected_rx) = channel();
    let (dropped_tx, dropped_rx) = channel();
    let handle = tokio::spawn(async move {
        let mut stream = assert_ok!(TcpStream::connect(&addr).await);
        connected_tx.send(()).unwrap();
        // Wait until the server side has dropped (and therefore reset) the
        // connection before attempting the shutdown.
        dropped_rx.await.unwrap();
        // Shutdown is expected to succeed even after the peer sent a reset.
        assert_ok!(AsyncWriteExt::shutdown(&mut stream).await);
    });
    let (stream, _) = assert_ok!(srv.accept().await);
    // By setting linger to 0 we will trigger a TCP reset
    stream.set_linger(Some(Duration::new(0, 0))).unwrap();
    connected_rx.await.unwrap();
    drop(stream);
    dropped_tx.send(()).unwrap();
    handle.await.unwrap();
}
#[tokio::test]
async fn shutdown_multiple_calls() {
    let listener = assert_ok!(TcpListener::bind("127.0.0.1:0").await);
    let addr = assert_ok!(listener.local_addr());
    let (connected_tx, connected_rx) = channel();
    // Calling shutdown repeatedly must keep succeeding on the client side.
    let client = tokio::spawn(async move {
        let mut stream = assert_ok!(TcpStream::connect(&addr).await);
        connected_tx.send(()).unwrap();
        for _ in 0..3 {
            assert_ok!(stream.shutdown().await);
        }
    });
    let (mut stream, _) = assert_ok!(listener.accept().await);
    connected_rx.await.unwrap();
    assert_ok!(stream.shutdown().await);
    client.await.unwrap();
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/tests/io_write.rs | tokio/tests/io_write.rs | #![warn(rust_2018_idioms)]
#![cfg(feature = "full")]
use tokio::io::{AsyncWrite, AsyncWriteExt};
use tokio_test::assert_ok;
use bytes::BytesMut;
use std::io;
use std::pin::Pin;
use std::task::{Context, Poll};
#[tokio::test]
async fn write() {
    // Mock writer that accepts only the first 4 bytes of each write,
    // reporting a short write.
    struct Wr {
        buf: BytesMut,
        // NOTE(review): `cnt` is never incremented, so the assert below is
        // vacuous; it documents the intent that `write` performs a single
        // `poll_write` without retrying — confirm before relying on it.
        cnt: usize,
    }
    impl AsyncWrite for Wr {
        fn poll_write(
            mut self: Pin<&mut Self>,
            _cx: &mut Context<'_>,
            buf: &[u8],
        ) -> Poll<io::Result<usize>> {
            assert_eq!(self.cnt, 0);
            // Accept only the first 4 bytes and report a short write.
            self.buf.extend(&buf[0..4]);
            Ok(4).into()
        }
        fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<()>> {
            Ok(()).into()
        }
        fn poll_shutdown(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<()>> {
            Ok(()).into()
        }
    }
    let mut wr = Wr {
        buf: BytesMut::with_capacity(64),
        cnt: 0,
    };
    // `write` issues one underlying write and returns the short count as-is.
    let n = assert_ok!(wr.write(b"hello world").await);
    assert_eq!(n, 4);
    assert_eq!(wr.buf, b"hell"[..]);
}
#[tokio::test]
async fn write_cursor() {
    use std::io::Cursor;
    // A `Cursor<Vec<u8>>` accepts the entire buffer in a single write.
    let mut cursor = Cursor::new(Vec::new());
    let written = assert_ok!(cursor.write(b"hello world").await);
    assert_eq!(written, 11);
    assert_eq!(cursor.get_ref().as_slice(), &b"hello world"[..]);
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/tests/time_interval.rs | tokio/tests/time_interval.rs | #![warn(rust_2018_idioms)]
#![cfg(feature = "full")]
use std::pin::Pin;
use std::task::{Context, Poll};
use futures::{Stream, StreamExt};
use tokio::time::{self, Duration, Instant, Interval, MissedTickBehavior};
use tokio_test::{assert_pending, assert_ready, assert_ready_eq, task};
// Takes the `Interval` task, `start` variable, and optional time deltas
// For each time delta, it polls the `Interval` and asserts that the result is
// equal to `start` + the specific time delta. Then it asserts that the
// `Interval` is pending.
macro_rules! check_interval_poll {
    ($i:ident, $start:ident, $($delta:expr),*$(,)?) => {
        // Each expected tick must resolve to `start + delta` ...
        $(
            assert_ready_eq!(poll_next(&mut $i), $start + ms($delta));
        )*
        // ... and afterwards the interval must be pending again.
        assert_pending!(poll_next(&mut $i));
    };
    ($i:ident, $start:ident) => {
        // No deltas: the interval must simply be pending.
        check_interval_poll!($i, $start,);
    };
}
#[tokio::test]
#[should_panic]
async fn interval_zero_duration() {
    // A zero period is invalid: constructing the interval must panic.
    let _ = time::interval_at(Instant::now(), ms(0));
}
// Expected ticks: | 1 | 2 | 3 | 4 | 5 | 6 |
// Actual ticks: | work -----| delay | work | work | work -| work -----|
// Poll behavior: | | | | | | | |
// | | | | | | | |
// Ready(s) | | Ready(s + 2p) | | | |
// Pending | Ready(s + 3p) | | |
// Ready(s + p) Ready(s + 4p) | |
// Ready(s + 5p) |
// Ready(s + 6p)
#[tokio::test(start_paused = true)]
async fn burst() {
    let start = Instant::now();
    // This is necessary because the timer is only so granular, and in order for
    // all our ticks to resolve, the time needs to be 1ms ahead of what we
    // expect, so that the runtime will see that it is time to resolve the timer
    time::advance(ms(1)).await;
    let mut i = task::spawn(time::interval_at(start, ms(300)));
    // No `set_missed_tick_behavior` call: this exercises the default (burst)
    // behavior, where missed ticks resolve back-to-back (see 600 and 900 below).
    check_interval_poll!(i, start, 0);
    time::advance(ms(100)).await;
    check_interval_poll!(i, start);
    time::advance(ms(200)).await;
    check_interval_poll!(i, start, 300);
    time::advance(ms(650)).await;
    check_interval_poll!(i, start, 600, 900);
    time::advance(ms(200)).await;
    check_interval_poll!(i, start);
    time::advance(ms(100)).await;
    check_interval_poll!(i, start, 1200);
    time::advance(ms(250)).await;
    check_interval_poll!(i, start, 1500);
    time::advance(ms(300)).await;
    check_interval_poll!(i, start, 1800);
}
// Expected ticks: | 1 | 2 | 3 | 4 | 5 | 6 |
// Actual ticks: | work -----| delay | work -----| work -----| work -----|
// Poll behavior: | | | | | | | |
// | | | | | | | |
// Ready(s) | | Ready(s + 2p) | | | |
// Pending | Pending | | |
// Ready(s + p) Ready(s + 2p + d) | |
// Ready(s + 3p + d) |
// Ready(s + 4p + d)
#[tokio::test(start_paused = true)]
async fn delay() {
    let start = Instant::now();
    // This is necessary because the timer is only so granular, and in order for
    // all our ticks to resolve, the time needs to be 1ms ahead of what we
    // expect, so that the runtime will see that it is time to resolve the timer
    time::advance(ms(1)).await;
    let mut i = task::spawn(time::interval_at(start, ms(300)));
    // With `Delay`, a missed tick reschedules the series relative to the poll
    // that observed the miss (hence the 1251/1551/1851 expectations below).
    i.set_missed_tick_behavior(MissedTickBehavior::Delay);
    check_interval_poll!(i, start, 0);
    time::advance(ms(100)).await;
    check_interval_poll!(i, start);
    time::advance(ms(200)).await;
    check_interval_poll!(i, start, 300);
    time::advance(ms(650)).await;
    check_interval_poll!(i, start, 600);
    time::advance(ms(100)).await;
    check_interval_poll!(i, start);
    // We have to add one here for the same reason as is above.
    // Because `Interval` has reset its timer according to `Instant::now()`,
    // we have to go forward 1 more millisecond than is expected so that the
    // runtime realizes that it's time to resolve the timer.
    time::advance(ms(201)).await;
    // We add one because when using the `Delay` behavior, `Interval`
    // adds the `period` from `Instant::now()`, which will always be off by one
    // because we have to advance time by 1 (see above).
    check_interval_poll!(i, start, 1251);
    time::advance(ms(300)).await;
    // Again, we add one.
    check_interval_poll!(i, start, 1551);
    time::advance(ms(300)).await;
    check_interval_poll!(i, start, 1851);
}
// Expected ticks: | 1 | 2 | 3 | 4 | 5 | 6 |
// Actual ticks: | work -----| delay | work ---| work -----| work -----|
// Poll behavior: | | | | | | |
// | | | | | | |
// Ready(s) | | Ready(s + 2p) | | |
// Pending | Ready(s + 4p) | |
// Ready(s + p) Ready(s + 5p) |
// Ready(s + 6p)
// Verifies `MissedTickBehavior::Skip`: missed ticks are dropped and the
// schedule stays aligned to multiples of `period` from `start` (see diagram).
#[tokio::test(start_paused = true)]
async fn skip() {
    let start = Instant::now();
    // This is necessary because the timer is only so granular, and in order for
    // all our ticks to resolve, the time needs to be 1ms ahead of what we
    // expect, so that the runtime will see that it is time to resolve the timer
    time::advance(ms(1)).await;
    let mut i = task::spawn(time::interval_at(start, ms(300)));
    i.set_missed_tick_behavior(MissedTickBehavior::Skip);
    check_interval_poll!(i, start, 0);
    time::advance(ms(100)).await;
    check_interval_poll!(i, start);
    time::advance(ms(200)).await;
    check_interval_poll!(i, start, 300);
    // Overshoot the 600ms tick; the 900ms tick is skipped and the next tick
    // lands back on the original 1200ms grid line.
    time::advance(ms(650)).await;
    check_interval_poll!(i, start, 600);
    time::advance(ms(250)).await;
    check_interval_poll!(i, start, 1200);
    time::advance(ms(300)).await;
    check_interval_poll!(i, start, 1500);
    time::advance(ms(300)).await;
    check_interval_poll!(i, start, 1800);
}
// `reset()` discards the original schedule: the next tick fires a full period
// after the call, measured from `Instant::now()`.
#[tokio::test(start_paused = true)]
async fn reset() {
    let start = Instant::now();
    // This is necessary because the timer is only so granular, and in order for
    // all our ticks to resolve, the time needs to be 1ms ahead of what we
    // expect, so that the runtime will see that it is time to resolve the timer
    time::advance(ms(1)).await;
    let mut i = task::spawn(time::interval_at(start, ms(300)));
    check_interval_poll!(i, start, 0);
    time::advance(ms(100)).await;
    check_interval_poll!(i, start);
    time::advance(ms(200)).await;
    check_interval_poll!(i, start, 300);
    time::advance(ms(100)).await;
    check_interval_poll!(i, start);
    i.reset();
    // The tick that would have fired at 600ms is gone; nothing is ready yet.
    time::advance(ms(250)).await;
    check_interval_poll!(i, start);
    time::advance(ms(50)).await;
    // We add one because when using `reset` method, `Interval` adds the
    // `period` from `Instant::now()`, which will always be off by one
    check_interval_poll!(i, start, 701);
    time::advance(ms(300)).await;
    check_interval_poll!(i, start, 1001);
}
// `reset_immediately()` makes a tick available right away and re-bases the
// subsequent schedule on `Instant::now()`.
#[tokio::test(start_paused = true)]
async fn reset_immediately() {
    let start = Instant::now();
    // This is necessary because the timer is only so granular, and in order for
    // all our ticks to resolve, the time needs to be 1ms ahead of what we
    // expect, so that the runtime will see that it is time to resolve the timer
    time::advance(ms(1)).await;
    let mut i = task::spawn(time::interval_at(start, ms(300)));
    check_interval_poll!(i, start, 0);
    time::advance(ms(100)).await;
    check_interval_poll!(i, start);
    time::advance(ms(200)).await;
    check_interval_poll!(i, start, 300);
    time::advance(ms(100)).await;
    check_interval_poll!(i, start);
    i.reset_immediately();
    // We add one because when using `reset` method, `Interval` adds the
    // `period` from `Instant::now()`, which will always be off by one
    check_interval_poll!(i, start, 401);
    time::advance(ms(100)).await;
    check_interval_poll!(i, start);
    time::advance(ms(200)).await;
    check_interval_poll!(i, start, 701);
}
// `reset_after(dur)` schedules the next tick `dur` from now, then continues
// with the normal period.
#[tokio::test(start_paused = true)]
async fn reset_after() {
    let start = Instant::now();
    // This is necessary because the timer is only so granular, and in order for
    // all our ticks to resolve, the time needs to be 1ms ahead of what we
    // expect, so that the runtime will see that it is time to resolve the timer
    time::advance(ms(1)).await;
    let mut i = task::spawn(time::interval_at(start, ms(300)));
    check_interval_poll!(i, start, 0);
    time::advance(ms(100)).await;
    check_interval_poll!(i, start);
    time::advance(ms(200)).await;
    check_interval_poll!(i, start, 300);
    time::advance(ms(100)).await;
    check_interval_poll!(i, start);
    i.reset_after(Duration::from_millis(20));
    // We add one because when using `reset` method, `Interval` adds the
    // `period` from `Instant::now()`, which will always be off by one
    time::advance(ms(20)).await;
    check_interval_poll!(i, start, 421);
    time::advance(ms(100)).await;
    check_interval_poll!(i, start);
    time::advance(ms(200)).await;
    check_interval_poll!(i, start, 721);
}
// `reset_at(instant)` schedules the next tick at an absolute instant.
#[tokio::test(start_paused = true)]
async fn reset_at() {
    let start = Instant::now();
    // This is necessary because the timer is only so granular, and in order for
    // all our ticks to resolve, the time needs to be 1ms ahead of what we
    // expect, so that the runtime will see that it is time to resolve the timer
    time::advance(ms(1)).await;
    let mut i = task::spawn(time::interval_at(start, ms(300)));
    check_interval_poll!(i, start, 0);
    time::advance(ms(100)).await;
    check_interval_poll!(i, start);
    time::advance(ms(200)).await;
    check_interval_poll!(i, start, 300);
    time::advance(ms(100)).await;
    check_interval_poll!(i, start);
    i.reset_at(Instant::now() + Duration::from_millis(40));
    // We add one because when using `reset` method, `Interval` adds the
    // `period` from `Instant::now()`, which will always be off by one
    time::advance(ms(40)).await;
    check_interval_poll!(i, start, 441);
    time::advance(ms(100)).await;
    check_interval_poll!(i, start);
    time::advance(ms(200)).await;
    check_interval_poll!(i, start, 741);
}
// `reset_at` with a deadline further away than one period must suppress
// ticking until that deadline instead of ticking on the old schedule.
#[tokio::test(start_paused = true)]
async fn reset_at_bigger_than_interval() {
    let start = Instant::now();
    // This is necessary because the timer is only so granular, and in order for
    // all our ticks to resolve, the time needs to be 1ms ahead of what we
    // expect, so that the runtime will see that it is time to resolve the timer
    time::advance(ms(1)).await;
    let mut i = task::spawn(time::interval_at(start, ms(300)));
    check_interval_poll!(i, start, 0);
    time::advance(ms(100)).await;
    check_interval_poll!(i, start);
    time::advance(ms(200)).await;
    check_interval_poll!(i, start, 300);
    time::advance(ms(100)).await;
    check_interval_poll!(i, start);
    i.reset_at(Instant::now() + Duration::from_millis(1000));
    // Validate the interval does not tick until 1000ms have passed
    time::advance(ms(300)).await;
    check_interval_poll!(i, start);
    time::advance(ms(300)).await;
    check_interval_poll!(i, start);
    time::advance(ms(300)).await;
    check_interval_poll!(i, start);
    // We add one because when using `reset` method, `Interval` adds the
    // `period` from `Instant::now()`, which will always be off by one
    time::advance(ms(100)).await;
    check_interval_poll!(i, start, 1401);
    time::advance(ms(300)).await;
    check_interval_poll!(i, start, 1701);
}
/// Polls the spawned interval exactly once, inside its mock-task context.
fn poll_next(interval: &mut task::Spawn<time::Interval>) -> Poll<Instant> {
    interval.enter(|task_cx, mut timer| timer.poll_tick(task_cx))
}
/// Test shorthand: a `Duration` of `n` milliseconds.
fn ms(n: u64) -> Duration {
    let millis = n;
    Duration::from_millis(millis)
}
/// Helper struct to test the [tokio::time::Interval::poll_tick()] method.
///
/// `poll_tick()` should register the waker in the context only if it returns
/// `Poll::Pending`, not when returning `Poll::Ready`. This struct contains an
/// interval timer and counts up on every tick when used as stream. When the
/// counter is a multiple of four, it yields the current counter value.
/// Depending on the value for `wake_on_pending`, it will reschedule itself when
/// it returns `Poll::Pending` or not. When used with `wake_on_pending=false`,
/// we expect that the stream stalls because the timer will **not** reschedule
/// the next wake-up itself once it returned `Poll::Ready`.
struct IntervalStreamer {
    // Ticks observed so far; the stream terminates once this exceeds 12.
    counter: u32,
    // The interval timer driving the stream.
    timer: Interval,
    // Whether `poll_next` self-wakes when it returns `Poll::Pending`.
    wake_on_pending: bool,
}
impl Stream for IntervalStreamer {
    type Item = u32;

    /// Counts interval ticks, yielding the counter on every fourth tick and
    /// ending the stream once the counter passes 12. Self-wakes on `Pending`
    /// only when `wake_on_pending` is set (see the struct docs).
    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        let this = Pin::into_inner(self);

        // Terminate the stream after the 12th tick has been counted.
        if this.counter > 12 {
            return Poll::Ready(None);
        }

        // Not time for a tick yet; `poll_tick` registered the waker for us.
        if this.timer.poll_tick(cx).is_pending() {
            return Poll::Pending;
        }

        this.counter += 1;
        if this.counter % 4 == 0 {
            return Poll::Ready(Some(this.counter));
        }

        if this.wake_on_pending {
            // Schedule this task for wake-up; after `Ready` the interval
            // will not do it on our behalf.
            cx.waker().wake_by_ref();
        }
        Poll::Pending
    }
}
// With `wake_on_pending=true` the stream drives itself forward, so all three
// multiples of four are produced before the counter passes 12.
#[tokio::test(start_paused = true)]
async fn stream_with_interval_poll_tick_self_waking() {
    let stream = IntervalStreamer {
        counter: 0,
        timer: tokio::time::interval(tokio::time::Duration::from_millis(10)),
        wake_on_pending: true,
    };
    let (res_tx, mut res_rx) = tokio::sync::mpsc::channel(12);
    // Wrap task in timeout so that it will finish eventually even if the stream
    // stalls.
    tokio::spawn(tokio::time::timeout(
        tokio::time::Duration::from_millis(150),
        async move {
            tokio::pin!(stream);
            while let Some(item) = stream.next().await {
                res_tx.send(item).await.ok();
            }
        },
    ));
    let mut items = Vec::with_capacity(3);
    // The channel closes once the producer task (or its timeout) finishes.
    while let Some(result) = res_rx.recv().await {
        items.push(result);
    }
    // We expect the stream to yield normally and thus three items.
    assert_eq!(items, vec![4, 8, 12]);
}
// With `wake_on_pending=false` the stream never reschedules itself after the
// interval returned `Ready`, so no items are produced before the timeout.
#[tokio::test(start_paused = true)]
async fn stream_with_interval_poll_tick_no_waking() {
    let stream = IntervalStreamer {
        counter: 0,
        timer: tokio::time::interval(tokio::time::Duration::from_millis(10)),
        wake_on_pending: false,
    };
    let (res_tx, mut res_rx) = tokio::sync::mpsc::channel(12);
    // Wrap task in timeout so that it will finish eventually even if the stream
    // stalls.
    tokio::spawn(tokio::time::timeout(
        tokio::time::Duration::from_millis(150),
        async move {
            tokio::pin!(stream);
            while let Some(item) = stream.next().await {
                res_tx.send(item).await.ok();
            }
        },
    ));
    // Zero capacity: no items are expected to arrive (see assertion below).
    let mut items = Vec::with_capacity(0);
    while let Some(result) = res_rx.recv().await {
        items.push(result);
    }
    // We expect the stream to stall because it does not reschedule itself on
    // `Poll::Pending` and neither does [tokio::time::Interval] reschedule the
    // task when returning `Poll::Ready`.
    assert_eq!(items, vec![]);
}
// Regression test: polling an interval created with the maximum possible
// period must not panic; the first tick is still ready immediately.
#[tokio::test(start_paused = true)]
async fn interval_doesnt_panic_max_duration_when_polling() {
    let mut timer = task::spawn(time::interval(Duration::MAX));
    assert_ready!(timer.enter(|cx, mut timer| timer.poll_tick(cx)));
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/tests/sync_mutex.rs | tokio/tests/sync_mutex.rs | #![warn(rust_2018_idioms)]
#![cfg(feature = "sync")]
#[cfg(all(target_family = "wasm", not(target_os = "wasi")))]
use wasm_bindgen_test::wasm_bindgen_test as test;
#[cfg(all(target_family = "wasm", not(target_os = "wasi")))]
use wasm_bindgen_test::wasm_bindgen_test as maybe_tokio_test;
#[cfg(not(all(target_family = "wasm", not(target_os = "wasi"))))]
use tokio::test as maybe_tokio_test;
use tokio::sync::Mutex;
use tokio_test::task::spawn;
use tokio_test::{assert_pending, assert_ready};
use std::sync::Arc;
#[test]
fn straight_execution() {
    // Uncontended lock/unlock: each scope acquires the mutex immediately,
    // observes the value written by the previous scope, and stores a new one.
    let l = Mutex::new(100);

    {
        let mut t = spawn(l.lock());
        let mut g = assert_ready!(t.poll());
        assert_eq!(*g, 100);
        *g = 99;
    }

    {
        let mut t = spawn(l.lock());
        let mut g = assert_ready!(t.poll());
        assert_eq!(*g, 99);
        *g = 98;
    }

    {
        let mut t = spawn(l.lock());
        let g = assert_ready!(t.poll());
        assert_eq!(*g, 98);
    }
}
// A second locker is pending while the guard is held, is woken when the guard
// drops, and then acquires the lock.
#[test]
fn readiness() {
    let l1 = Arc::new(Mutex::new(100));
    let l2 = Arc::clone(&l1);
    let mut t1 = spawn(l1.lock());
    let mut t2 = spawn(l2.lock());
    let g = assert_ready!(t1.poll());
    // We can't now acquire the lease since it's already held in g
    assert_pending!(t2.poll());
    // But once g unlocks, we can acquire it
    drop(g);
    assert!(t2.is_woken());
    let _t2 = assert_ready!(t2.poll());
}
/*
#[test]
#[ignore]
fn lock() {
let mut lock = Mutex::new(false);
let mut lock2 = lock.clone();
std::thread::spawn(move || {
let l = lock2.lock();
pin_mut!(l);
let mut task = MockTask::new();
let mut g = assert_ready!(task.poll(&mut l));
std::thread::sleep(std::time::Duration::from_millis(500));
*g = true;
drop(g);
});
std::thread::sleep(std::time::Duration::from_millis(50));
let mut task = MockTask::new();
let l = lock.lock();
pin_mut!(l);
assert_pending!(task.poll(&mut l));
std::thread::sleep(std::time::Duration::from_millis(500));
assert!(task.is_woken());
let result = assert_ready!(task.poll(&mut l));
assert!(*result);
}
*/
/// Ensure a mutex is unlocked if a future holding the lock
/// is aborted prematurely.
#[tokio::test]
#[cfg(feature = "full")]
async fn aborted_future_1() {
    use std::time::Duration;
    use tokio::time::{interval, timeout};
    let m1: Arc<Mutex<usize>> = Arc::new(Mutex::new(0));
    {
        let m2 = m1.clone();
        // Try to lock mutex in a future that is aborted prematurely
        // (the 1ms timeout fires while the future is parked on `tick`).
        timeout(Duration::from_millis(1u64), async move {
            let iv = interval(Duration::from_millis(1000));
            tokio::pin!(iv);
            let _g = m2.lock().await;
            iv.as_mut().tick().await;
            iv.as_mut().tick().await;
        })
        .await
        .unwrap_err();
    }
    // This should succeed as there is no lock left for the mutex.
    timeout(Duration::from_millis(1u64), async move {
        let _g = m1.lock().await;
    })
    .await
    .expect("Mutex is locked");
}
/// This test is similar to `aborted_future_1` but this time the
/// aborted future is waiting for the lock.
#[tokio::test]
#[cfg(feature = "full")]
async fn aborted_future_2() {
    use std::time::Duration;
    use tokio::time::timeout;
    let m1: Arc<Mutex<usize>> = Arc::new(Mutex::new(0));
    {
        // Lock mutex
        let _lock = m1.lock().await;
        {
            let m2 = m1.clone();
            // Try to lock mutex in a future that is aborted prematurely
            // (it is still queued behind `_lock` when the timeout fires).
            timeout(Duration::from_millis(1u64), async move {
                let _g = m2.lock().await;
            })
            .await
            .unwrap_err();
        }
    }
    // This should succeed as there is no lock left for the mutex.
    timeout(Duration::from_millis(1u64), async move {
        let _g = m1.lock().await;
    })
    .await
    .expect("Mutex is locked");
}
#[test]
fn try_lock() {
    // `try_lock` succeeds only while no guard is alive.
    let m: Mutex<usize> = Mutex::new(0);
    {
        let first = m.try_lock();
        assert!(first.is_ok());
        // A second attempt fails while the first guard is still held.
        let second = m.try_lock();
        assert!(second.is_err());
    }
    // Guard dropped: the lock is available again.
    let third = m.try_lock();
    assert!(third.is_ok());
}
#[maybe_tokio_test]
async fn debug_format() {
    // The guard's `Debug` output matches that of the wrapped value.
    let s = "debug";
    let m = Mutex::new(s.to_string());
    let guard = m.lock().await;
    assert_eq!(format!("{s:?}"), format!("{guard:?}"));
}
// The mutex's own `Debug` output shows the data when unlocked and the
// `<locked>` placeholder while a guard is held.
#[maybe_tokio_test]
async fn mutex_debug() {
    let s = "data";
    let m = Mutex::new(s.to_string());
    assert_eq!(format!("{m:?}"), r#"Mutex { data: "data" }"#);
    let _guard = m.lock().await;
    assert_eq!(format!("{m:?}"), r#"Mutex { data: <locked> }"#)
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/tests/rt_common.rs | tokio/tests/rt_common.rs | #![allow(clippy::needless_range_loop)]
#![warn(rust_2018_idioms)]
#![cfg(feature = "full")]
#![cfg(not(miri))]
// Tests to run on both current-thread & multi-thread runtime variants.
// Stamps out the given test items once per scheduler flavor. Each generated
// module provides the items with an `rt()` constructor and a `NUM_WORKERS`
// constant describing that flavor.
macro_rules! rt_test {
    ($($t:tt)*) => {
        mod current_thread_scheduler {
            $($t)*
            #[cfg(not(target_os="wasi"))]
            const NUM_WORKERS: usize = 1;
            fn rt() -> Arc<Runtime> {
                tokio::runtime::Builder::new_current_thread()
                    .enable_all()
                    .build()
                    .unwrap()
                    .into()
            }
        }
        #[cfg(not(target_os = "wasi"))] // Wasi doesn't support threads
        mod threaded_scheduler_4_threads {
            $($t)*
            const NUM_WORKERS: usize = 4;
            fn rt() -> Arc<Runtime> {
                tokio::runtime::Builder::new_multi_thread()
                    .worker_threads(4)
                    .enable_all()
                    .build()
                    .unwrap()
                    .into()
            }
        }
        #[cfg(not(target_os = "wasi"))] // Wasi doesn't support threads
        mod threaded_scheduler_1_thread {
            $($t)*
            const NUM_WORKERS: usize = 1;
            fn rt() -> Arc<Runtime> {
                tokio::runtime::Builder::new_multi_thread()
                    .worker_threads(1)
                    .enable_all()
                    .build()
                    .unwrap()
                    .into()
            }
        }
    }
}
#[test]
fn send_sync_bound() {
    use tokio::runtime::Runtime;

    // Compile-time assertion: `Runtime` is both `Send` and `Sync`.
    fn assert_send_and_sync<T: Send + Sync>() {}

    assert_send_and_sync::<Runtime>();
}
rt_test! {
#[cfg(not(target_os="wasi"))]
use tokio::net::{TcpListener, TcpStream};
#[cfg(not(target_os="wasi"))]
use tokio::io::{AsyncReadExt, AsyncWriteExt};
use tokio::runtime::Runtime;
use tokio::sync::oneshot;
use tokio::{task, time};
#[cfg(not(target_os="wasi"))]
use tokio_test::assert_err;
use tokio_test::assert_ok;
use std::future::{poll_fn, Future};
use std::pin::Pin;
#[cfg(not(target_os="wasi"))]
use std::sync::mpsc;
use std::sync::Arc;
use std::task::{Context, Poll};
#[cfg(not(target_os="wasi"))]
use std::thread;
use std::time::{Duration, Instant};
#[test]
fn block_on_sync() {
let rt = rt();
let mut win = false;
rt.block_on(async {
win = true;
});
assert!(win);
}
#[cfg(not(target_os="wasi"))]
#[test]
fn block_on_async() {
let rt = rt();
let out = rt.block_on(async {
let (tx, rx) = oneshot::channel();
thread::spawn(move || {
thread::sleep(Duration::from_millis(50));
tx.send("ZOMG").unwrap();
});
assert_ok!(rx.await)
});
assert_eq!(out, "ZOMG");
}
#[test]
fn spawn_one_bg() {
let rt = rt();
let out = rt.block_on(async {
let (tx, rx) = oneshot::channel();
tokio::spawn(async move {
tx.send("ZOMG").unwrap();
});
assert_ok!(rx.await)
});
assert_eq!(out, "ZOMG");
}
#[test]
fn spawn_one_join() {
let rt = rt();
let out = rt.block_on(async {
let (tx, rx) = oneshot::channel();
let handle = tokio::spawn(async move {
tx.send("ZOMG").unwrap();
"DONE"
});
let msg = assert_ok!(rx.await);
let out = assert_ok!(handle.await);
assert_eq!(out, "DONE");
msg
});
assert_eq!(out, "ZOMG");
}
#[test]
fn spawn_two() {
let rt = rt();
let out = rt.block_on(async {
let (tx1, rx1) = oneshot::channel();
let (tx2, rx2) = oneshot::channel();
tokio::spawn(async move {
assert_ok!(tx1.send("ZOMG"));
});
tokio::spawn(async move {
let msg = assert_ok!(rx1.await);
assert_ok!(tx2.send(msg));
});
assert_ok!(rx2.await)
});
assert_eq!(out, "ZOMG");
}
#[cfg(not(target_os="wasi"))] // Wasi does not support threads
#[test]
fn spawn_many_from_block_on() {
use tokio::sync::mpsc;
const ITER: usize = 200;
let rt = rt();
let out = rt.block_on(async {
let (done_tx, mut done_rx) = mpsc::unbounded_channel();
let mut txs = (0..ITER)
.map(|i| {
let (tx, rx) = oneshot::channel();
let done_tx = done_tx.clone();
tokio::spawn(async move {
let msg = assert_ok!(rx.await);
assert_eq!(i, msg);
assert_ok!(done_tx.send(msg));
});
tx
})
.collect::<Vec<_>>();
drop(done_tx);
thread::spawn(move || {
for (i, tx) in txs.drain(..).enumerate() {
assert_ok!(tx.send(i));
}
});
let mut out = vec![];
while let Some(i) = done_rx.recv().await {
out.push(i);
}
out.sort_unstable();
out
});
assert_eq!(ITER, out.len());
for i in 0..ITER {
assert_eq!(i, out[i]);
}
}
#[cfg(not(target_os="wasi"))] // Wasi does not support threads
#[test]
fn spawn_many_from_task() {
use tokio::sync::mpsc;
const ITER: usize = 500;
let rt = rt();
let out = rt.block_on(async {
tokio::spawn(async move {
let (done_tx, mut done_rx) = mpsc::unbounded_channel();
let mut txs = (0..ITER)
.map(|i| {
let (tx, rx) = oneshot::channel();
let done_tx = done_tx.clone();
tokio::spawn(async move {
let msg = assert_ok!(rx.await);
assert_eq!(i, msg);
assert_ok!(done_tx.send(msg));
});
tx
})
.collect::<Vec<_>>();
drop(done_tx);
thread::spawn(move || {
for (i, tx) in txs.drain(..).enumerate() {
assert_ok!(tx.send(i));
}
});
let mut out = vec![];
while let Some(i) = done_rx.recv().await {
out.push(i);
}
out.sort_unstable();
out
}).await.unwrap()
});
assert_eq!(ITER, out.len());
for i in 0..ITER {
assert_eq!(i, out[i]);
}
}
#[test]
fn spawn_one_from_block_on_called_on_handle() {
let rt = rt();
let (tx, rx) = oneshot::channel();
#[allow(clippy::async_yields_async)]
let handle = rt.handle().block_on(async {
tokio::spawn(async move {
tx.send("ZOMG").unwrap();
"DONE"
})
});
let out = rt.block_on(async {
let msg = assert_ok!(rx.await);
let out = assert_ok!(handle.await);
assert_eq!(out, "DONE");
msg
});
assert_eq!(out, "ZOMG");
}
#[test]
fn spawn_await_chain() {
let rt = rt();
let out = rt.block_on(async {
assert_ok!(tokio::spawn(async {
assert_ok!(tokio::spawn(async {
"hello"
}).await)
}).await)
});
assert_eq!(out, "hello");
}
#[test]
fn outstanding_tasks_dropped() {
let rt = rt();
let cnt = Arc::new(());
rt.block_on(async {
let cnt = cnt.clone();
tokio::spawn(poll_fn(move |_| {
assert_eq!(2, Arc::strong_count(&cnt));
Poll::<()>::Pending
}));
});
assert_eq!(2, Arc::strong_count(&cnt));
drop(rt);
assert_eq!(1, Arc::strong_count(&cnt));
}
#[test]
#[should_panic]
fn nested_rt() {
let rt1 = rt();
let rt2 = rt();
rt1.block_on(async { rt2.block_on(async { "hello" }) });
}
#[test]
fn create_rt_in_block_on() {
let rt1 = rt();
let rt2 = rt1.block_on(async { rt() });
let out = rt2.block_on(async { "ZOMG" });
assert_eq!(out, "ZOMG");
}
#[cfg(not(target_os="wasi"))] // Wasi does not support threads
#[test]
fn complete_block_on_under_load() {
let rt = rt();
rt.block_on(async {
let (tx, rx) = oneshot::channel();
// Spin hard
tokio::spawn(async {
loop {
yield_once().await;
}
});
thread::spawn(move || {
thread::sleep(Duration::from_millis(50));
assert_ok!(tx.send(()));
});
assert_ok!(rx.await);
});
}
#[cfg(not(target_os="wasi"))] // Wasi does not support threads
#[test]
fn complete_task_under_load() {
let rt = rt();
rt.block_on(async {
let (tx1, rx1) = oneshot::channel();
let (tx2, rx2) = oneshot::channel();
// Spin hard
tokio::spawn(async {
loop {
yield_once().await;
}
});
thread::spawn(move || {
thread::sleep(Duration::from_millis(50));
assert_ok!(tx1.send(()));
});
tokio::spawn(async move {
assert_ok!(rx1.await);
assert_ok!(tx2.send(()));
});
assert_ok!(rx2.await);
});
}
#[cfg(not(target_os="wasi"))] // Wasi does not support threads
#[test]
fn spawn_from_other_thread_idle() {
let rt = rt();
let handle = rt.clone();
let (tx, rx) = oneshot::channel();
thread::spawn(move || {
thread::sleep(Duration::from_millis(50));
handle.spawn(async move {
assert_ok!(tx.send(()));
});
});
rt.block_on(async move {
assert_ok!(rx.await);
});
}
#[cfg(not(target_os="wasi"))] // Wasi does not support threads
#[test]
fn spawn_from_other_thread_under_load() {
let rt = rt();
let handle = rt.clone();
let (tx, rx) = oneshot::channel();
thread::spawn(move || {
handle.spawn(async move {
assert_ok!(tx.send(()));
});
});
rt.block_on(async move {
// Spin hard
tokio::spawn(async {
loop {
yield_once().await;
}
});
assert_ok!(rx.await);
});
}
#[test]
fn sleep_at_root() {
let rt = rt();
let now = Instant::now();
let dur = Duration::from_millis(50);
rt.block_on(async move {
time::sleep(dur).await;
});
assert!(now.elapsed() >= dur);
}
#[test]
fn sleep_in_spawn() {
let rt = rt();
let now = Instant::now();
let dur = Duration::from_millis(50);
rt.block_on(async move {
let (tx, rx) = oneshot::channel();
tokio::spawn(async move {
time::sleep(dur).await;
assert_ok!(tx.send(()));
});
assert_ok!(rx.await);
});
assert!(now.elapsed() >= dur);
}
#[cfg(not(target_os="wasi"))] // Wasi does not support bind
#[cfg_attr(miri, ignore)] // No `socket` in miri.
#[test]
fn block_on_socket() {
let rt = rt();
rt.block_on(async move {
let (tx, rx) = oneshot::channel();
let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
let addr = listener.local_addr().unwrap();
tokio::spawn(async move {
let _ = listener.accept().await;
tx.send(()).unwrap();
});
TcpStream::connect(&addr).await.unwrap();
rx.await.unwrap();
});
}
#[cfg(not(target_os="wasi"))] // Wasi does not support threads
#[test]
fn spawn_from_blocking() {
let rt = rt();
let out = rt.block_on(async move {
let inner = assert_ok!(tokio::task::spawn_blocking(|| {
tokio::spawn(async move { "hello" })
}).await);
assert_ok!(inner.await)
});
assert_eq!(out, "hello")
}
#[cfg(not(target_os="wasi"))] // Wasi does not support threads
#[test]
fn spawn_blocking_from_blocking() {
let rt = rt();
let out = rt.block_on(async move {
let inner = assert_ok!(tokio::task::spawn_blocking(|| {
tokio::task::spawn_blocking(|| "hello")
}).await);
assert_ok!(inner.await)
});
assert_eq!(out, "hello")
}
#[cfg(not(target_os="wasi"))] // Wasi does not support threads
#[test]
fn sleep_from_blocking() {
let rt = rt();
rt.block_on(async move {
assert_ok!(tokio::task::spawn_blocking(|| {
let now = std::time::Instant::now();
let dur = Duration::from_millis(1);
// use the futures' block_on fn to make sure we aren't setting
// any Tokio context
futures::executor::block_on(async {
tokio::time::sleep(dur).await;
});
assert!(now.elapsed() >= dur);
}).await);
});
}
#[cfg(not(target_os="wasi"))] // Wasi does not support bind
#[cfg_attr(miri, ignore)] // No `socket` in miri.
#[test]
fn socket_from_blocking() {
let rt = rt();
rt.block_on(async move {
let listener = assert_ok!(TcpListener::bind("127.0.0.1:0").await);
let addr = assert_ok!(listener.local_addr());
let peer = tokio::task::spawn_blocking(move || {
// use the futures' block_on fn to make sure we aren't setting
// any Tokio context
futures::executor::block_on(async {
assert_ok!(TcpStream::connect(addr).await);
});
});
// Wait for the client to connect
let _ = assert_ok!(listener.accept().await);
assert_ok!(peer.await);
});
}
#[cfg(not(target_os="wasi"))] // Wasi does not support threads
#[test]
fn always_active_parker() {
// This test it to show that we will always have
// an active parker even if we call block_on concurrently
let rt = rt();
let rt2 = rt.clone();
let (tx1, rx1) = oneshot::channel();
let (tx2, rx2) = oneshot::channel();
let jh1 = thread::spawn(move || {
rt.block_on(async move {
rx2.await.unwrap();
time::sleep(Duration::from_millis(5)).await;
tx1.send(()).unwrap();
});
});
let jh2 = thread::spawn(move || {
rt2.block_on(async move {
tx2.send(()).unwrap();
time::sleep(Duration::from_millis(5)).await;
rx1.await.unwrap();
time::sleep(Duration::from_millis(5)).await;
});
});
jh1.join().unwrap();
jh2.join().unwrap();
}
#[test]
// IOCP requires setting the "max thread" concurrency value. The sane,
// default, is to set this to the number of cores. Threads that poll I/O
// become associated with the IOCP handle. Once those threads sleep for any
// reason (mutex), they yield their ownership.
//
// This test hits an edge case on windows where more threads than cores are
// created, none of those threads ever yield due to being at capacity, so
// IOCP gets "starved".
//
// For now, this is a very edge case that is probably not a real production
// concern. There also isn't a great/obvious solution to take. For now, the
// test is disabled.
#[cfg(not(windows))]
#[cfg_attr(miri, ignore)] // No `socket` in miri.
#[cfg(not(target_os="wasi"))] // Wasi does not support bind or threads
fn io_driver_called_when_under_load() {
let rt = rt();
// Create a lot of constant load. The scheduler will always be busy.
for _ in 0..100 {
rt.spawn(async {
loop {
// Don't use Tokio's `yield_now()` to avoid special defer
// logic.
std::future::poll_fn::<(), _>(|cx| {
cx.waker().wake_by_ref();
std::task::Poll::Pending
}).await;
}
});
}
// Do some I/O work
rt.block_on(async {
let listener = assert_ok!(TcpListener::bind("127.0.0.1:0").await);
let addr = assert_ok!(listener.local_addr());
let srv = tokio::spawn(async move {
let (mut stream, _) = assert_ok!(listener.accept().await);
assert_ok!(stream.write_all(b"hello world").await);
});
let cli = tokio::spawn(async move {
let mut stream = assert_ok!(TcpStream::connect(addr).await);
let mut dst = vec![0; 11];
assert_ok!(stream.read_exact(&mut dst).await);
assert_eq!(dst, b"hello world");
});
assert_ok!(srv.await);
assert_ok!(cli.await);
});
}
/// Tests that yielded tasks are not scheduled until **after** resource
/// drivers are polled.
///
/// The OS does not guarantee when I/O events are delivered, so there may be
/// more yields than anticipated. This makes the test slightly flaky. To
/// help avoid flakiness, we run the test 10 times and only fail it after
/// 10 failures in a row.
///
/// Note that if the test fails by panicking rather than by returning false,
/// then we fail it immediately. That kind of failure should not happen
/// spuriously.
#[test]
#[cfg(not(target_os="wasi"))]
#[cfg_attr(miri, ignore)] // No `socket` in miri.
fn yield_defers_until_park() {
for _ in 0..10 {
if yield_defers_until_park_inner(false) {
// test passed
return;
}
// Wait a bit and run the test again.
std::thread::sleep(std::time::Duration::from_secs(2));
}
panic!("yield_defers_until_park is failing consistently");
}
/// Same as above, but with cooperative scheduling.
#[test]
#[cfg(not(target_os="wasi"))]
#[cfg_attr(miri, ignore)] // No `socket` in miri.
fn coop_yield_defers_until_park() {
for _ in 0..10 {
if yield_defers_until_park_inner(true) {
// test passed
return;
}
// Wait a bit and run the test again.
std::thread::sleep(std::time::Duration::from_secs(2));
}
panic!("yield_defers_until_park is failing consistently");
}
    /// Implementation of `yield_defers_until_park` test. Returns `true` if the
    /// test passed.
    #[cfg(not(target_os="wasi"))]
    fn yield_defers_until_park_inner(use_coop: bool) -> bool {
        use std::sync::atomic::{AtomicBool, Ordering::SeqCst};
        use std::sync::Barrier;
        const BUDGET: usize = 128;
        let rt = rt();
        let flag = Arc::new(AtomicBool::new(false));
        // NUM_WORKERS - 1 pinned tasks + the test task = NUM_WORKERS waiters.
        let barrier = Arc::new(Barrier::new(NUM_WORKERS));
        rt.block_on(async {
            // Make sure other workers cannot steal tasks
            #[allow(clippy::reversed_empty_ranges)]
            for _ in 0..(NUM_WORKERS-1) {
                let flag = flag.clone();
                let barrier = barrier.clone();
                tokio::spawn(async move {
                    barrier.wait();
                    while !flag.load(SeqCst) {
                        std::thread::sleep(std::time::Duration::from_millis(1));
                    }
                });
            }
            barrier.wait();
            let (fail_test, fail_test_recv) = oneshot::channel::<()>();
            let flag_clone = flag.clone();
            let jh = tokio::spawn(async move {
                // Create a TCP listener
                let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
                let addr = listener.local_addr().unwrap();
                tokio::join!(
                    async {
                        // Done in a blocking manner intentionally.
                        let _socket = std::net::TcpStream::connect(addr).unwrap();
                        // Yield until connected
                        let mut cnt = 0;
                        while !flag_clone.load(SeqCst){
                            if use_coop {
                                // Consume a good chunk of budget, which should
                                // force at least one yield.
                                for _ in 0..BUDGET {
                                    tokio::task::consume_budget().await;
                                }
                            } else {
                                tokio::task::yield_now().await;
                            }
                            cnt += 1;
                            if cnt >= 10 {
                                // yielded too many times; report failure and
                                // sleep forever so that the `fail_test` branch
                                // of the `select!` below triggers.
                                let _ = fail_test.send(());
                                futures::future::pending::<()>().await;
                                break;
                            }
                        }
                    },
                    async {
                        let _ = listener.accept().await.unwrap();
                        flag_clone.store(true, SeqCst);
                    }
                );
            });
            // Wait until the spawned task completes or fails. If no message is
            // sent on `fail_test`, then the test succeeds. Otherwise, it fails.
            let success = fail_test_recv.await.is_err();
            if success {
                // Setting flag to true ensures that the tasks we spawned at
                // the beginning of the test will exit.
                // If we don't do this, the test will hang since the runtime waits
                // for all spawned tasks to finish when dropping.
                flag.store(true, SeqCst);
                // Check for panics in spawned task.
                jh.abort();
                jh.await.unwrap();
            }
            success
        })
    }
#[cfg(not(target_os="wasi"))] // Wasi does not support threads
#[cfg_attr(miri, ignore)] // No `socket` in miri.
#[test]
fn client_server_block_on() {
let rt = rt();
let (tx, rx) = mpsc::channel();
rt.block_on(async move { client_server(tx).await });
assert_ok!(rx.try_recv());
assert_err!(rx.try_recv());
}
#[cfg_attr(target_os = "wasi", ignore = "Wasi does not support threads or panic recovery")]
#[cfg(panic = "unwind")]
#[test]
fn panic_in_task() {
let rt = rt();
let (tx, rx) = oneshot::channel();
struct Boom(Option<oneshot::Sender<()>>);
impl Future for Boom {
type Output = ();
fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<()> {
panic!();
}
}
impl Drop for Boom {
fn drop(&mut self) {
assert!(std::thread::panicking());
self.0.take().unwrap().send(()).unwrap();
}
}
rt.spawn(Boom(Some(tx)));
assert_ok!(rt.block_on(rx));
}
#[test]
#[should_panic]
#[cfg_attr(target_os = "wasi", ignore = "Wasi does not support panic recovery")]
fn panic_in_block_on() {
let rt = rt();
rt.block_on(async { panic!() });
}
#[cfg(not(target_os="wasi"))] // Wasi does not support threads
async fn yield_once() {
let mut yielded = false;
poll_fn(|cx| {
if yielded {
Poll::Ready(())
} else {
yielded = true;
cx.waker().wake_by_ref();
Poll::Pending
}
})
.await
}
#[test]
fn enter_and_spawn() {
let rt = rt();
let handle = {
let _enter = rt.enter();
tokio::spawn(async {})
};
assert_ok!(rt.block_on(handle));
}
#[test]
fn eagerly_drops_futures_on_shutdown() {
use std::sync::mpsc;
struct Never {
drop_tx: mpsc::Sender<()>,
}
impl Future for Never {
type Output = ();
fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<()> {
Poll::Pending
}
}
impl Drop for Never {
fn drop(&mut self) {
self.drop_tx.send(()).unwrap();
}
}
let rt = rt();
let (drop_tx, drop_rx) = mpsc::channel();
let (run_tx, run_rx) = oneshot::channel();
rt.block_on(async move {
tokio::spawn(async move {
assert_ok!(run_tx.send(()));
Never { drop_tx }.await
});
assert_ok!(run_rx.await);
});
drop(rt);
assert_ok!(drop_rx.recv());
}
#[test]
fn wake_while_rt_is_dropping() {
    // Each task's destructor wakes the *other* task's stored waker; shutdown
    // must tolerate wake-ups arriving while the runtime is being torn down.
    use tokio::sync::Barrier;
    // Runs the closure when dropped.
    struct OnDrop<F: FnMut()>(F);
    impl<F: FnMut()> Drop for OnDrop<F> {
        fn drop(&mut self) {
            (self.0)()
        }
    }
    let (tx1, rx1) = oneshot::channel();
    let (tx2, rx2) = oneshot::channel();
    let barrier = Arc::new(Barrier::new(3));
    let barrier1 = barrier.clone();
    let barrier2 = barrier.clone();
    let rt = rt();
    rt.spawn(async move {
        let mut tx2 = Some(tx2);
        let _d = OnDrop(move || {
            let _ = tx2.take().unwrap().send(());
        });
        // Ensure a waker gets stored in oneshot 1.
        let _ = tokio::join!(rx1, barrier1.wait());
    });
    rt.spawn(async move {
        let mut tx1 = Some(tx1);
        let _d = OnDrop(move || {
            let _ = tx1.take().unwrap().send(());
        });
        // Ensure a waker gets stored in oneshot 2.
        let _ = tokio::join!(rx2, barrier2.wait());
    });
    // Wait until every oneshot channel has been polled.
    rt.block_on(barrier.wait());
    // Drop the rt. Regardless of which task is dropped first, its destructor will wake the
    // other task.
    drop(rt);
}
#[cfg(not(target_os="wasi"))] // Wasi doesn't support UDP or bind()
#[cfg_attr(miri, ignore)] // No `socket` in miri.
#[test]
fn io_notify_while_shutting_down() {
    // Repeatedly shut a runtime down while UDP readiness events are still in
    // flight; the blocking sleep in the receive loop makes it likely that an
    // I/O wakeup races with the shutdown.
    use tokio::net::UdpSocket;
    use std::sync::Arc;
    for _ in 1..10 {
        let runtime = rt();
        runtime.block_on(async {
            let socket = UdpSocket::bind("127.0.0.1:0").await.unwrap();
            let addr = socket.local_addr().unwrap();
            let send_half = Arc::new(socket);
            let recv_half = send_half.clone();
            tokio::spawn(async move {
                let mut buf = [0];
                loop {
                    recv_half.recv_from(&mut buf).await.unwrap();
                    std::thread::sleep(Duration::from_millis(2));
                }
            });
            tokio::spawn(async move {
                let buf = [0];
                loop {
                    send_half.send_to(&buf, &addr).await.unwrap();
                    tokio::time::sleep(Duration::from_millis(1)).await;
                }
            });
            // Let the send/receive loops run briefly, then drop the runtime.
            tokio::time::sleep(Duration::from_millis(5)).await;
        });
    }
}
#[cfg(not(target_os="wasi"))] // Wasi does not support threads
#[test]
fn shutdown_timeout() {
    // `shutdown_timeout` must return once its deadline elapses even though a
    // blocking task is still sleeping and cannot be cancelled.
    let (tx, rx) = oneshot::channel();
    let runtime = rt();
    runtime.block_on(async move {
        task::spawn_blocking(move || {
            tx.send(()).unwrap();
            thread::sleep(Duration::from_secs(10_000));
        });
        // Make sure the blocking task is actually running before shutdown.
        rx.await.unwrap();
    });
    Arc::try_unwrap(runtime).unwrap().shutdown_timeout(Duration::from_millis(100));
}
#[cfg(not(target_os="wasi"))] // Wasi does not support threads
#[test]
fn shutdown_timeout_0() {
    // A zero timeout means "don't wait at all": shutdown must return almost
    // immediately despite the long-sleeping blocking task.
    let runtime = rt();
    runtime.block_on(async move {
        task::spawn_blocking(move || {
            thread::sleep(Duration::from_secs(10_000));
        });
    });
    let now = Instant::now();
    Arc::try_unwrap(runtime).unwrap().shutdown_timeout(Duration::from_nanos(0));
    // Generous bound: returning within a second counts as "immediately".
    assert!(now.elapsed().as_secs() < 1);
}
#[test]
fn shutdown_wakeup_time() {
    // After a timed sleep has completed, shutdown with a very long timeout
    // should still return: there is no outstanding work to wait for.
    let runtime = rt();

    runtime.block_on(tokio::time::sleep(std::time::Duration::from_millis(100)));

    Arc::try_unwrap(runtime)
        .unwrap()
        .shutdown_timeout(Duration::from_secs(10_000));
}
// This test is currently ignored on Windows because of a
// rust-lang issue in thread local storage destructors.
// See https://github.com/rust-lang/rust/issues/74875
#[test]
#[cfg(not(windows))]
#[cfg_attr(target_os = "wasi", ignore = "Wasi does not support threads")]
fn runtime_in_thread_local() {
    // Storing a runtime in a thread-local (dropped by the TLS destructor)
    // must not break creating and using another runtime on the same thread.
    use std::cell::RefCell;
    use std::thread;
    thread_local!(
        static R: RefCell<Option<Runtime>> = const { RefCell::new(None) };
    );
    thread::spawn(|| {
        R.with(|cell| {
            let rt = rt();
            // `rt()` hands back an `Arc<Runtime>`; unwrap to store the runtime.
            let rt = Arc::try_unwrap(rt).unwrap();
            *cell.borrow_mut() = Some(rt);
        });
        let _rt = rt();
    }).join().unwrap();
}
#[cfg(not(target_os="wasi"))] // Wasi does not support bind
async fn client_server(tx: mpsc::Sender<()>) {
    // One-shot TCP round trip: a spawned server task writes b"hello" to the
    // first connection; the client reads to EOF, checks the payload, then
    // reports success over `tx`.
    let listener = assert_ok!(TcpListener::bind("127.0.0.1:0").await);
    let addr = assert_ok!(listener.local_addr());

    tokio::spawn(async move {
        let (mut conn, _peer) = listener.accept().await.unwrap();
        conn.write_all(b"hello").await.unwrap();
    });

    let mut conn = TcpStream::connect(&addr).await.unwrap();
    let mut received = Vec::new();
    conn.read_to_end(&mut received).await.unwrap();

    assert_eq!(received, b"hello");
    tx.send(()).unwrap();
}
#[cfg(not(target_os = "wasi"))] // Wasi does not support bind
#[cfg_attr(miri, ignore)] // No `socket` in miri.
#[test]
fn local_set_block_on_socket() {
    // `LocalSet::block_on` must drive both local tasks and I/O: the task
    // spawned with `spawn_local` accepts the connection made by the main
    // future, then signals back over the oneshot.
    let rt = rt();
    let local = task::LocalSet::new();
    local.block_on(&rt, async move {
        let (tx, rx) = oneshot::channel();
        let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
        let addr = listener.local_addr().unwrap();
        task::spawn_local(async move {
            let _ = listener.accept().await;
            tx.send(()).unwrap();
        });
        TcpStream::connect(&addr).await.unwrap();
        rx.await.unwrap();
    });
}
#[cfg(not(target_os = "wasi"))] // Wasi does not support bind
#[cfg_attr(miri, ignore)] // No `socket` in miri.
#[test]
fn local_set_client_server_block_on() {
let rt = rt();
let (tx, rx) = mpsc::channel();
let local = task::LocalSet::new();
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | true |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/tests/time_rt.rs | tokio/tests/time_rt.rs | #![warn(rust_2018_idioms)]
#![cfg(feature = "full")]
use tokio::runtime::Runtime;
use tokio::time::*;
use std::sync::mpsc;
fn rt_combinations() -> Vec<Runtime> {
    // Every scheduler configuration exercised by the timer tests below:
    // current-thread, multi-thread with 1 and 4 workers, and — when unstable
    // APIs are compiled in — the alternative-timer multi-thread variants.
    let mut runtimes = Vec::new();

    runtimes.push(
        tokio::runtime::Builder::new_current_thread()
            .enable_all()
            .build()
            .unwrap(),
    );

    for workers in [1, 4] {
        runtimes.push(
            tokio::runtime::Builder::new_multi_thread()
                .worker_threads(workers)
                .enable_all()
                .build()
                .unwrap(),
        );
    }

    #[cfg(tokio_unstable)]
    for workers in [1, 4] {
        runtimes.push(
            tokio::runtime::Builder::new_multi_thread()
                .worker_threads(workers)
                .enable_alt_timer()
                .enable_all()
                .build()
                .unwrap(),
        );
    }

    runtimes
}
#[cfg(all(feature = "rt-multi-thread", not(target_os = "wasi")))] // Wasi doesn't support threads
#[test]
fn timer_with_threaded_runtime() {
    use tokio::runtime::Runtime;
    // Default multi-thread runtime: the sleep must not complete before its
    // deadline.
    {
        let rt = Runtime::new().unwrap();
        let (tx, rx) = mpsc::channel();
        rt.spawn(async move {
            let when = Instant::now() + Duration::from_millis(10);
            sleep_until(when).await;
            assert!(Instant::now() >= when);
            tx.send(()).unwrap();
        });
        // Block the test thread until the spawned task signals completion.
        rx.recv().unwrap();
    }
    // Same check against the unstable alternative timer configuration.
    #[cfg(tokio_unstable)]
    {
        let rt = tokio::runtime::Builder::new_multi_thread()
            .enable_alt_timer()
            .build()
            .unwrap();
        let (tx, rx) = mpsc::channel();
        rt.block_on(async move {
            let when = Instant::now() + Duration::from_millis(10);
            sleep_until(when).await;
            assert!(Instant::now() >= when);
            tx.send(()).unwrap();
        });
        rx.recv().unwrap();
    }
}
#[test]
fn timer_with_current_thread_scheduler() {
    // `sleep_until` must not fire before its deadline on the current-thread
    // scheduler.
    use tokio::runtime::Builder;

    let rt = Builder::new_current_thread().enable_all().build().unwrap();
    let (done_tx, done_rx) = mpsc::channel();

    rt.block_on(async move {
        let deadline = Instant::now() + Duration::from_millis(10);
        sleep_until(deadline).await;
        assert!(Instant::now() >= deadline);
        done_tx.send(()).unwrap();
    });

    done_rx.recv().unwrap();
}
#[test]
fn starving() {
    // A future that self-wakes on every poll must not starve the timer:
    // `Starve` busy-polls around the inner sleep, counting polls, and the
    // sleep must still complete at (or after) its deadline.
    use std::future::Future;
    use std::pin::Pin;
    use std::task::{Context, Poll};
    // Wraps a future; while the inner future is pending it increments its
    // counter and immediately re-schedules itself.
    struct Starve<T: Future<Output = ()> + Unpin>(T, u64);
    impl<T: Future<Output = ()> + Unpin> Future for Starve<T> {
        type Output = u64;
        fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<u64> {
            if Pin::new(&mut self.0).poll(cx).is_ready() {
                return Poll::Ready(self.1);
            }
            self.1 += 1;
            cx.waker().wake_by_ref();
            Poll::Pending
        }
    }
    for rt in rt_combinations() {
        rt.block_on(async {
            let when = Instant::now() + Duration::from_millis(10);
            let starve = Starve(Box::pin(sleep_until(when)), 0);
            starve.await;
            assert!(Instant::now() >= when);
        });
    }
}
#[test]
fn timeout_value() {
    // A `timeout` wrapping a never-completing oneshot must fail, and only
    // after the full duration has elapsed — on every runtime flavor.
    use tokio::sync::oneshot;

    for rt in rt_combinations() {
        rt.block_on(async {
            let (_keep_alive, rx) = oneshot::channel::<()>();

            let started = Instant::now();
            let dur = Duration::from_millis(10);
            assert!(timeout(dur, rx).await.is_err());
            assert!(Instant::now() >= started + dur);
        });
    }
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/tests/process_smoke.rs | tokio/tests/process_smoke.rs | #![warn(rust_2018_idioms)]
#![cfg(all(feature = "full", not(target_os = "wasi"), not(miri)))] // Wasi/Miri cannot run system commands
use tokio::process::Command;
use tokio_test::assert_ok;
#[tokio::test]
async fn simple() {
    // Spawn a shell that exits with code 2 and verify pid and exit-status
    // handling around it.
    let mut cmd;
    if cfg!(windows) {
        cmd = Command::new("cmd");
        cmd.arg("/c");
    } else {
        cmd = Command::new("sh");
        cmd.arg("-c");
    }
    let mut child = cmd.arg("exit 2").spawn().unwrap();
    let id = child.id().expect("missing id");
    assert!(id > 0);
    let status = assert_ok!(child.wait().await);
    assert_eq!(status.code(), Some(2));
    // test that the `.wait()` method is fused just like the stdlib
    let status = assert_ok!(child.wait().await);
    assert_eq!(status.code(), Some(2));
    // Can't get id after process has exited
    assert_eq!(child.id(), None);
    // Best-effort kill on an already-exited child; the result is deliberately
    // discarded.
    drop(child.kill());
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/tests/_require_full.rs | tokio/tests/_require_full.rs | #[cfg(not(any(feature = "full", target_family = "wasm")))]
compile_error!("run main Tokio tests with `--features full`");
// CI sets `--cfg tokio_no_parking_lot` when trying to run tests with
// `parking_lot` disabled. This check prevents "silent failure" if `parking_lot`
// accidentally gets enabled.
#[cfg(all(tokio_no_parking_lot, feature = "parking_lot"))]
compile_error!("parking_lot feature enabled when it should not be");
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/tests/process_arg0.rs | tokio/tests/process_arg0.rs | #![warn(rust_2018_idioms)]
#![cfg(all(feature = "full", unix, not(miri)))]
use tokio::process::Command;
#[tokio::test]
async fn arg0() {
    // `arg0` overrides the value the child observes as `$0` without changing
    // which program actually runs.
    let output = {
        let mut shell = Command::new("sh");
        shell.arg0("test_string").arg("-c").arg("echo $0");
        shell.output().await.unwrap()
    };
    assert_eq!(output.stdout, b"test_string\n");
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/tests/io_write_buf.rs | tokio/tests/io_write_buf.rs | #![warn(rust_2018_idioms)]
#![cfg(feature = "full")]
use tokio::io::{AsyncWrite, AsyncWriteExt};
use tokio_test::assert_ok;
use bytes::BytesMut;
use std::cmp;
use std::io::{self, Cursor};
use std::pin::Pin;
use std::task::{Context, Poll};
#[tokio::test]
async fn write_all() {
    // `write_buf` performs a single `poll_write` and advances the source
    // cursor by exactly the number of bytes the writer accepted (4 here).
    struct Wr {
        buf: BytesMut,
        cnt: usize,
    }
    impl AsyncWrite for Wr {
        fn poll_write(
            mut self: Pin<&mut Self>,
            _cx: &mut Context<'_>,
            buf: &[u8],
        ) -> Poll<io::Result<usize>> {
            // Only one write call is expected in this test.
            assert_eq!(self.cnt, 0);
            // Accept at most 4 bytes per call.
            let n = cmp::min(4, buf.len());
            let buf = &buf[0..n];
            self.cnt += 1;
            self.buf.extend(buf);
            Ok(buf.len()).into()
        }
        fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<()>> {
            Ok(()).into()
        }
        fn poll_shutdown(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<io::Result<()>> {
            Ok(()).into()
        }
    }
    let mut wr = Wr {
        buf: BytesMut::with_capacity(64),
        cnt: 0,
    };
    let mut buf = Cursor::new(&b"hello world"[..]);
    assert_ok!(wr.write_buf(&mut buf).await);
    assert_eq!(wr.buf, b"hell"[..]);
    assert_eq!(wr.cnt, 1);
    assert_eq!(buf.position(), 4);
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/tests/rt_handle.rs | tokio/tests/rt_handle.rs | #![warn(rust_2018_idioms)]
#![cfg(feature = "full")]
use std::sync::Arc;
use tokio::runtime::Runtime;
use tokio::sync::{mpsc, Barrier};
#[test]
#[cfg_attr(panic = "abort", ignore)]
fn basic_enter() {
    // Entering two runtimes is fine as long as the guards are released in
    // LIFO order (most recently entered first).
    let rt1 = rt();
    let rt2 = rt();

    let outer = rt1.enter();
    {
        let _inner = rt2.enter();
    }
    drop(outer);
}
#[test]
#[should_panic]
#[cfg_attr(panic = "abort", ignore)]
fn interleave_enter_different_rt() {
    // Dropping enter guards out of LIFO order across two runtimes is a usage
    // error and must panic.
    let rt1 = rt();
    let rt2 = rt();
    let enter1 = rt1.enter();
    let enter2 = rt2.enter();
    // Out of order: the outer guard is dropped while the inner is alive.
    drop(enter1);
    drop(enter2);
}
#[test]
#[should_panic]
#[cfg_attr(panic = "abort", ignore)]
fn interleave_enter_same_rt() {
    // Guards of the *same* runtime must also be released LIFO; dropping
    // `enter2` before `enter3` panics.
    let rt1 = rt();
    let _enter1 = rt1.enter();
    let enter2 = rt1.enter();
    let enter3 = rt1.enter();
    drop(enter2);
    drop(enter3);
}
#[test]
#[cfg(not(target_os = "wasi"))]
#[cfg_attr(panic = "abort", ignore)]
fn interleave_then_enter() {
    // The panic caused by out-of-order guard drops must leave the thread in a
    // usable state: entering a fresh runtime afterwards still works.
    let _ = std::panic::catch_unwind(|| {
        let rt1 = rt();
        let rt2 = rt();
        let enter1 = rt1.enter();
        let enter2 = rt2.enter();
        drop(enter1);
        drop(enter2);
    });
    // Can still enter
    let rt3 = rt();
    let _enter = rt3.enter();
}
// If the cycle causes a leak, then miri will catch it.
#[test]
fn drop_tasks_with_reference_cycle() {
    // Two tasks each hold the other's `JoinHandle`, forming a reference cycle
    // through the registered wakers; dropping the runtime must still free
    // everything.
    rt().block_on(async {
        let (tx, mut rx) = mpsc::channel(1);
        let barrier = Arc::new(Barrier::new(3));
        let barrier_a = barrier.clone();
        let barrier_b = barrier.clone();
        let a = tokio::spawn(async move {
            let b = rx.recv().await.unwrap();
            // Poll the JoinHandle once. This registers the waker.
            // The other task cannot have finished at this point due to the barrier below.
            futures::future::select(b, std::future::ready(())).await;
            barrier_a.wait().await;
        });
        let b = tokio::spawn(async move {
            // Poll the JoinHandle once. This registers the waker.
            // The other task cannot have finished at this point due to the barrier below.
            futures::future::select(a, std::future::ready(())).await;
            barrier_b.wait().await;
        });
        tx.send(b).await.unwrap();
        barrier.wait().await;
    });
}
#[test]
fn runtime_id_is_same() {
    // Two handles taken from the same runtime report the same id.
    let rt = rt();
    assert_eq!(rt.handle().id(), rt.handle().id());
}
#[test]
fn runtime_ids_different() {
    // Distinct runtimes get distinct ids.
    let (a, b) = (rt(), rt());
    assert_ne!(a.handle().id(), b.handle().id());
}
/// Builds the current-thread runtime used by the tests in this file.
fn rt() -> Runtime {
    use tokio::runtime::Builder;
    Builder::new_current_thread().build().unwrap()
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/tests/io_take.rs | tokio/tests/io_take.rs | #![warn(rust_2018_idioms)]
#![cfg(all(feature = "full", not(target_os = "wasi")))] // Wasi does not support panic recovery
use std::pin::Pin;
use std::task::{Context, Poll};
use tokio::io::{self, AsyncRead, AsyncReadExt, ReadBuf};
use tokio_test::assert_ok;
mod support {
pub(crate) mod leaked_buffers;
}
use support::leaked_buffers::LeakedBuffers;
#[tokio::test]
async fn take() {
    // `take(4)` caps the reader at four bytes; the rest of `buf` stays zeroed.
    let mut limited = (&b"hello world"[..]).take(4);

    let mut buf = [0; 6];
    let n = assert_ok!(limited.read(&mut buf).await);

    assert_eq!(n, 4);
    assert_eq!(&buf, &b"hell\0\0"[..]);
}
#[tokio::test]
async fn issue_4435() {
    // Regression test: `take` must respect data already present in the
    // `ReadBuf` ("AB") and append at the current position rather than
    // overwrite it.
    let mut buf = [0; 8];
    let rd: &[u8] = b"hello world";
    let rd = rd.take(4);
    tokio::pin!(rd);
    let mut read_buf = ReadBuf::new(&mut buf);
    read_buf.put_slice(b"AB");
    std::future::poll_fn(|cx| rd.as_mut().poll_read(cx, &mut read_buf))
        .await
        .unwrap();
    assert_eq!(&buf, &b"ABhell\0\0"[..]);
}
// Reader that misbehaves by swapping the caller's `ReadBuf` for one backed by
// its own storage (kept alive via `LeakedBuffers`). Used below to check that
// `take` defends against such contract-violating inner readers.
struct BadReader {
    leaked_buffers: LeakedBuffers,
}
impl BadReader {
    fn new() -> Self {
        Self {
            leaked_buffers: LeakedBuffers::new(),
        }
    }
}
impl AsyncRead for BadReader {
    // Deliberately violates the `AsyncRead` contract: instead of filling the
    // provided `read_buf`, it replaces it wholesale with a buffer it owns.
    fn poll_read(
        mut self: Pin<&mut Self>,
        _cx: &mut Context<'_>,
        read_buf: &mut ReadBuf<'_>,
    ) -> Poll<io::Result<()>> {
        // `create` presumably keeps the allocation alive for the program's
        // lifetime (see `support::leaked_buffers`), so the slice stays valid.
        let mut buf = ReadBuf::new(unsafe { self.leaked_buffers.create(10) });
        buf.put_slice(&[123; 10]);
        *read_buf = buf;
        Poll::Ready(Ok(()))
    }
}
#[tokio::test]
#[should_panic]
async fn bad_reader_fails() {
    // Reading through `take` from the contract-violating reader above is
    // expected to panic rather than expose the swapped buffer.
    let mut out = Vec::with_capacity(10);
    let mut limited = BadReader::new().take(10);
    limited.read_buf(&mut out).await.unwrap();
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/tests/sync_errors.rs | tokio/tests/sync_errors.rs | #![warn(rust_2018_idioms)]
#![cfg(feature = "sync")]
#[cfg(all(target_family = "wasm", not(target_os = "wasi")))]
use wasm_bindgen_test::wasm_bindgen_test as test;
// Compile-time check that `T` implements `Error` and is `Send + Sync`.
fn is_error<T: std::error::Error + Send + Sync>() {}
#[test]
fn mpsc_error_bound() {
    // The mpsc error types must satisfy the `Error + Send + Sync` bound.
    use tokio::sync::mpsc::error::{SendError, TrySendError};

    is_error::<SendError<()>>();
    is_error::<TrySendError<()>>();
}
#[test]
fn oneshot_error_bound() {
    // The oneshot error types must satisfy the `Error + Send + Sync` bound.
    use tokio::sync::oneshot::error::{RecvError, TryRecvError};

    is_error::<RecvError>();
    is_error::<TryRecvError>();
}
#[test]
fn watch_error_bound() {
    // The watch error type must satisfy the `Error + Send + Sync` bound.
    use tokio::sync::watch::error::SendError;

    is_error::<SendError<()>>();
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/tests/net_unix_pipe.rs | tokio/tests/net_unix_pipe.rs | #![cfg(feature = "full")]
#![cfg(unix)]
use tokio::io::{AsyncReadExt, AsyncWriteExt, Interest};
use tokio::net::unix::pipe;
use tokio_test::task;
use tokio_test::{assert_err, assert_ok, assert_pending, assert_ready_ok};
use std::fs::File;
use std::io;
use std::os::unix::fs::OpenOptionsExt;
use std::os::unix::io::AsRawFd;
use std::path::{Path, PathBuf};
/// Helper struct which will clean up temporary files once dropped.
struct TempFifo {
    // Full path of the FIFO inside `_dir`.
    path: PathBuf,
    // Held only so the temporary directory (and the FIFO inside it) is
    // removed when this struct is dropped.
    _dir: tempfile::TempDir,
}
impl TempFifo {
    /// Creates a FIFO named `name` inside a fresh temporary directory.
    fn new(name: &str) -> io::Result<TempFifo> {
        let dir = tempfile::Builder::new()
            .prefix("tokio-fifo-tests")
            .tempdir()?;
        let path = dir.path().join(name);
        // Owner-only permissions suffice: both ends are opened by this process.
        nix::unistd::mkfifo(&path, nix::sys::stat::Mode::S_IRWXU)?;
        Ok(TempFifo { path, _dir: dir })
    }
}
impl AsRef<Path> for TempFifo {
    /// Borrows the FIFO's path so a `TempFifo` can be passed anywhere an
    /// `impl AsRef<Path>` is expected.
    fn as_ref(&self) -> &Path {
        &self.path
    }
}
#[tokio::test]
#[cfg_attr(miri, ignore)] // No `mkfifo` in miri.
async fn fifo_simple_send() -> io::Result<()> {
    // Basic receiver-first round trip: the read stays pending until a sender
    // opens the FIFO and writes.
    const DATA: &[u8] = b"this is some data to write to the fifo";
    let fifo = TempFifo::new("simple_send")?;
    // Create a reading task which should wait for data from the pipe.
    let mut reader = pipe::OpenOptions::new().open_receiver(&fifo)?;
    let mut read_fut = task::spawn(async move {
        let mut buf = vec![0; DATA.len()];
        reader.read_exact(&mut buf).await?;
        Ok::<_, io::Error>(buf)
    });
    // No writer yet, so the read cannot have completed.
    assert_pending!(read_fut.poll());
    let mut writer = pipe::OpenOptions::new().open_sender(&fifo)?;
    writer.write_all(DATA).await?;
    // Let the IO driver poll events for the reader.
    while !read_fut.is_woken() {
        tokio::task::yield_now().await;
    }
    // Reading task should be ready now.
    let read_data = assert_ready_ok!(read_fut.poll());
    assert_eq!(&read_data, DATA);
    Ok(())
}
#[tokio::test]
#[cfg(any(target_os = "linux", target_os = "android"))]
#[cfg_attr(miri, ignore)] // No `mkfifo` in miri.
async fn fifo_simple_send_sender_first() -> io::Result<()> {
    // Opening the write end of a FIFO with no reader fails with ENXIO unless
    // the sender is opened in read-write mode.
    const DATA: &[u8] = b"this is some data to write to the fifo";
    // Create a new fifo file with *no reading ends open*.
    let fifo = TempFifo::new("simple_send_sender_first")?;
    // Simple `open_sender` should fail with ENXIO (no such device or address).
    let err = assert_err!(pipe::OpenOptions::new().open_sender(&fifo));
    assert_eq!(err.raw_os_error(), Some(libc::ENXIO));
    // `open_sender` in read-write mode should succeed and the pipe should be ready to write.
    let mut writer = pipe::OpenOptions::new()
        .read_write(true)
        .open_sender(&fifo)?;
    writer.write_all(DATA).await?;
    // Read the written data and validate.
    let mut reader = pipe::OpenOptions::new().open_receiver(&fifo)?;
    let mut read_data = vec![0; DATA.len()];
    reader.read_exact(&mut read_data).await?;
    assert_eq!(&read_data, DATA);
    Ok(())
}
// Opens the FIFO for writing, sends `msg` in full, then drops the writer so
// the reading side can observe the close.
async fn write_and_close(path: impl AsRef<Path>, msg: &[u8]) -> io::Result<()> {
    let mut sender = pipe::OpenOptions::new().open_sender(path)?;
    sender.write_all(msg).await?;
    Ok(())
    // `sender` is dropped here, closing the write end.
}
/// Checks EOF behavior with single reader and writers sequentially opening
/// and closing a FIFO.
#[tokio::test]
#[cfg_attr(miri, ignore)] // No `mkfifo` in miri.
async fn fifo_multiple_writes() -> io::Result<()> {
    const DATA: &[u8] = b"this is some data to write to the fifo";
    let fifo = TempFifo::new("fifo_multiple_writes")?;
    let mut reader = pipe::OpenOptions::new().open_receiver(&fifo)?;
    write_and_close(&fifo, DATA).await?;
    let ev = reader.ready(Interest::READABLE).await?;
    assert!(ev.is_readable());
    let mut read_data = vec![0; DATA.len()];
    assert_ok!(reader.read_exact(&mut read_data).await);
    // Check that reader hits EOF.
    let err = assert_err!(reader.read_exact(&mut read_data).await);
    assert_eq!(err.kind(), io::ErrorKind::UnexpectedEof);
    // Write more data and read again.
    write_and_close(&fifo, DATA).await?;
    assert_ok!(reader.read_exact(&mut read_data).await);
    Ok(())
}
/// Checks behavior of a resilient reader (Receiver in O_RDWR access mode)
/// with writers sequentially opening and closing a FIFO.
#[tokio::test]
#[cfg(any(target_os = "linux", target_os = "android"))]
#[cfg_attr(miri, ignore)] // No `socket` in miri.
async fn fifo_resilient_reader() -> io::Result<()> {
    const DATA: &[u8] = b"this is some data to write to the fifo";
    let fifo = TempFifo::new("fifo_resilient_reader")?;
    // Open reader in read-write access mode.
    let mut reader = pipe::OpenOptions::new()
        .read_write(true)
        .open_receiver(&fifo)?;
    write_and_close(&fifo, DATA).await?;
    let ev = reader.ready(Interest::READABLE).await?;
    let mut read_data = vec![0; DATA.len()];
    reader.read_exact(&mut read_data).await?;
    // Check that reader didn't hit EOF.
    assert!(!ev.is_read_closed());
    // Resilient reader can asynchronously wait for the next writer.
    let mut second_read_fut = task::spawn(reader.read_exact(&mut read_data));
    assert_pending!(second_read_fut.poll());
    // Write more data and read again.
    write_and_close(&fifo, DATA).await?;
    assert_ok!(second_read_fut.await);
    Ok(())
}
#[tokio::test]
#[cfg_attr(miri, ignore)] // No `O_NONBLOCK` for open64 in miri.
async fn open_detects_not_a_fifo() -> io::Result<()> {
    // Both `open_sender` and `open_receiver` must reject a regular file with
    // `InvalidInput`.
    let dir = tempfile::Builder::new()
        .prefix("tokio-fifo-tests")
        .tempdir()
        .unwrap();
    let path = dir.path().join("not_a_fifo");
    // Create an ordinary file.
    File::create(&path)?;
    // Check if Sender detects invalid file type.
    let err = assert_err!(pipe::OpenOptions::new().open_sender(&path));
    assert_eq!(err.kind(), io::ErrorKind::InvalidInput);
    // Check if Receiver detects invalid file type. (Previously this line
    // called `open_sender` again by copy-paste, leaving the receiver path
    // untested.)
    let err = assert_err!(pipe::OpenOptions::new().open_receiver(&path));
    assert_eq!(err.kind(), io::ErrorKind::InvalidInput);
    Ok(())
}
#[tokio::test]
#[cfg_attr(miri, ignore)] // No `mkfifo` in miri.
async fn from_file() -> io::Result<()> {
    // Round trip through the `from_file` constructors: both ends are built
    // from pre-opened `std::fs::File`s and must still work asynchronously.
    const DATA: &[u8] = b"this is some data to write to the fifo";
    let fifo = TempFifo::new("from_file")?;
    // Construct a Receiver from a File.
    let file = std::fs::OpenOptions::new()
        .read(true)
        .custom_flags(libc::O_NONBLOCK)
        .open(&fifo)?;
    let mut reader = pipe::Receiver::from_file(file)?;
    // Construct a Sender from a File.
    let file = std::fs::OpenOptions::new()
        .write(true)
        .custom_flags(libc::O_NONBLOCK)
        .open(&fifo)?;
    let mut writer = pipe::Sender::from_file(file)?;
    // Write and read some data to test async.
    let mut read_fut = task::spawn(async move {
        let mut buf = vec![0; DATA.len()];
        reader.read_exact(&mut buf).await?;
        Ok::<_, io::Error>(buf)
    });
    assert_pending!(read_fut.poll());
    writer.write_all(DATA).await?;
    let read_data = assert_ok!(read_fut.await);
    assert_eq!(&read_data, DATA);
    Ok(())
}
#[tokio::test]
#[cfg_attr(miri, ignore)] // No `fstat` in miri.
async fn from_file_detects_not_a_fifo() -> io::Result<()> {
    // The `from_file` constructors must reject a regular file with
    // `InvalidInput`.
    let dir = tempfile::Builder::new()
        .prefix("tokio-fifo-tests")
        .tempdir()
        .unwrap();
    let path = dir.path().join("not_a_fifo");
    // Create an ordinary file.
    File::create(&path)?;
    // Check if Sender detects invalid file type.
    let file = std::fs::OpenOptions::new().write(true).open(&path)?;
    let err = assert_err!(pipe::Sender::from_file(file));
    assert_eq!(err.kind(), io::ErrorKind::InvalidInput);
    // Check if Receiver detects invalid file type.
    let file = std::fs::OpenOptions::new().read(true).open(&path)?;
    let err = assert_err!(pipe::Receiver::from_file(file));
    assert_eq!(err.kind(), io::ErrorKind::InvalidInput);
    Ok(())
}
#[tokio::test]
#[cfg_attr(miri, ignore)] // No `mkfifo` in miri.
async fn from_file_detects_wrong_access_mode() -> io::Result<()> {
    // A write-only file cannot back a Receiver, nor a read-only file a
    // Sender; both must be rejected with `InvalidInput`.
    let fifo = TempFifo::new("wrong_access_mode")?;
    // Open a read end to open the fifo for writing.
    let _reader = pipe::OpenOptions::new().open_receiver(&fifo)?;
    // Check if Receiver detects write-only access mode.
    let write_only = std::fs::OpenOptions::new()
        .write(true)
        .custom_flags(libc::O_NONBLOCK)
        .open(&fifo)?;
    let err = assert_err!(pipe::Receiver::from_file(write_only));
    assert_eq!(err.kind(), io::ErrorKind::InvalidInput);
    // Check if Sender detects read-only access mode.
    let rdonly = std::fs::OpenOptions::new()
        .read(true)
        .custom_flags(libc::O_NONBLOCK)
        .open(&fifo)?;
    let err = assert_err!(pipe::Sender::from_file(rdonly));
    assert_eq!(err.kind(), io::ErrorKind::InvalidInput);
    Ok(())
}
// Reports whether `O_NONBLOCK` is set in `fd`'s file status flags.
fn is_nonblocking<T: AsRawFd>(fd: &T) -> io::Result<bool> {
    let status = nix::fcntl::fcntl(fd.as_raw_fd(), nix::fcntl::F_GETFL)?;
    Ok(status & libc::O_NONBLOCK != 0)
}
#[tokio::test]
#[cfg_attr(miri, ignore)] // No `mkfifo` in miri.
async fn from_file_sets_nonblock() -> io::Result<()> {
    // `from_file` must switch a blocking file descriptor to non-blocking
    // mode for both ends.
    let fifo = TempFifo::new("sets_nonblock")?;
    // Open read and write ends to let blocking files open.
    let _reader = pipe::OpenOptions::new().open_receiver(&fifo)?;
    let _writer = pipe::OpenOptions::new().open_sender(&fifo)?;
    // Check if Receiver sets the pipe in non-blocking mode.
    let rdonly = std::fs::OpenOptions::new().read(true).open(&fifo)?;
    assert!(!is_nonblocking(&rdonly)?);
    let reader = pipe::Receiver::from_file(rdonly)?;
    assert!(is_nonblocking(&reader)?);
    // Check if Sender sets the pipe in non-blocking mode.
    let write_only = std::fs::OpenOptions::new().write(true).open(&fifo)?;
    assert!(!is_nonblocking(&write_only)?);
    let writer = pipe::Sender::from_file(write_only)?;
    assert!(is_nonblocking(&writer)?);
    Ok(())
}
// Polls `writable()` exactly once; true if the pipe is immediately writable.
fn writable_by_poll(writer: &pipe::Sender) -> bool {
    let mut writable_fut = task::spawn(writer.writable());
    writable_fut.poll().is_ready()
}
#[tokio::test]
#[cfg_attr(miri, ignore)] // No `mkfifo` in miri.
async fn try_read_write() -> io::Result<()> {
    // Saturate the pipe with `try_write`, then drain it with `try_read`,
    // verifying the bytes come back unchanged and `WouldBlock` is surfaced
    // correctly on both sides.
    const DATA: &[u8] = b"this is some data to write to the fifo";
    // Create a pipe pair over a fifo file.
    let fifo = TempFifo::new("try_read_write")?;
    let reader = pipe::OpenOptions::new().open_receiver(&fifo)?;
    let writer = pipe::OpenOptions::new().open_sender(&fifo)?;
    // Fill the pipe buffer with `try_write`.
    let mut write_data = Vec::new();
    while writable_by_poll(&writer) {
        match writer.try_write(DATA) {
            Ok(n) => write_data.extend(&DATA[..n]),
            Err(e) => {
                assert_eq!(e.kind(), io::ErrorKind::WouldBlock);
                break;
            }
        }
    }
    // Drain the pipe buffer with `try_read`.
    let mut read_data = vec![0; write_data.len()];
    let mut i = 0;
    while i < write_data.len() {
        reader.readable().await?;
        match reader.try_read(&mut read_data[i..]) {
            Ok(n) => i += n,
            Err(e) => {
                // Spurious readiness is allowed; just retry.
                assert_eq!(e.kind(), io::ErrorKind::WouldBlock);
                continue;
            }
        }
    }
    assert_eq!(read_data, write_data);
    Ok(())
}
#[tokio::test]
#[cfg_attr(miri, ignore)] // No `mkfifo` in miri.
async fn try_read_write_vectored() -> io::Result<()> {
    // Same saturate-then-drain round trip as `try_read_write`, but through
    // the vectored (`IoSlice`/`IoSliceMut`) variants.
    const DATA: &[u8] = b"this is some data to write to the fifo";
    // Create a pipe pair over a fifo file.
    let fifo = TempFifo::new("try_read_write_vectored")?;
    let reader = pipe::OpenOptions::new().open_receiver(&fifo)?;
    let writer = pipe::OpenOptions::new().open_sender(&fifo)?;
    let write_bufs: Vec<_> = DATA.chunks(3).map(io::IoSlice::new).collect();
    // Fill the pipe buffer with `try_write_vectored`.
    let mut write_data = Vec::new();
    while writable_by_poll(&writer) {
        match writer.try_write_vectored(&write_bufs) {
            Ok(n) => write_data.extend(&DATA[..n]),
            Err(e) => {
                assert_eq!(e.kind(), io::ErrorKind::WouldBlock);
                break;
            }
        }
    }
    // Drain the pipe buffer with `try_read_vectored`.
    let mut read_data = vec![0; write_data.len()];
    let mut i = 0;
    while i < write_data.len() {
        reader.readable().await?;
        let mut read_bufs: Vec<_> = read_data[i..]
            .chunks_mut(0x10000)
            .map(io::IoSliceMut::new)
            .collect();
        match reader.try_read_vectored(&mut read_bufs) {
            Ok(n) => i += n,
            Err(e) => {
                // Spurious readiness is allowed; just retry.
                assert_eq!(e.kind(), io::ErrorKind::WouldBlock);
                continue;
            }
        }
    }
    assert_eq!(read_data, write_data);
    Ok(())
}
#[tokio::test]
#[cfg_attr(miri, ignore)] // No `mkfifo` in miri.
async fn try_read_buf() -> std::io::Result<()> {
    // Saturate the pipe with `try_write`, then drain it with `try_read_buf`.
    const DATA: &[u8] = b"this is some data to write to the fifo";
    // Create a pipe pair over a fifo file (named after this test).
    let fifo = TempFifo::new("try_read_buf")?;
    let reader = pipe::OpenOptions::new().open_receiver(&fifo)?;
    let writer = pipe::OpenOptions::new().open_sender(&fifo)?;
    // Fill the pipe buffer with `try_write`.
    let mut write_data = Vec::new();
    while writable_by_poll(&writer) {
        match writer.try_write(DATA) {
            Ok(n) => write_data.extend(&DATA[..n]),
            Err(e) => {
                assert_eq!(e.kind(), io::ErrorKind::WouldBlock);
                break;
            }
        }
    }
    // Drain the pipe buffer with `try_read_buf`. The destination must start
    // *empty*: `try_read_buf` appends past the vector's length (via `BufMut`),
    // so pre-filling it with zeros would leave the zeros in front of the read
    // bytes and break the comparison below.
    let mut read_data: Vec<u8> = Vec::with_capacity(write_data.len());
    let mut i = 0;
    while i < write_data.len() {
        reader.readable().await?;
        match reader.try_read_buf(&mut read_data) {
            Ok(n) => i += n,
            Err(e) => {
                // Spurious readiness is allowed; just retry.
                assert_eq!(e.kind(), io::ErrorKind::WouldBlock);
                continue;
            }
        }
    }
    assert_eq!(read_data, write_data);
    Ok(())
}
#[tokio::test]
async fn anon_pipe_simple_send() -> io::Result<()> {
    // Same receiver-first round trip as `fifo_simple_send`, but over an
    // anonymous pipe created with `pipe::pipe()`.
    const DATA: &[u8] = b"this is some data to write to the pipe";
    let (mut writer, mut reader) = pipe::pipe()?;
    // Create a reading task which should wait for data from the pipe.
    let mut read_fut = task::spawn(async move {
        let mut buf = vec![0; DATA.len()];
        reader.read_exact(&mut buf).await?;
        Ok::<_, io::Error>(buf)
    });
    // Nothing written yet, so the read cannot have completed.
    assert_pending!(read_fut.poll());
    writer.write_all(DATA).await?;
    // Let the IO driver poll events for the reader.
    while !read_fut.is_woken() {
        tokio::task::yield_now().await;
    }
    // Reading task should be ready now.
    let read_data = assert_ready_ok!(read_fut.poll());
    assert_eq!(&read_data, DATA);
    Ok(())
}
#[tokio::test]
#[cfg_attr(miri, ignore)] // No `pidfd_spawnp` in miri.
async fn anon_pipe_spawn_echo() -> std::io::Result<()> {
    // The write end, converted back to a blocking fd, can serve as a child
    // process's stdout; the parent reads the child's output from the pipe.
    use tokio::process::Command;
    const DATA: &str = "this is some data to write to the pipe";
    let (tx, mut rx) = pipe::pipe()?;
    let status = Command::new("echo")
        .arg("-n")
        .arg(DATA)
        .stdout(tx.into_blocking_fd()?)
        .status();
    let mut buf = vec![0; DATA.len()];
    rx.read_exact(&mut buf).await?;
    assert_eq!(String::from_utf8(buf).unwrap(), DATA);
    let exit_code = status.await?;
    assert!(exit_code.success());
    // Check if the pipe is closed.
    // NOTE(review): `buf` is empty here, so `try_read` reports 0 bytes either
    // way; this is at best a weak closed-pipe check — confirm intent.
    buf = Vec::new();
    let total = assert_ok!(rx.try_read(&mut buf));
    assert_eq!(total, 0);
    Ok(())
}
#[tokio::test]
#[cfg(target_os = "linux")]
#[cfg_attr(miri, ignore)] // No `fstat` in miri.
async fn anon_pipe_from_owned_fd() -> std::io::Result<()> {
    // Both ends of a raw `pipe2` pair (already CLOEXEC + NONBLOCK) can be
    // wrapped via `from_owned_fd` and used asynchronously.
    use nix::fcntl::OFlag;
    const DATA: &[u8] = b"this is some data to write to the pipe";
    let (rx_fd, tx_fd) = nix::unistd::pipe2(OFlag::O_CLOEXEC | OFlag::O_NONBLOCK)?;
    let mut rx = pipe::Receiver::from_owned_fd(rx_fd)?;
    let mut tx = pipe::Sender::from_owned_fd(tx_fd)?;
    let mut buf = vec![0; DATA.len()];
    tx.write_all(DATA).await?;
    rx.read_exact(&mut buf).await?;
    assert_eq!(buf, DATA);
    Ok(())
}
#[tokio::test]
async fn anon_pipe_into_nonblocking_fd() -> std::io::Result<()> {
    // Both ends converted with `into_nonblocking_fd` keep `O_NONBLOCK` set.
    let (tx, rx) = pipe::pipe()?;

    assert!(is_nonblocking(&tx.into_nonblocking_fd()?)?);
    assert!(is_nonblocking(&rx.into_nonblocking_fd()?)?);

    Ok(())
}
#[tokio::test]
async fn anon_pipe_into_blocking_fd() -> std::io::Result<()> {
    // `into_blocking_fd` clears `O_NONBLOCK` on both ends.
    let (tx, rx) = pipe::pipe()?;

    assert!(!is_nonblocking(&tx.into_blocking_fd()?)?);
    assert!(!is_nonblocking(&rx.into_blocking_fd()?)?);

    Ok(())
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/tests/signal_drop_signal.rs | tokio/tests/signal_drop_signal.rs | #![warn(rust_2018_idioms)]
#![cfg(feature = "full")]
#![cfg(unix)]
#![cfg(not(miri))] // No `sigaction` in miri.
mod support {
pub mod signal;
}
use support::signal::send_signal;
use tokio::signal::unix::{signal, SignalKind};
#[tokio::test]
async fn dropping_signal_does_not_deregister_any_other_instances() {
    // Registers three listeners for SIGUSR1, drops the first and the third,
    // and verifies the remaining (middle) listener still receives the signal.
    let kind = SignalKind::user_defined1();

    // Signals should not starve based on ordering
    let first_duplicate_signal =
        signal(kind).expect("failed to register first duplicate signal");
    let mut sig = signal(kind).expect("failed to register signal");
    let second_duplicate_signal =
        signal(kind).expect("failed to register second duplicate signal");

    drop(first_duplicate_signal);
    drop(second_duplicate_signal);

    send_signal(libc::SIGUSR1);
    let _ = sig.recv().await;
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/tests/rt_panic.rs | tokio/tests/rt_panic.rs | #![warn(rust_2018_idioms)]
#![cfg(feature = "full")]
#![cfg(not(target_os = "wasi"))] // Wasi doesn't support panic recovery
#![cfg(panic = "unwind")]
use futures::future;
use std::error::Error;
use tokio::runtime::{Builder, Handle, Runtime};
mod support {
pub mod panic;
}
use support::panic::test_panic;
#[test]
fn current_handle_panic_caller() -> Result<(), Box<dyn Error>> {
    // `Handle::current()` outside a runtime panics; the reported location
    // must be attributed to the caller (this file), not tokio internals.
    let panic_location_file = test_panic(|| {
        let _ = Handle::current();
    });

    // The panic location should be in this file
    assert_eq!(&panic_location_file.unwrap(), file!());

    Ok(())
}

#[test]
fn into_panic_panic_caller() -> Result<(), Box<dyn Error>> {
    // `JoinError::into_panic()` on a cancellation error (not a panic error)
    // panics; the location must be attributed to this file.
    let panic_location_file = test_panic(move || {
        let rt = current_thread();
        rt.block_on(async {
            let handle = tokio::spawn(future::pending::<()>());

            handle.abort();

            let err = handle.await.unwrap_err();
            // The task was aborted, not panicked, so `into_panic` is invalid.
            assert!(!&err.is_panic());

            let _ = err.into_panic();
        });
    });

    // The panic location should be in this file
    assert_eq!(&panic_location_file.unwrap(), file!());

    Ok(())
}

#[test]
fn builder_worker_threads_panic_caller() -> Result<(), Box<dyn Error>> {
    // `worker_threads(0)` panics; location must be the caller's file.
    let panic_location_file = test_panic(|| {
        let _ = Builder::new_multi_thread().worker_threads(0).build();
    });

    // The panic location should be in this file
    assert_eq!(&panic_location_file.unwrap(), file!());

    Ok(())
}

#[test]
fn builder_max_blocking_threads_panic_caller() -> Result<(), Box<dyn Error>> {
    // `max_blocking_threads(0)` panics; location must be the caller's file.
    let panic_location_file = test_panic(|| {
        let _ = Builder::new_multi_thread().max_blocking_threads(0).build();
    });

    // The panic location should be in this file
    assert_eq!(&panic_location_file.unwrap(), file!());

    Ok(())
}

#[test]
fn builder_global_queue_interval_panic_caller() -> Result<(), Box<dyn Error>> {
    // `global_queue_interval(0)` panics; location must be the caller's file.
    let panic_location_file = test_panic(|| {
        let _ = Builder::new_multi_thread().global_queue_interval(0).build();
    });

    // The panic location should be in this file
    assert_eq!(&panic_location_file.unwrap(), file!());

    Ok(())
}

/// Builds the single-threaded runtime used by the tests above.
fn current_thread() -> Runtime {
    tokio::runtime::Builder::new_current_thread()
        .enable_all()
        .build()
        .unwrap()
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/tests/io_sink.rs | tokio/tests/io_sink.rs | #![warn(rust_2018_idioms)]
#![cfg(feature = "full")]
use tokio::io::AsyncWriteExt;
#[tokio::test]
async fn sink_poll_write_is_cooperative() {
    // An infinite stream of writes to `sink()` must still yield to the
    // scheduler; otherwise the biased `yield_now` branch could never win.
    tokio::select! {
        biased;
        _ = async {
            loop {
                let payload = vec![1, 2, 3];
                let mut dev_null = tokio::io::sink();
                dev_null.write_all(&payload).await.unwrap();
            }
        } => {},
        _ = tokio::task::yield_now() => {}
    }
}

#[tokio::test]
async fn sink_poll_flush_is_cooperative() {
    // Same cooperative-yield check for `poll_flush`.
    tokio::select! {
        biased;
        _ = async {
            loop {
                let mut dev_null = tokio::io::sink();
                dev_null.flush().await.unwrap();
            }
        } => {},
        _ = tokio::task::yield_now() => {}
    }
}

#[tokio::test]
async fn sink_poll_shutdown_is_cooperative() {
    // Same cooperative-yield check for `poll_shutdown`.
    tokio::select! {
        biased;
        _ = async {
            loop {
                let mut dev_null = tokio::io::sink();
                dev_null.shutdown().await.unwrap();
            }
        } => {},
        _ = tokio::task::yield_now() => {}
    }
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/tests/tcp_socket.rs | tokio/tests/tcp_socket.rs | #![warn(rust_2018_idioms)]
#![cfg(all(feature = "full", not(target_os = "wasi"), not(miri)))] // Wasi doesn't support bind
// No `socket` on miri.
use std::time::Duration;
use tokio::net::TcpSocket;
use tokio_test::assert_ok;
#[tokio::test]
async fn basic_usage_v4() {
    // Bind + listen on an ephemeral IPv4 port, connect a client socket,
    // and accept the connection.
    // Create server
    let addr = assert_ok!("127.0.0.1:0".parse());
    let srv = assert_ok!(TcpSocket::new_v4());
    assert_ok!(srv.bind(addr));
    let srv = assert_ok!(srv.listen(128));

    // Create client & connect
    let addr = srv.local_addr().unwrap();
    let cli = assert_ok!(TcpSocket::new_v4());
    let _cli = assert_ok!(cli.connect(addr).await);

    // Accept
    let _ = assert_ok!(srv.accept().await);
}

#[tokio::test]
async fn basic_usage_v6() {
    // Same as `basic_usage_v4`, on an IPv6 loopback socket.
    // Create server
    let addr = assert_ok!("[::1]:0".parse());
    let srv = assert_ok!(TcpSocket::new_v6());
    assert_ok!(srv.bind(addr));
    let srv = assert_ok!(srv.listen(128));

    // Create client & connect
    let addr = srv.local_addr().unwrap();
    let cli = assert_ok!(TcpSocket::new_v6());
    let _cli = assert_ok!(cli.connect(addr).await);

    // Accept
    let _ = assert_ok!(srv.accept().await);
}

#[tokio::test]
async fn bind_before_connect() {
    // The client socket may be bound to a local address before connecting.
    // Create server
    let any_addr = assert_ok!("127.0.0.1:0".parse());
    let srv = assert_ok!(TcpSocket::new_v4());
    assert_ok!(srv.bind(any_addr));
    let srv = assert_ok!(srv.listen(128));

    // Create client & connect
    let addr = srv.local_addr().unwrap();
    let cli = assert_ok!(TcpSocket::new_v4());
    assert_ok!(cli.bind(any_addr));
    let _cli = assert_ok!(cli.connect(addr).await);

    // Accept
    let _ = assert_ok!(srv.accept().await);
}

#[tokio::test]
#[expect(deprecated)] // set_linger is deprecated
async fn basic_linger() {
    // Linger defaults to `None`; a zero duration set must be read back as-is.
    // Create server
    let addr = assert_ok!("127.0.0.1:0".parse());
    let srv = assert_ok!(TcpSocket::new_v4());
    assert_ok!(srv.bind(addr));

    assert!(srv.linger().unwrap().is_none());

    srv.set_linger(Some(Duration::new(0, 0))).unwrap();
    assert_eq!(srv.linger().unwrap(), Some(Duration::new(0, 0)));
}
/// Macro to create a simple test to set and get a socket option.
///
/// Forms:
/// - `test!(getter, setter(arg))` — expects `getter()` to return `arg` after
///   `setter(arg)`, on both IPv4 and IPv6 sockets.
/// - `test!(getter, setter(arg), expected)` — same, but with a distinct
///   expected value (used when the OS transforms the stored value).
/// - `test!(IPv4 …)` / `test!(IPv6 …)` — run on only one address family.
macro_rules! test {
    // Test using the `arg`ument as expected return value.
    ($( #[ $attr: meta ] )* $get_fn: ident, $set_fn: ident ( $arg: expr ) ) => {
        test!($( #[$attr] )* $get_fn, $set_fn($arg), $arg);
    };
    ($( #[ $attr: meta ] )* $get_fn: ident, $set_fn: ident ( $arg: expr ), $expected: expr ) => {
        #[test]
        $( #[$attr] )*
        fn $get_fn() {
            test!(__ new_v4, $get_fn, $set_fn($arg), $expected);
            // The IPv6 half is skipped on the vita target (see cfg).
            #[cfg(not(target_os = "vita"))]
            test!(__ new_v6, $get_fn, $set_fn($arg), $expected);
        }
    };
    // Only test using a IPv4 socket.
    (IPv4 $get_fn: ident, $set_fn: ident ( $arg: expr ) ) => {
        #[test]
        fn $get_fn() {
            test!(__ new_v4, $get_fn, $set_fn($arg), $arg);
        }
    };
    // Only test using a IPv6 socket.
    (IPv6 $get_fn: ident, $set_fn: ident ( $arg: expr ) ) => {
        #[test]
        fn $get_fn() {
            test!(__ new_v6, $get_fn, $set_fn($arg), $arg);
        }
    };
    // Internal to this macro.
    (__ $constructor: ident, $get_fn: ident, $set_fn: ident ( $arg: expr ), $expected: expr ) => {
        let socket = TcpSocket::$constructor().expect("failed to create `TcpSocket`");
        let initial = socket.$get_fn().expect("failed to get initial value");
        let arg = $arg;
        // The round-trip check is only meaningful if the setter actually
        // changes the value.
        assert_ne!(initial, arg, "initial value and argument are the same");
        socket.$set_fn(arg).expect("failed to set option");
        let got = socket.$get_fn().expect("failed to get value");
        let expected = $expected;
        assert_eq!(got, expected, "set and get values differ");
    };
}
// Buffer size used by the send/recv buffer round-trip tests below.
const SET_BUF_SIZE: u32 = 4096;
// Linux doubles the buffer size for kernel usage, and exposes that when
// retrieving the buffer size.
#[cfg(not(target_os = "linux"))]
const GET_BUF_SIZE: u32 = SET_BUF_SIZE;
#[cfg(target_os = "linux")]
const GET_BUF_SIZE: u32 = 2 * SET_BUF_SIZE;

// Set/get round-trip tests for individual socket options, generated by the
// `test!` macro above. cfg gates skip options unsupported on some targets.
test!(keepalive, set_keepalive(true));
test!(reuseaddr, set_reuseaddr(true));
#[cfg(all(
    unix,
    not(target_os = "solaris"),
    not(target_os = "illumos"),
    not(target_os = "cygwin"),
))]
test!(reuseport, set_reuseport(true));
test!(
    send_buffer_size,
    set_send_buffer_size(SET_BUF_SIZE),
    GET_BUF_SIZE
);
test!(
    recv_buffer_size,
    set_recv_buffer_size(SET_BUF_SIZE),
    GET_BUF_SIZE
);
test!(
    #[expect(deprecated, reason = "set_linger is deprecated")]
    linger,
    set_linger(Some(Duration::from_secs(10)))
);
test!(nodelay, set_nodelay(true));
#[cfg(any(
    target_os = "android",
    target_os = "dragonfly",
    target_os = "freebsd",
    target_os = "fuchsia",
    target_os = "linux",
    target_os = "macos",
    target_os = "netbsd",
    target_os = "openbsd",
    target_os = "cygwin",
))]
test!(IPv6 tclass_v6, set_tclass_v6(96));
#[cfg(not(any(
    target_os = "fuchsia",
    target_os = "redox",
    target_os = "solaris",
    target_os = "illumos",
    target_os = "haiku"
)))]
test!(IPv4 tos_v4, set_tos_v4(96));
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/tests/sync_oneshot.rs | tokio/tests/sync_oneshot.rs | #![warn(rust_2018_idioms)]
#![cfg(feature = "sync")]
#[cfg(all(target_family = "wasm", not(target_os = "wasi")))]
use wasm_bindgen_test::wasm_bindgen_test as test;
#[cfg(all(target_family = "wasm", not(target_os = "wasi")))]
use wasm_bindgen_test::wasm_bindgen_test as maybe_tokio_test;
#[cfg(not(all(target_family = "wasm", not(target_os = "wasi"))))]
use tokio::test as maybe_tokio_test;
use tokio::sync::oneshot;
use tokio::sync::oneshot::error::TryRecvError;
use tokio_test::*;
use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};
// Compile-time assertion that both channel halves are `Send`.
#[allow(unused)]
trait AssertSend: Send {}
impl AssertSend for oneshot::Sender<i32> {}
impl AssertSend for oneshot::Receiver<i32> {}

// Test-only helper: poll the sender's `closed()` future exactly once.
#[allow(unused)]
trait SenderExt {
    fn poll_closed(&mut self, cx: &mut Context<'_>) -> Poll<()>;
}
impl<T> SenderExt for oneshot::Sender<T> {
    fn poll_closed(&mut self, cx: &mut Context<'_>) -> Poll<()> {
        tokio::pin! {
            let fut = self.closed();
        }
        fut.poll(cx)
    }
}

#[test]
fn send_recv() {
    // Receiver is pending before the send, woken by it, then yields the value.
    let (tx, rx) = oneshot::channel();
    let mut rx = task::spawn(rx);

    assert_pending!(rx.poll());

    assert_ok!(tx.send(1));

    assert!(rx.is_woken());

    let val = assert_ready_ok!(rx.poll());
    assert_eq!(val, 1);
}

#[maybe_tokio_test]
async fn async_send_recv() {
    // Same round-trip via `.await` instead of manual polling.
    let (tx, rx) = oneshot::channel();

    assert_ok!(tx.send(1));
    assert_eq!(1, assert_ok!(rx.await));
}

#[test]
fn close_tx() {
    // Dropping the sender wakes a pending receiver, which then errors.
    let (tx, rx) = oneshot::channel::<i32>();
    let mut rx = task::spawn(rx);

    assert_pending!(rx.poll());

    drop(tx);

    assert!(rx.is_woken());
    assert_ready_err!(rx.poll());
}

#[test]
fn close_rx() {
    // Sending into a channel whose receiver is gone fails.
    // First, without checking poll_closed()
    //
    let (tx, _) = oneshot::channel();
    assert_err!(tx.send(1));

    // Second, via poll_closed();
    let (tx, rx) = oneshot::channel();
    let mut tx = task::spawn(tx);

    assert_pending!(tx.enter(|cx, mut tx| tx.poll_closed(cx)));

    drop(rx);

    assert!(tx.is_woken());
    assert!(tx.is_closed());
    assert_ready!(tx.enter(|cx, mut tx| tx.poll_closed(cx)));

    assert_err!(tx.into_inner().send(1));
}

#[tokio::test]
#[cfg(feature = "full")]
async fn async_rx_closed() {
    // `closed()` resolves once the receiver is dropped on another task.
    let (mut tx, rx) = oneshot::channel::<()>();

    tokio::spawn(async move {
        drop(rx);
    });

    tx.closed().await;
}

#[test]
fn explicit_close_poll() {
    // `Receiver::close()` lets an already-sent value still be received,
    // and otherwise makes the sender observe closure.
    // First, with message sent
    let (tx, rx) = oneshot::channel();
    let mut rx = task::spawn(rx);

    assert_ok!(tx.send(1));

    rx.close();

    let value = assert_ready_ok!(rx.poll());
    assert_eq!(value, 1);

    // Second, without the message sent
    let (tx, rx) = oneshot::channel::<i32>();
    let mut tx = task::spawn(tx);
    let mut rx = task::spawn(rx);

    assert_pending!(tx.enter(|cx, mut tx| tx.poll_closed(cx)));

    rx.close();

    assert!(tx.is_woken());
    assert!(tx.is_closed());
    assert_ready!(tx.enter(|cx, mut tx| tx.poll_closed(cx)));

    assert_err!(tx.into_inner().send(1));
    assert_ready_err!(rx.poll());

    // Again, but without sending the value this time
    let (tx, rx) = oneshot::channel::<i32>();
    let mut tx = task::spawn(tx);
    let mut rx = task::spawn(rx);

    assert_pending!(tx.enter(|cx, mut tx| tx.poll_closed(cx)));

    rx.close();

    assert!(tx.is_woken());
    assert!(tx.is_closed());
    assert_ready!(tx.enter(|cx, mut tx| tx.poll_closed(cx)));

    assert_ready_err!(rx.poll());
}

#[test]
fn explicit_close_try_recv() {
    // Same as `explicit_close_poll`, reading via `try_recv()`.
    // First, with message sent
    let (tx, mut rx) = oneshot::channel();

    assert_ok!(tx.send(1));

    rx.close();

    let val = assert_ok!(rx.try_recv());
    assert_eq!(1, val);

    // Second, without the message sent
    let (tx, mut rx) = oneshot::channel::<i32>();
    let mut tx = task::spawn(tx);

    assert_pending!(tx.enter(|cx, mut tx| tx.poll_closed(cx)));

    rx.close();

    assert!(tx.is_woken());
    assert!(tx.is_closed());
    assert_ready!(tx.enter(|cx, mut tx| tx.poll_closed(cx)));

    assert_err!(rx.try_recv());
}

#[test]
#[should_panic]
#[cfg(not(target_family = "wasm"))] // wasm currently doesn't support unwinding
fn close_try_recv_poll() {
    // Polling after `try_recv()` already reported closure is expected to panic.
    let (_tx, rx) = oneshot::channel::<i32>();
    let mut rx = task::spawn(rx);

    rx.close();

    assert_err!(rx.try_recv());

    let _ = rx.poll();
}

#[test]
fn close_after_recv() {
    // `close()` after a successful receive does not panic.
    let (tx, mut rx) = oneshot::channel::<i32>();

    tx.send(17).unwrap();

    assert_eq!(17, rx.try_recv().unwrap());
    rx.close();
}

#[test]
fn try_recv_after_completion() {
    // A second `try_recv()` after the value was taken reports `Closed`.
    let (tx, mut rx) = oneshot::channel::<i32>();

    tx.send(17).unwrap();

    assert_eq!(17, rx.try_recv().unwrap());
    assert_eq!(Err(TryRecvError::Closed), rx.try_recv());
    rx.close();
}

#[test]
fn try_recv_after_completion_await() {
    // Same as above, but the value is first taken via polling.
    let (tx, rx) = oneshot::channel::<i32>();
    let mut rx = task::spawn(rx);

    tx.send(17).unwrap();

    assert_eq!(Ok(17), assert_ready!(rx.poll()));
    assert_eq!(Err(TryRecvError::Closed), rx.try_recv());
    rx.close();
}

#[test]
fn drops_tasks() {
    // Dropping both halves releases the wakers captured while pending
    // (ref count back to 1 = only the mock task itself holds the waker).
    let (mut tx, mut rx) = oneshot::channel::<i32>();
    let mut tx_task = task::spawn(());
    let mut rx_task = task::spawn(());

    assert_pending!(tx_task.enter(|cx, _| tx.poll_closed(cx)));
    assert_pending!(rx_task.enter(|cx, _| Pin::new(&mut rx).poll(cx)));

    drop(tx);
    drop(rx);

    assert_eq!(1, tx_task.waker_ref_count());
    assert_eq!(1, rx_task.waker_ref_count());
}

#[test]
fn receiver_changes_task() {
    // Re-polling from a different task swaps the stored waker; only the
    // most recent task is woken by the send.
    let (tx, mut rx) = oneshot::channel();

    let mut task1 = task::spawn(());
    let mut task2 = task::spawn(());

    assert_pending!(task1.enter(|cx, _| Pin::new(&mut rx).poll(cx)));

    assert_eq!(2, task1.waker_ref_count());
    assert_eq!(1, task2.waker_ref_count());

    assert_pending!(task2.enter(|cx, _| Pin::new(&mut rx).poll(cx)));

    assert_eq!(1, task1.waker_ref_count());
    assert_eq!(2, task2.waker_ref_count());

    assert_ok!(tx.send(1));

    assert!(!task1.is_woken());
    assert!(task2.is_woken());

    assert_ready_ok!(task2.enter(|cx, _| Pin::new(&mut rx).poll(cx)));
}

#[test]
fn sender_changes_task() {
    // Same waker-swapping check for the sender's `poll_closed`.
    let (mut tx, rx) = oneshot::channel::<i32>();

    let mut task1 = task::spawn(());
    let mut task2 = task::spawn(());

    assert_pending!(task1.enter(|cx, _| tx.poll_closed(cx)));

    assert_eq!(2, task1.waker_ref_count());
    assert_eq!(1, task2.waker_ref_count());

    assert_pending!(task2.enter(|cx, _| tx.poll_closed(cx)));

    assert_eq!(1, task1.waker_ref_count());
    assert_eq!(2, task2.waker_ref_count());

    drop(rx);

    assert!(!task1.is_woken());
    assert!(task2.is_woken());

    assert_ready!(task2.enter(|cx, _| tx.poll_closed(cx)));
}

#[test]
fn receiver_is_terminated_send() {
    // `is_terminated()` becomes true only once the value has been read.
    let (tx, mut rx) = oneshot::channel::<i32>();

    assert!(
        !rx.is_terminated(),
        "channel is NOT terminated before value is sent"
    );

    tx.send(17).unwrap();

    assert!(
        !rx.is_terminated(),
        "channel is NOT terminated after value is sent"
    );

    let mut task = task::spawn(());
    let poll = task.enter(|cx, _| Pin::new(&mut rx).poll(cx));
    assert_ready_eq!(poll, Ok(17));

    assert!(
        rx.is_terminated(),
        "channel IS terminated after value is read"
    );
}

#[test]
fn receiver_is_terminated_try_recv() {
    // Same as above, reading via `try_recv()`.
    let (tx, mut rx) = oneshot::channel::<i32>();

    assert!(
        !rx.is_terminated(),
        "channel is NOT terminated before value is sent"
    );

    tx.send(17).unwrap();

    assert!(
        !rx.is_terminated(),
        "channel is NOT terminated after value is sent"
    );

    let value = rx.try_recv().expect("value is waiting");
    assert_eq!(value, 17);

    assert!(
        rx.is_terminated(),
        "channel IS terminated after value is read"
    );
}

#[test]
fn receiver_is_terminated_drop() {
    // Dropping the sender alone does not terminate; observing the error does.
    let (tx, mut rx) = oneshot::channel::<i32>();

    assert!(
        !rx.is_terminated(),
        "channel is NOT terminated before sender is dropped"
    );

    drop(tx);

    assert!(
        !rx.is_terminated(),
        "channel is NOT terminated after sender is dropped"
    );

    let mut task = task::spawn(());
    let poll = task.enter(|cx, _| Pin::new(&mut rx).poll(cx));
    assert_ready_err!(poll);

    assert!(
        rx.is_terminated(),
        "channel IS terminated after value is read"
    );
}

#[test]
fn receiver_is_terminated_rx_close() {
    // `close()` alone does not terminate; observing the error does.
    let (_tx, mut rx) = oneshot::channel::<i32>();

    assert!(
        !rx.is_terminated(),
        "channel is NOT terminated before closing"
    );

    rx.close();

    assert!(
        !rx.is_terminated(),
        "channel is NOT terminated before closing"
    );

    let mut task = task::spawn(());
    let poll = task.enter(|cx, _| Pin::new(&mut rx).poll(cx));
    assert_ready_err!(poll);

    assert!(
        rx.is_terminated(),
        "channel IS terminated after value is read"
    );
}

#[test]
fn receiver_is_empty_send() {
    // `is_empty()` is false only while a sent value is waiting to be read.
    let (tx, mut rx) = oneshot::channel::<i32>();

    assert!(rx.is_empty(), "channel IS empty before value is sent");

    tx.send(17).unwrap();

    assert!(!rx.is_empty(), "channel is NOT empty after value is sent");

    let mut task = task::spawn(());
    let poll = task.enter(|cx, _| Pin::new(&mut rx).poll(cx));
    assert_ready_eq!(poll, Ok(17));

    assert!(rx.is_empty(), "channel IS empty after value is read");
}

#[test]
fn receiver_is_empty_try_recv() {
    // Same as above, reading via `try_recv()`.
    let (tx, mut rx) = oneshot::channel::<i32>();

    assert!(rx.is_empty(), "channel IS empty before value is sent");

    tx.send(17).unwrap();

    assert!(!rx.is_empty(), "channel is NOT empty after value is sent");

    let value = rx.try_recv().expect("value is waiting");
    assert_eq!(value, 17);

    assert!(rx.is_empty(), "channel IS empty after value is read");
}

#[test]
fn receiver_is_empty_drop() {
    // A dropped sender never makes the channel non-empty.
    let (tx, mut rx) = oneshot::channel::<i32>();

    assert!(rx.is_empty(), "channel IS empty before sender is dropped");

    drop(tx);

    assert!(rx.is_empty(), "channel IS empty after sender is dropped");

    let mut task = task::spawn(());
    let poll = task.enter(|cx, _| Pin::new(&mut rx).poll(cx));
    assert_ready_err!(poll);

    assert!(rx.is_empty(), "channel IS empty after value is read");
}

#[test]
fn receiver_is_empty_rx_close() {
    // Closing the receiver keeps the channel empty.
    let (_tx, mut rx) = oneshot::channel::<i32>();
    assert!(rx.is_empty());
    rx.close();
    assert!(rx.is_empty());
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/tests/sync_watch.rs | tokio/tests/sync_watch.rs | #![allow(clippy::cognitive_complexity)]
#![warn(rust_2018_idioms)]
#![cfg(feature = "sync")]
#[cfg(all(target_family = "wasm", not(target_os = "wasi")))]
use wasm_bindgen_test::wasm_bindgen_test as test;
use tokio::sync::watch;
use tokio_test::task::spawn;
use tokio_test::{
assert_pending, assert_ready, assert_ready_eq, assert_ready_err, assert_ready_ok,
};
#[test]
fn single_rx_recv() {
    // A receiver sees each new value and gets an error once the sender drops.
    let (tx, mut rx) = watch::channel("one");

    {
        // Not initially notified
        let mut t = spawn(rx.changed());
        assert_pending!(t.poll());
    }
    assert_eq!(*rx.borrow(), "one");

    {
        let mut t = spawn(rx.changed());

        assert_pending!(t.poll());

        tx.send("two").unwrap();

        assert!(t.is_woken());

        assert_ready_ok!(t.poll());
    }
    assert_eq!(*rx.borrow(), "two");

    {
        let mut t = spawn(rx.changed());

        assert_pending!(t.poll());

        drop(tx);

        assert!(t.is_woken());

        assert_ready_err!(t.poll());
    }
    assert_eq!(*rx.borrow(), "two");
}

#[test]
fn rx_version_underflow() {
    let (_tx, mut rx) = watch::channel("one");

    // Version starts at 2, validate we do not underflow
    rx.mark_changed();
    rx.mark_changed();
}

#[test]
fn rx_mark_changed() {
    // `mark_changed()` forces a change notification without a new send;
    // cloned receivers track seen-versions independently.
    let (tx, mut rx) = watch::channel("one");

    let mut rx2 = rx.clone();
    let mut rx3 = rx.clone();
    let mut rx4 = rx.clone();

    {
        rx.mark_changed();
        assert!(rx.has_changed().unwrap());

        let mut t = spawn(rx.changed());
        assert_ready_ok!(t.poll());
    }

    {
        // The clone did not mark itself, so it sees no change.
        assert!(!rx2.has_changed().unwrap());

        let mut t = spawn(rx2.changed());
        assert_pending!(t.poll());
    }

    {
        rx3.mark_changed();

        assert_eq!(*rx3.borrow(), "one");
        assert!(rx3.has_changed().unwrap());

        // `borrow_and_update()` clears the pending change.
        assert_eq!(*rx3.borrow_and_update(), "one");
        assert!(!rx3.has_changed().unwrap());

        let mut t = spawn(rx3.changed());
        assert_pending!(t.poll());
    }

    {
        tx.send("two").unwrap();

        assert!(rx4.has_changed().unwrap());
        assert_eq!(*rx4.borrow_and_update(), "two");

        // Re-marking after the update makes the same value visible again.
        rx4.mark_changed();

        assert!(rx4.has_changed().unwrap());
        assert_eq!(*rx4.borrow_and_update(), "two")
    }

    assert_eq!(*rx.borrow(), "two");
}

#[test]
fn rx_mark_unchanged() {
    // `mark_unchanged()` suppresses a pending change notification.
    let (tx, mut rx) = watch::channel("one");

    let mut rx2 = rx.clone();

    {
        assert!(!rx.has_changed().unwrap());

        rx.mark_changed();
        assert!(rx.has_changed().unwrap());

        rx.mark_unchanged();
        assert!(!rx.has_changed().unwrap());

        let mut t = spawn(rx.changed());
        assert_pending!(t.poll());
    }

    {
        assert!(!rx2.has_changed().unwrap());

        tx.send("two").unwrap();
        assert!(rx2.has_changed().unwrap());

        rx2.mark_unchanged();
        assert!(!rx2.has_changed().unwrap());

        assert_eq!(*rx2.borrow_and_update(), "two");
    }

    assert_eq!(*rx.borrow(), "two");
}

#[test]
fn multi_rx() {
    // Every cloned receiver is woken for every send, across re-polls.
    let (tx, mut rx1) = watch::channel("one");
    let mut rx2 = rx1.clone();

    {
        let mut t1 = spawn(rx1.changed());
        let mut t2 = spawn(rx2.changed());

        assert_pending!(t1.poll());
        assert_pending!(t2.poll());
    }
    assert_eq!(*rx1.borrow(), "one");
    assert_eq!(*rx2.borrow(), "one");

    // Keep t2 pending across two sends to verify it stays woken.
    let mut t2 = spawn(rx2.changed());

    {
        let mut t1 = spawn(rx1.changed());

        assert_pending!(t1.poll());
        assert_pending!(t2.poll());

        tx.send("two").unwrap();

        assert!(t1.is_woken());
        assert!(t2.is_woken());

        assert_ready_ok!(t1.poll());
    }
    assert_eq!(*rx1.borrow(), "two");

    {
        let mut t1 = spawn(rx1.changed());

        assert_pending!(t1.poll());

        tx.send("three").unwrap();

        assert!(t1.is_woken());
        assert!(t2.is_woken());

        assert_ready_ok!(t1.poll());
        assert_ready_ok!(t2.poll());
    }
    assert_eq!(*rx1.borrow(), "three");

    drop(t2);

    assert_eq!(*rx2.borrow(), "three");

    {
        let mut t1 = spawn(rx1.changed());
        let mut t2 = spawn(rx2.changed());

        assert_pending!(t1.poll());
        assert_pending!(t2.poll());

        tx.send("four").unwrap();

        assert_ready_ok!(t1.poll());
        assert_ready_ok!(t2.poll());
    }
    assert_eq!(*rx1.borrow(), "four");
    assert_eq!(*rx2.borrow(), "four");
}

#[test]
fn rx_observes_final_value() {
    // A receiver still observes the last value sent before the sender dropped.

    // Initial value
    let (tx, mut rx) = watch::channel("one");
    drop(tx);

    {
        let mut t1 = spawn(rx.changed());
        assert_ready_err!(t1.poll());
    }
    assert_eq!(*rx.borrow(), "one");

    // Sending a value
    let (tx, mut rx) = watch::channel("one");

    tx.send("two").unwrap();

    {
        let mut t1 = spawn(rx.changed());
        assert_ready_ok!(t1.poll());
    }
    assert_eq!(*rx.borrow(), "two");

    {
        let mut t1 = spawn(rx.changed());
        assert_pending!(t1.poll());

        tx.send("three").unwrap();
        drop(tx);

        assert!(t1.is_woken());

        // The unseen "three" is delivered as Ok even though tx is gone.
        assert_ready_ok!(t1.poll());
    }
    assert_eq!(*rx.borrow(), "three");

    {
        let mut t1 = spawn(rx.changed());
        assert_ready_err!(t1.poll());
    }
    assert_eq!(*rx.borrow(), "three");
}

#[test]
fn poll_close() {
    // `closed()` resolves when the last receiver drops; sends then fail.
    let (tx, rx) = watch::channel("one");

    {
        let mut t = spawn(tx.closed());
        assert_pending!(t.poll());

        drop(rx);

        assert!(t.is_woken());
        assert_ready!(t.poll());
    }

    assert!(tx.send("two").is_err());
}

#[test]
fn borrow_and_update() {
    // `borrow_and_update()` marks the value as seen; plain `borrow()` does not.
    let (tx, mut rx) = watch::channel("one");

    assert!(!rx.has_changed().unwrap());

    tx.send("two").unwrap();
    assert!(rx.has_changed().unwrap());
    assert_ready!(spawn(rx.changed()).poll()).unwrap();
    assert_pending!(spawn(rx.changed()).poll());
    assert!(!rx.has_changed().unwrap());

    tx.send("three").unwrap();
    assert!(rx.has_changed().unwrap());
    assert_eq!(*rx.borrow_and_update(), "three");
    assert_pending!(spawn(rx.changed()).poll());
    assert!(!rx.has_changed().unwrap());

    drop(tx);
    assert_eq!(*rx.borrow_and_update(), "three");
    assert_ready!(spawn(rx.changed()).poll()).unwrap_err();
    assert!(rx.has_changed().is_err());
}

#[test]
fn reopened_after_subscribe() {
    // `is_closed()` flips back to false when a new receiver subscribes.
    let (tx, rx) = watch::channel("one");
    assert!(!tx.is_closed());

    drop(rx);
    assert!(tx.is_closed());

    let rx = tx.subscribe();
    assert!(!tx.is_closed());

    drop(rx);
    assert!(tx.is_closed());
}

#[test]
#[cfg(panic = "unwind")]
#[cfg(not(target_family = "wasm"))] // wasm currently doesn't support unwinding
fn send_modify_panic() {
    // A panicking `send_modify` closure leaves the modified value in place
    // but does not notify waiters; a later successful send does.
    let (tx, mut rx) = watch::channel("one");

    tx.send_modify(|old| *old = "two");
    assert_eq!(*rx.borrow_and_update(), "two");

    let mut rx2 = rx.clone();
    assert_eq!(*rx2.borrow_and_update(), "two");

    let mut task = spawn(rx2.changed());

    let result = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| {
        tx.send_modify(|old| {
            *old = "panicked";
            panic!();
        })
    }));
    assert!(result.is_err());

    assert_pending!(task.poll());
    assert_eq!(*rx.borrow(), "panicked");

    tx.send_modify(|old| *old = "three");
    assert_ready_ok!(task.poll());
    assert_eq!(*rx.borrow_and_update(), "three");
}

#[tokio::test]
async fn multiple_sender() {
    // Sends from any cloned sender are observed, in order, by the receiver.
    let (tx1, mut rx) = watch::channel(0);
    let tx2 = tx1.clone();

    let mut t = spawn(async {
        rx.changed().await.unwrap();
        let v1 = *rx.borrow_and_update();
        rx.changed().await.unwrap();
        let v2 = *rx.borrow_and_update();
        (v1, v2)
    });

    tx1.send(1).unwrap();
    assert_pending!(t.poll());
    tx2.send(2).unwrap();
    assert_ready_eq!(t.poll(), (1, 2));
}

#[tokio::test]
async fn receiver_is_notified_when_last_sender_is_dropped() {
    // Only the drop of the *last* sender wakes a waiting receiver.
    let (tx1, mut rx) = watch::channel(0);
    let tx2 = tx1.clone();

    let mut t = spawn(rx.changed());
    assert_pending!(t.poll());

    drop(tx1);
    assert!(!t.is_woken());
    drop(tx2);

    assert!(t.is_woken());
}

#[tokio::test]
async fn receiver_changed_is_cooperative() {
    // An endless loop of failing `changed()` calls must still yield so the
    // biased `yield_now` branch can win.
    let (tx, mut rx) = watch::channel(());
    drop(tx);
    tokio::select! {
        biased;
        _ = async {
            loop {
                assert!(rx.changed().await.is_err());
            }
        } => {},
        _ = tokio::task::yield_now() => {},
    }
}

#[tokio::test]
async fn receiver_changed_is_cooperative_ok() {
    // Same cooperative check when `changed()` keeps succeeding.
    let (tx, mut rx) = watch::channel(());
    tokio::select! {
        biased;
        _ = async {
            loop {
                assert!(tx.send(()).is_ok());
                assert!(rx.changed().await.is_ok());
            }
        } => {},
        _ = tokio::task::yield_now() => {},
    }
}

#[tokio::test]
async fn receiver_wait_for_is_cooperative() {
    // `wait_for` must also consume budget when it keeps failing.
    let (tx, mut rx) = watch::channel(0);
    drop(tx);
    tokio::select! {
        biased;
        _ = async {
            loop {
                assert!(rx.wait_for(|val| *val == 1).await.is_err());
            }
        } => {},
        _ = tokio::task::yield_now() => {},
    }
}

#[tokio::test]
async fn receiver_wait_for_is_cooperative_ok() {
    // Same cooperative check when `wait_for` keeps succeeding.
    let (tx, mut rx) = watch::channel(0);
    tokio::select! {
        biased;
        _ = async {
            loop {
                assert!(tx.send(1).is_ok());
                assert!(rx.wait_for(|val| *val == 1).await.is_ok());
            }
        } => {},
        _ = tokio::task::yield_now() => {},
    }
}

#[tokio::test]
async fn sender_closed_is_cooperative() {
    // Repeatedly awaiting an already-resolved `closed()` must still yield.
    let (tx, rx) = watch::channel(());
    drop(rx);
    tokio::select! {
        _ = async {
            loop {
                tx.closed().await;
            }
        } => {},
        _ = tokio::task::yield_now() => {},
    }
}

#[tokio::test]
async fn changed_succeeds_on_closed_channel_with_unseen_value() {
    let (tx, mut rx) = watch::channel("A");
    tx.send("B").unwrap();
    drop(tx);

    rx.changed()
        .await
        .expect("should not return error as long as the current value is not seen");
}

#[tokio::test]
async fn changed_errors_on_closed_channel_with_seen_value() {
    let (tx, mut rx) = watch::channel("A");
    drop(tx);

    rx.changed()
        .await
        .expect_err("should return error if the tx is closed and the current value is seen");
}

#[test]
fn has_changed_errors_on_closed_channel_with_unseen_value() {
    let (tx, rx) = watch::channel("A");
    tx.send("B").unwrap();
    drop(tx);

    rx.has_changed()
        .expect_err("`has_changed` returns an error if and only if channel is closed. Even if the current value is not seen.");
}

#[test]
fn has_changed_errors_on_closed_channel_with_seen_value() {
    let (tx, rx) = watch::channel("A");
    drop(tx);

    rx.has_changed()
        .expect_err("`has_changed` returns an error if and only if channel is closed.");
}

#[tokio::test]
async fn wait_for_errors_on_closed_channel_true_predicate() {
    let (tx, mut rx) = watch::channel("A");
    tx.send("B").unwrap();
    drop(tx);

    rx.wait_for(|_| true).await.expect(
        "`wait_for` call does not return error even if channel is closed when predicate is true for last value.",
    );
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/tests/io_driver_drop.rs | tokio/tests/io_driver_drop.rs | #![warn(rust_2018_idioms)]
#![cfg(all(feature = "full", not(target_os = "wasi")))] // Wasi does not support bind
use tokio::net::TcpListener;
use tokio::runtime;
use tokio_test::{assert_err, assert_pending, assert_ready, task};
#[test]
#[cfg_attr(miri, ignore)] // No `socket` in miri.
fn tcp_doesnt_block() {
let rt = rt();
let listener = {
let _enter = rt.enter();
let listener = std::net::TcpListener::bind("127.0.0.1:0").unwrap();
listener.set_nonblocking(true).unwrap();
TcpListener::from_std(listener).unwrap()
};
drop(rt);
let mut task = task::spawn(async move {
assert_err!(listener.accept().await);
});
assert_ready!(task.poll());
}
#[test]
#[cfg_attr(miri, ignore)] // No `socket` in miri.
fn drop_wakes() {
let rt = rt();
let listener = {
let _enter = rt.enter();
let listener = std::net::TcpListener::bind("127.0.0.1:0").unwrap();
listener.set_nonblocking(true).unwrap();
TcpListener::from_std(listener).unwrap()
};
let mut task = task::spawn(async move {
assert_err!(listener.accept().await);
});
assert_pending!(task.poll());
drop(rt);
assert!(task.is_woken());
assert_ready!(task.poll());
}
fn rt() -> runtime::Runtime {
runtime::Builder::new_current_thread()
.enable_all()
.build()
.unwrap()
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/tests/net_bind_resource.rs | tokio/tests/net_bind_resource.rs | #![warn(rust_2018_idioms)]
#![cfg(all(feature = "full", not(target_os = "wasi")))] // Wasi doesn't support panic recovery or bind
use tokio::net::TcpListener;
use std::net;
#[test]
#[should_panic]
#[cfg_attr(miri, ignore)] // No `socket` in miri.
fn no_runtime_panics_binding_net_tcp_listener() {
    // Converting a std listener into a tokio listener without an active
    // runtime is expected to panic.
    let std_listener =
        net::TcpListener::bind("127.0.0.1:0").expect("failed to bind listener");
    drop(TcpListener::try_from(std_listener));
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/tests/net_lookup_host.rs | tokio/tests/net_lookup_host.rs | #![cfg(all(feature = "full", not(target_os = "wasi")))] // Wasi does not support direct socket operations
use tokio::net;
use tokio_test::assert_ok;
use std::io;
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr};
#[tokio::test]
async fn lookup_socket_addr() {
    // A concrete `SocketAddr` input resolves to exactly itself.
    let addr: SocketAddr = "127.0.0.1:8000".parse().unwrap();

    let resolved: Vec<_> = assert_ok!(net::lookup_host(addr).await).collect();
    assert_eq!(resolved, vec![addr]);
}

#[tokio::test]
async fn lookup_str_socket_addr() {
    // A string that parses as a socket address resolves to that address.
    let addr: SocketAddr = "127.0.0.1:8000".parse().unwrap();

    let resolved: Vec<_> = assert_ok!(net::lookup_host("127.0.0.1:8000").await).collect();
    assert_eq!(resolved, vec![addr]);
}

#[tokio::test]
#[cfg_attr(miri, ignore)] // No `getaddrinfo` in miri.
async fn resolve_dns() -> io::Result<()> {
    // `localhost` must resolve to a loopback address (v4 or v6) carrying the
    // requested port.
    let host = net::lookup_host("localhost:3000").await?.next().unwrap();

    let expected = match host {
        SocketAddr::V4(_) => SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 3000),
        SocketAddr::V6(_) => {
            SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)), 3000)
        }
    };
    assert_eq!(host, expected);

    Ok(())
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/tests/fs_open_options.rs | tokio/tests/fs_open_options.rs | #![warn(rust_2018_idioms)]
#![cfg(all(feature = "full", not(target_os = "wasi")))] // WASI does not support all fs operations
use std::io::Write;
use tempfile::NamedTempFile;
use tokio::fs::OpenOptions;
use tokio::io::AsyncReadExt;
// Payload used by the read test below.
const HELLO: &[u8] = b"hello world...";

#[tokio::test]
async fn open_with_open_options_and_read() {
    // Write HELLO via std I/O, then reopen read-only with tokio's
    // `OpenOptions` and read it back.
    let mut tempfile = NamedTempFile::new().unwrap();
    tempfile.write_all(HELLO).unwrap();

    let mut file = OpenOptions::new().read(true).open(tempfile).await.unwrap();

    let mut buf = [0; 1024];
    let n = file.read(&mut buf).await.unwrap();

    assert_eq!(n, HELLO.len());
    assert_eq!(&buf[..n], HELLO);
}

#[tokio::test]
async fn open_options_write() {
    // TESTING HACK: use Debug output to check the stored data
    assert!(format!("{:?}", OpenOptions::new().write(true)).contains("write: true"));
}

#[tokio::test]
async fn open_options_append() {
    // TESTING HACK: use Debug output to check the stored data
    assert!(format!("{:?}", OpenOptions::new().append(true)).contains("append: true"));
}

#[tokio::test]
async fn open_options_truncate() {
    // TESTING HACK: use Debug output to check the stored data
    assert!(format!("{:?}", OpenOptions::new().truncate(true)).contains("truncate: true"));
}

#[tokio::test]
async fn open_options_create() {
    // TESTING HACK: use Debug output to check the stored data
    assert!(format!("{:?}", OpenOptions::new().create(true)).contains("create: true"));
}

#[tokio::test]
async fn open_options_create_new() {
    // TESTING HACK: use Debug output to check the stored data
    assert!(format!("{:?}", OpenOptions::new().create_new(true)).contains("create_new: true"));
}

#[tokio::test]
#[cfg(unix)]
async fn open_options_mode() {
    // 0o644 renders as either decimal (420) or octal depending on the
    // underlying Debug impl; accept both.
    let mode = format!("{:?}", OpenOptions::new().mode(0o644));
    // TESTING HACK: use Debug output to check the stored data
    assert!(
        mode.contains("mode: 420") || mode.contains("mode: 0o000644"),
        "mode is: {mode}"
    );
}

#[tokio::test]
#[cfg(target_os = "linux")]
async fn open_options_custom_flags_linux() {
    // TESTING HACK: use Debug output to check the stored data
    assert!(
        format!("{:?}", OpenOptions::new().custom_flags(libc::O_TRUNC))
            .contains("custom_flags: 512")
    );
}

#[tokio::test]
#[cfg(any(target_os = "freebsd", target_os = "macos"))]
async fn open_options_custom_flags_bsd_family() {
    // TESTING HACK: use Debug output to check the stored data
    assert!(
        format!("{:?}", OpenOptions::new().custom_flags(libc::O_NOFOLLOW))
            .contains("custom_flags: 256,")
    );
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/tests/fs_link.rs | tokio/tests/fs_link.rs | #![warn(rust_2018_idioms)]
#![cfg(all(feature = "full", not(target_os = "wasi")))] // WASI does not support all fs operations
use tokio::fs;
use std::io::Write;
use tempfile::tempdir;
#[tokio::test]
#[cfg_attr(miri, ignore)] // No `linkat` in miri.
async fn test_hard_link() {
    let dir = tempdir().unwrap();
    let src = dir.path().join("src.txt");
    let dst = dir.path().join("dst.txt");

    // Synchronous helper: create (truncating) and fill a file.
    let write_sync = |path: &std::path::Path, data: &[u8]| {
        std::fs::File::create(path).unwrap().write_all(data).unwrap();
    };

    write_sync(&src, b"hello");
    fs::hard_link(&src, &dst).await.unwrap();

    // Rewriting through `src` must be visible through `dst`: both names
    // refer to the same underlying file.
    write_sync(&src, b"new-data");
    assert_eq!(fs::read(&dst).await.unwrap(), b"new-data");

    // test that this is not a symlink:
    assert!(fs::read_link(&dst).await.is_err());
}
#[cfg(unix)]
#[tokio::test]
async fn test_symlink() {
    let dir = tempdir().unwrap();
    let src = dir.path().join("src.txt");
    let dst = dir.path().join("dst.txt");

    std::fs::File::create(&src)
        .unwrap()
        .write_all(b"hello")
        .unwrap();

    fs::symlink(&src, &dst).await.unwrap();

    // Updating the target file is observable through the link.
    std::fs::File::create(&src)
        .unwrap()
        .write_all(b"new-data")
        .unwrap();
    assert_eq!(fs::read(&dst).await.unwrap(), b"new-data");

    // The link resolves back to `src`, and its own metadata reports a
    // symlink file type.
    assert_eq!(fs::read_link(&dst).await.unwrap(), src);
    assert!(fs::symlink_metadata(&dst)
        .await
        .unwrap()
        .file_type()
        .is_symlink());
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/tests/time_timeout.rs | tokio/tests/time_timeout.rs | #![warn(rust_2018_idioms)]
#![cfg(feature = "full")]
use tokio::sync::oneshot;
use tokio::time::{self, timeout, timeout_at, Instant};
use tokio_test::*;
use futures::future::pending;
use std::time::Duration;
#[tokio::test]
async fn simultaneous_deadline_future_completion() {
    // With a deadline of "now" and a future that is already complete, the
    // completed future wins: the timeout resolves `Ok` on the first poll.
    let mut fut = task::spawn(timeout_at(Instant::now(), async {}));
    assert_ready_ok!(fut.poll());
}
#[cfg_attr(target_os = "wasi", ignore = "FIXME: `fut.poll()` panics on Wasi")]
#[tokio::test]
async fn completed_future_past_deadline() {
    // Even when the deadline is already in the past, an already-completed
    // future still resolves `Ok` rather than reporting elapsed.
    let mut fut = task::spawn(timeout_at(Instant::now() - ms(1000), async {}));
    assert_ready_ok!(fut.poll());
}
#[tokio::test]
async fn future_and_deadline_in_future() {
time::pause();
// Not yet complete
let (tx, rx) = oneshot::channel();
// Wrap it with a deadline
let mut fut = task::spawn(timeout_at(Instant::now() + ms(100), rx));
assert_pending!(fut.poll());
// Turn the timer, it runs for the elapsed time
time::advance(ms(90)).await;
assert_pending!(fut.poll());
// Complete the future
tx.send(()).unwrap();
assert!(fut.is_woken());
assert_ready_ok!(fut.poll()).unwrap();
}
#[tokio::test]
async fn future_and_timeout_in_future() {
time::pause();
// Not yet complete
let (tx, rx) = oneshot::channel();
// Wrap it with a deadline
let mut fut = task::spawn(timeout(ms(100), rx));
// Ready!
assert_pending!(fut.poll());
// Turn the timer, it runs for the elapsed time
time::advance(ms(90)).await;
assert_pending!(fut.poll());
// Complete the future
tx.send(()).unwrap();
assert_ready_ok!(fut.poll()).unwrap();
}
#[tokio::test]
async fn very_large_timeout() {
time::pause();
// Not yet complete
let (tx, rx) = oneshot::channel();
// copy-paste unstable `Duration::MAX`
let duration_max = Duration::from_secs(u64::MAX) + Duration::from_nanos(999_999_999);
// Wrap it with a deadline
let mut fut = task::spawn(timeout(duration_max, rx));
// Ready!
assert_pending!(fut.poll());
// Turn the timer, it runs for the elapsed time
time::advance(Duration::from_secs(86400 * 365 * 10)).await;
assert_pending!(fut.poll());
// Complete the future
tx.send(()).unwrap();
assert_ready_ok!(fut.poll()).unwrap();
}
#[tokio::test]
async fn deadline_now_elapses() {
    use futures::future::pending;

    // Paused clock: time only moves when `advance` is called.
    time::pause();

    // Wrap it with a deadline
    let mut fut = task::spawn(timeout_at(Instant::now(), pending::<()>()));

    // Factor in jitter
    // TODO: don't require this
    time::advance(ms(1)).await;

    // The inner future never completes, so the timeout must fire with Err.
    assert_ready_err!(fut.poll());
}
#[tokio::test]
async fn deadline_future_elapses() {
time::pause();
// Wrap it with a deadline
let mut fut = task::spawn(timeout_at(Instant::now() + ms(300), pending::<()>()));
assert_pending!(fut.poll());
time::advance(ms(301)).await;
assert!(fut.is_woken());
assert_ready_err!(fut.poll());
}
/// Shorthand for a `Duration` of `n` milliseconds.
fn ms(n: u64) -> Duration {
    Duration::from_millis(n)
}
#[tokio::test]
async fn timeout_is_not_exhausted_by_future() {
let fut = timeout(ms(1), async {
let mut buffer = [0u8; 1];
loop {
use tokio::io::AsyncReadExt;
let _ = tokio::io::empty().read(&mut buffer).await;
}
});
assert!(fut.await.is_err());
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/tests/signal_info.rs | tokio/tests/signal_info.rs | #![warn(rust_2018_idioms)]
#![cfg(feature = "full")]
#![cfg(any(
target_os = "dragonfly",
target_os = "freebsd",
target_os = "macos",
target_os = "netbsd",
target_os = "openbsd",
target_os = "illumos"
))]
#![cfg(not(miri))] // No `sigaction` on Miri
mod support {
pub mod signal;
}
use support::signal::send_signal;
use tokio::signal;
use tokio::signal::unix::SignalKind;
use tokio::time::{timeout, Duration};
#[tokio::test]
async fn siginfo() {
let mut sig = signal::unix::signal(SignalKind::info()).expect("installed signal handler");
tokio::spawn(async {
send_signal(libc::SIGINFO);
});
// Add a timeout to ensure the test doesn't hang.
timeout(Duration::from_secs(5), sig.recv())
.await
.expect("received SIGINFO signal in time")
.expect("received SIGINFO signal");
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/tests/fs_write.rs | tokio/tests/fs_write.rs | #![warn(rust_2018_idioms)]
#![cfg(all(feature = "full", not(target_os = "wasi")))] // WASI does not support all fs operations
use tempfile::tempdir;
use tokio::fs;
#[tokio::test]
async fn write() {
    const DATA: &str = "Hello, World!";
    let dir = tempdir().unwrap();
    let path = dir.path().join("test.txt");

    // Round-trip: an async write followed by an async read must reproduce
    // the data exactly.
    fs::write(&path, DATA).await.unwrap();
    assert_eq!(fs::read_to_string(&path).await.unwrap(), DATA);
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/tests/io_split.rs | tokio/tests/io_split.rs | #![warn(rust_2018_idioms)]
#![cfg(all(feature = "full", not(target_os = "wasi")))] // Wasi does not support panic recovery
use tokio::io::{
split, AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt, ReadBuf, ReadHalf, WriteHalf,
};
use std::io;
use std::pin::Pin;
use std::task::{Context, Poll};
struct RW;
impl AsyncRead for RW {
    // Test double: every read immediately yields the single byte b"z".
    fn poll_read(
        self: Pin<&mut Self>,
        _cx: &mut Context<'_>,
        buf: &mut ReadBuf<'_>,
    ) -> Poll<io::Result<()>> {
        buf.put_slice(b"z");
        Poll::Ready(Ok(()))
    }
}
impl AsyncWrite for RW {
    // Test double: pretends exactly 1 byte was written per call.
    fn poll_write(
        self: Pin<&mut Self>,
        _cx: &mut Context<'_>,
        _buf: &[u8],
    ) -> Poll<Result<usize, io::Error>> {
        Poll::Ready(Ok(1))
    }

    // Flushing is a no-op that always succeeds.
    fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
        Poll::Ready(Ok(()))
    }

    // Shutdown is a no-op that always succeeds.
    fn poll_shutdown(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
        Poll::Ready(Ok(()))
    }

    // Vectored writes report 2 bytes written — deliberately different from
    // `poll_write`'s 1, so `method_delegation` can tell which path ran.
    fn poll_write_vectored(
        self: Pin<&mut Self>,
        _cx: &mut Context<'_>,
        _bufs: &[io::IoSlice<'_>],
    ) -> Poll<Result<usize, io::Error>> {
        Poll::Ready(Ok(2))
    }

    // Advertise vectored-write support so delegation forwards it.
    fn is_write_vectored(&self) -> bool {
        true
    }
}
#[test]
fn is_send_and_sync() {
    // Compile-time check: both halves of a split stream are Send + Sync.
    fn require_send_sync<T: Send + Sync>() {}
    require_send_sync::<ReadHalf<RW>>();
    require_send_sync::<WriteHalf<RW>>();
}
#[test]
fn split_stream_id() {
    // Halves produced by the same `split` call pair up; halves from
    // different calls must not.
    let (read_a, write_a) = split(RW);
    let (read_b, write_b) = split(RW);
    assert!(read_a.is_pair_of(&write_a));
    assert!(!read_a.is_pair_of(&write_b));
    assert!(read_b.is_pair_of(&write_b));
    assert!(!read_b.is_pair_of(&write_a));
}
#[test]
fn unsplit_ok() {
let (r, w) = split(RW);
r.unsplit(w);
}
#[test]
#[should_panic]
fn unsplit_err1() {
let (r, _) = split(RW);
let (_, w) = split(RW);
r.unsplit(w);
}
#[test]
#[should_panic]
fn unsplit_err2() {
let (_, w) = split(RW);
let (r, _) = split(RW);
r.unsplit(w);
}
#[test]
fn method_delegation() {
let (mut r, mut w) = split(RW);
let mut buf = [0; 1];
tokio_test::block_on(async move {
assert_eq!(1, r.read(&mut buf).await.unwrap());
assert_eq!(b'z', buf[0]);
assert_eq!(1, w.write(b"x").await.unwrap());
assert_eq!(
2,
w.write_vectored(&[io::IoSlice::new(b"x")]).await.unwrap()
);
assert!(w.is_write_vectored());
assert!(w.flush().await.is_ok());
assert!(w.shutdown().await.is_ok());
});
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/tests/macros_select.rs | tokio/tests/macros_select.rs | #![cfg(feature = "macros")]
#![allow(clippy::disallowed_names)]
#[cfg(all(target_family = "wasm", not(target_os = "wasi")))]
use wasm_bindgen_test::wasm_bindgen_test as maybe_tokio_test;
#[cfg(not(all(target_family = "wasm", not(target_os = "wasi"))))]
use tokio::test as maybe_tokio_test;
use tokio::sync::oneshot;
use tokio_test::{assert_ok, assert_pending, assert_ready};
use std::future::poll_fn;
use std::task::Poll::Ready;
#[maybe_tokio_test]
async fn sync_one_lit_expr_comma() {
let foo = tokio::select! {
foo = async { 1 } => foo,
};
assert_eq!(foo, 1);
}
#[maybe_tokio_test]
async fn no_branch_else_only() {
let foo = tokio::select! {
else => 1,
};
assert_eq!(foo, 1);
}
#[maybe_tokio_test]
async fn no_branch_else_only_biased() {
let foo = tokio::select! {
biased;
else => 1,
};
assert_eq!(foo, 1);
}
#[maybe_tokio_test]
async fn nested_one() {
let foo = tokio::select! {
foo = async { 1 } => tokio::select! {
bar = async { foo } => bar,
},
};
assert_eq!(foo, 1);
}
#[maybe_tokio_test]
async fn sync_one_lit_expr_no_comma() {
let foo = tokio::select! {
foo = async { 1 } => foo
};
assert_eq!(foo, 1);
}
#[maybe_tokio_test]
async fn sync_one_lit_expr_block() {
let foo = tokio::select! {
foo = async { 1 } => { foo }
};
assert_eq!(foo, 1);
}
#[maybe_tokio_test]
async fn sync_one_await() {
let foo = tokio::select! {
foo = one() => foo,
};
assert_eq!(foo, 1);
}
#[maybe_tokio_test]
async fn sync_one_ident() {
let one = one();
let foo = tokio::select! {
foo = one => foo,
};
assert_eq!(foo, 1);
}
#[maybe_tokio_test]
async fn sync_two() {
use std::cell::Cell;
let cnt = Cell::new(0);
let res = tokio::select! {
foo = async {
cnt.set(cnt.get() + 1);
1
} => foo,
bar = async {
cnt.set(cnt.get() + 1);
2
} => bar,
};
assert_eq!(1, cnt.get());
assert!(res == 1 || res == 2);
}
#[maybe_tokio_test]
async fn drop_in_fut() {
let s = "hello".to_string();
let res = tokio::select! {
foo = async {
let v = one().await;
drop(s);
v
} => foo
};
assert_eq!(res, 1);
}
#[maybe_tokio_test]
#[cfg(feature = "full")]
async fn one_ready() {
let (tx1, rx1) = oneshot::channel::<i32>();
let (_tx2, rx2) = oneshot::channel::<i32>();
tx1.send(1).unwrap();
let v = tokio::select! {
res = rx1 => {
assert_ok!(res)
},
_ = rx2 => unreachable!(),
};
assert_eq!(1, v);
}
#[maybe_tokio_test]
#[cfg(feature = "full")]
async fn select_streams() {
use tokio::sync::mpsc;
let (tx1, mut rx1) = mpsc::unbounded_channel::<i32>();
let (tx2, mut rx2) = mpsc::unbounded_channel::<i32>();
tokio::spawn(async move {
assert_ok!(tx2.send(1));
tokio::task::yield_now().await;
assert_ok!(tx1.send(2));
tokio::task::yield_now().await;
assert_ok!(tx2.send(3));
tokio::task::yield_now().await;
drop((tx1, tx2));
});
let mut rem = true;
let mut msgs = vec![];
while rem {
tokio::select! {
Some(x) = rx1.recv() => {
msgs.push(x);
}
Some(y) = rx2.recv() => {
msgs.push(y);
}
else => {
rem = false;
}
}
}
msgs.sort_unstable();
assert_eq!(&msgs[..], &[1, 2, 3]);
}
#[maybe_tokio_test]
async fn move_uncompleted_futures() {
let (tx1, mut rx1) = oneshot::channel::<i32>();
let (tx2, mut rx2) = oneshot::channel::<i32>();
tx1.send(1).unwrap();
tx2.send(2).unwrap();
let ran;
tokio::select! {
res = &mut rx1 => {
assert_eq!(1, assert_ok!(res));
assert_eq!(2, assert_ok!(rx2.await));
ran = true;
},
res = &mut rx2 => {
assert_eq!(2, assert_ok!(res));
assert_eq!(1, assert_ok!(rx1.await));
ran = true;
},
}
assert!(ran);
}
#[maybe_tokio_test]
async fn nested() {
let res = tokio::select! {
x = async { 1 } => {
tokio::select! {
y = async { 2 } => x + y,
}
}
};
assert_eq!(res, 3);
}
#[cfg(target_pointer_width = "64")]
mod pointer_64_tests {
use super::maybe_tokio_test;
use futures::future;
use std::mem;
#[maybe_tokio_test]
async fn struct_size_1() {
let fut = async {
let ready = future::ready(0i32);
tokio::select! {
_ = ready => {},
}
};
assert_eq!(mem::size_of_val(&fut), 32);
}
#[maybe_tokio_test]
async fn struct_size_2() {
let fut = async {
let ready1 = future::ready(0i32);
let ready2 = future::ready(0i32);
tokio::select! {
_ = ready1 => {},
_ = ready2 => {},
}
};
assert_eq!(mem::size_of_val(&fut), 40);
}
#[maybe_tokio_test]
async fn struct_size_3() {
let fut = async {
let ready1 = future::ready(0i32);
let ready2 = future::ready(0i32);
let ready3 = future::ready(0i32);
tokio::select! {
_ = ready1 => {},
_ = ready2 => {},
_ = ready3 => {},
}
};
assert_eq!(mem::size_of_val(&fut), 48);
}
}
#[maybe_tokio_test]
async fn mutable_borrowing_future_with_same_borrow_in_block() {
let mut value = 234;
tokio::select! {
_ = require_mutable(&mut value) => { },
_ = async_noop() => {
value += 5;
},
}
assert!(value >= 234);
}
#[maybe_tokio_test]
async fn mutable_borrowing_future_with_same_borrow_in_block_and_else() {
let mut value = 234;
tokio::select! {
_ = require_mutable(&mut value) => { },
_ = async_noop() => {
value += 5;
},
else => {
value += 27;
},
}
assert!(value >= 234);
}
#[maybe_tokio_test]
async fn future_panics_after_poll() {
use tokio_test::task;
let (tx, rx) = oneshot::channel();
let mut polled = false;
let f = poll_fn(|_| {
assert!(!polled);
polled = true;
Ready(None::<()>)
});
let mut f = task::spawn(async {
tokio::select! {
Some(_) = f => unreachable!(),
ret = rx => ret.unwrap(),
}
});
assert_pending!(f.poll());
assert_pending!(f.poll());
assert_ok!(tx.send(1));
let res = assert_ready!(f.poll());
assert_eq!(1, res);
}
#[maybe_tokio_test]
async fn disable_with_if() {
use tokio_test::task;
let f = poll_fn(|_| panic!());
let (tx, rx) = oneshot::channel();
let mut f = task::spawn(async {
tokio::select! {
_ = f, if false => unreachable!(),
_ = rx => (),
}
});
assert_pending!(f.poll());
assert_ok!(tx.send(()));
assert!(f.is_woken());
assert_ready!(f.poll());
}
#[maybe_tokio_test]
async fn join_with_select() {
use tokio_test::task;
let (tx1, mut rx1) = oneshot::channel();
let (tx2, mut rx2) = oneshot::channel();
let mut f = task::spawn(async {
let mut a = None;
let mut b = None;
while a.is_none() || b.is_none() {
tokio::select! {
v1 = &mut rx1, if a.is_none() => a = Some(assert_ok!(v1)),
v2 = &mut rx2, if b.is_none() => b = Some(assert_ok!(v2))
}
}
(a.unwrap(), b.unwrap())
});
assert_pending!(f.poll());
assert_ok!(tx1.send(123));
assert!(f.is_woken());
assert_pending!(f.poll());
assert_ok!(tx2.send(456));
assert!(f.is_woken());
let (a, b) = assert_ready!(f.poll());
assert_eq!(a, 123);
assert_eq!(b, 456);
}
#[tokio::test]
#[cfg(feature = "full")]
async fn use_future_in_if_condition() {
use tokio::time::{self, Duration};
tokio::select! {
_ = time::sleep(Duration::from_millis(10)), if false => {
panic!("if condition ignored")
}
_ = async { 1u32 } => {
}
}
}
#[tokio::test]
#[cfg(feature = "full")]
async fn use_future_in_if_condition_biased() {
use tokio::time::{self, Duration};
tokio::select! {
biased;
_ = time::sleep(Duration::from_millis(10)), if false => {
panic!("if condition ignored")
}
_ = async { 1u32 } => {
}
}
}
#[maybe_tokio_test]
async fn many_branches() {
let num = tokio::select! {
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
x = async { 1 } => x,
};
assert_eq!(1, num);
}
#[maybe_tokio_test]
async fn never_branch_no_warnings() {
let t = tokio::select! {
_ = async_never() => 0,
one_async_ready = one() => one_async_ready,
};
assert_eq!(t, 1);
}
/// Helper future that resolves to `1`.
async fn one() -> usize {
    1
}
async fn require_mutable(_: &mut i32) {}
async fn async_noop() {}
async fn async_never() -> ! {
futures::future::pending().await
}
// From https://github.com/tokio-rs/tokio/issues/2857
#[maybe_tokio_test]
async fn mut_on_left_hand_side() {
let v = async move {
let ok = async { 1 };
tokio::pin!(ok);
tokio::select! {
mut a = &mut ok => {
a += 1;
a
}
}
}
.await;
assert_eq!(v, 2);
}
#[maybe_tokio_test]
async fn biased_one_not_ready() {
let (_tx1, rx1) = oneshot::channel::<i32>();
let (tx2, rx2) = oneshot::channel::<i32>();
let (tx3, rx3) = oneshot::channel::<i32>();
tx2.send(2).unwrap();
tx3.send(3).unwrap();
let v = tokio::select! {
biased;
_ = rx1 => unreachable!(),
res = rx2 => {
assert_ok!(res)
},
_ = rx3 => {
panic!("This branch should never be activated because `rx2` should be polled before `rx3` due to `biased;`.")
}
};
assert_eq!(2, v);
}
#[maybe_tokio_test]
#[cfg(feature = "full")]
async fn biased_eventually_ready() {
use tokio::task::yield_now;
let one = async {};
let two = async { yield_now().await };
let three = async { yield_now().await };
let mut count = 0u8;
tokio::pin!(one, two, three);
loop {
tokio::select! {
biased;
_ = &mut two, if count < 2 => {
count += 1;
assert_eq!(count, 2);
}
_ = &mut three, if count < 3 => {
count += 1;
assert_eq!(count, 3);
}
_ = &mut one, if count < 1 => {
count += 1;
assert_eq!(count, 1);
}
else => break,
}
}
assert_eq!(count, 3);
}
// https://github.com/tokio-rs/tokio/issues/3830
// https://github.com/rust-lang/rust-clippy/issues/7304
#[warn(clippy::default_numeric_fallback)]
pub async fn default_numeric_fallback() {
tokio::select! {
_ = async {} => (),
else => (),
}
}
// https://github.com/tokio-rs/tokio/issues/4182
#[maybe_tokio_test]
async fn mut_ref_patterns() {
tokio::select! {
Some(mut foo) = async { Some("1".to_string()) } => {
assert_eq!(foo, "1");
foo = "2".to_string();
assert_eq!(foo, "2");
},
};
tokio::select! {
Some(ref foo) = async { Some("1".to_string()) } => {
assert_eq!(*foo, "1");
},
};
tokio::select! {
Some(ref mut foo) = async { Some("1".to_string()) } => {
assert_eq!(*foo, "1");
*foo = "2".to_string();
assert_eq!(*foo, "2");
},
};
}
#[cfg(tokio_unstable)]
mod unstable {
use tokio::runtime::RngSeed;
#[test]
fn deterministic_select_current_thread() {
let seed = b"bytes used to generate seed";
let rt1 = tokio::runtime::Builder::new_current_thread()
.rng_seed(RngSeed::from_bytes(seed))
.build()
.unwrap();
let rt1_values = rt1.block_on(async { (select_0_to_9().await, select_0_to_9().await) });
let rt2 = tokio::runtime::Builder::new_current_thread()
.rng_seed(RngSeed::from_bytes(seed))
.build()
.unwrap();
let rt2_values = rt2.block_on(async { (select_0_to_9().await, select_0_to_9().await) });
assert_eq!(rt1_values, rt2_values);
}
#[test]
#[cfg(all(feature = "rt-multi-thread", not(target_os = "wasi")))]
fn deterministic_select_multi_thread() {
let seed = b"bytes used to generate seed";
let (tx, rx) = std::sync::mpsc::channel();
let rt1 = tokio::runtime::Builder::new_multi_thread()
.worker_threads(1)
.on_thread_park(move || tx.send(()).unwrap())
.rng_seed(RngSeed::from_bytes(seed))
.build()
.unwrap();
// This makes sure that `enter_runtime()` from worker thread is called before the one from main thread,
// ensuring that the RNG state is consistent. See also https://github.com/tokio-rs/tokio/pull/7495.
rx.recv().unwrap();
let rt1_values = rt1.block_on(async {
tokio::spawn(async { (select_0_to_9().await, select_0_to_9().await) })
.await
.unwrap()
});
let (tx, rx) = std::sync::mpsc::channel();
let rt2 = tokio::runtime::Builder::new_multi_thread()
.worker_threads(1)
.on_thread_park(move || tx.send(()).unwrap())
.rng_seed(RngSeed::from_bytes(seed))
.build()
.unwrap();
// This makes sure that `enter_runtime()` from worker thread is called before the one from main thread,
// ensuring that the RNG state is consistent. See also https://github.com/tokio-rs/tokio/pull/7495.
rx.recv().unwrap();
let rt2_values = rt2.block_on(async {
tokio::spawn(async { (select_0_to_9().await, select_0_to_9().await) })
.await
.unwrap()
});
assert_eq!(rt1_values, rt2_values);
}
async fn select_0_to_9() -> u32 {
tokio::select!(
x = async { 0 } => x,
x = async { 1 } => x,
x = async { 2 } => x,
x = async { 3 } => x,
x = async { 4 } => x,
x = async { 5 } => x,
x = async { 6 } => x,
x = async { 7 } => x,
x = async { 8 } => x,
x = async { 9 } => x,
)
}
}
#[tokio::test]
async fn select_into_future() {
struct NotAFuture;
impl std::future::IntoFuture for NotAFuture {
type Output = ();
type IntoFuture = std::future::Ready<()>;
fn into_future(self) -> Self::IntoFuture {
std::future::ready(())
}
}
tokio::select! {
() = NotAFuture => {},
}
}
// regression test for https://github.com/tokio-rs/tokio/issues/6721
#[tokio::test]
async fn temporary_lifetime_extension() {
tokio::select! {
() = &mut std::future::ready(()) => {},
}
}
#[tokio::test]
async fn select_is_budget_aware() {
const BUDGET: usize = 128;
let task = || {
Box::pin(async move {
tokio::select! {
biased;
() = tokio::task::coop::consume_budget() => {},
() = std::future::ready(()) => {}
}
})
};
for _ in 0..BUDGET {
let poll = futures::poll!(&mut task());
assert!(poll.is_ready());
}
let poll = futures::poll!(&mut task());
assert!(poll.is_pending());
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/tests/fs_remove_file.rs | tokio/tests/fs_remove_file.rs | #![warn(rust_2018_idioms)]
#![cfg(all(feature = "full", not(target_os = "wasi")))] // WASI does not support all fs operations
use tempfile::tempdir;
use tokio::fs;
#[tokio::test]
async fn remove_file() {
    let temp_dir = tempdir().unwrap();
    let file_path = temp_dir.path().join("a.txt");

    fs::write(&file_path, b"Hello File!").await.unwrap();
    assert!(fs::try_exists(&file_path).await.unwrap());

    fs::remove_file(&file_path).await.unwrap();

    // should no longer exist
    if let Ok(exists) = fs::try_exists(file_path).await {
        assert!(!exists);
    } else {
        println!("ignored try_exists error after remove_file");
    }
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/tests/sync_panic.rs | tokio/tests/sync_panic.rs | #![warn(rust_2018_idioms)]
#![cfg(all(feature = "full", not(target_os = "wasi")))]
#![cfg(panic = "unwind")]
use std::{error::Error, sync::Arc};
use tokio::{
runtime::{Builder, Runtime},
sync::{broadcast, mpsc, oneshot, Mutex, RwLock, Semaphore},
};
mod support {
pub mod panic;
}
use support::panic::test_panic;
#[test]
fn broadcast_channel_panic_caller() -> Result<(), Box<dyn Error>> {
let panic_location_file = test_panic(|| {
let (_, _) = broadcast::channel::<u32>(0);
});
// The panic location should be in this file
assert_eq!(&panic_location_file.unwrap(), file!());
Ok(())
}
#[test]
fn mutex_blocking_lock_panic_caller() -> Result<(), Box<dyn Error>> {
let panic_location_file = test_panic(|| {
let rt = current_thread();
rt.block_on(async {
let mutex = Mutex::new(5_u32);
let _g = mutex.blocking_lock();
});
});
// The panic location should be in this file
assert_eq!(&panic_location_file.unwrap(), file!());
Ok(())
}
#[test]
fn oneshot_blocking_recv_panic_caller() -> Result<(), Box<dyn Error>> {
let panic_location_file = test_panic(|| {
let rt = current_thread();
rt.block_on(async {
let (_tx, rx) = oneshot::channel::<u8>();
let _ = rx.blocking_recv();
});
});
// The panic location should be in this file
assert_eq!(&panic_location_file.unwrap(), file!());
Ok(())
}
#[test]
fn rwlock_with_max_readers_panic_caller() -> Result<(), Box<dyn Error>> {
let panic_location_file = test_panic(|| {
let _ = RwLock::<u8>::with_max_readers(0, (u32::MAX >> 3) + 1);
});
// The panic location should be in this file
assert_eq!(&panic_location_file.unwrap(), file!());
Ok(())
}
#[test]
fn rwlock_blocking_read_panic_caller() -> Result<(), Box<dyn Error>> {
let panic_location_file = test_panic(|| {
let rt = current_thread();
rt.block_on(async {
let lock = RwLock::<u8>::new(0);
let _ = lock.blocking_read();
});
});
// The panic location should be in this file
assert_eq!(&panic_location_file.unwrap(), file!());
Ok(())
}
#[test]
fn rwlock_blocking_write_panic_caller() -> Result<(), Box<dyn Error>> {
let panic_location_file = test_panic(|| {
let rt = current_thread();
rt.block_on(async {
let lock = RwLock::<u8>::new(0);
let _ = lock.blocking_write();
});
});
// The panic location should be in this file
assert_eq!(&panic_location_file.unwrap(), file!());
Ok(())
}
#[test]
fn mpsc_bounded_channel_panic_caller() -> Result<(), Box<dyn Error>> {
let panic_location_file = test_panic(|| {
let (_, _) = mpsc::channel::<u8>(0);
});
// The panic location should be in this file
assert_eq!(&panic_location_file.unwrap(), file!());
Ok(())
}
#[test]
fn mpsc_bounded_receiver_blocking_recv_panic_caller() -> Result<(), Box<dyn Error>> {
let panic_location_file = test_panic(|| {
let rt = current_thread();
let (_tx, mut rx) = mpsc::channel::<u8>(1);
rt.block_on(async {
let _ = rx.blocking_recv();
});
});
// The panic location should be in this file
assert_eq!(&panic_location_file.unwrap(), file!());
Ok(())
}
#[test]
fn mpsc_bounded_receiver_blocking_recv_many_panic_caller() -> Result<(), Box<dyn Error>> {
    // BUG FIX: this test previously called `blocking_recv`, duplicating the
    // test above and never exercising `blocking_recv_many` despite its name.
    // Mirror the unbounded variant: call `blocking_recv_many` inside an async
    // context and verify the resulting panic is attributed to this file.
    let panic_location_file = test_panic(|| {
        let rt = current_thread();
        let (_tx, mut rx) = mpsc::channel::<u8>(1);
        let mut vec = vec![];
        rt.block_on(async {
            let _ = rx.blocking_recv_many(&mut vec, 1);
        });
    });
    // The panic location should be in this file
    assert_eq!(&panic_location_file.unwrap(), file!());
    Ok(())
}
#[test]
fn mpsc_bounded_sender_blocking_send_panic_caller() -> Result<(), Box<dyn Error>> {
let panic_location_file = test_panic(|| {
let rt = current_thread();
let (tx, _rx) = mpsc::channel::<u8>(1);
rt.block_on(async {
let _ = tx.blocking_send(3);
});
});
// The panic location should be in this file
assert_eq!(&panic_location_file.unwrap(), file!());
Ok(())
}
#[test]
fn mpsc_unbounded_receiver_blocking_recv_panic_caller() -> Result<(), Box<dyn Error>> {
let panic_location_file = test_panic(|| {
let rt = current_thread();
let (_tx, mut rx) = mpsc::unbounded_channel::<u8>();
rt.block_on(async {
let _ = rx.blocking_recv();
});
});
// The panic location should be in this file
assert_eq!(&panic_location_file.unwrap(), file!());
Ok(())
}
#[test]
fn mpsc_unbounded_receiver_blocking_recv_many_panic_caller() -> Result<(), Box<dyn Error>> {
let panic_location_file = test_panic(|| {
let rt = current_thread();
let (_tx, mut rx) = mpsc::unbounded_channel::<u8>();
let mut vec = vec![];
rt.block_on(async {
let _ = rx.blocking_recv_many(&mut vec, 1);
});
});
// The panic location should be in this file
assert_eq!(&panic_location_file.unwrap(), file!());
Ok(())
}
#[test]
fn semaphore_merge_unrelated_owned_permits() -> Result<(), Box<dyn Error>> {
let panic_location_file = test_panic(|| {
let sem1 = Arc::new(Semaphore::new(42));
let sem2 = Arc::new(Semaphore::new(42));
let mut p1 = sem1.try_acquire_owned().unwrap();
let p2 = sem2.try_acquire_owned().unwrap();
p1.merge(p2);
});
// The panic location should be in this file
assert_eq!(&panic_location_file.unwrap(), file!());
Ok(())
}
#[test]
fn semaphore_merge_unrelated_permits() -> Result<(), Box<dyn Error>> {
let panic_location_file = test_panic(|| {
let sem1 = Semaphore::new(42);
let sem2 = Semaphore::new(42);
let mut p1 = sem1.try_acquire().unwrap();
let p2 = sem2.try_acquire().unwrap();
p1.merge(p2);
});
// The panic location should be in this file
assert_eq!(&panic_location_file.unwrap(), file!());
Ok(())
}
/// Builds a single-threaded runtime with all drivers enabled.
fn current_thread() -> Runtime {
    Builder::new_current_thread()
        .enable_all()
        .build()
        .unwrap()
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/tests/sync_semaphore.rs | tokio/tests/sync_semaphore.rs | #![cfg(feature = "sync")]
#[cfg(all(target_family = "wasm", not(target_os = "wasi")))]
use wasm_bindgen_test::wasm_bindgen_test as test;
use std::sync::Arc;
use tokio::sync::Semaphore;
#[test]
fn no_permits() {
    // this should not panic
    let _sem = Semaphore::new(0);
}
#[test]
fn try_acquire() {
    let sem = Semaphore::new(1);

    // While the only permit is held, further acquisition fails.
    {
        let held = sem.try_acquire();
        assert!(held.is_ok());
        assert!(sem.try_acquire().is_err());
    }

    // Dropping the permit returns it to the semaphore.
    assert!(sem.try_acquire().is_ok());
}
#[tokio::test]
#[cfg(feature = "full")]
async fn acquire() {
    // A task blocked in `acquire` is woken once the held permit is dropped.
    let sem = Arc::new(Semaphore::new(1));
    let held = sem.try_acquire().unwrap();
    let waiter = {
        let sem = sem.clone();
        tokio::spawn(async move {
            let _permit = sem.acquire().await;
        })
    };
    drop(held);
    waiter.await.unwrap();
}
#[tokio::test]
#[cfg(feature = "full")]
async fn add_permits() {
    // A waiter on an empty semaphore is released once a permit is added.
    let sem = Arc::new(Semaphore::new(0));
    let waiter = {
        let sem = sem.clone();
        tokio::spawn(async move {
            let _permit = sem.acquire().await;
        })
    };
    sem.add_permits(1);
    waiter.await.unwrap();
}
#[test]
fn forget() {
    let sem = Arc::new(Semaphore::new(1));
    {
        let permit = sem.try_acquire().unwrap();
        assert_eq!(sem.available_permits(), 0);
        // Forgetting consumes the permit without returning it to the pool.
        permit.forget();
        assert_eq!(sem.available_permits(), 0);
    }
    // Even after the scope ends, the forgotten permit is never restored.
    assert_eq!(sem.available_permits(), 0);
    assert!(sem.try_acquire().is_err());
}
#[test]
fn merge() {
    let sem = Arc::new(Semaphore::new(3));
    {
        let mut combined = sem.try_acquire().unwrap();
        assert_eq!(sem.available_permits(), 2);
        let extra = sem.try_acquire_many(2).unwrap();
        assert_eq!(sem.available_permits(), 0);
        // Merging folds `extra`'s two permits into `combined`; no permits
        // are released by the merge itself.
        combined.merge(extra);
        assert_eq!(sem.available_permits(), 0);
    }
    // Dropping the merged permit returns all three permits at once.
    assert_eq!(sem.available_permits(), 3);
}
#[test]
#[cfg(not(target_family = "wasm"))] // No stack unwinding on wasm targets
#[should_panic]
fn merge_unrelated_permits() {
    // Permits taken from distinct semaphores must not be merged.
    let first = Arc::new(Semaphore::new(3));
    let second = Arc::new(Semaphore::new(3));
    let mut held = first.try_acquire().unwrap();
    let unrelated = second.try_acquire().unwrap();
    held.merge(unrelated);
}
#[test]
fn split() {
    // Splitting moves permits out of an existing permit object without
    // changing the semaphore's available count.
    let sem = Semaphore::new(5);
    let mut p1 = sem.try_acquire_many(3).unwrap();
    assert_eq!(sem.available_permits(), 2);
    assert_eq!(p1.num_permits(), 3);
    // Move 1 of p1's 3 permits into a new permit object.
    let mut p2 = p1.split(1).unwrap();
    assert_eq!(sem.available_permits(), 2);
    assert_eq!(p1.num_permits(), 2);
    assert_eq!(p2.num_permits(), 1);
    // A zero-sized split is allowed and yields an empty permit.
    let p3 = p1.split(0).unwrap();
    assert_eq!(p3.num_permits(), 0);
    // Dropping p1 returns its remaining 2 permits (2 + 2 now available).
    drop(p1);
    assert_eq!(sem.available_permits(), 4);
    // Splitting p2's only permit leaves p2 empty.
    let p4 = p2.split(1).unwrap();
    assert_eq!(p2.num_permits(), 0);
    assert_eq!(p4.num_permits(), 1);
    // Asking for more permits than the permit holds returns None.
    assert!(p2.split(1).is_none());
    // Dropping empty permits releases nothing...
    drop(p2);
    assert_eq!(sem.available_permits(), 4);
    drop(p3);
    assert_eq!(sem.available_permits(), 4);
    // ...while dropping p4 returns the final permit.
    drop(p4);
    assert_eq!(sem.available_permits(), 5);
}
#[tokio::test]
#[cfg(feature = "full")]
async fn stress_test() {
    // Hammer the semaphore from many short-lived tasks; once they have all
    // finished, every one of the 5 permits must be back in the pool.
    let sem = Arc::new(Semaphore::new(5));
    let mut handles = Vec::new();
    for _ in 0..1000 {
        let sem = sem.clone();
        handles.push(tokio::spawn(async move {
            let _permit = sem.acquire().await;
        }));
    }
    for handle in handles {
        handle.await.unwrap();
    }
    // there should be exactly 5 semaphores available now
    let _permits: Vec<_> = (0..5).map(|_| sem.try_acquire().unwrap()).collect();
    assert!(sem.try_acquire().is_err());
}
#[test]
fn add_max_amount_permits() {
    // Adding permits up to the documented maximum is allowed, and all of
    // them become available.
    // Consistency fix: use the imported `Semaphore` like every sibling test
    // instead of the fully-qualified `tokio::sync::Semaphore` path.
    let s = Semaphore::new(0);
    s.add_permits(Semaphore::MAX_PERMITS);
    assert_eq!(s.available_permits(), Semaphore::MAX_PERMITS);
}
#[cfg(not(target_family = "wasm"))] // wasm currently doesn't support unwinding
#[test]
#[should_panic]
fn add_more_than_max_amount_permits1() {
    // Exceeding MAX_PERMITS in a single `add_permits` call must panic.
    // Consistency fix: use the imported `Semaphore` like every sibling test
    // instead of the fully-qualified `tokio::sync::Semaphore` path.
    let s = Semaphore::new(1);
    s.add_permits(Semaphore::MAX_PERMITS);
}
#[cfg(not(target_family = "wasm"))] // wasm currently doesn't support unwinding
#[test]
#[should_panic]
fn add_more_than_max_amount_permits2() {
    // Two single-permit additions that together exceed MAX_PERMITS must
    // panic on the second call.
    let sem = Semaphore::new(Semaphore::MAX_PERMITS - 1);
    for _ in 0..2 {
        sem.add_permits(1);
    }
}
#[cfg(not(target_family = "wasm"))] // wasm currently doesn't support unwinding
#[test]
#[should_panic]
fn panic_when_exceeds_maxpermits() {
    // Constructing a semaphore with more than MAX_PERMITS must panic.
    drop(Semaphore::new(Semaphore::MAX_PERMITS + 1));
}
#[test]
fn no_panic_at_maxpermits() {
    // Exactly MAX_PERMITS is fine at construction time...
    drop(Semaphore::new(Semaphore::MAX_PERMITS));
    // ...and when the maximum is reached via `add_permits`.
    let sem = Semaphore::new(Semaphore::MAX_PERMITS - 1);
    sem.add_permits(1);
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/tests/no_rt.rs | tokio/tests/no_rt.rs | #![cfg(all(feature = "full", not(target_os = "wasi")))] // Wasi does not support panic recovery
use tokio::net::TcpStream;
use tokio::sync::oneshot;
use tokio::time::{timeout, Duration};
use futures::executor::block_on;
use std::net::TcpListener;
#[test]
#[should_panic(
    expected = "there is no reactor running, must be called from the context of a Tokio 1.x runtime"
)]
fn timeout_panics_when_no_tokio_context() {
    // Polling a tokio `timeout` on a non-tokio executor must panic with the
    // "no reactor running" message.
    let fut = timeout_value();
    block_on(fut);
}
#[test]
#[should_panic(
    expected = "there is no reactor running, must be called from the context of a Tokio 1.x runtime"
)]
#[cfg_attr(miri, ignore)] // No `socket` in miri.
fn panics_when_no_reactor() {
    // Connecting a tokio `TcpStream` needs a reactor; polling the connect
    // future outside any tokio runtime must panic.
    let listener = TcpListener::bind("127.0.0.1:0").unwrap();
    let addr = listener.local_addr().unwrap();
    block_on(TcpStream::connect(&addr)).unwrap();
}
/// Applies a short tokio timeout to a oneshot receiver; when polled outside
/// a tokio runtime this panics with the "no reactor running" message.
async fn timeout_value() {
    let (_tx, rx) = oneshot::channel::<()>();
    let _ = timeout(Duration::from_millis(10), rx).await;
}
#[test]
#[should_panic(
    expected = "there is no reactor running, must be called from the context of a Tokio 1.x runtime"
)]
#[cfg_attr(miri, ignore)] // No `socket` in miri.
fn io_panics_when_no_tokio_context() {
    // Registering a std listener with tokio requires a reactor, so
    // `from_std` must panic outside any tokio runtime.
    let std_listener = std::net::TcpListener::bind("127.0.0.1:0").unwrap();
    std_listener.set_nonblocking(true).unwrap();
    let _ = tokio::net::TcpListener::from_std(std_listener);
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/tests/io_read_until.rs | tokio/tests/io_read_until.rs | #![warn(rust_2018_idioms)]
#![cfg(feature = "full")]
use std::io::ErrorKind;
use tokio::io::{AsyncBufReadExt, BufReader, Error};
use tokio_test::{assert_ok, io::Builder};
#[tokio::test]
async fn read_until() {
    // `read_until` on an in-memory slice reads up to and including the
    // delimiter, then the undelimited remainder, then reports EOF as 0.
    let mut rd: &[u8] = b"hello world";
    let mut buf = vec![];
    let expected: [(usize, &[u8]); 3] = [(6, b"hello "), (5, b"world"), (0, b"")];
    for (len, bytes) in expected {
        buf.clear();
        let n = assert_ok!(rd.read_until(b' ', &mut buf).await);
        assert_eq!(n, len);
        assert_eq!(buf, bytes);
    }
}
#[tokio::test]
async fn read_until_not_all_ready() {
    // The mock yields the input in three chunks, so `read_until` must
    // buffer across partial reads and across delimiter boundaries.
    let mock = Builder::new()
        .read(b"Hello Wor")
        .read(b"ld#Fizz\xffBuz")
        .read(b"z#1#2")
        .build();
    let mut read = BufReader::new(mock);
    // Pre-existing contents of the target buffer are preserved; read data
    // is appended after them.
    let mut chunk = b"We say ".to_vec();
    let bytes = read.read_until(b'#', &mut chunk).await.unwrap();
    assert_eq!(bytes, b"Hello World#".len());
    assert_eq!(chunk, b"We say Hello World#");
    chunk = b"I solve ".to_vec();
    let bytes = read.read_until(b'#', &mut chunk).await.unwrap();
    // Fix: was `b"Fizz\xffBuzz\n".len()` — the length (10) was correct only
    // by coincidence; the byte actually consumed is the `#` delimiter, so
    // spell the expectation with the bytes that were really read.
    assert_eq!(bytes, b"Fizz\xffBuzz#".len());
    assert_eq!(chunk, b"I solve Fizz\xffBuzz#");
    chunk.clear();
    let bytes = read.read_until(b'#', &mut chunk).await.unwrap();
    assert_eq!(bytes, 2);
    assert_eq!(chunk, b"1#");
    chunk.clear();
    // The final segment hits EOF without a delimiter; `read_until` returns
    // whatever it read.
    let bytes = read.read_until(b'#', &mut chunk).await.unwrap();
    assert_eq!(bytes, 1);
    assert_eq!(chunk, b"2");
}
#[tokio::test]
async fn read_until_fail() {
    // The mock returns some data and then errors before any delimiter is
    // found; the error must surface while the partial data stays in `chunk`.
    let mock = Builder::new()
        .read(b"Hello \xffWor")
        .read_error(Error::new(ErrorKind::Other, "The world has no end"))
        .build();
    let mut reader = BufReader::new(mock);
    let mut chunk = b"Foo".to_vec();
    let err = reader
        .read_until(b'#', &mut chunk)
        .await
        .expect_err("Should fail");
    assert_eq!(err.kind(), ErrorKind::Other);
    assert_eq!(err.to_string(), "The world has no end");
    // Everything read before the failure was appended to the buffer.
    assert_eq!(chunk, b"FooHello \xffWor");
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
tokio-rs/tokio | https://github.com/tokio-rs/tokio/blob/41d1877689f8669902b003a6affce60bdfeb3025/tokio/tests/rt_time_start_paused.rs | tokio/tests/rt_time_start_paused.rs | #![cfg(feature = "full")]
use tokio::time::{Duration, Instant};
#[tokio::test(start_paused = true)]
async fn test_start_paused() {
    // With `start_paused`, tokio's `Instant` clock must not advance even
    // while the thread really sleeps.
    let start = Instant::now();
    for _ in 0..5 {
        std::thread::sleep(Duration::from_millis(1));
        assert_eq!(start, Instant::now());
    }
}
| rust | MIT | 41d1877689f8669902b003a6affce60bdfeb3025 | 2026-01-04T15:33:40.250594Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.