repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1 value | license stringclasses 7 values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2 classes |
|---|---|---|---|---|---|---|---|---|
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/ext/io/fs.rs | ext/io/fs.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::borrow::Cow;
use std::fmt::Formatter;
use std::io;
use std::path::Path;
#[cfg(unix)]
use std::process::Stdio as StdStdio;
use std::rc::Rc;
use std::time::SystemTime;
use std::time::UNIX_EPOCH;
use deno_core::BufMutView;
use deno_core::BufView;
use deno_core::OpState;
use deno_core::ResourceHandleFd;
use deno_core::ResourceId;
use deno_core::error::ResourceError;
use deno_error::JsErrorBox;
use deno_permissions::PermissionCheckError;
#[cfg(windows)]
use deno_subprocess_windows::Stdio as StdStdio;
use tokio::task::JoinError;
/// Error type for the filesystem operations in this extension.
///
/// The `deno_error::JsError` derive maps each variant onto a JS error class
/// via the `#[class(...)]` attributes below.
#[derive(Debug, deno_error::JsError)]
pub enum FsError {
  // Underlying I/O failure; inherits the JS class of the wrapped error.
  #[class(inherit)]
  Io(io::Error),
  // The file is busy; surfaced to JS as the "Busy" class.
  #[class("Busy")]
  FileBusy,
  // The operation is not supported for this platform/resource.
  #[class(not_supported)]
  NotSupported,
  // A permission check failed; inherits the JS class of the inner error.
  #[class(inherit)]
  PermissionCheck(PermissionCheckError),
}
impl std::fmt::Display for FsError {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
match self {
FsError::Io(err) => std::fmt::Display::fmt(err, f),
FsError::FileBusy => f.write_str("file busy"),
FsError::NotSupported => f.write_str("not supported"),
FsError::PermissionCheck(err) => std::fmt::Display::fmt(err, f),
}
}
}
impl std::error::Error for FsError {}
impl FsError {
pub fn kind(&self) -> io::ErrorKind {
match self {
Self::Io(err) => err.kind(),
Self::FileBusy => io::ErrorKind::Other,
Self::NotSupported => io::ErrorKind::Other,
Self::PermissionCheck(e) => e.kind(),
}
}
pub fn into_io_error(self) -> io::Error {
match self {
FsError::Io(err) => err,
FsError::FileBusy => io::Error::new(self.kind(), "file busy"),
FsError::NotSupported => io::Error::new(self.kind(), "not supported"),
FsError::PermissionCheck(err) => err.into_io_error(),
}
}
}
// Wrap a raw `io::Error`.
impl From<io::Error> for FsError {
  fn from(err: io::Error) -> Self {
    Self::Io(err)
  }
}

// Build an `FsError` from just an `io::ErrorKind` (message-less I/O error).
impl From<io::ErrorKind> for FsError {
  fn from(err: io::ErrorKind) -> Self {
    Self::Io(err.into())
  }
}

// Wrap a failed permission check.
impl From<PermissionCheckError> for FsError {
  fn from(err: PermissionCheckError) -> Self {
    Self::PermissionCheck(err)
  }
}
impl From<JoinError> for FsError {
  /// Converts a tokio `JoinError` by propagating the task's panic.
  ///
  /// The tasks joined here are never cancelled, so a `JoinError` can only
  /// mean the task panicked; that panic is resumed on the current thread.
  fn from(err: JoinError) -> Self {
    if err.is_cancelled() {
      // Was `todo!(...)`, which panics with a misleading "not yet
      // implemented" prefix. Reaching this branch is an invariant violation,
      // not unfinished work, so fail loudly with the invariant itself.
      panic!("async tasks must not be cancelled")
    }
    if err.is_panic() {
      std::panic::resume_unwind(err.into_panic()); // resume the panic on the main thread
    }
    // A JoinError is either a cancellation or a panic; both handled above.
    unreachable!()
  }
}
/// Convenience alias used throughout the fs ops.
pub type FsResult<T> = Result<T, FsError>;

/// Platform-independent snapshot of file metadata.
///
/// Timestamps are milliseconds since the Unix epoch (see
/// `FsStat::from_std`); fields only available on Unix are `0`/`None`/`false`
/// elsewhere.
pub struct FsStat {
  pub is_file: bool,
  pub is_directory: bool,
  pub is_symlink: bool,
  // File size in bytes.
  pub size: u64,
  // Modification / access / creation time, if the platform reports them.
  pub mtime: Option<u64>,
  pub atime: Option<u64>,
  pub birthtime: Option<u64>,
  // Inode-change time; populated from Unix metadata only.
  pub ctime: Option<u64>,
  // The remaining fields are read from Unix metadata extensions and default
  // to 0/None/false on other platforms (see the macros in `from_std`).
  pub dev: u64,
  pub ino: Option<u64>,
  pub mode: u32,
  pub nlink: Option<u64>,
  pub uid: u32,
  pub gid: u32,
  pub rdev: u64,
  pub blksize: u64,
  pub blocks: Option<u64>,
  pub is_block_device: bool,
  pub is_char_device: bool,
  pub is_fifo: bool,
  pub is_socket: bool,
}
impl FsStat {
  /// Builds an [`FsStat`] from `std::fs::Metadata`.
  ///
  /// Unix-only values are read through `MetadataExt`/`FileTypeExt`; on other
  /// platforms the macros below substitute `None`, `0`, or `false`.
  pub fn from_std(metadata: std::fs::Metadata) -> Self {
    // Unix: `Some(metadata.$member())`; elsewhere: `None`.
    macro_rules! unix_some_or_none {
      ($member:ident) => {{
        #[cfg(unix)]
        {
          use std::os::unix::fs::MetadataExt;
          Some(metadata.$member())
        }
        #[cfg(not(unix))]
        {
          None
        }
      }};
    }
    // Unix: `metadata.$member()`; elsewhere: `0`.
    macro_rules! unix_or_zero {
      ($member:ident) => {{
        #[cfg(unix)]
        {
          use std::os::unix::fs::MetadataExt;
          metadata.$member()
        }
        #[cfg(not(unix))]
        {
          0
        }
      }};
    }
    // Unix: `metadata.file_type().$member()`; elsewhere: `false`.
    macro_rules! unix_or_false {
      ($member:ident) => {{
        #[cfg(unix)]
        {
          use std::os::unix::fs::FileTypeExt;
          metadata.file_type().$member()
        }
        #[cfg(not(unix))]
        {
          false
        }
      }};
    }

    // Converts a timestamp to milliseconds since the Unix epoch. A time
    // before the epoch yields its absolute distance from the epoch (via the
    // error's duration); an unavailable timestamp becomes `None`.
    #[inline(always)]
    fn to_msec(maybe_time: Result<SystemTime, io::Error>) -> Option<u64> {
      match maybe_time {
        Ok(time) => Some(
          time
            .duration_since(UNIX_EPOCH)
            .map(|t| t.as_millis() as u64)
            .unwrap_or_else(|err| err.duration().as_millis() as u64),
        ),
        Err(_) => None,
      }
    }

    // A ctime of zero (or negative) is treated as "unavailable".
    #[inline(always)]
    fn get_ctime(ctime_or_0: i64) -> Option<u64> {
      if ctime_or_0 > 0 {
        // ctime returns seconds since epoch, but we need milliseconds
        return Some(ctime_or_0 as u64 * 1000);
      }
      None
    }

    Self {
      is_file: metadata.is_file(),
      is_directory: metadata.is_dir(),
      is_symlink: metadata.file_type().is_symlink(),
      size: metadata.len(),
      mtime: to_msec(metadata.modified()),
      atime: to_msec(metadata.accessed()),
      birthtime: to_msec(metadata.created()),
      ctime: get_ctime(unix_or_zero!(ctime)),
      dev: unix_or_zero!(dev),
      ino: unix_some_or_none!(ino),
      mode: unix_or_zero!(mode),
      nlink: unix_some_or_none!(nlink),
      uid: unix_or_zero!(uid),
      gid: unix_or_zero!(gid),
      rdev: unix_or_zero!(rdev),
      blksize: unix_or_zero!(blksize),
      blocks: unix_some_or_none!(blocks),
      is_block_device: unix_or_false!(is_block_device),
      is_char_device: unix_or_false!(is_char_device),
      is_fifo: unix_or_false!(is_fifo),
      is_socket: unix_or_false!(is_socket),
    }
  }
}
/// Abstraction over an open file used by the fs/io ops.
///
/// Most operations come in `*_sync` / `*_async` pairs. Methods take
/// `self: Rc<Self>` so they can be invoked on resource-table entries.
#[async_trait::async_trait(?Send)]
pub trait File {
  /// Provides the path of the file, which is used for checking
  /// metadata permission updates.
  fn maybe_path(&self) -> Option<&Path>;

  fn read_sync(self: Rc<Self>, buf: &mut [u8]) -> FsResult<usize>;
  /// Reads up to `limit` bytes; the returned buffer is truncated to the
  /// number of bytes actually read. Default impl delegates to `read_byob`.
  async fn read(self: Rc<Self>, limit: usize) -> FsResult<BufView> {
    let buf = BufMutView::new(limit);
    let (nread, mut buf) = self.read_byob(buf).await?;
    buf.truncate(nread);
    Ok(buf.into_view())
  }
  /// Reads into a caller-supplied buffer ("bring your own buffer"),
  /// returning the byte count together with the buffer.
  async fn read_byob(
    self: Rc<Self>,
    buf: BufMutView,
  ) -> FsResult<(usize, BufMutView)>;

  fn write_sync(self: Rc<Self>, buf: &[u8]) -> FsResult<usize>;
  async fn write(
    self: Rc<Self>,
    buf: BufView,
  ) -> FsResult<deno_core::WriteOutcome>;
  fn write_all_sync(self: Rc<Self>, buf: &[u8]) -> FsResult<()>;
  async fn write_all(self: Rc<Self>, buf: BufView) -> FsResult<()>;

  fn read_all_sync(self: Rc<Self>) -> FsResult<Cow<'static, [u8]>>;
  async fn read_all_async(self: Rc<Self>) -> FsResult<Cow<'static, [u8]>>;

  fn chmod_sync(self: Rc<Self>, pathmode: u32) -> FsResult<()>;
  async fn chmod_async(self: Rc<Self>, mode: u32) -> FsResult<()>;
  fn chown_sync(
    self: Rc<Self>,
    uid: Option<u32>,
    gid: Option<u32>,
  ) -> FsResult<()>;
  async fn chown_async(
    self: Rc<Self>,
    uid: Option<u32>,
    gid: Option<u32>,
  ) -> FsResult<()>;

  fn seek_sync(self: Rc<Self>, pos: io::SeekFrom) -> FsResult<u64>;
  async fn seek_async(self: Rc<Self>, pos: io::SeekFrom) -> FsResult<u64>;

  fn datasync_sync(self: Rc<Self>) -> FsResult<()>;
  async fn datasync_async(self: Rc<Self>) -> FsResult<()>;
  fn sync_sync(self: Rc<Self>) -> FsResult<()>;
  async fn sync_async(self: Rc<Self>) -> FsResult<()>;

  fn stat_sync(self: Rc<Self>) -> FsResult<FsStat>;
  async fn stat_async(self: Rc<Self>) -> FsResult<FsStat>;

  fn lock_sync(self: Rc<Self>, exclusive: bool) -> FsResult<()>;
  async fn lock_async(self: Rc<Self>, exclusive: bool) -> FsResult<()>;
  fn unlock_sync(self: Rc<Self>) -> FsResult<()>;
  async fn unlock_async(self: Rc<Self>) -> FsResult<()>;

  fn truncate_sync(self: Rc<Self>, len: u64) -> FsResult<()>;
  async fn truncate_async(self: Rc<Self>, len: u64) -> FsResult<()>;

  fn utime_sync(
    self: Rc<Self>,
    atime_secs: i64,
    atime_nanos: u32,
    mtime_secs: i64,
    mtime_nanos: u32,
  ) -> FsResult<()>;
  async fn utime_async(
    self: Rc<Self>,
    atime_secs: i64,
    atime_nanos: u32,
    mtime_secs: i64,
    mtime_nanos: u32,
  ) -> FsResult<()>;

  // lower level functionality
  /// Converts the file into a process stdio handle.
  fn as_stdio(self: Rc<Self>) -> FsResult<StdStdio>;
  /// The raw fd/handle backing this file, when one exists.
  fn backing_fd(self: Rc<Self>) -> Option<ResourceHandleFd>;
  fn try_clone_inner(self: Rc<Self>) -> FsResult<Rc<dyn File>>;
}
/// Resource-table entry wrapping an open [`File`] plus a display name.
pub struct FileResource {
  name: String,
  file: Rc<dyn File>,
}
impl FileResource {
  /// Wraps `file` as a resource labeled `name`.
  pub fn new(file: Rc<dyn File>, name: String) -> Self {
    Self { name, file }
  }

  /// Looks up the resource for `rid` and runs `f` on it, boxing a
  /// missing-resource error as a `JsErrorBox`.
  fn with_resource<F, R>(
    state: &OpState,
    rid: ResourceId,
    f: F,
  ) -> Result<R, JsErrorBox>
  where
    F: FnOnce(Rc<FileResource>) -> Result<R, JsErrorBox>,
  {
    match state.resource_table.get::<FileResource>(rid) {
      Ok(resource) => f(resource),
      Err(err) => Err(JsErrorBox::from_err(err)),
    }
  }

  /// Returns the file behind `rid`, or a `ResourceError` when `rid` does
  /// not refer to a `FileResource`.
  pub fn get_file(
    state: &OpState,
    rid: ResourceId,
  ) -> Result<Rc<dyn File>, ResourceError> {
    state
      .resource_table
      .get::<FileResource>(rid)
      .map(|resource| resource.file())
  }

  /// Runs `f` on the file behind `rid`.
  pub fn with_file<F, R>(
    state: &OpState,
    rid: ResourceId,
    f: F,
  ) -> Result<R, JsErrorBox>
  where
    F: FnOnce(Rc<dyn File>) -> Result<R, JsErrorBox>,
  {
    Self::with_resource(state, rid, |resource| f(resource.file()))
  }

  /// Hands out a new strong reference to the underlying file.
  pub fn file(&self) -> Rc<dyn File> {
    Rc::clone(&self.file)
  }
}
// Bridges `FileResource` into deno_core's resource model by delegating every
// operation to the wrapped `File` and boxing its errors.
impl deno_core::Resource for FileResource {
  fn name(&self) -> Cow<'_, str> {
    Cow::Borrowed(self.name.as_str())
  }

  fn read(self: Rc<Self>, limit: usize) -> deno_core::AsyncResult<BufView> {
    Box::pin(async move {
      let file = self.file.clone();
      file.read(limit).await.map_err(JsErrorBox::from_err)
    })
  }

  fn read_byob(
    self: Rc<Self>,
    buf: BufMutView,
  ) -> deno_core::AsyncResult<(usize, BufMutView)> {
    Box::pin(async move {
      let file = self.file.clone();
      file.read_byob(buf).await.map_err(JsErrorBox::from_err)
    })
  }

  fn write(
    self: Rc<Self>,
    buf: BufView,
  ) -> deno_core::AsyncResult<deno_core::WriteOutcome> {
    Box::pin(async move {
      let file = self.file.clone();
      file.write(buf).await.map_err(JsErrorBox::from_err)
    })
  }

  fn write_all(self: Rc<Self>, buf: BufView) -> deno_core::AsyncResult<()> {
    Box::pin(async move {
      let file = self.file.clone();
      file.write_all(buf).await.map_err(JsErrorBox::from_err)
    })
  }

  fn read_byob_sync(
    self: Rc<Self>,
    data: &mut [u8],
  ) -> Result<usize, JsErrorBox> {
    let file = self.file.clone();
    file.read_sync(data).map_err(JsErrorBox::from_err)
  }

  fn write_sync(self: Rc<Self>, data: &[u8]) -> Result<usize, JsErrorBox> {
    let file = self.file.clone();
    file.write_sync(data).map_err(JsErrorBox::from_err)
  }

  fn backing_fd(self: Rc<Self>) -> Option<ResourceHandleFd> {
    let file = self.file.clone();
    file.backing_fd()
  }
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/ext/io/winpipe.rs | ext/io/winpipe.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::io;
use std::os::windows::io::RawHandle;
use std::sync::atomic::AtomicU32;
use std::sync::atomic::Ordering;
use rand::RngCore;
use rand::thread_rng;
use winapi::shared::minwindef::DWORD;
use winapi::um::errhandlingapi::GetLastError;
use winapi::um::fileapi::CreateFileA;
use winapi::um::fileapi::OPEN_EXISTING;
use winapi::um::handleapi::CloseHandle;
use winapi::um::handleapi::INVALID_HANDLE_VALUE;
use winapi::um::minwinbase::SECURITY_ATTRIBUTES;
use winapi::um::winbase::CreateNamedPipeA;
use winapi::um::winbase::FILE_FLAG_FIRST_PIPE_INSTANCE;
use winapi::um::winbase::FILE_FLAG_OVERLAPPED;
use winapi::um::winbase::PIPE_ACCESS_DUPLEX;
use winapi::um::winbase::PIPE_READMODE_BYTE;
use winapi::um::winbase::PIPE_TYPE_BYTE;
use winapi::um::winnt::GENERIC_READ;
use winapi::um::winnt::GENERIC_WRITE;
/// Create a pair of file descriptors for a named pipe with non-inheritable handles. We cannot use
/// the anonymous pipe from `os_pipe` because that does not support OVERLAPPED (aka async) I/O.
///
/// This is the same way that Rust and pretty much everyone else does it.
///
/// For more information, there is an interesting S.O. question that explains the history, as
/// well as offering a complex NTAPI solution if we decide to try to make these pipes truly
/// anonymous: https://stackoverflow.com/questions/60645/overlapped-i-o-on-anonymous-pipe
pub fn create_named_pipe() -> io::Result<(RawHandle, RawHandle)> {
  create_named_pipe_inner()
}
/// Creates the server and client ends of an async-capable named pipe.
///
/// Returns `(server_handle, client_handle)`. On failure, no handle leaks:
/// the server handle is closed if the client side fails to open.
fn create_named_pipe_inner() -> io::Result<(RawHandle, RawHandle)> {
  static NEXT_ID: AtomicU32 = AtomicU32::new(0);

  // Create an extremely-likely-unique pipe name from randomness, identity and a serial counter.
  let pipe_name = format!(
    concat!(r#"\\.\pipe\deno_pipe_{:x}.{:x}.{:x}"#, "\0"),
    thread_rng().next_u64(),
    std::process::id(),
    NEXT_ID.fetch_add(1, Ordering::SeqCst),
  );

  // Create security attributes to make the pipe handles non-inheritable
  let mut security_attributes = SECURITY_ATTRIBUTES {
    nLength: std::mem::size_of::<SECURITY_ATTRIBUTES>() as DWORD,
    lpSecurityDescriptor: std::ptr::null_mut(),
    bInheritHandle: 0,
  };

  // SAFETY: Create the pipe server with non-inheritable handle
  let server_handle = unsafe {
    CreateNamedPipeA(
      pipe_name.as_ptr() as *const i8,
      PIPE_ACCESS_DUPLEX | FILE_FLAG_OVERLAPPED | FILE_FLAG_FIRST_PIPE_INSTANCE,
      // Read and write bytes, not messages
      PIPE_TYPE_BYTE | PIPE_READMODE_BYTE,
      // The maximum number of instances that can be created for this pipe.
      1,
      // 4kB buffer sizes
      4096,
      4096,
      // "The default time-out value, in milliseconds, if the WaitNamedPipe function specifies NMPWAIT_USE_DEFAULT_WAIT.
      // Each instance of a named pipe must specify the same value. A value of zero will result in a default time-out of
      // 50 milliseconds."
      0,
      &mut security_attributes,
    )
  };

  if server_handle == INVALID_HANDLE_VALUE {
    // Capture the OS error *before* logging: the logging machinery may make
    // system calls of its own that overwrite the thread's last-error value.
    let err = io::Error::last_os_error();
    // This should not happen, so we would like to get some better diagnostics here.
    log::error!(
      "*** Unexpected server pipe failure '{pipe_name:?}': {:x}",
      err.raw_os_error().unwrap_or(0)
    );
    return Err(err);
  }

  // SAFETY: Create the pipe client with non-inheritable handle
  let client_handle = unsafe {
    CreateFileA(
      pipe_name.as_ptr() as *const i8,
      GENERIC_READ | GENERIC_WRITE,
      0,
      &mut security_attributes,
      OPEN_EXISTING,
      FILE_FLAG_OVERLAPPED,
      std::ptr::null_mut(),
    )
  };

  if client_handle == INVALID_HANDLE_VALUE {
    // Capture the OS error before logging or CloseHandle can overwrite it.
    let err = io::Error::last_os_error();
    // This should not happen, so we would like to get some better diagnostics here.
    log::error!(
      "*** Unexpected client pipe failure '{pipe_name:?}': {:x}",
      err.raw_os_error().unwrap_or(0)
    );
    // SAFETY: Close the server handle so it does not leak on this path.
    unsafe {
      CloseHandle(server_handle);
    }
    return Err(err);
  }

  Ok((server_handle, client_handle))
}
#[cfg(test)]
mod tests {
  use std::fs::File;
  use std::io::Read;
  use std::io::Write;
  use std::os::windows::io::FromRawHandle;
  use std::sync::Arc;
  use std::sync::Barrier;

  use super::*;

  /// Smoke test: bytes written to the server end arrive at the client end.
  #[test]
  fn make_named_pipe() {
    let (server, client) = create_named_pipe().unwrap();
    // SAFETY: For testing
    let mut server = unsafe { File::from_raw_handle(server) };
    // SAFETY: For testing
    let mut client = unsafe { File::from_raw_handle(client) };
    // Write to the server and read from the client
    server.write_all(b"hello").unwrap();
    let mut buf: [u8; 5] = Default::default();
    client.read_exact(&mut buf).unwrap();
    assert_eq!(&buf, b"hello");
  }

  /// Pipe names must not collide when pipes are created back-to-back while
  /// all earlier pipes are kept alive.
  #[test]
  fn make_many_named_pipes_serial() {
    let mut handles = vec![];
    for _ in 0..100 {
      let (server, client) = create_named_pipe().unwrap();
      // SAFETY: For testing
      let server = unsafe { File::from_raw_handle(server) };
      // SAFETY: For testing
      let client = unsafe { File::from_raw_handle(client) };
      handles.push((server, client))
    }
  }

  /// Pipe names must not collide when many threads create pipes at once
  /// (the barrier maximizes overlap).
  #[test]
  fn make_many_named_pipes_parallel() {
    let mut handles = vec![];
    let barrier = Arc::new(Barrier::new(50));
    for _ in 0..50 {
      let barrier = barrier.clone();
      handles.push(std::thread::spawn(move || {
        barrier.wait();
        let (server, client) = create_named_pipe().unwrap();
        // SAFETY: For testing
        let server = unsafe { File::from_raw_handle(server) };
        // SAFETY: For testing
        let client = unsafe { File::from_raw_handle(client) };
        std::thread::sleep(std::time::Duration::from_millis(100));
        drop((server, client));
      }));
    }
    for handle in handles.drain(..) {
      handle.join().unwrap();
    }
  }
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/ext/io/pipe.rs | ext/io/pipe.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::io;
use std::pin::Pin;
use std::process::Stdio;
/// Raw OS handle type shared with the rest of the crate.
pub type RawPipeHandle = super::RawIoHandle;

// The synchronous read end of a unidirectional pipe.
pub struct PipeRead {
  file: std::fs::File,
}

// The asynchronous read end of a unidirectional pipe.
pub struct AsyncPipeRead {
  #[cfg(windows)]
  /// We use a `ChildStdout` here as it's a much better fit for a Windows named pipe on Windows. We
  /// might also be able to use `tokio::net::windows::named_pipe::NamedPipeClient` in the future
  /// if those can be created from raw handles down the road.
  read: tokio::process::ChildStdout,
  #[cfg(not(windows))]
  read: tokio::net::unix::pipe::Receiver,
}

// The synchronous write end of a unidirectional pipe.
pub struct PipeWrite {
  file: std::fs::File,
}

// The asynchronous write end of a unidirectional pipe.
pub struct AsyncPipeWrite {
  #[cfg(windows)]
  /// We use a `ChildStdin` here as it's a much better fit for a Windows named pipe on Windows. We
  /// might also be able to use `tokio::net::windows::named_pipe::NamedPipeClient` in the future
  /// if those can be created from raw handles down the road.
  write: tokio::process::ChildStdin,
  #[cfg(not(windows))]
  write: tokio::net::unix::pipe::Sender,
}
impl PipeRead {
  /// Converts this sync reader into an async reader. May fail if the Tokio runtime is
  /// unavailable.
  #[cfg(windows)]
  pub fn into_async(self) -> io::Result<AsyncPipeRead> {
    // Route File -> OwnedHandle -> std ChildStdout -> tokio ChildStdout.
    let owned: std::os::windows::io::OwnedHandle = self.file.into();
    let stdout = std::process::ChildStdout::from(owned);
    Ok(AsyncPipeRead {
      read: tokio::process::ChildStdout::from_std(stdout)?,
    })
  }

  /// Converts this sync reader into an async reader. May fail if the Tokio runtime is
  /// unavailable.
  #[cfg(not(windows))]
  pub fn into_async(self) -> io::Result<AsyncPipeRead> {
    Ok(AsyncPipeRead {
      read: tokio::net::unix::pipe::Receiver::from_file(self.file)?,
    })
  }

  /// Creates a new [`PipeRead`] instance that shares the same underlying file handle
  /// as the existing [`PipeRead`] instance.
  pub fn try_clone(&self) -> io::Result<Self> {
    Ok(Self {
      file: self.file.try_clone()?,
    })
  }
}
impl AsyncPipeRead {
  /// Converts this async reader into a sync reader. May fail if the Tokio runtime is
  /// unavailable.
  #[cfg(windows)]
  pub fn into_sync(self) -> io::Result<PipeRead> {
    let owned = self.read.into_owned_handle()?;
    Ok(PipeRead { file: owned.into() })
  }

  /// Converts this async reader into a sync reader. May fail if the Tokio runtime is
  /// unavailable.
  #[cfg(not(windows))]
  pub fn into_sync(self) -> io::Result<PipeRead> {
    let file = self.read.into_nonblocking_fd()?.into();
    Ok(PipeRead { file })
  }
}
// Sync reads delegate directly to the underlying `std::fs::File`.
impl std::io::Read for PipeRead {
  fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
    self.file.read(buf)
  }
  fn read_vectored(
    &mut self,
    bufs: &mut [io::IoSliceMut<'_>],
  ) -> io::Result<usize> {
    self.file.read_vectored(bufs)
  }
}

// Async reads delegate to the platform-specific inner reader.
impl tokio::io::AsyncRead for AsyncPipeRead {
  fn poll_read(
    self: Pin<&mut Self>,
    cx: &mut std::task::Context<'_>,
    buf: &mut tokio::io::ReadBuf<'_>,
  ) -> std::task::Poll<io::Result<()>> {
    Pin::new(&mut self.get_mut().read).poll_read(cx, buf)
  }
}
impl PipeWrite {
  /// Converts this sync writer into an async writer. May fail if the Tokio runtime is
  /// unavailable.
  #[cfg(windows)]
  pub fn into_async(self) -> io::Result<AsyncPipeWrite> {
    // Route File -> OwnedHandle -> std ChildStdin -> tokio ChildStdin.
    let owned: std::os::windows::io::OwnedHandle = self.file.into();
    let stdin = std::process::ChildStdin::from(owned);
    Ok(AsyncPipeWrite {
      write: tokio::process::ChildStdin::from_std(stdin)?,
    })
  }

  /// Converts this sync writer into an async writer. May fail if the Tokio runtime is
  /// unavailable.
  #[cfg(not(windows))]
  pub fn into_async(self) -> io::Result<AsyncPipeWrite> {
    Ok(AsyncPipeWrite {
      write: tokio::net::unix::pipe::Sender::from_file(self.file)?,
    })
  }

  /// Creates a new [`PipeWrite`] instance that shares the same underlying file handle
  /// as the existing [`PipeWrite`] instance.
  pub fn try_clone(&self) -> io::Result<Self> {
    Ok(Self {
      file: self.file.try_clone()?,
    })
  }
}
impl AsyncPipeWrite {
  /// Converts this async writer into a sync writer. May fail if the Tokio runtime is
  /// unavailable.
  #[cfg(windows)]
  pub fn into_sync(self) -> io::Result<PipeWrite> {
    let owned = self.write.into_owned_handle()?;
    Ok(PipeWrite { file: owned.into() })
  }

  /// Converts this async writer into a sync writer. May fail if the Tokio runtime is
  /// unavailable.
  #[cfg(not(windows))]
  pub fn into_sync(self) -> io::Result<PipeWrite> {
    let file = self.write.into_nonblocking_fd()?.into();
    Ok(PipeWrite { file })
  }
}
// Sync writes delegate directly to the underlying `std::fs::File`.
impl std::io::Write for PipeWrite {
  fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
    self.file.write(buf)
  }
  fn flush(&mut self) -> io::Result<()> {
    self.file.flush()
  }
  fn write_vectored(&mut self, bufs: &[io::IoSlice<'_>]) -> io::Result<usize> {
    self.file.write_vectored(bufs)
  }
}

// Async writes delegate to the platform-specific inner writer; every method
// is a thin forwarder, so they are marked `#[inline(always)]`.
impl tokio::io::AsyncWrite for AsyncPipeWrite {
  #[inline(always)]
  fn poll_write(
    self: std::pin::Pin<&mut Self>,
    cx: &mut std::task::Context<'_>,
    buf: &[u8],
  ) -> std::task::Poll<Result<usize, io::Error>> {
    Pin::new(&mut self.get_mut().write).poll_write(cx, buf)
  }
  #[inline(always)]
  fn poll_flush(
    self: Pin<&mut Self>,
    cx: &mut std::task::Context<'_>,
  ) -> std::task::Poll<Result<(), io::Error>> {
    Pin::new(&mut self.get_mut().write).poll_flush(cx)
  }
  #[inline(always)]
  fn poll_shutdown(
    self: Pin<&mut Self>,
    cx: &mut std::task::Context<'_>,
  ) -> std::task::Poll<Result<(), io::Error>> {
    Pin::new(&mut self.get_mut().write).poll_shutdown(cx)
  }
  #[inline(always)]
  fn is_write_vectored(&self) -> bool {
    self.write.is_write_vectored()
  }
  #[inline(always)]
  fn poll_write_vectored(
    self: Pin<&mut Self>,
    cx: &mut std::task::Context<'_>,
    bufs: &[io::IoSlice<'_>],
  ) -> std::task::Poll<Result<usize, io::Error>> {
    Pin::new(&mut self.get_mut().write).poll_write_vectored(cx, bufs)
  }
}
// Conversions into process stdio and owned file/fd/handle types. Each
// conversion simply hands over the wrapped `std::fs::File` (or its fd/handle).
impl From<PipeRead> for Stdio {
  fn from(val: PipeRead) -> Self {
    Stdio::from(val.file)
  }
}

impl From<PipeWrite> for Stdio {
  fn from(val: PipeWrite) -> Self {
    Stdio::from(val.file)
  }
}

impl From<PipeRead> for std::fs::File {
  fn from(val: PipeRead) -> Self {
    val.file
  }
}

impl From<PipeWrite> for std::fs::File {
  fn from(val: PipeWrite) -> Self {
    val.file
  }
}

#[cfg(not(windows))]
impl From<PipeRead> for std::os::unix::io::OwnedFd {
  fn from(val: PipeRead) -> Self {
    val.file.into()
  }
}

#[cfg(not(windows))]
impl From<PipeWrite> for std::os::unix::io::OwnedFd {
  fn from(val: PipeWrite) -> Self {
    val.file.into()
  }
}

#[cfg(windows)]
impl From<PipeRead> for std::os::windows::io::OwnedHandle {
  fn from(val: PipeRead) -> Self {
    val.file.into()
  }
}

#[cfg(windows)]
impl From<PipeWrite> for std::os::windows::io::OwnedHandle {
  fn from(val: PipeWrite) -> Self {
    val.file.into()
  }
}
/// Create a unidirectional pipe pair that starts off as a pair of synchronous file handles,
/// but either side may be promoted to an async-capable reader/writer.
///
/// On Windows, we use a named pipe because that's the only way to get reliable async I/O
/// support. On Unix platforms, we use the `os_pipe` library, which uses `pipe2` under the hood
/// (or `pipe` on OSX).
pub fn pipe() -> io::Result<(PipeRead, PipeWrite)> {
  pipe_impl()
}

/// Creates a unidirectional pipe on top of a named pipe (which is technically bidirectional).
#[cfg(windows)]
pub fn pipe_impl() -> io::Result<(PipeRead, PipeWrite)> {
  // SAFETY: We're careful with handles here
  unsafe {
    use std::os::windows::io::FromRawHandle;
    use std::os::windows::io::OwnedHandle;
    let (server, client) = crate::winpipe::create_named_pipe()?;
    // The client end is the reader and the server end the writer; wrapping
    // the raw handles in `OwnedHandle` gives them close-on-drop semantics.
    let read = std::fs::File::from(OwnedHandle::from_raw_handle(client));
    let write = std::fs::File::from(OwnedHandle::from_raw_handle(server));
    Ok((PipeRead { file: read }, PipeWrite { file: write }))
  }
}
/// Creates a unidirectional pipe for unix platforms.
#[cfg(not(windows))]
pub fn pipe_impl() -> io::Result<(PipeRead, PipeWrite)> {
  use std::os::unix::io::OwnedFd;
  // Route each `os_pipe` end through `OwnedFd` so the resulting
  // `std::fs::File`s own (and will close) their descriptors.
  let (rx, tx) = os_pipe::pipe()?;
  let rx_owned: OwnedFd = rx.into();
  let tx_owned: OwnedFd = tx.into();
  Ok((
    PipeRead {
      file: std::fs::File::from(rx_owned),
    },
    PipeWrite {
      file: std::fs::File::from(tx_owned),
    },
  ))
}
#[cfg(test)]
mod tests {
  use std::io::Read;
  use std::io::Write;

  use tokio::io::AsyncReadExt;
  use tokio::io::AsyncWriteExt;

  use super::*;

  /// Sync write on one end is readable on the other.
  #[test]
  fn test_pipe() {
    let (mut read, mut write) = pipe().unwrap();
    // Write to the server and read from the client
    write.write_all(b"hello").unwrap();
    let mut buf: [u8; 5] = Default::default();
    read.read_exact(&mut buf).unwrap();
    assert_eq!(&buf, b"hello");
  }

  /// Both ends promoted to async still round-trip data.
  #[tokio::test]
  async fn test_async_pipe() {
    let (read, write) = pipe().unwrap();
    let mut read = read.into_async().unwrap();
    let mut write = write.into_async().unwrap();

    write.write_all(b"hello").await.unwrap();
    let mut buf: [u8; 5] = Default::default();
    read.read_exact(&mut buf).await.unwrap();
    assert_eq!(&buf, b"hello");
  }

  /// Test a round-trip through async mode and back.
  #[tokio::test]
  async fn test_pipe_transmute() {
    let (mut read, mut write) = pipe().unwrap();

    // Sync
    write.write_all(b"hello").unwrap();
    let mut buf: [u8; 5] = Default::default();
    read.read_exact(&mut buf).unwrap();
    assert_eq!(&buf, b"hello");

    let mut read = read.into_async().unwrap();
    let mut write = write.into_async().unwrap();

    // Async
    write.write_all(b"hello").await.unwrap();
    let mut buf: [u8; 5] = Default::default();
    read.read_exact(&mut buf).await.unwrap();
    assert_eq!(&buf, b"hello");

    let mut read = read.into_sync().unwrap();
    let mut write = write.into_sync().unwrap();

    // Sync
    write.write_all(b"hello").unwrap();
    let mut buf: [u8; 5] = Default::default();
    read.read_exact(&mut buf).unwrap();
    assert_eq!(&buf, b"hello");
  }

  /// A reader blocked in one task must not prevent a writer task from
  /// completing (i.e. the async ends really are non-blocking).
  #[tokio::test]
  async fn test_async_pipe_is_nonblocking() {
    let (read, write) = pipe().unwrap();
    let mut read = read.into_async().unwrap();
    let mut write = write.into_async().unwrap();

    let a = tokio::spawn(async move {
      let mut buf: [u8; 5] = Default::default();
      read.read_exact(&mut buf).await.unwrap();
      assert_eq!(&buf, b"hello");
    });
    let b = tokio::spawn(async move {
      write.write_all(b"hello").await.unwrap();
    });

    a.await.unwrap();
    b.await.unwrap();
  }
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/ext/crypto/export_key.rs | ext/crypto/export_key.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use base64::Engine;
use base64::prelude::BASE64_URL_SAFE_NO_PAD;
use const_oid::AssociatedOid;
use const_oid::ObjectIdentifier;
use deno_core::ToJsBuffer;
use deno_core::op2;
use elliptic_curve::sec1::ToEncodedPoint;
use p256::pkcs8::DecodePrivateKey;
use rsa::pkcs1::der::Decode;
use rsa::pkcs8::der::Encode;
use rsa::pkcs8::der::asn1::UintRef;
use serde::Deserialize;
use serde::Serialize;
use spki::AlgorithmIdentifier;
use spki::AlgorithmIdentifierOwned;
use spki::der::asn1;
use spki::der::asn1::BitString;
use crate::shared::*;
/// Errors surfaced by `op_crypto_export_key`.
#[derive(Debug, thiserror::Error, deno_error::JsError)]
pub enum ExportKeyError {
  // Error from the shared crypto helpers (e.g. unsupported format,
  // undecodable key material); inherits its JS class and message.
  #[class(inherit)]
  #[error(transparent)]
  General(
    #[from]
    #[inherit]
    SharedError,
  ),
  // DER encode/decode failure.
  #[class(generic)]
  #[error(transparent)]
  Der(#[from] spki::der::Error),
  // The requested EC named curve is not supported.
  #[class("DOMExceptionNotSupportedError")]
  #[error("Unsupported named curve")]
  UnsupportedNamedCurve,
}
/// Options passed from JS to `op_crypto_export_key`.
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ExportKeyOptions {
  format: ExportKeyFormat,
  // Flattened so the algorithm tag and its fields sit beside `format`
  // in the incoming JS object.
  #[serde(flatten)]
  algorithm: ExportKeyAlgorithm,
}

/// WebCrypto export formats (the `jwk` format is split per key class).
#[derive(Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum ExportKeyFormat {
  Raw,
  Pkcs8,
  Spki,
  JwkPublic,
  JwkPrivate,
  JwkSecret,
}

/// Algorithm family of the key being exported, tagged by the `algorithm`
/// field of the options object. EC variants carry their named curve.
#[derive(Deserialize)]
#[serde(rename_all = "camelCase", tag = "algorithm")]
pub enum ExportKeyAlgorithm {
  #[serde(rename = "RSASSA-PKCS1-v1_5")]
  RsassaPkcs1v15 {},
  #[serde(rename = "RSA-PSS")]
  RsaPss {},
  #[serde(rename = "RSA-OAEP")]
  RsaOaep {},
  #[serde(rename = "ECDSA", rename_all = "camelCase")]
  Ecdsa { named_curve: EcNamedCurve },
  #[serde(rename = "ECDH", rename_all = "camelCase")]
  Ecdh { named_curve: EcNamedCurve },
  #[serde(rename = "AES")]
  Aes {},
  #[serde(rename = "HMAC")]
  Hmac {},
}
/// Result of a key export. Serialized untagged so each shape maps directly
/// onto the JS value: a buffer for binary formats, an object for JWK forms.
/// All JWK string fields are base64url-encoded without padding.
#[derive(Serialize)]
#[serde(untagged)]
pub enum ExportKeyResult {
  // Raw public key bytes.
  Raw(ToJsBuffer),
  // PKCS#8 DER-encoded private key.
  Pkcs8(ToJsBuffer),
  // SPKI DER-encoded public key.
  Spki(ToJsBuffer),
  // JWK symmetric secret.
  JwkSecret {
    k: String,
  },
  // JWK RSA public key (modulus / exponent).
  JwkPublicRsa {
    n: String,
    e: String,
  },
  // JWK RSA private key with CRT parameters.
  JwkPrivateRsa {
    n: String,
    e: String,
    d: String,
    p: String,
    q: String,
    dp: String,
    dq: String,
    qi: String,
  },
  // JWK EC public key (affine coordinates).
  JwkPublicEc {
    x: String,
    y: String,
  },
  // JWK EC private key (coordinates plus private scalar).
  JwkPrivateEc {
    x: String,
    y: String,
    d: String,
  },
}
/// Exports stored key material in the requested format.
///
/// Dispatches on the algorithm family: all RSA variants share one exporter,
/// both EC variants share another (parameterized by named curve), and
/// AES/HMAC keys are exported as symmetric secrets.
#[op2]
#[serde]
pub fn op_crypto_export_key(
  #[serde] opts: ExportKeyOptions,
  #[serde] key_data: V8RawKeyData,
) -> Result<ExportKeyResult, ExportKeyError> {
  match opts.algorithm {
    ExportKeyAlgorithm::RsassaPkcs1v15 {}
    | ExportKeyAlgorithm::RsaPss {}
    | ExportKeyAlgorithm::RsaOaep {} => export_key_rsa(opts.format, key_data),
    ExportKeyAlgorithm::Ecdh { named_curve }
    | ExportKeyAlgorithm::Ecdsa { named_curve } => {
      export_key_ec(opts.format, key_data, opts.algorithm, named_curve)
    }
    ExportKeyAlgorithm::Aes {} | ExportKeyAlgorithm::Hmac {} => {
      export_key_symmetric(opts.format, key_data)
    }
  }
}
// Base64url-encodes (unpadded) a DER unsigned integer; used for JWK fields.
fn uint_to_b64(bytes: UintRef) -> String {
  BASE64_URL_SAFE_NO_PAD.encode(bytes.as_bytes())
}

// Base64url-encodes (unpadded) raw bytes; used for JWK fields.
fn bytes_to_b64(bytes: &[u8]) -> String {
  BASE64_URL_SAFE_NO_PAD.encode(bytes)
}
/// Exports an RSA key as SPKI (public), PKCS#8 (private), or JWK.
///
/// `Raw` is not a valid RSA export format and falls through to
/// `UnsupportedFormat`, as do JWK forms that do not match the key class.
fn export_key_rsa(
  format: ExportKeyFormat,
  key_data: V8RawKeyData,
) -> Result<ExportKeyResult, ExportKeyError> {
  match format {
    ExportKeyFormat::Spki => {
      let subject_public_key = &key_data.as_rsa_public_key()?;

      // the SPKI structure
      let key_info = spki::SubjectPublicKeyInfo {
        algorithm: spki::AlgorithmIdentifier {
          // rsaEncryption(1)
          oid: const_oid::ObjectIdentifier::new_unwrap("1.2.840.113549.1.1.1"),
          // parameters field should not be omitted (None).
          // It MUST have ASN.1 type NULL.
          parameters: Some(asn1::AnyRef::from(asn1::Null)),
        },
        subject_public_key: BitString::from_bytes(subject_public_key).unwrap(),
      };

      // Infallible because we know the public key is valid.
      let spki_der = key_info.to_der().unwrap();
      Ok(ExportKeyResult::Spki(spki_der.into()))
    }
    ExportKeyFormat::Pkcs8 => {
      let private_key = key_data.as_rsa_private_key()?;

      // the PKCS#8 v1 structure
      // PrivateKeyInfo ::= SEQUENCE {
      //   version                   Version,
      //   privateKeyAlgorithm       PrivateKeyAlgorithmIdentifier,
      //   privateKey                PrivateKey,
      //   attributes           [0]  IMPLICIT Attributes OPTIONAL }
      // version is 0 when publickey is None
      let pk_info = rsa::pkcs8::PrivateKeyInfo {
        public_key: None,
        algorithm: rsa::pkcs8::AlgorithmIdentifierRef {
          // rsaEncryption(1)
          oid: rsa::pkcs8::ObjectIdentifier::new_unwrap("1.2.840.113549.1.1.1"),
          // parameters field should not be omitted (None).
          // It MUST have ASN.1 type NULL as per defined in RFC 3279 Section 2.3.1
          parameters: Some(rsa::pkcs8::der::asn1::AnyRef::from(
            rsa::pkcs8::der::asn1::Null,
          )),
        },
        private_key,
      };

      // Infallible because we know the private key is valid.
      let mut pkcs8_der = Vec::new();
      pk_info.encode_to_vec(&mut pkcs8_der)?;

      Ok(ExportKeyResult::Pkcs8(pkcs8_der.into()))
    }
    ExportKeyFormat::JwkPublic => {
      let public_key = key_data.as_rsa_public_key()?;
      let public_key = rsa::pkcs1::RsaPublicKey::from_der(&public_key)
        .map_err(|_| SharedError::FailedDecodePublicKey)?;

      Ok(ExportKeyResult::JwkPublicRsa {
        n: uint_to_b64(public_key.modulus),
        e: uint_to_b64(public_key.public_exponent),
      })
    }
    ExportKeyFormat::JwkPrivate => {
      let private_key = key_data.as_rsa_private_key()?;
      let private_key = rsa::pkcs1::RsaPrivateKey::from_der(private_key)
        .map_err(|_| SharedError::FailedDecodePrivateKey)?;

      Ok(ExportKeyResult::JwkPrivateRsa {
        n: uint_to_b64(private_key.modulus),
        e: uint_to_b64(private_key.public_exponent),
        d: uint_to_b64(private_key.private_exponent),
        p: uint_to_b64(private_key.prime1),
        q: uint_to_b64(private_key.prime2),
        dp: uint_to_b64(private_key.exponent1),
        dq: uint_to_b64(private_key.exponent2),
        qi: uint_to_b64(private_key.coefficient),
      })
    }
    _ => Err(SharedError::UnsupportedFormat.into()),
  }
}
/// Exports an AES/HMAC secret. The only valid format for symmetric keys is
/// the JWK "secret" form; anything else is `UnsupportedFormat`.
fn export_key_symmetric(
  format: ExportKeyFormat,
  key_data: V8RawKeyData,
) -> Result<ExportKeyResult, ExportKeyError> {
  if let ExportKeyFormat::JwkSecret = format {
    let secret = key_data.as_secret_key()?;
    Ok(ExportKeyResult::JwkSecret {
      k: bytes_to_b64(secret),
    })
  } else {
    Err(SharedError::UnsupportedFormat.into())
  }
}
/// Exports an EC key (ECDSA/ECDH) in the requested `format`.
///
/// - `Raw`: the stored SEC1-encoded public point, verbatim.
/// - `Spki`: that point wrapped in a DER SubjectPublicKeyInfo.
/// - `Pkcs8`: the stored PKCS#8 private-key DER, verbatim.
/// - `JwkPublic`: x/y coordinates taken from the uncompressed point.
/// - `JwkPrivate`: x/y/d recovered from the PKCS#8 private key.
///
/// Unsupported combinations return `SharedError::UnsupportedFormat`
/// (or `ExportKeyError::UnsupportedNamedCurve`, see the P-521 note below).
fn export_key_ec(
  format: ExportKeyFormat,
  key_data: V8RawKeyData,
  algorithm: ExportKeyAlgorithm,
  named_curve: EcNamedCurve,
) -> Result<ExportKeyResult, ExportKeyError> {
  match format {
    ExportKeyFormat::Raw => {
      // Raw export is just the SEC1 point bytes for the curve.
      let subject_public_key = match named_curve {
        EcNamedCurve::P256 => {
          let point = key_data.as_ec_public_key_p256()?;
          point.as_ref().to_vec()
        }
        EcNamedCurve::P384 => {
          let point = key_data.as_ec_public_key_p384()?;
          point.as_ref().to_vec()
        }
        EcNamedCurve::P521 => {
          let point = key_data.as_ec_public_key_p521()?;
          point.as_ref().to_vec()
        }
      };
      Ok(ExportKeyResult::Raw(subject_public_key.into()))
    }
    ExportKeyFormat::Spki => {
      // The SEC1 point becomes the BIT STRING payload of the SPKI.
      let subject_public_key = match named_curve {
        EcNamedCurve::P256 => {
          let point = key_data.as_ec_public_key_p256()?;
          point.as_ref().to_vec()
        }
        EcNamedCurve::P384 => {
          let point = key_data.as_ec_public_key_p384()?;
          point.as_ref().to_vec()
        }
        EcNamedCurve::P521 => {
          let point = key_data.as_ec_public_key_p521()?;
          point.as_ref().to_vec()
        }
      };
      // Algorithm identifier carrying the curve OID as its parameter.
      let alg_id = match named_curve {
        EcNamedCurve::P256 => AlgorithmIdentifierOwned {
          oid: elliptic_curve::ALGORITHM_OID,
          parameters: Some((&p256::NistP256::OID).into()),
        },
        EcNamedCurve::P384 => AlgorithmIdentifierOwned {
          oid: elliptic_curve::ALGORITHM_OID,
          parameters: Some((&p384::NistP384::OID).into()),
        },
        EcNamedCurve::P521 => AlgorithmIdentifierOwned {
          oid: elliptic_curve::ALGORITHM_OID,
          parameters: Some((&p521::NistP521::OID).into()),
        },
      };
      // For ECDH keys, rebuild the identifier with an explicitly spelled
      // id-ecPublicKey OID while keeping the curve parameters.
      let alg_id = match algorithm {
        ExportKeyAlgorithm::Ecdh { .. } => AlgorithmIdentifier {
          oid: ObjectIdentifier::new_unwrap("1.2.840.10045.2.1"),
          parameters: alg_id.parameters,
        },
        _ => alg_id,
      };
      // the SPKI structure
      let key_info = spki::SubjectPublicKeyInfo {
        algorithm: alg_id,
        subject_public_key: BitString::from_bytes(&subject_public_key).unwrap(),
      };
      let spki_der = key_info.to_der().unwrap();
      Ok(ExportKeyResult::Spki(spki_der.into()))
    }
    ExportKeyFormat::Pkcs8 => {
      // private_key is a PKCS#8 DER-encoded private key
      let private_key = key_data.as_ec_private_key()?;
      Ok(ExportKeyResult::Pkcs8(private_key.to_vec().into()))
    }
    // JWK needs affine x/y, so the stored point must be in uncompressed
    // SEC1 form; any other encoding is reported as a decode failure.
    ExportKeyFormat::JwkPublic => match named_curve {
      EcNamedCurve::P256 => {
        let point = key_data.as_ec_public_key_p256()?;
        let coords = point.coordinates();
        if let p256::elliptic_curve::sec1::Coordinates::Uncompressed { x, y } =
          coords
        {
          Ok(ExportKeyResult::JwkPublicEc {
            x: bytes_to_b64(x),
            y: bytes_to_b64(y),
          })
        } else {
          Err(SharedError::FailedDecodePublicKey.into())
        }
      }
      EcNamedCurve::P384 => {
        let point = key_data.as_ec_public_key_p384()?;
        let coords = point.coordinates();
        if let p384::elliptic_curve::sec1::Coordinates::Uncompressed { x, y } =
          coords
        {
          Ok(ExportKeyResult::JwkPublicEc {
            x: bytes_to_b64(x),
            y: bytes_to_b64(y),
          })
        } else {
          Err(SharedError::FailedDecodePublicKey.into())
        }
      }
      EcNamedCurve::P521 => {
        let point = key_data.as_ec_public_key_p521()?;
        let coords = point.coordinates();
        if let p521::elliptic_curve::sec1::Coordinates::Uncompressed { x, y } =
          coords
        {
          Ok(ExportKeyResult::JwkPublicEc {
            x: bytes_to_b64(x),
            y: bytes_to_b64(y),
          })
        } else {
          Err(SharedError::FailedDecodePublicKey.into())
        }
      }
    },
    ExportKeyFormat::JwkPrivate => {
      let private_key = key_data.as_ec_private_key()?;
      match named_curve {
        EcNamedCurve::P256 => {
          let ec_key = p256::SecretKey::from_pkcs8_der(private_key)
            .map_err(|_| SharedError::FailedDecodePrivateKey)?;
          // Re-derive the public point (uncompressed) to obtain x and y.
          let point = ec_key.public_key().to_encoded_point(false);
          if let elliptic_curve::sec1::Coordinates::Uncompressed { x, y } =
            point.coordinates()
          {
            Ok(ExportKeyResult::JwkPrivateEc {
              x: bytes_to_b64(x),
              y: bytes_to_b64(y),
              d: bytes_to_b64(&ec_key.to_bytes()),
            })
          } else {
            Err(SharedError::ExpectedValidPublicECKey.into())
          }
        }
        EcNamedCurve::P384 => {
          let ec_key = p384::SecretKey::from_pkcs8_der(private_key)
            .map_err(|_| SharedError::FailedDecodePrivateKey)?;
          let point = ec_key.public_key().to_encoded_point(false);
          if let elliptic_curve::sec1::Coordinates::Uncompressed { x, y } =
            point.coordinates()
          {
            Ok(ExportKeyResult::JwkPrivateEc {
              x: bytes_to_b64(x),
              y: bytes_to_b64(y),
              d: bytes_to_b64(&ec_key.to_bytes()),
            })
          } else {
            Err(SharedError::ExpectedValidPublicECKey.into())
          }
        }
        // NOTE(review): P-521 private-key JWK export is not implemented
        // here even though P-521 is handled in the other formats above.
        _ => Err(ExportKeyError::UnsupportedNamedCurve),
      }
    }
    ExportKeyFormat::JwkSecret => Err(SharedError::UnsupportedFormat.into()),
  }
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/ext/crypto/key.rs | ext/crypto/key.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use aws_lc_rs::agreement::Algorithm as RingAlgorithm;
use aws_lc_rs::digest;
use aws_lc_rs::hkdf;
use aws_lc_rs::hmac::Algorithm as HmacAlgorithm;
use aws_lc_rs::signature::EcdsaSigningAlgorithm;
use aws_lc_rs::signature::EcdsaVerificationAlgorithm;
use serde::Deserialize;
use serde::Serialize;
/// Hash algorithms accepted from JS, serialized with their WebCrypto
/// names (e.g. "SHA-256").
#[derive(Serialize, Deserialize, Copy, Clone, Eq, PartialEq)]
pub enum CryptoHash {
  #[serde(rename = "SHA-1")]
  Sha1,
  #[serde(rename = "SHA-256")]
  Sha256,
  #[serde(rename = "SHA-384")]
  Sha384,
  #[serde(rename = "SHA-512")]
  Sha512,
}
/// Named elliptic curves supported by the aws-lc-rs backed ECDSA/ECDH
/// code paths, serialized with their WebCrypto names.
#[derive(Serialize, Deserialize, Copy, Clone)]
pub enum CryptoNamedCurve {
  #[serde(rename = "P-256")]
  P256,
  #[serde(rename = "P-384")]
  P384,
}
/// Maps a named curve to the corresponding aws-lc-rs ECDH agreement
/// algorithm.
impl From<CryptoNamedCurve> for &RingAlgorithm {
  fn from(curve: CryptoNamedCurve) -> &'static RingAlgorithm {
    match curve {
      CryptoNamedCurve::P256 => &aws_lc_rs::agreement::ECDH_P256,
      CryptoNamedCurve::P384 => &aws_lc_rs::agreement::ECDH_P384,
    }
  }
}
/// Maps a named curve to its aws-lc-rs ECDSA signing algorithm. Each
/// curve is paired with its matching hash (P-256/SHA-256, P-384/SHA-384)
/// in fixed-length (r||s) signature form.
impl From<CryptoNamedCurve> for &EcdsaSigningAlgorithm {
  fn from(curve: CryptoNamedCurve) -> &'static EcdsaSigningAlgorithm {
    match curve {
      CryptoNamedCurve::P256 => {
        &aws_lc_rs::signature::ECDSA_P256_SHA256_FIXED_SIGNING
      }
      CryptoNamedCurve::P384 => {
        &aws_lc_rs::signature::ECDSA_P384_SHA384_FIXED_SIGNING
      }
    }
  }
}
/// Verification-side counterpart of the signing mapping above: fixed-length
/// ECDSA with the curve's matching hash.
impl From<CryptoNamedCurve> for &EcdsaVerificationAlgorithm {
  fn from(curve: CryptoNamedCurve) -> &'static EcdsaVerificationAlgorithm {
    match curve {
      CryptoNamedCurve::P256 => &aws_lc_rs::signature::ECDSA_P256_SHA256_FIXED,
      CryptoNamedCurve::P384 => &aws_lc_rs::signature::ECDSA_P384_SHA384_FIXED,
    }
  }
}
/// Maps a hash choice to the aws-lc-rs HMAC algorithm. SHA-1 uses the
/// explicitly legacy-only variant.
impl From<CryptoHash> for HmacAlgorithm {
  fn from(hash: CryptoHash) -> HmacAlgorithm {
    match hash {
      CryptoHash::Sha1 => aws_lc_rs::hmac::HMAC_SHA1_FOR_LEGACY_USE_ONLY,
      CryptoHash::Sha256 => aws_lc_rs::hmac::HMAC_SHA256,
      CryptoHash::Sha384 => aws_lc_rs::hmac::HMAC_SHA384,
      CryptoHash::Sha512 => aws_lc_rs::hmac::HMAC_SHA512,
    }
  }
}
/// Maps a hash choice to the aws-lc-rs digest algorithm (used by
/// `op_crypto_subtle_digest`). SHA-1 uses the legacy-only variant.
impl From<CryptoHash> for &'static digest::Algorithm {
  fn from(hash: CryptoHash) -> &'static digest::Algorithm {
    match hash {
      CryptoHash::Sha1 => &digest::SHA1_FOR_LEGACY_USE_ONLY,
      CryptoHash::Sha256 => &digest::SHA256,
      CryptoHash::Sha384 => &digest::SHA384,
      CryptoHash::Sha512 => &digest::SHA512,
    }
  }
}
/// Wrapper that gives a caller-chosen byte length an `hkdf::KeyType`
/// implementation, so HKDF-Expand can produce arbitrary-size outputs.
pub struct HkdfOutput<T>(pub T);
impl hkdf::KeyType for HkdfOutput<usize> {
  // Requested output length in bytes.
  fn len(&self) -> usize {
    self.0
  }
}
/// WebCrypto algorithm identifiers, serialized with their canonical spec
/// spelling (e.g. "RSASSA-PKCS1-v1_5"). Used to dispatch the sign/verify/
/// derive/wrap ops.
#[derive(Serialize, Deserialize, Clone, Copy)]
pub enum Algorithm {
  #[serde(rename = "RSASSA-PKCS1-v1_5")]
  RsassaPkcs1v15,
  #[serde(rename = "RSA-PSS")]
  RsaPss,
  #[serde(rename = "RSA-OAEP")]
  RsaOaep,
  #[serde(rename = "ECDSA")]
  Ecdsa,
  #[serde(rename = "ECDH")]
  Ecdh,
  #[serde(rename = "AES-CTR")]
  AesCtr,
  #[serde(rename = "AES-CBC")]
  AesCbc,
  #[serde(rename = "AES-GCM")]
  AesGcm,
  #[serde(rename = "AES-KW")]
  AesKw,
  #[serde(rename = "HMAC")]
  Hmac,
  #[serde(rename = "PBKDF2")]
  Pbkdf2,
  #[serde(rename = "HKDF")]
  Hkdf,
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/ext/crypto/lib.rs | ext/crypto/lib.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::num::NonZeroU32;
use aes_kw::KekAes128;
use aes_kw::KekAes192;
use aes_kw::KekAes256;
use aws_lc_rs::digest;
use aws_lc_rs::hkdf;
use aws_lc_rs::hmac::Algorithm as HmacAlgorithm;
use aws_lc_rs::hmac::Key as HmacKey;
use aws_lc_rs::pbkdf2;
use base64::Engine;
use base64::prelude::BASE64_URL_SAFE_NO_PAD;
use deno_core::JsBuffer;
use deno_core::OpState;
use deno_core::ToJsBuffer;
use deno_core::op2;
use deno_core::unsync::spawn_blocking;
use deno_error::JsErrorBox;
use p256::ecdsa::Signature as P256Signature;
use p256::ecdsa::SigningKey as P256SigningKey;
use p256::ecdsa::VerifyingKey as P256VerifyingKey;
use p256::elliptic_curve::sec1::FromEncodedPoint;
use p256::pkcs8::DecodePrivateKey;
use p384::ecdsa::Signature as P384Signature;
use p384::ecdsa::SigningKey as P384SigningKey;
use p384::ecdsa::VerifyingKey as P384VerifyingKey;
pub use rand;
use rand::Rng;
use rand::SeedableRng;
use rand::rngs::OsRng;
use rand::rngs::StdRng;
use rand::thread_rng;
use rsa::Pss;
use rsa::RsaPrivateKey;
use rsa::RsaPublicKey;
use rsa::pkcs1::DecodeRsaPrivateKey;
use rsa::pkcs1::DecodeRsaPublicKey;
use rsa::signature::SignatureEncoding;
use rsa::signature::Signer;
use rsa::signature::Verifier;
use rsa::traits::SignatureScheme;
use serde::Deserialize;
use sha1::Sha1;
use sha2::Digest;
use sha2::Sha256;
use sha2::Sha384;
use sha2::Sha512;
use signature::hazmat::PrehashSigner;
use signature::hazmat::PrehashVerifier; // Re-export rand
mod decrypt;
mod ed25519;
mod encrypt;
mod export_key;
mod generate_key;
mod import_key;
mod key;
mod shared;
mod x25519;
mod x448;
pub use crate::decrypt::DecryptError;
pub use crate::decrypt::op_crypto_decrypt;
pub use crate::ed25519::Ed25519Error;
pub use crate::encrypt::EncryptError;
pub use crate::encrypt::op_crypto_encrypt;
pub use crate::export_key::ExportKeyError;
pub use crate::export_key::op_crypto_export_key;
pub use crate::generate_key::GenerateKeyError;
pub use crate::generate_key::op_crypto_generate_key;
pub use crate::import_key::ImportKeyError;
pub use crate::import_key::op_crypto_import_key;
use crate::key::Algorithm;
use crate::key::CryptoHash;
use crate::key::CryptoNamedCurve;
use crate::key::HkdfOutput;
pub use crate::shared::SharedError;
use crate::shared::V8RawKeyData;
pub use crate::x448::X448Error;
pub use crate::x25519::X25519Error;
deno_core::extension!(deno_crypto,
deps = [ deno_webidl, deno_web ],
ops = [
op_crypto_get_random_values,
op_crypto_generate_key,
op_crypto_sign_key,
op_crypto_verify_key,
op_crypto_derive_bits,
op_crypto_import_key,
op_crypto_export_key,
op_crypto_encrypt,
op_crypto_decrypt,
op_crypto_subtle_digest,
op_crypto_random_uuid,
op_crypto_wrap_key,
op_crypto_unwrap_key,
op_crypto_base64url_decode,
op_crypto_base64url_encode,
x25519::op_crypto_generate_x25519_keypair,
x25519::op_crypto_x25519_public_key,
x25519::op_crypto_derive_bits_x25519,
x25519::op_crypto_import_spki_x25519,
x25519::op_crypto_import_pkcs8_x25519,
x25519::op_crypto_export_spki_x25519,
x25519::op_crypto_export_pkcs8_x25519,
x448::op_crypto_generate_x448_keypair,
x448::op_crypto_derive_bits_x448,
x448::op_crypto_import_spki_x448,
x448::op_crypto_import_pkcs8_x448,
x448::op_crypto_export_spki_x448,
x448::op_crypto_export_pkcs8_x448,
ed25519::op_crypto_generate_ed25519_keypair,
ed25519::op_crypto_import_spki_ed25519,
ed25519::op_crypto_import_pkcs8_ed25519,
ed25519::op_crypto_sign_ed25519,
ed25519::op_crypto_verify_ed25519,
ed25519::op_crypto_export_spki_ed25519,
ed25519::op_crypto_export_pkcs8_ed25519,
ed25519::op_crypto_jwk_x_ed25519,
],
esm = [ "00_crypto.js" ],
options = {
maybe_seed: Option<u64>,
},
state = |state, options| {
if let Some(seed) = options.maybe_seed {
state.put(StdRng::seed_from_u64(seed));
}
},
);
#[derive(Debug, thiserror::Error, deno_error::JsError)]
pub enum CryptoError {
#[class(inherit)]
#[error(transparent)]
General(
#[from]
#[inherit]
SharedError,
),
#[class(inherit)]
#[error(transparent)]
JoinError(
#[from]
#[inherit]
tokio::task::JoinError,
),
#[class(generic)]
#[error(transparent)]
Der(#[from] rsa::pkcs1::der::Error),
#[class(type)]
#[error("Missing argument hash")]
MissingArgumentHash,
#[class(type)]
#[error("Missing argument saltLength")]
MissingArgumentSaltLength,
#[class(type)]
#[error("unsupported algorithm")]
UnsupportedAlgorithm,
#[class(generic)]
#[error(transparent)]
KeyRejected(#[from] aws_lc_rs::error::KeyRejected),
#[class(generic)]
#[error(transparent)]
RSA(#[from] rsa::Error),
#[class(generic)]
#[error(transparent)]
Pkcs1(#[from] rsa::pkcs1::Error),
#[class(generic)]
#[error(transparent)]
Unspecified(#[from] aws_lc_rs::error::Unspecified),
#[class(type)]
#[error("Invalid key format")]
InvalidKeyFormat,
#[class(generic)]
#[error(transparent)]
P256Ecdsa(#[from] p256::ecdsa::Error),
#[class(type)]
#[error("Unexpected error decoding private key")]
DecodePrivateKey,
#[class(type)]
#[error("Missing argument publicKey")]
MissingArgumentPublicKey,
#[class(type)]
#[error("Missing argument namedCurve")]
MissingArgumentNamedCurve,
#[class(type)]
#[error("Missing argument info")]
MissingArgumentInfo,
#[class("DOMExceptionOperationError")]
#[error("The length provided for HKDF is too large")]
HKDFLengthTooLarge,
#[class(generic)]
#[error(transparent)]
Base64Decode(#[from] base64::DecodeError),
#[class(type)]
#[error("Data must be multiple of 8 bytes")]
DataInvalidSize,
#[class(type)]
#[error("Invalid key length")]
InvalidKeyLength,
#[class("DOMExceptionOperationError")]
#[error("encryption error")]
EncryptionError,
#[class("DOMExceptionOperationError")]
#[error("decryption error - integrity check failed")]
DecryptionError,
#[class("DOMExceptionQuotaExceededError")]
#[error(
"The ArrayBufferView's byte length ({0}) exceeds the number of bytes of entropy available via this API (65536)"
)]
ArrayBufferViewLengthExceeded(usize),
#[class(inherit)]
#[error(transparent)]
Other(
#[from]
#[inherit]
JsErrorBox,
),
}
/// Decodes unpadded base64url input; invalid input surfaces as
/// `CryptoError::Base64Decode`.
#[op2]
#[serde]
pub fn op_crypto_base64url_decode(
  #[string] data: String,
) -> Result<ToJsBuffer, CryptoError> {
  let decoded = BASE64_URL_SAFE_NO_PAD.decode(data)?;
  Ok(decoded.into())
}
/// Encodes bytes as unpadded base64url (the alphabet used for JWK fields).
#[op2]
#[string]
pub fn op_crypto_base64url_encode(#[buffer] data: JsBuffer) -> String {
  BASE64_URL_SAFE_NO_PAD.encode(data)
}
/// Fills `out` with random bytes (crypto.getRandomValues).
///
/// Rejects buffers larger than 64 KiB, mirroring the WebCrypto limit.
/// When a seeded `StdRng` is installed in op state (deterministic mode),
/// it is used instead of the thread-local CSPRNG.
#[op2(fast)]
pub fn op_crypto_get_random_values(
  state: &mut OpState,
  #[buffer] out: &mut [u8],
) -> Result<(), CryptoError> {
  if out.len() > 65536 {
    return Err(CryptoError::ArrayBufferViewLengthExceeded(out.len()));
  }
  match state.try_borrow_mut::<StdRng>() {
    Some(seeded) => seeded.fill(out),
    None => thread_rng().fill(out),
  }
  Ok(())
}
/// Key serialization formats accepted from JS.
#[derive(Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum KeyFormat {
  Raw,
  Pkcs8,
  Spki,
}
/// Which kind of key material a `KeyData` payload carries.
#[derive(Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum KeyType {
  Secret,
  Private,
  Public,
}
/// Raw key bytes plus their kind, as passed from JS.
#[derive(Deserialize)]
#[serde(rename_all = "lowercase")]
pub struct KeyData {
  r#type: KeyType,
  data: JsBuffer,
}
/// Arguments for `op_crypto_sign_key`, deserialized from JS.
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct SignArg {
  key: KeyData,
  algorithm: Algorithm,
  // RSA-PSS only.
  salt_length: Option<u32>,
  hash: Option<CryptoHash>,
  // ECDSA only.
  named_curve: Option<CryptoNamedCurve>,
}
#[op2(async)]
#[serde]
pub async fn op_crypto_sign_key(
#[serde] args: SignArg,
#[buffer] zero_copy: JsBuffer,
) -> Result<ToJsBuffer, CryptoError> {
deno_core::unsync::spawn_blocking(move || {
let data = &*zero_copy;
let algorithm = args.algorithm;
let signature = match algorithm {
Algorithm::RsassaPkcs1v15 => {
use rsa::pkcs1v15::SigningKey;
let private_key = RsaPrivateKey::from_pkcs1_der(&args.key.data)?;
match args.hash.ok_or_else(|| CryptoError::MissingArgumentHash)? {
CryptoHash::Sha1 => {
let signing_key = SigningKey::<Sha1>::new(private_key);
signing_key.sign(data)
}
CryptoHash::Sha256 => {
let signing_key = SigningKey::<Sha256>::new(private_key);
signing_key.sign(data)
}
CryptoHash::Sha384 => {
let signing_key = SigningKey::<Sha384>::new(private_key);
signing_key.sign(data)
}
CryptoHash::Sha512 => {
let signing_key = SigningKey::<Sha512>::new(private_key);
signing_key.sign(data)
}
}
.to_vec()
}
Algorithm::RsaPss => {
let private_key = RsaPrivateKey::from_pkcs1_der(&args.key.data)?;
let salt_len = args
.salt_length
.ok_or_else(|| CryptoError::MissingArgumentSaltLength)?
as usize;
let mut rng = OsRng;
match args.hash.ok_or_else(|| CryptoError::MissingArgumentHash)? {
CryptoHash::Sha1 => {
let signing_key = Pss::new_with_salt::<Sha1>(salt_len);
let hashed = Sha1::digest(data);
signing_key.sign(Some(&mut rng), &private_key, &hashed)?
}
CryptoHash::Sha256 => {
let signing_key = Pss::new_with_salt::<Sha256>(salt_len);
let hashed = Sha256::digest(data);
signing_key.sign(Some(&mut rng), &private_key, &hashed)?
}
CryptoHash::Sha384 => {
let signing_key = Pss::new_with_salt::<Sha384>(salt_len);
let hashed = Sha384::digest(data);
signing_key.sign(Some(&mut rng), &private_key, &hashed)?
}
CryptoHash::Sha512 => {
let signing_key = Pss::new_with_salt::<Sha512>(salt_len);
let hashed = Sha512::digest(data);
signing_key.sign(Some(&mut rng), &private_key, &hashed)?
}
}
.to_vec()
}
Algorithm::Ecdsa => {
let hash = args.hash.ok_or_else(|| CryptoError::MissingArgumentHash)?;
let named_curve =
args.named_curve.ok_or_else(JsErrorBox::not_supported)?;
match named_curve {
CryptoNamedCurve::P256 => {
// Decode PKCS#8 private key.
let secret_key = p256::SecretKey::from_pkcs8_der(&args.key.data)
.map_err(|_| CryptoError::InvalidKeyFormat)?;
let signing_key = P256SigningKey::from(secret_key);
let prehash = match hash {
CryptoHash::Sha1 => sha1::Sha1::digest(data).to_vec(),
CryptoHash::Sha256 => sha2::Sha256::digest(data).to_vec(),
CryptoHash::Sha384 => sha2::Sha384::digest(data).to_vec(),
CryptoHash::Sha512 => sha2::Sha512::digest(data).to_vec(),
};
// Sign the prehashed message, producing a raw r||s signature.
let signature: P256Signature =
signing_key.sign_prehash(&prehash)?;
signature.to_bytes().to_vec()
}
CryptoNamedCurve::P384 => {
let secret_key = p384::SecretKey::from_pkcs8_der(&args.key.data)
.map_err(|_| CryptoError::InvalidKeyFormat)?;
let signing_key = P384SigningKey::from(secret_key);
let prehash = match hash {
CryptoHash::Sha1 => sha1::Sha1::digest(data).to_vec(),
CryptoHash::Sha256 => sha2::Sha256::digest(data).to_vec(),
CryptoHash::Sha384 => sha2::Sha384::digest(data).to_vec(),
CryptoHash::Sha512 => sha2::Sha512::digest(data).to_vec(),
};
let signature: P384Signature =
signing_key.sign_prehash(&prehash)?;
signature.to_bytes().to_vec()
}
}
}
Algorithm::Hmac => {
let hash: HmacAlgorithm =
args.hash.ok_or_else(JsErrorBox::not_supported)?.into();
let key = HmacKey::new(hash, &args.key.data);
let signature = aws_lc_rs::hmac::sign(&key, data);
signature.as_ref().to_vec()
}
_ => return Err(CryptoError::UnsupportedAlgorithm),
};
Ok(signature.into())
})
.await?
}
/// Arguments for `op_crypto_verify_key`, deserialized from JS.
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct VerifyArg {
  key: KeyData,
  algorithm: Algorithm,
  // RSA-PSS only.
  salt_length: Option<u32>,
  hash: Option<CryptoHash>,
  // Signature to check against the data buffer.
  signature: JsBuffer,
  // ECDSA only.
  named_curve: Option<CryptoNamedCurve>,
}
#[op2(async)]
pub async fn op_crypto_verify_key(
#[serde] args: VerifyArg,
#[buffer] zero_copy: JsBuffer,
) -> Result<bool, CryptoError> {
deno_core::unsync::spawn_blocking(move || {
let data = &*zero_copy;
let algorithm = args.algorithm;
let verification = match algorithm {
Algorithm::RsassaPkcs1v15 => {
use rsa::pkcs1v15::Signature;
use rsa::pkcs1v15::VerifyingKey;
let public_key = read_rsa_public_key(args.key)?;
let signature: Signature = args.signature.as_ref().try_into()?;
match args.hash.ok_or_else(|| CryptoError::MissingArgumentHash)? {
CryptoHash::Sha1 => {
let verifying_key = VerifyingKey::<Sha1>::new(public_key);
verifying_key.verify(data, &signature).is_ok()
}
CryptoHash::Sha256 => {
let verifying_key = VerifyingKey::<Sha256>::new(public_key);
verifying_key.verify(data, &signature).is_ok()
}
CryptoHash::Sha384 => {
let verifying_key = VerifyingKey::<Sha384>::new(public_key);
verifying_key.verify(data, &signature).is_ok()
}
CryptoHash::Sha512 => {
let verifying_key = VerifyingKey::<Sha512>::new(public_key);
verifying_key.verify(data, &signature).is_ok()
}
}
}
Algorithm::RsaPss => {
let public_key = read_rsa_public_key(args.key)?;
let signature = args.signature.as_ref();
let salt_len = args
.salt_length
.ok_or_else(|| CryptoError::MissingArgumentSaltLength)?
as usize;
match args.hash.ok_or_else(|| CryptoError::MissingArgumentHash)? {
CryptoHash::Sha1 => {
let pss = Pss::new_with_salt::<Sha1>(salt_len);
let hashed = Sha1::digest(data);
pss.verify(&public_key, &hashed, signature).is_ok()
}
CryptoHash::Sha256 => {
let pss = Pss::new_with_salt::<Sha256>(salt_len);
let hashed = Sha256::digest(data);
pss.verify(&public_key, &hashed, signature).is_ok()
}
CryptoHash::Sha384 => {
let pss = Pss::new_with_salt::<Sha384>(salt_len);
let hashed = Sha384::digest(data);
pss.verify(&public_key, &hashed, signature).is_ok()
}
CryptoHash::Sha512 => {
let pss = Pss::new_with_salt::<Sha512>(salt_len);
let hashed = Sha512::digest(data);
pss.verify(&public_key, &hashed, signature).is_ok()
}
}
}
Algorithm::Hmac => {
let hash: HmacAlgorithm =
args.hash.ok_or_else(JsErrorBox::not_supported)?.into();
let key = HmacKey::new(hash, &args.key.data);
aws_lc_rs::hmac::verify(&key, data, &args.signature).is_ok()
}
Algorithm::Ecdsa => {
let hash = args.hash.ok_or_else(|| CryptoError::MissingArgumentHash)?;
let named_curve =
args.named_curve.ok_or_else(JsErrorBox::not_supported)?;
match named_curve {
CryptoNamedCurve::P256 => {
let verifying_key = match args.key.r#type {
KeyType::Public => {
P256VerifyingKey::from_sec1_bytes(&args.key.data)
.map_err(|_| CryptoError::InvalidKeyFormat)?
}
KeyType::Private => {
let secret_key =
p256::SecretKey::from_pkcs8_der(&args.key.data)
.map_err(|_| CryptoError::InvalidKeyFormat)?;
let signing_key = P256SigningKey::from(secret_key);
*signing_key.verifying_key()
}
_ => return Err(CryptoError::InvalidKeyFormat),
};
match P256Signature::from_slice(&args.signature) {
Ok(signature) => {
let prehash = match hash {
CryptoHash::Sha1 => sha1::Sha1::digest(data).to_vec(),
CryptoHash::Sha256 => sha2::Sha256::digest(data).to_vec(),
CryptoHash::Sha384 => sha2::Sha384::digest(data).to_vec(),
CryptoHash::Sha512 => sha2::Sha512::digest(data).to_vec(),
};
verifying_key.verify_prehash(&prehash, &signature).is_ok()
}
_ => false,
}
}
CryptoNamedCurve::P384 => {
let verifying_key = match args.key.r#type {
KeyType::Public => {
P384VerifyingKey::from_sec1_bytes(&args.key.data)
.map_err(|_| CryptoError::InvalidKeyFormat)?
}
KeyType::Private => {
let secret_key =
p384::SecretKey::from_pkcs8_der(&args.key.data)
.map_err(|_| CryptoError::InvalidKeyFormat)?;
let signing_key = P384SigningKey::from(secret_key);
*signing_key.verifying_key()
}
_ => return Err(CryptoError::InvalidKeyFormat),
};
match P384Signature::from_slice(&args.signature) {
Ok(signature) => {
let prehash = match hash {
CryptoHash::Sha1 => sha1::Sha1::digest(data).to_vec(),
CryptoHash::Sha256 => sha2::Sha256::digest(data).to_vec(),
CryptoHash::Sha384 => sha2::Sha384::digest(data).to_vec(),
CryptoHash::Sha512 => sha2::Sha512::digest(data).to_vec(),
};
verifying_key.verify_prehash(&prehash, &signature).is_ok()
}
_ => false,
}
}
}
}
_ => return Err(CryptoError::UnsupportedAlgorithm),
};
Ok(verification)
})
.await?
}
/// Arguments for `op_crypto_derive_bits`, deserialized from JS.
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct DeriveKeyArg {
  key: KeyData,
  algorithm: Algorithm,
  hash: Option<CryptoHash>,
  // Desired output length in bits (the op divides by 8).
  length: usize,
  // PBKDF2 only.
  iterations: Option<u32>,
  // ECDH
  public_key: Option<KeyData>,
  named_curve: Option<CryptoNamedCurve>,
  // HKDF
  info: Option<JsBuffer>,
}
#[op2(async)]
#[serde]
pub async fn op_crypto_derive_bits(
#[serde] args: DeriveKeyArg,
#[buffer] zero_copy: Option<JsBuffer>,
) -> Result<ToJsBuffer, CryptoError> {
deno_core::unsync::spawn_blocking(move || {
let algorithm = args.algorithm;
match algorithm {
Algorithm::Pbkdf2 => {
let zero_copy = zero_copy.ok_or_else(JsErrorBox::not_supported)?;
let salt = &*zero_copy;
// The caller must validate these cases.
assert!(args.length > 0);
assert!(args.length.is_multiple_of(8));
let algorithm = match args.hash.ok_or_else(JsErrorBox::not_supported)? {
CryptoHash::Sha1 => pbkdf2::PBKDF2_HMAC_SHA1,
CryptoHash::Sha256 => pbkdf2::PBKDF2_HMAC_SHA256,
CryptoHash::Sha384 => pbkdf2::PBKDF2_HMAC_SHA384,
CryptoHash::Sha512 => pbkdf2::PBKDF2_HMAC_SHA512,
};
// This will never panic. We have already checked length earlier.
let iterations = NonZeroU32::new(
args.iterations.ok_or_else(JsErrorBox::not_supported)?,
)
.unwrap();
let secret = args.key.data;
let mut out = vec![0; args.length / 8];
pbkdf2::derive(algorithm, iterations, salt, &secret, &mut out);
Ok(out.into())
}
Algorithm::Ecdh => {
let named_curve = args
.named_curve
.ok_or_else(|| CryptoError::MissingArgumentNamedCurve)?;
let public_key = args
.public_key
.ok_or_else(|| CryptoError::MissingArgumentPublicKey)?;
match named_curve {
CryptoNamedCurve::P256 => {
let secret_key = p256::SecretKey::from_pkcs8_der(&args.key.data)
.map_err(|_| CryptoError::DecodePrivateKey)?;
let public_key = match public_key.r#type {
KeyType::Private => {
p256::SecretKey::from_pkcs8_der(&public_key.data)
.map_err(|_| CryptoError::DecodePrivateKey)?
.public_key()
}
KeyType::Public => {
let point = p256::EncodedPoint::from_bytes(public_key.data)
.map_err(|_| CryptoError::DecodePrivateKey)?;
let pk = p256::PublicKey::from_encoded_point(&point);
// pk is a constant time Option.
if pk.is_some().into() {
pk.unwrap()
} else {
return Err(CryptoError::DecodePrivateKey);
}
}
_ => unreachable!(),
};
let shared_secret = p256::elliptic_curve::ecdh::diffie_hellman(
secret_key.to_nonzero_scalar(),
public_key.as_affine(),
);
// raw serialized x-coordinate of the computed point
Ok(shared_secret.raw_secret_bytes().to_vec().into())
}
CryptoNamedCurve::P384 => {
let secret_key = p384::SecretKey::from_pkcs8_der(&args.key.data)
.map_err(|_| CryptoError::DecodePrivateKey)?;
let public_key = match public_key.r#type {
KeyType::Private => {
p384::SecretKey::from_pkcs8_der(&public_key.data)
.map_err(|_| CryptoError::DecodePrivateKey)?
.public_key()
}
KeyType::Public => {
let point = p384::EncodedPoint::from_bytes(public_key.data)
.map_err(|_| CryptoError::DecodePrivateKey)?;
let pk = p384::PublicKey::from_encoded_point(&point);
// pk is a constant time Option.
if pk.is_some().into() {
pk.unwrap()
} else {
return Err(CryptoError::DecodePrivateKey);
}
}
_ => unreachable!(),
};
let shared_secret = p384::elliptic_curve::ecdh::diffie_hellman(
secret_key.to_nonzero_scalar(),
public_key.as_affine(),
);
// raw serialized x-coordinate of the computed point
Ok(shared_secret.raw_secret_bytes().to_vec().into())
}
}
}
Algorithm::Hkdf => {
let zero_copy = zero_copy.ok_or_else(JsErrorBox::not_supported)?;
let salt = &*zero_copy;
let algorithm = match args.hash.ok_or_else(JsErrorBox::not_supported)? {
CryptoHash::Sha1 => hkdf::HKDF_SHA1_FOR_LEGACY_USE_ONLY,
CryptoHash::Sha256 => hkdf::HKDF_SHA256,
CryptoHash::Sha384 => hkdf::HKDF_SHA384,
CryptoHash::Sha512 => hkdf::HKDF_SHA512,
};
let info = args.info.ok_or(CryptoError::MissingArgumentInfo)?;
// IKM
let secret = args.key.data;
// L
let length = args.length / 8;
let salt = hkdf::Salt::new(algorithm, salt);
let prk = salt.extract(&secret);
let info = &[&*info];
let okm = prk
.expand(info, HkdfOutput(length))
.map_err(|_e| CryptoError::HKDFLengthTooLarge)?;
let mut r = vec![0u8; length];
okm.fill(&mut r)?;
Ok(r.into())
}
_ => Err(CryptoError::UnsupportedAlgorithm),
}
})
.await?
}
/// Extracts an `RsaPublicKey` from PKCS#1 DER key data, deriving the
/// public half when a private key was supplied.
fn read_rsa_public_key(key_data: KeyData) -> Result<RsaPublicKey, CryptoError> {
  match key_data.r#type {
    KeyType::Public => Ok(RsaPublicKey::from_pkcs1_der(&key_data.data)?),
    KeyType::Private => {
      Ok(RsaPrivateKey::from_pkcs1_der(&key_data.data)?.to_public_key())
    }
    // Callers never route symmetric key material here.
    KeyType::Secret => unreachable!("unexpected KeyType::Secret"),
  }
}
/// Generates a random v4 UUID string (crypto.randomUUID).
///
/// Uses the seeded `StdRng` from op state when present (deterministic
/// mode), otherwise the thread-local CSPRNG.
#[op2]
#[string]
pub fn op_crypto_random_uuid(
  state: &mut OpState,
) -> Result<String, CryptoError> {
  let mut bytes = [0u8; 16];
  match state.try_borrow_mut::<StdRng>() {
    Some(seeded_rng) => seeded_rng.fill(&mut bytes),
    None => thread_rng().fill(&mut bytes),
  }
  Ok(fast_uuid_v4(&mut bytes))
}
/// Computes a digest of `data` (crypto.subtle.digest).
///
/// Hashing can be CPU-heavy for large inputs, so the work runs on a
/// blocking thread off the JS event loop.
#[op2(async)]
#[serde]
pub async fn op_crypto_subtle_digest(
  #[serde] algorithm: CryptoHash,
  #[buffer] data: JsBuffer,
) -> Result<ToJsBuffer, CryptoError> {
  let digest_bytes = spawn_blocking(move || {
    let alg: &digest::Algorithm = algorithm.into();
    digest::digest(alg, &data).as_ref().to_vec()
  })
  .await?;
  Ok(digest_bytes.into())
}
/// Arguments shared by the wrapKey/unwrapKey ops: the wrapping key and
/// the algorithm to use.
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct WrapUnwrapKeyArg {
  key: V8RawKeyData,
  algorithm: Algorithm,
}
/// AES-KW key wrapping. `data` is the key material to wrap and must be a
/// multiple of 8 bytes; `args.key` is a 128/192/256-bit key-encryption
/// key. Only `Algorithm::AesKw` is supported.
#[op2]
#[serde]
pub fn op_crypto_wrap_key(
  #[serde] args: WrapUnwrapKeyArg,
  #[buffer] data: JsBuffer,
) -> Result<ToJsBuffer, CryptoError> {
  let algorithm = args.algorithm;
  match algorithm {
    Algorithm::AesKw => {
      let key = args.key.as_secret_key()?;
      // AES-KW operates on whole 64-bit blocks.
      if !data.len().is_multiple_of(8) {
        return Err(CryptoError::DataInvalidSize);
      }
      // The KEK length selects AES-128/-192/-256.
      let wrapped_key = match key.len() {
        16 => KekAes128::new(key.into()).wrap_vec(&data),
        24 => KekAes192::new(key.into()).wrap_vec(&data),
        32 => KekAes256::new(key.into()).wrap_vec(&data),
        _ => return Err(CryptoError::InvalidKeyLength),
      }
      .map_err(|_| CryptoError::EncryptionError)?;
      Ok(wrapped_key.into())
    }
    _ => Err(CryptoError::UnsupportedAlgorithm),
  }
}
/// AES-KW key unwrapping, the inverse of `op_crypto_wrap_key`. Failure of
/// the built-in integrity check is reported as `DecryptionError`.
#[op2]
#[serde]
pub fn op_crypto_unwrap_key(
  #[serde] args: WrapUnwrapKeyArg,
  #[buffer] data: JsBuffer,
) -> Result<ToJsBuffer, CryptoError> {
  let algorithm = args.algorithm;
  match algorithm {
    Algorithm::AesKw => {
      let key = args.key.as_secret_key()?;
      // AES-KW ciphertext is always a whole number of 64-bit blocks.
      if !data.len().is_multiple_of(8) {
        return Err(CryptoError::DataInvalidSize);
      }
      // The KEK length selects AES-128/-192/-256.
      let unwrapped_key = match key.len() {
        16 => KekAes128::new(key.into()).unwrap_vec(&data),
        24 => KekAes192::new(key.into()).unwrap_vec(&data),
        32 => KekAes256::new(key.into()).unwrap_vec(&data),
        _ => return Err(CryptoError::InvalidKeyLength),
      }
      .map_err(|_| CryptoError::DecryptionError)?;
      Ok(unwrapped_key.into())
    }
    _ => Err(CryptoError::UnsupportedAlgorithm),
  }
}
const HEX_CHARS: &[u8; 16] = b"0123456789abcdef";
/// Formats 16 random bytes as an RFC 4122 version-4 UUID string
/// (8-4-4-4-12 lowercase hex groups), setting the version and variant
/// bits in place.
fn fast_uuid_v4(bytes: &mut [u8; 16]) -> String {
  // Set UUID version to 4 and variant to 1.
  bytes[6] = (bytes[6] & 0x0f) | 0x40;
  bytes[8] = (bytes[8] & 0x3f) | 0x80;
  // 32 hex digits + 4 dashes.
  let mut out = String::with_capacity(36);
  for (i, b) in bytes.iter().enumerate() {
    // Dashes precede bytes 4, 6, 8 and 10 (the 8-4-4-4-12 grouping).
    if matches!(i, 4 | 6 | 8 | 10) {
      out.push('-');
    }
    out.push(HEX_CHARS[(b >> 4) as usize] as char);
    out.push(HEX_CHARS[(b & 0x0f) as usize] as char);
  }
  out
}
/// Cross-checks `fast_uuid_v4` against the `uuid` crate's builder on a
/// random input.
#[test]
fn test_fast_uuid_v4_correctness() {
  let mut rng = thread_rng();
  let mut bytes = [0u8; 16];
  rng.fill(&mut bytes);
  // Clone so the reference implementation sees the pre-mutation bytes
  // (fast_uuid_v4 sets version/variant bits in place).
  let uuid = fast_uuid_v4(&mut bytes.clone());
  let uuid_lib = uuid::Builder::from_bytes(bytes)
    .set_variant(uuid::Variant::RFC4122)
    .set_version(uuid::Version::Random)
    .as_uuid()
    .to_string();
  assert_eq!(uuid, uuid_lib);
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/ext/crypto/x448.rs | ext/crypto/x448.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use deno_core::ToJsBuffer;
use deno_core::op2;
use ed448_goldilocks::Scalar;
use ed448_goldilocks::curve::MontgomeryPoint;
use elliptic_curve::pkcs8::PrivateKeyInfo;
use elliptic_curve::subtle::ConstantTimeEq;
use rand::RngCore;
use rand::rngs::OsRng;
use spki::der::Decode;
use spki::der::Encode;
use spki::der::asn1::BitString;
/// Errors surfaced by the X448 ops.
#[derive(Debug, thiserror::Error, deno_error::JsError)]
pub enum X448Error {
  // DER serialization of an exported key failed.
  #[class("DOMExceptionOperationError")]
  #[error("Failed to export key")]
  FailedExport,
  // Malformed DER while encoding/decoding.
  #[class(generic)]
  #[error(transparent)]
  Der(#[from] spki::der::Error),
}
/// Generates an X448 keypair: fills `pkey` with bytes from the OS CSPRNG
/// and writes the matching public key into `pubkey`.
///
/// NOTE(review): `pkey.try_into()` assumes both buffers are 56 bytes —
/// the JS caller is expected to guarantee this.
#[op2(fast)]
pub fn op_crypto_generate_x448_keypair(
  #[buffer] pkey: &mut [u8],
  #[buffer] pubkey: &mut [u8],
) {
  let mut rng = OsRng;
  rng.fill_bytes(pkey);
  // x448(pkey, 5)
  let point = &MontgomeryPoint::generator()
    * &Scalar::from_bytes(pkey.try_into().unwrap());
  pubkey.copy_from_slice(&point.0);
}
const MONTGOMERY_IDENTITY: MontgomeryPoint = MontgomeryPoint([0; 56]);
#[op2(fast)]
pub fn op_crypto_derive_bits_x448(
#[buffer] k: &[u8],
#[buffer] u: &[u8],
#[buffer] secret: &mut [u8],
) -> bool {
let k: [u8; 56] = k.try_into().expect("Expected byteLength 56");
let u: [u8; 56] = u.try_into().expect("Expected byteLength 56");
// x448(k, u)
let point = &MontgomeryPoint(u) * &Scalar::from_bytes(k);
if point.ct_eq(&MONTGOMERY_IDENTITY).unwrap_u8() == 1 {
return true;
}
secret.copy_from_slice(&point.0);
false
}
// id-X448 OBJECT IDENTIFIER ::= { 1 3 101 111 }
const X448_OID: const_oid::ObjectIdentifier =
const_oid::ObjectIdentifier::new_unwrap("1.3.101.111");
#[op2]
#[serde]
pub fn op_crypto_export_spki_x448(
#[buffer] pubkey: &[u8],
) -> Result<ToJsBuffer, X448Error> {
let key_info = spki::SubjectPublicKeyInfo {
algorithm: spki::AlgorithmIdentifierRef {
oid: X448_OID,
parameters: None,
},
subject_public_key: BitString::from_bytes(pubkey)?,
};
Ok(
key_info
.to_der()
.map_err(|_| X448Error::FailedExport)?
.into(),
)
}
#[op2]
#[serde]
pub fn op_crypto_export_pkcs8_x448(
#[buffer] pkey: &[u8],
) -> Result<ToJsBuffer, X448Error> {
use rsa::pkcs1::der::Encode;
let pk_info = rsa::pkcs8::PrivateKeyInfo {
public_key: None,
algorithm: rsa::pkcs8::AlgorithmIdentifierRef {
oid: X448_OID,
parameters: None,
},
private_key: pkey, // OCTET STRING
};
let mut buf = Vec::new();
pk_info.encode_to_vec(&mut buf)?;
Ok(buf.into())
}
#[op2(fast)]
pub fn op_crypto_import_spki_x448(
#[buffer] key_data: &[u8],
#[buffer] out: &mut [u8],
) -> bool {
// 2-3.
let pk_info = match spki::SubjectPublicKeyInfoRef::try_from(key_data) {
Ok(pk_info) => pk_info,
Err(_) => return false,
};
// 4.
let alg = pk_info.algorithm.oid;
if alg != X448_OID {
return false;
}
// 5.
if pk_info.algorithm.parameters.is_some() {
return false;
}
out.copy_from_slice(pk_info.subject_public_key.raw_bytes());
true
}
#[op2(fast)]
pub fn op_crypto_import_pkcs8_x448(
#[buffer] key_data: &[u8],
#[buffer] out: &mut [u8],
) -> bool {
// 2-3.
let pk_info = match PrivateKeyInfo::from_der(key_data) {
Ok(pk_info) => pk_info,
Err(_) => return false,
};
// 4.
let alg = pk_info.algorithm.oid;
if alg != X448_OID {
return false;
}
// 5.
if pk_info.algorithm.parameters.is_some() {
return false;
}
// 6.
// CurvePrivateKey ::= OCTET STRING
if pk_info.private_key.len() != 56 {
return false;
}
out.copy_from_slice(&pk_info.private_key[2..]);
true
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/ext/crypto/decrypt.rs | ext/crypto/decrypt.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use aes::cipher::BlockDecryptMut;
use aes::cipher::KeyIvInit;
use aes::cipher::block_padding::Pkcs7;
use aes_gcm::AeadInPlace;
use aes_gcm::KeyInit;
use aes_gcm::Nonce;
use aes_gcm::aead::generic_array::ArrayLength;
use aes_gcm::aead::generic_array::typenum::U12;
use aes_gcm::aead::generic_array::typenum::U16;
use aes_gcm::aes::Aes128;
use aes_gcm::aes::Aes192;
use aes_gcm::aes::Aes256;
use ctr::Ctr32BE;
use ctr::Ctr64BE;
use ctr::Ctr128BE;
use ctr::cipher::StreamCipher;
use deno_core::JsBuffer;
use deno_core::ToJsBuffer;
use deno_core::op2;
use deno_core::unsync::spawn_blocking;
use rsa::pkcs1::DecodeRsaPrivateKey;
use serde::Deserialize;
use sha1::Sha1;
use sha2::Sha256;
use sha2::Sha384;
use sha2::Sha512;
use crate::shared::*;
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct DecryptOptions {
key: V8RawKeyData,
#[serde(flatten)]
algorithm: DecryptAlgorithm,
}
#[derive(Deserialize)]
#[serde(rename_all = "camelCase", tag = "algorithm")]
pub enum DecryptAlgorithm {
#[serde(rename = "RSA-OAEP")]
RsaOaep {
hash: ShaHash,
#[serde(with = "serde_bytes")]
label: Vec<u8>,
},
#[serde(rename = "AES-CBC", rename_all = "camelCase")]
AesCbc {
#[serde(with = "serde_bytes")]
iv: Vec<u8>,
length: usize,
},
#[serde(rename = "AES-CTR", rename_all = "camelCase")]
AesCtr {
#[serde(with = "serde_bytes")]
counter: Vec<u8>,
ctr_length: usize,
key_length: usize,
},
#[serde(rename = "AES-GCM", rename_all = "camelCase")]
AesGcm {
#[serde(with = "serde_bytes")]
iv: Vec<u8>,
#[serde(with = "serde_bytes")]
additional_data: Option<Vec<u8>>,
length: usize,
tag_length: usize,
},
}
#[derive(Debug, thiserror::Error, deno_error::JsError)]
pub enum DecryptError {
#[class(inherit)]
#[error(transparent)]
General(
#[from]
#[inherit]
SharedError,
),
#[class(generic)]
#[error(transparent)]
Pkcs1(#[from] rsa::pkcs1::Error),
#[class("DOMExceptionOperationError")]
#[error("Decryption failed")]
Failed,
#[class(type)]
#[error("invalid length")]
InvalidLength,
#[class(type)]
#[error("invalid counter length. Currently supported 32/64/128 bits")]
InvalidCounterLength,
#[class(type)]
#[error("tag length not equal to 128")]
InvalidTagLength,
#[class("DOMExceptionOperationError")]
#[error("invalid key or iv")]
InvalidKeyOrIv,
#[class("DOMExceptionOperationError")]
#[error("tried to decrypt too much data")]
TooMuchData,
#[class(type)]
#[error("iv length not equal to 12 or 16")]
InvalidIvLength,
#[class("DOMExceptionOperationError")]
#[error("{0}")]
Rsa(rsa::Error),
}
#[op2(async)]
#[serde]
pub async fn op_crypto_decrypt(
#[serde] opts: DecryptOptions,
#[buffer] data: JsBuffer,
) -> Result<ToJsBuffer, DecryptError> {
let key = opts.key;
let fun = move || match opts.algorithm {
DecryptAlgorithm::RsaOaep { hash, label } => {
decrypt_rsa_oaep(key, hash, label, &data)
}
DecryptAlgorithm::AesCbc { iv, length } => {
decrypt_aes_cbc(key, length, iv, &data)
}
DecryptAlgorithm::AesCtr {
counter,
ctr_length,
key_length,
} => decrypt_aes_ctr(key, key_length, &counter, ctr_length, &data),
DecryptAlgorithm::AesGcm {
iv,
additional_data,
length,
tag_length,
} => decrypt_aes_gcm(key, length, tag_length, iv, additional_data, &data),
};
let buf = spawn_blocking(fun).await.unwrap()?;
Ok(buf.into())
}
fn decrypt_rsa_oaep(
key: V8RawKeyData,
hash: ShaHash,
label: Vec<u8>,
data: &[u8],
) -> Result<Vec<u8>, DecryptError> {
let key = key.as_rsa_private_key()?;
let private_key = rsa::RsaPrivateKey::from_pkcs1_der(key)?;
let label = Some(String::from_utf8_lossy(&label).to_string());
let padding = match hash {
ShaHash::Sha1 => rsa::Oaep {
digest: Box::<Sha1>::default(),
mgf_digest: Box::<Sha1>::default(),
label,
},
ShaHash::Sha256 => rsa::Oaep {
digest: Box::<Sha256>::default(),
mgf_digest: Box::<Sha256>::default(),
label,
},
ShaHash::Sha384 => rsa::Oaep {
digest: Box::<Sha384>::default(),
mgf_digest: Box::<Sha384>::default(),
label,
},
ShaHash::Sha512 => rsa::Oaep {
digest: Box::<Sha512>::default(),
mgf_digest: Box::<Sha512>::default(),
label,
},
};
private_key
.decrypt(padding, data)
.map_err(DecryptError::Rsa)
}
fn decrypt_aes_cbc(
key: V8RawKeyData,
length: usize,
iv: Vec<u8>,
data: &[u8],
) -> Result<Vec<u8>, DecryptError> {
let key = key.as_secret_key()?;
// 2.
let plaintext = match length {
128 => {
// Section 10.3 Step 2 of RFC 2315 https://www.rfc-editor.org/rfc/rfc2315
type Aes128CbcDec = cbc::Decryptor<aes::Aes128>;
let cipher = Aes128CbcDec::new_from_slices(key, &iv)
.map_err(|_| DecryptError::InvalidKeyOrIv)?;
cipher
.decrypt_padded_vec_mut::<Pkcs7>(data)
.map_err(|_| DecryptError::Failed)?
}
192 => {
// Section 10.3 Step 2 of RFC 2315 https://www.rfc-editor.org/rfc/rfc2315
type Aes192CbcDec = cbc::Decryptor<aes::Aes192>;
let cipher = Aes192CbcDec::new_from_slices(key, &iv)
.map_err(|_| DecryptError::InvalidKeyOrIv)?;
cipher
.decrypt_padded_vec_mut::<Pkcs7>(data)
.map_err(|_| DecryptError::Failed)?
}
256 => {
// Section 10.3 Step 2 of RFC 2315 https://www.rfc-editor.org/rfc/rfc2315
type Aes256CbcDec = cbc::Decryptor<aes::Aes256>;
let cipher = Aes256CbcDec::new_from_slices(key, &iv)
.map_err(|_| DecryptError::InvalidKeyOrIv)?;
cipher
.decrypt_padded_vec_mut::<Pkcs7>(data)
.map_err(|_| DecryptError::Failed)?
}
_ => unreachable!(),
};
// 6.
Ok(plaintext)
}
fn decrypt_aes_ctr_gen<B>(
key: &[u8],
counter: &[u8],
data: &[u8],
) -> Result<Vec<u8>, DecryptError>
where
B: KeyIvInit + StreamCipher,
{
let mut cipher = B::new(key.into(), counter.into());
let mut plaintext = data.to_vec();
cipher
.try_apply_keystream(&mut plaintext)
.map_err(|_| DecryptError::TooMuchData)?;
Ok(plaintext)
}
fn decrypt_aes_gcm_gen<N: ArrayLength<u8>>(
key: &[u8],
tag: &aes_gcm::Tag,
nonce: &[u8],
length: usize,
additional_data: Vec<u8>,
plaintext: &mut [u8],
) -> Result<(), DecryptError> {
let nonce = Nonce::from_slice(nonce);
match length {
128 => {
let cipher = aes_gcm::AesGcm::<Aes128, N>::new_from_slice(key)
.map_err(|_| DecryptError::Failed)?;
cipher
.decrypt_in_place_detached(
nonce,
additional_data.as_slice(),
plaintext,
tag,
)
.map_err(|_| DecryptError::Failed)?
}
192 => {
let cipher = aes_gcm::AesGcm::<Aes192, N>::new_from_slice(key)
.map_err(|_| DecryptError::Failed)?;
cipher
.decrypt_in_place_detached(
nonce,
additional_data.as_slice(),
plaintext,
tag,
)
.map_err(|_| DecryptError::Failed)?
}
256 => {
let cipher = aes_gcm::AesGcm::<Aes256, N>::new_from_slice(key)
.map_err(|_| DecryptError::Failed)?;
cipher
.decrypt_in_place_detached(
nonce,
additional_data.as_slice(),
plaintext,
tag,
)
.map_err(|_| DecryptError::Failed)?
}
_ => return Err(DecryptError::InvalidLength),
};
Ok(())
}
fn decrypt_aes_ctr(
key: V8RawKeyData,
key_length: usize,
counter: &[u8],
ctr_length: usize,
data: &[u8],
) -> Result<Vec<u8>, DecryptError> {
let key = key.as_secret_key()?;
match ctr_length {
32 => match key_length {
128 => decrypt_aes_ctr_gen::<Ctr32BE<aes::Aes128>>(key, counter, data),
192 => decrypt_aes_ctr_gen::<Ctr32BE<aes::Aes192>>(key, counter, data),
256 => decrypt_aes_ctr_gen::<Ctr32BE<aes::Aes256>>(key, counter, data),
_ => Err(DecryptError::InvalidLength),
},
64 => match key_length {
128 => decrypt_aes_ctr_gen::<Ctr64BE<aes::Aes128>>(key, counter, data),
192 => decrypt_aes_ctr_gen::<Ctr64BE<aes::Aes192>>(key, counter, data),
256 => decrypt_aes_ctr_gen::<Ctr64BE<aes::Aes256>>(key, counter, data),
_ => Err(DecryptError::InvalidLength),
},
128 => match key_length {
128 => decrypt_aes_ctr_gen::<Ctr128BE<aes::Aes128>>(key, counter, data),
192 => decrypt_aes_ctr_gen::<Ctr128BE<aes::Aes192>>(key, counter, data),
256 => decrypt_aes_ctr_gen::<Ctr128BE<aes::Aes256>>(key, counter, data),
_ => Err(DecryptError::InvalidLength),
},
_ => Err(DecryptError::InvalidCounterLength),
}
}
fn decrypt_aes_gcm(
key: V8RawKeyData,
length: usize,
tag_length: usize,
iv: Vec<u8>,
additional_data: Option<Vec<u8>>,
data: &[u8],
) -> Result<Vec<u8>, DecryptError> {
let key = key.as_secret_key()?;
let additional_data = additional_data.unwrap_or_default();
// The `aes_gcm` crate only supports 128 bits tag length.
//
// Note that encryption won't fail, it instead truncates the tag
// to the specified tag length as specified in the spec.
if tag_length != 128 {
return Err(DecryptError::InvalidTagLength);
}
let sep = data.len() - (tag_length / 8);
let tag = &data[sep..];
// The actual ciphertext, called plaintext because it is reused in place.
let mut plaintext = data[..sep].to_vec();
// Fixed 96-bit or 128-bit nonce
match iv.len() {
12 => decrypt_aes_gcm_gen::<U12>(
key,
tag.into(),
&iv,
length,
additional_data,
&mut plaintext,
)?,
16 => decrypt_aes_gcm_gen::<U16>(
key,
tag.into(),
&iv,
length,
additional_data,
&mut plaintext,
)?,
_ => return Err(DecryptError::InvalidIvLength),
}
Ok(plaintext)
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/ext/crypto/ed25519.rs | ext/crypto/ed25519.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use aws_lc_rs::signature::Ed25519KeyPair;
use aws_lc_rs::signature::KeyPair;
use base64::Engine;
use base64::prelude::BASE64_URL_SAFE_NO_PAD;
use deno_core::ToJsBuffer;
use deno_core::op2;
use elliptic_curve::pkcs8::PrivateKeyInfo;
use rand::RngCore;
use rand::rngs::OsRng;
use spki::der::Decode;
use spki::der::Encode;
use spki::der::asn1::BitString;
#[derive(Debug, thiserror::Error, deno_error::JsError)]
pub enum Ed25519Error {
#[class("DOMExceptionOperationError")]
#[error("Failed to export key")]
FailedExport,
#[class(generic)]
#[error(transparent)]
Der(#[from] rsa::pkcs1::der::Error),
#[class(generic)]
#[error(transparent)]
KeyRejected(#[from] aws_lc_rs::error::KeyRejected),
}
#[op2(fast)]
pub fn op_crypto_generate_ed25519_keypair(
#[buffer] pkey: &mut [u8],
#[buffer] pubkey: &mut [u8],
) -> bool {
let mut rng = OsRng;
rng.fill_bytes(pkey);
let pair = match Ed25519KeyPair::from_seed_unchecked(pkey) {
Ok(p) => p,
Err(_) => return false,
};
pubkey.copy_from_slice(pair.public_key().as_ref());
true
}
#[op2(fast)]
pub fn op_crypto_sign_ed25519(
#[buffer] key: &[u8],
#[buffer] data: &[u8],
#[buffer] signature: &mut [u8],
) -> bool {
let pair = match Ed25519KeyPair::from_seed_unchecked(key) {
Ok(p) => p,
Err(_) => return false,
};
signature.copy_from_slice(pair.sign(data).as_ref());
true
}
#[op2(fast)]
pub fn op_crypto_verify_ed25519(
#[buffer] pubkey: &[u8],
#[buffer] data: &[u8],
#[buffer] signature: &[u8],
) -> bool {
aws_lc_rs::signature::UnparsedPublicKey::new(
&aws_lc_rs::signature::ED25519,
pubkey,
)
.verify(data, signature)
.is_ok()
}
// id-Ed25519 OBJECT IDENTIFIER ::= { 1 3 101 112 }
pub const ED25519_OID: const_oid::ObjectIdentifier =
const_oid::ObjectIdentifier::new_unwrap("1.3.101.112");
#[op2(fast)]
pub fn op_crypto_import_spki_ed25519(
#[buffer] key_data: &[u8],
#[buffer] out: &mut [u8],
) -> bool {
// 2-3.
let pk_info = match spki::SubjectPublicKeyInfoRef::try_from(key_data) {
Ok(pk_info) => pk_info,
Err(_) => return false,
};
// 4.
let alg = pk_info.algorithm.oid;
if alg != ED25519_OID {
return false;
}
// 5.
if pk_info.algorithm.parameters.is_some() {
return false;
}
out.copy_from_slice(pk_info.subject_public_key.raw_bytes());
true
}
#[op2(fast)]
pub fn op_crypto_import_pkcs8_ed25519(
#[buffer] key_data: &[u8],
#[buffer] out: &mut [u8],
) -> bool {
// 2-3.
// This should probably use OneAsymmetricKey instead
let pk_info = match PrivateKeyInfo::from_der(key_data) {
Ok(pk_info) => pk_info,
Err(_) => return false,
};
// 4.
let alg = pk_info.algorithm.oid;
if alg != ED25519_OID {
return false;
}
// 5.
if pk_info.algorithm.parameters.is_some() {
return false;
}
// 6.
// CurvePrivateKey ::= OCTET STRING
if pk_info.private_key.len() != 34 {
return false;
}
out.copy_from_slice(&pk_info.private_key[2..]);
true
}
#[op2]
#[serde]
pub fn op_crypto_export_spki_ed25519(
#[buffer] pubkey: &[u8],
) -> Result<ToJsBuffer, Ed25519Error> {
let key_info = spki::SubjectPublicKeyInfo {
algorithm: spki::AlgorithmIdentifierOwned {
// id-Ed25519
oid: ED25519_OID,
parameters: None,
},
subject_public_key: BitString::from_bytes(pubkey)?,
};
Ok(
key_info
.to_der()
.map_err(|_| Ed25519Error::FailedExport)?
.into(),
)
}
#[op2]
#[serde]
pub fn op_crypto_export_pkcs8_ed25519(
#[buffer] pkey: &[u8],
) -> Result<ToJsBuffer, Ed25519Error> {
use rsa::pkcs1::der::Encode;
// This should probably use OneAsymmetricKey instead
let pk_info = rsa::pkcs8::PrivateKeyInfo {
public_key: None,
algorithm: rsa::pkcs8::AlgorithmIdentifierRef {
// id-Ed25519
oid: ED25519_OID,
parameters: None,
},
private_key: pkey, // OCTET STRING
};
let mut buf = Vec::new();
pk_info.encode_to_vec(&mut buf)?;
Ok(buf.into())
}
// 'x' from Section 2 of RFC 8037
// https://www.rfc-editor.org/rfc/rfc8037#section-2
#[op2]
#[string]
pub fn op_crypto_jwk_x_ed25519(
#[buffer] pkey: &[u8],
) -> Result<String, Ed25519Error> {
let pair = Ed25519KeyPair::from_seed_unchecked(pkey)?;
Ok(BASE64_URL_SAFE_NO_PAD.encode(pair.public_key().as_ref()))
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/ext/crypto/shared.rs | ext/crypto/shared.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::borrow::Cow;
use deno_core::JsBuffer;
use deno_core::ToJsBuffer;
use elliptic_curve::sec1::ToEncodedPoint;
use p256::pkcs8::DecodePrivateKey;
use rsa::RsaPrivateKey;
use rsa::pkcs1::DecodeRsaPrivateKey;
use rsa::pkcs1::EncodeRsaPublicKey;
use serde::Deserialize;
use serde::Serialize;
pub const RSA_ENCRYPTION_OID: const_oid::ObjectIdentifier =
const_oid::ObjectIdentifier::new_unwrap("1.2.840.113549.1.1.1");
pub const ID_SECP256R1_OID: const_oid::ObjectIdentifier =
const_oid::ObjectIdentifier::new_unwrap("1.2.840.10045.3.1.7");
pub const ID_SECP384R1_OID: const_oid::ObjectIdentifier =
const_oid::ObjectIdentifier::new_unwrap("1.3.132.0.34");
pub const ID_SECP521R1_OID: const_oid::ObjectIdentifier =
const_oid::ObjectIdentifier::new_unwrap("1.3.132.0.35");
#[derive(Serialize, Deserialize, Copy, Clone, Eq, PartialEq)]
pub enum ShaHash {
#[serde(rename = "SHA-1")]
Sha1,
#[serde(rename = "SHA-256")]
Sha256,
#[serde(rename = "SHA-384")]
Sha384,
#[serde(rename = "SHA-512")]
Sha512,
}
#[derive(Serialize, Deserialize, Copy, Clone, Eq, PartialEq)]
pub enum EcNamedCurve {
#[serde(rename = "P-256")]
P256,
#[serde(rename = "P-384")]
P384,
#[serde(rename = "P-521")]
P521,
}
#[derive(Deserialize)]
#[serde(rename_all = "lowercase", tag = "type", content = "data")]
pub enum V8RawKeyData {
Secret(JsBuffer),
Private(JsBuffer),
Public(JsBuffer),
}
#[derive(Serialize)]
#[serde(rename_all = "lowercase", tag = "type", content = "data")]
pub enum RustRawKeyData {
Secret(ToJsBuffer),
Private(ToJsBuffer),
Public(ToJsBuffer),
}
#[derive(Debug, thiserror::Error, deno_error::JsError)]
pub enum SharedError {
#[class(type)]
#[error("expected valid private key")]
ExpectedValidPrivateKey,
#[class(type)]
#[error("expected valid public key")]
ExpectedValidPublicKey,
#[class(type)]
#[error("expected valid private EC key")]
ExpectedValidPrivateECKey,
#[class(type)]
#[error("expected valid public EC key")]
ExpectedValidPublicECKey,
#[class(type)]
#[error("expected private key")]
ExpectedPrivateKey,
#[class(type)]
#[error("expected public key")]
ExpectedPublicKey,
#[class(type)]
#[error("expected secret key")]
ExpectedSecretKey,
#[class("DOMExceptionOperationError")]
#[error("failed to decode private key")]
FailedDecodePrivateKey,
#[class("DOMExceptionOperationError")]
#[error("failed to decode public key")]
FailedDecodePublicKey,
#[class("DOMExceptionNotSupportedError")]
#[error("unsupported format")]
UnsupportedFormat,
}
impl V8RawKeyData {
pub fn as_rsa_public_key(&self) -> Result<Cow<'_, [u8]>, SharedError> {
match self {
V8RawKeyData::Public(data) => Ok(Cow::Borrowed(data)),
V8RawKeyData::Private(data) => {
let private_key = RsaPrivateKey::from_pkcs1_der(data)
.map_err(|_| SharedError::ExpectedValidPrivateKey)?;
let public_key_doc = private_key
.to_public_key()
.to_pkcs1_der()
.map_err(|_| SharedError::ExpectedValidPublicKey)?;
Ok(Cow::Owned(public_key_doc.as_bytes().into()))
}
_ => Err(SharedError::ExpectedPublicKey),
}
}
pub fn as_rsa_private_key(&self) -> Result<&[u8], SharedError> {
match self {
V8RawKeyData::Private(data) => Ok(data),
_ => Err(SharedError::ExpectedPrivateKey),
}
}
pub fn as_secret_key(&self) -> Result<&[u8], SharedError> {
match self {
V8RawKeyData::Secret(data) => Ok(data),
_ => Err(SharedError::ExpectedSecretKey),
}
}
pub fn as_ec_public_key_p256(
&self,
) -> Result<p256::EncodedPoint, SharedError> {
match self {
V8RawKeyData::Public(data) => p256::PublicKey::from_sec1_bytes(data)
.map(|p| p.to_encoded_point(false))
.map_err(|_| SharedError::ExpectedValidPublicECKey),
V8RawKeyData::Private(data) => {
let signing_key = p256::SecretKey::from_pkcs8_der(data)
.map_err(|_| SharedError::ExpectedValidPrivateECKey)?;
Ok(signing_key.public_key().to_encoded_point(false))
}
// Should never reach here.
V8RawKeyData::Secret(_) => unreachable!(),
}
}
pub fn as_ec_public_key_p384(
&self,
) -> Result<p384::EncodedPoint, SharedError> {
match self {
V8RawKeyData::Public(data) => p384::PublicKey::from_sec1_bytes(data)
.map(|p| p.to_encoded_point(false))
.map_err(|_| SharedError::ExpectedValidPublicECKey),
V8RawKeyData::Private(data) => {
let signing_key = p384::SecretKey::from_pkcs8_der(data)
.map_err(|_| SharedError::ExpectedValidPrivateECKey)?;
Ok(signing_key.public_key().to_encoded_point(false))
}
// Should never reach here.
V8RawKeyData::Secret(_) => unreachable!(),
}
}
pub fn as_ec_public_key_p521(
&self,
) -> Result<p521::EncodedPoint, SharedError> {
match self {
V8RawKeyData::Public(data) => {
// public_key is a serialized EncodedPoint
p521::EncodedPoint::from_bytes(data)
.map_err(|_| SharedError::ExpectedValidPublicECKey)
}
V8RawKeyData::Private(data) => {
let signing_key = p521::SecretKey::from_pkcs8_der(data)
.map_err(|_| SharedError::ExpectedValidPrivateECKey)?;
Ok(signing_key.public_key().to_encoded_point(false))
}
// Should never reach here.
V8RawKeyData::Secret(_) => unreachable!(),
}
}
pub fn as_ec_private_key(&self) -> Result<&[u8], SharedError> {
match self {
V8RawKeyData::Private(data) => Ok(data),
_ => Err(SharedError::ExpectedPrivateKey),
}
}
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/ext/crypto/import_key.rs | ext/crypto/import_key.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use base64::Engine;
use deno_core::JsBuffer;
use deno_core::ToJsBuffer;
use deno_core::op2;
use elliptic_curve::pkcs8::PrivateKeyInfo;
use p256::pkcs8::EncodePrivateKey;
use rsa::pkcs1::UintRef;
use rsa::pkcs8::der::Encode;
use serde::Deserialize;
use serde::Serialize;
use spki::der::Decode;
use crate::shared::*;
#[derive(Debug, thiserror::Error, deno_error::JsError)]
#[class("DOMExceptionDataError")]
pub enum ImportKeyError {
#[class(inherit)]
#[error(transparent)]
General(
#[from]
#[inherit]
SharedError,
),
#[error("invalid modulus")]
InvalidModulus,
#[error("invalid public exponent")]
InvalidPublicExponent,
#[error("invalid private exponent")]
InvalidPrivateExponent,
#[error("invalid first prime factor")]
InvalidFirstPrimeFactor,
#[error("invalid second prime factor")]
InvalidSecondPrimeFactor,
#[error("invalid first CRT exponent")]
InvalidFirstCRTExponent,
#[error("invalid second CRT exponent")]
InvalidSecondCRTExponent,
#[error("invalid CRT coefficient")]
InvalidCRTCoefficient,
#[error("invalid b64 coordinate")]
InvalidB64Coordinate,
#[error("invalid RSA public key")]
InvalidRSAPublicKey,
#[error("invalid RSA private key")]
InvalidRSAPrivateKey,
#[error("unsupported algorithm")]
UnsupportedAlgorithm,
#[error("public key is invalid (too long)")]
PublicKeyTooLong,
#[error("private key is invalid (too long)")]
PrivateKeyTooLong,
#[error("invalid P-256 elliptic curve point")]
InvalidP256ECPoint,
#[error("invalid P-384 elliptic curve point")]
InvalidP384ECPoint,
#[error("invalid P-521 elliptic curve point")]
InvalidP521ECPoint,
#[error("invalid P-256 elliptic curve SPKI data")]
InvalidP256ECSPKIData,
#[error("invalid P-384 elliptic curve SPKI data")]
InvalidP384ECSPKIData,
#[error("invalid P-521 elliptic curve SPKI data")]
InvalidP521ECSPKIData,
#[error("curve mismatch")]
CurveMismatch,
#[error("Unsupported named curve")]
UnsupportedNamedCurve,
#[error("invalid key data")]
InvalidKeyData,
#[error("invalid JWK private key")]
InvalidJWKPrivateKey,
#[error(transparent)]
EllipticCurve(#[from] elliptic_curve::Error),
#[error("expected valid PKCS#8 data")]
ExpectedValidPkcs8Data,
#[error("malformed parameters")]
MalformedParameters,
#[error(transparent)]
Spki(#[from] spki::Error),
#[error(transparent)]
Der(#[from] rsa::pkcs1::der::Error),
}
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
pub enum KeyData {
Spki(JsBuffer),
Pkcs8(JsBuffer),
Raw(JsBuffer),
JwkSecret {
k: String,
},
JwkPublicRsa {
n: String,
e: String,
},
JwkPrivateRsa {
n: String,
e: String,
d: String,
p: String,
q: String,
dp: String,
dq: String,
qi: String,
},
JwkPublicEc {
x: String,
y: String,
},
JwkPrivateEc {
#[allow(dead_code)]
x: String,
#[allow(dead_code)]
y: String,
d: String,
},
}
#[derive(Deserialize)]
#[serde(rename_all = "camelCase", tag = "algorithm")]
pub enum ImportKeyOptions {
#[serde(rename = "RSASSA-PKCS1-v1_5")]
RsassaPkcs1v15 {},
#[serde(rename = "RSA-PSS")]
RsaPss {},
#[serde(rename = "RSA-OAEP")]
RsaOaep {},
#[serde(rename = "ECDSA", rename_all = "camelCase")]
Ecdsa { named_curve: EcNamedCurve },
#[serde(rename = "ECDH", rename_all = "camelCase")]
Ecdh { named_curve: EcNamedCurve },
#[serde(rename = "AES", rename_all = "camelCase")]
Aes {},
#[serde(rename = "HMAC", rename_all = "camelCase")]
Hmac {},
}
#[derive(Serialize)]
#[serde(untagged)]
pub enum ImportKeyResult {
#[serde(rename_all = "camelCase")]
Rsa {
raw_data: RustRawKeyData,
modulus_length: usize,
public_exponent: ToJsBuffer,
},
#[serde(rename_all = "camelCase")]
Ec { raw_data: RustRawKeyData },
#[serde(rename_all = "camelCase")]
#[allow(dead_code)]
Aes { raw_data: RustRawKeyData },
#[serde(rename_all = "camelCase")]
Hmac { raw_data: RustRawKeyData },
}
#[op2]
#[serde]
pub fn op_crypto_import_key(
#[serde] opts: ImportKeyOptions,
#[serde] key_data: KeyData,
) -> Result<ImportKeyResult, ImportKeyError> {
match opts {
ImportKeyOptions::RsassaPkcs1v15 {} => import_key_rsassa(key_data),
ImportKeyOptions::RsaPss {} => import_key_rsapss(key_data),
ImportKeyOptions::RsaOaep {} => import_key_rsaoaep(key_data),
ImportKeyOptions::Ecdsa { named_curve }
| ImportKeyOptions::Ecdh { named_curve } => {
import_key_ec(key_data, named_curve)
}
ImportKeyOptions::Aes {} => import_key_aes(key_data),
ImportKeyOptions::Hmac {} => import_key_hmac(key_data),
}
}
const BASE64_URL_SAFE_FORGIVING:
base64::engine::general_purpose::GeneralPurpose =
base64::engine::general_purpose::GeneralPurpose::new(
&base64::alphabet::URL_SAFE,
base64::engine::general_purpose::GeneralPurposeConfig::new()
.with_decode_allow_trailing_bits(true)
.with_decode_padding_mode(base64::engine::DecodePaddingMode::Indifferent),
);
macro_rules! jwt_b64_int_or_err {
($name:ident, $b64:expr, $err:tt) => {
let bytes = BASE64_URL_SAFE_FORGIVING
.decode($b64)
.map_err(|_| ImportKeyError::$err)?;
let $name = UintRef::new(&bytes).map_err(|_| ImportKeyError::$err)?;
};
}
fn import_key_rsa_jwk(
key_data: KeyData,
) -> Result<ImportKeyResult, ImportKeyError> {
match key_data {
KeyData::JwkPublicRsa { n, e } => {
jwt_b64_int_or_err!(modulus, &n, InvalidModulus);
jwt_b64_int_or_err!(public_exponent, &e, InvalidPublicExponent);
let public_key = rsa::pkcs1::RsaPublicKey {
modulus,
public_exponent,
};
let mut data = Vec::new();
public_key
.encode_to_vec(&mut data)
.map_err(|_| ImportKeyError::InvalidRSAPublicKey)?;
let public_exponent =
public_key.public_exponent.as_bytes().to_vec().into();
let modulus_length = public_key.modulus.as_bytes().len() * 8;
Ok(ImportKeyResult::Rsa {
raw_data: RustRawKeyData::Public(data.into()),
modulus_length,
public_exponent,
})
}
KeyData::JwkPrivateRsa {
n,
e,
d,
p,
q,
dp,
dq,
qi,
} => {
jwt_b64_int_or_err!(modulus, &n, InvalidModulus);
jwt_b64_int_or_err!(public_exponent, &e, InvalidPublicExponent);
jwt_b64_int_or_err!(private_exponent, &d, InvalidPrivateExponent);
jwt_b64_int_or_err!(prime1, &p, InvalidFirstPrimeFactor);
jwt_b64_int_or_err!(prime2, &q, InvalidSecondPrimeFactor);
jwt_b64_int_or_err!(exponent1, &dp, InvalidFirstCRTExponent);
jwt_b64_int_or_err!(exponent2, &dq, InvalidSecondCRTExponent);
jwt_b64_int_or_err!(coefficient, &qi, InvalidCRTCoefficient);
let private_key = rsa::pkcs1::RsaPrivateKey {
modulus,
public_exponent,
private_exponent,
prime1,
prime2,
exponent1,
exponent2,
coefficient,
other_prime_infos: None,
};
let mut data = Vec::new();
private_key
.encode_to_vec(&mut data)
.map_err(|_| ImportKeyError::InvalidRSAPrivateKey)?;
let public_exponent =
private_key.public_exponent.as_bytes().to_vec().into();
let modulus_length = private_key.modulus.as_bytes().len() * 8;
Ok(ImportKeyResult::Rsa {
raw_data: RustRawKeyData::Private(data.into()),
modulus_length,
public_exponent,
})
}
_ => unreachable!(),
}
}
fn import_key_rsassa(
key_data: KeyData,
) -> Result<ImportKeyResult, ImportKeyError> {
match key_data {
KeyData::Spki(data) => {
// 2-3.
let pk_info = spki::SubjectPublicKeyInfoRef::try_from(&*data)?;
// 4-5.
let alg = pk_info.algorithm.oid;
// 6-7. (skipped, only support rsaEncryption for interoperability)
if alg != RSA_ENCRYPTION_OID {
return Err(ImportKeyError::UnsupportedAlgorithm);
}
// 8-9.
let public_key = rsa::pkcs1::RsaPublicKey::from_der(
pk_info.subject_public_key.raw_bytes(),
)?;
let bytes_consumed = public_key.encoded_len()?;
if bytes_consumed
!= rsa::pkcs1::der::Length::new(
pk_info.subject_public_key.raw_bytes().len() as u16,
)
{
return Err(ImportKeyError::PublicKeyTooLong);
}
let data = pk_info.subject_public_key.raw_bytes().to_vec().into();
let public_exponent =
public_key.public_exponent.as_bytes().to_vec().into();
let modulus_length = public_key.modulus.as_bytes().len() * 8;
Ok(ImportKeyResult::Rsa {
raw_data: RustRawKeyData::Public(data),
modulus_length,
public_exponent,
})
}
KeyData::Pkcs8(data) => {
// 2-3.
let pk_info = PrivateKeyInfo::from_der(&data)?;
// 4-5.
let alg = pk_info.algorithm.oid;
// 6-7. (skipped, only support rsaEncryption for interoperability)
if alg != RSA_ENCRYPTION_OID {
return Err(ImportKeyError::UnsupportedAlgorithm);
}
// 8-9.
let private_key =
rsa::pkcs1::RsaPrivateKey::from_der(pk_info.private_key)?;
let bytes_consumed = private_key.encoded_len()?;
if bytes_consumed
!= rsa::pkcs1::der::Length::new(pk_info.private_key.len() as u16)
{
return Err(ImportKeyError::PrivateKeyTooLong);
}
let data = pk_info.private_key.to_vec().into();
let public_exponent =
private_key.public_exponent.as_bytes().to_vec().into();
let modulus_length = private_key.modulus.as_bytes().len() * 8;
Ok(ImportKeyResult::Rsa {
raw_data: RustRawKeyData::Private(data),
modulus_length,
public_exponent,
})
}
KeyData::JwkPublicRsa { .. } | KeyData::JwkPrivateRsa { .. } => {
import_key_rsa_jwk(key_data)
}
_ => Err(SharedError::UnsupportedFormat.into()),
}
}
fn import_key_rsapss(
key_data: KeyData,
) -> Result<ImportKeyResult, ImportKeyError> {
match key_data {
KeyData::Spki(data) => {
// 2-3.
let pk_info = spki::SubjectPublicKeyInfoRef::try_from(&*data)?;
// 4-5.
let alg = pk_info.algorithm.oid;
// 6-7. (skipped, only support rsaEncryption for interoperability)
if alg != RSA_ENCRYPTION_OID {
return Err(ImportKeyError::UnsupportedAlgorithm);
}
// 8-9.
let public_key = rsa::pkcs1::RsaPublicKey::from_der(
pk_info.subject_public_key.raw_bytes(),
)?;
let bytes_consumed = public_key.encoded_len()?;
if bytes_consumed
!= rsa::pkcs1::der::Length::new(
pk_info.subject_public_key.raw_bytes().len() as u16,
)
{
return Err(ImportKeyError::PublicKeyTooLong);
}
let data = pk_info.subject_public_key.raw_bytes().to_vec().into();
let public_exponent =
public_key.public_exponent.as_bytes().to_vec().into();
let modulus_length = public_key.modulus.as_bytes().len() * 8;
Ok(ImportKeyResult::Rsa {
raw_data: RustRawKeyData::Public(data),
modulus_length,
public_exponent,
})
}
KeyData::Pkcs8(data) => {
// 2-3.
let pk_info = PrivateKeyInfo::from_der(&data)?;
// 4-5.
let alg = pk_info.algorithm.oid;
// 6-7. (skipped, only support rsaEncryption for interoperability)
if alg != RSA_ENCRYPTION_OID {
return Err(ImportKeyError::UnsupportedAlgorithm);
}
// 8-9.
let private_key =
rsa::pkcs1::RsaPrivateKey::from_der(pk_info.private_key)?;
let bytes_consumed = private_key.encoded_len()?;
if bytes_consumed
!= rsa::pkcs1::der::Length::new(pk_info.private_key.len() as u16)
{
return Err(ImportKeyError::PrivateKeyTooLong);
}
let data = pk_info.private_key.to_vec().into();
let public_exponent =
private_key.public_exponent.as_bytes().to_vec().into();
let modulus_length = private_key.modulus.as_bytes().len() * 8;
Ok(ImportKeyResult::Rsa {
raw_data: RustRawKeyData::Private(data),
modulus_length,
public_exponent,
})
}
KeyData::JwkPublicRsa { .. } | KeyData::JwkPrivateRsa { .. } => {
import_key_rsa_jwk(key_data)
}
_ => Err(SharedError::UnsupportedFormat.into()),
}
}
/// Imports an RSA-OAEP key from SPKI (public), PKCS#8 (private) or JWK data.
///
/// Structurally identical to `import_key_rsapss`; kept separate to mirror
/// the per-algorithm steps of the WebCrypto import specification.
fn import_key_rsaoaep(
  key_data: KeyData,
) -> Result<ImportKeyResult, ImportKeyError> {
  match key_data {
    KeyData::Spki(data) => {
      // 2-3. Parse the DER-encoded SubjectPublicKeyInfo.
      let pk_info = spki::SubjectPublicKeyInfoRef::try_from(&*data)?;
      // 4-5.
      let alg = pk_info.algorithm.oid;
      // 6-7. (skipped, only support rsaEncryption for interoperability)
      if alg != RSA_ENCRYPTION_OID {
        return Err(ImportKeyError::UnsupportedAlgorithm);
      }
      // 8-9. Decode the PKCS#1 RSAPublicKey held inside the BIT STRING.
      let public_key = rsa::pkcs1::RsaPublicKey::from_der(
        pk_info.subject_public_key.raw_bytes(),
      )?;
      // Reject trailing bytes after the RSAPublicKey structure.
      // NOTE(review): `as u16` truncates for inputs longer than 65535 bytes;
      // confirm oversized keys cannot reach this comparison.
      let bytes_consumed = public_key.encoded_len()?;
      if bytes_consumed
        != rsa::pkcs1::der::Length::new(
          pk_info.subject_public_key.raw_bytes().len() as u16,
        )
      {
        return Err(ImportKeyError::PublicKeyTooLong);
      }
      // Keep the raw PKCS#1 bytes; modulus length is reported in bits.
      let data = pk_info.subject_public_key.raw_bytes().to_vec().into();
      let public_exponent =
        public_key.public_exponent.as_bytes().to_vec().into();
      let modulus_length = public_key.modulus.as_bytes().len() * 8;
      Ok(ImportKeyResult::Rsa {
        raw_data: RustRawKeyData::Public(data),
        modulus_length,
        public_exponent,
      })
    }
    KeyData::Pkcs8(data) => {
      // 2-3. Parse the DER-encoded PrivateKeyInfo.
      let pk_info = PrivateKeyInfo::from_der(&data)?;
      // 4-5.
      let alg = pk_info.algorithm.oid;
      // 6-7. (skipped, only support rsaEncryption for interoperability)
      if alg != RSA_ENCRYPTION_OID {
        return Err(ImportKeyError::UnsupportedAlgorithm);
      }
      // 8-9. Decode the PKCS#1 RSAPrivateKey payload.
      let private_key =
        rsa::pkcs1::RsaPrivateKey::from_der(pk_info.private_key)?;
      // Reject trailing bytes after the RSAPrivateKey structure.
      let bytes_consumed = private_key.encoded_len()?;
      if bytes_consumed
        != rsa::pkcs1::der::Length::new(pk_info.private_key.len() as u16)
      {
        return Err(ImportKeyError::PrivateKeyTooLong);
      }
      let data = pk_info.private_key.to_vec().into();
      let public_exponent =
        private_key.public_exponent.as_bytes().to_vec().into();
      let modulus_length = private_key.modulus.as_bytes().len() * 8;
      Ok(ImportKeyResult::Rsa {
        raw_data: RustRawKeyData::Private(data),
        modulus_length,
        public_exponent,
      })
    }
    // All RSA JWK imports share a single code path.
    KeyData::JwkPublicRsa { .. } | KeyData::JwkPrivateRsa { .. } => {
      import_key_rsa_jwk(key_data)
    }
    _ => Err(SharedError::UnsupportedFormat.into()),
  }
}
/// Decodes a base64url JWK coordinate/scalar into a fixed-size field element.
///
/// The decoded big-endian integer is left-padded with zeros to the curve's
/// field size. Returns `InvalidB64Coordinate` when the value does not
/// decode, or when it is longer than the field size.
fn decode_b64url_to_field_bytes<C: elliptic_curve::Curve>(
  b64: &str,
) -> Result<elliptic_curve::FieldBytes<C>, ImportKeyError> {
  jwt_b64_int_or_err!(val, b64, InvalidB64Coordinate);
  let mut bytes = elliptic_curve::FieldBytes::<C>::default();
  let original_bytes = val.as_bytes();
  // A value wider than the field cannot be a valid coordinate.
  if original_bytes.len() > bytes.len() {
    return Err(ImportKeyError::InvalidB64Coordinate);
  }
  // Write into the tail of the zero-initialized buffer directly instead of
  // building an intermediate padded Vec (saves an allocation + copy).
  let offset = bytes.len() - original_bytes.len();
  bytes[offset..].copy_from_slice(original_bytes);
  Ok(bytes)
}
/// Converts JWK base64url affine coordinates (`x`, `y`) into an
/// uncompressed SEC1 point encoding (`0x04 || X || Y`) for `named_curve`.
fn import_key_ec_jwk_to_point(
  x: String,
  y: String,
  named_curve: EcNamedCurve,
) -> Result<Vec<u8>, ImportKeyError> {
  let point_bytes = match named_curve {
    EcNamedCurve::P256 => {
      // Coordinates are zero-left-padded to the field size before use.
      let x = decode_b64url_to_field_bytes::<p256::NistP256>(&x)?;
      let y = decode_b64url_to_field_bytes::<p256::NistP256>(&y)?;
      // `false` selects the uncompressed point encoding.
      p256::EncodedPoint::from_affine_coordinates(&x, &y, false).to_bytes()
    }
    EcNamedCurve::P384 => {
      let x = decode_b64url_to_field_bytes::<p384::NistP384>(&x)?;
      let y = decode_b64url_to_field_bytes::<p384::NistP384>(&y)?;
      p384::EncodedPoint::from_affine_coordinates(&x, &y, false).to_bytes()
    }
    EcNamedCurve::P521 => {
      let x = decode_b64url_to_field_bytes::<p521::NistP521>(&x)?;
      let y = decode_b64url_to_field_bytes::<p521::NistP521>(&y)?;
      p521::EncodedPoint::from_affine_coordinates(&x, &y, false).to_bytes()
    }
  };
  Ok(point_bytes.to_vec())
}
/// Imports an EC key from JWK data.
///
/// Public keys are stored as an uncompressed SEC1 point; private keys are
/// re-encoded as PKCS#8 DER before being returned to the caller.
fn import_key_ec_jwk(
  key_data: KeyData,
  named_curve: EcNamedCurve,
) -> Result<ImportKeyResult, ImportKeyError> {
  match key_data {
    KeyData::JwkPublicEc { x, y } => {
      let point_bytes = import_key_ec_jwk_to_point(x, y, named_curve)?;
      Ok(ImportKeyResult::Ec {
        raw_data: RustRawKeyData::Public(point_bytes.into()),
      })
    }
    KeyData::JwkPrivateEc { d, .. } => {
      // `d` is the private scalar; decode it and wrap in PKCS#8.
      let pkcs8_der = match named_curve {
        EcNamedCurve::P256 => {
          let d = decode_b64url_to_field_bytes::<p256::NistP256>(&d)?;
          let pk = p256::SecretKey::from_bytes(&d)?;
          pk.to_pkcs8_der()
            .map_err(|_| ImportKeyError::InvalidJWKPrivateKey)?
        }
        EcNamedCurve::P384 => {
          let d = decode_b64url_to_field_bytes::<p384::NistP384>(&d)?;
          let pk = p384::SecretKey::from_bytes(&d)?;
          pk.to_pkcs8_der()
            .map_err(|_| ImportKeyError::InvalidJWKPrivateKey)?
        }
        EcNamedCurve::P521 => {
          let d = decode_b64url_to_field_bytes::<p521::NistP521>(&d)?;
          let pk = p521::SecretKey::from_bytes(&d)?;
          pk.to_pkcs8_der()
            .map_err(|_| ImportKeyError::InvalidJWKPrivateKey)?
        }
      };
      Ok(ImportKeyResult::Ec {
        raw_data: RustRawKeyData::Private(pkcs8_der.as_bytes().to_vec().into()),
      })
    }
    // Callers only route the JWK EC variants here (see `import_key_ec`).
    _ => unreachable!(),
  }
}
/// The `parameters` field of an id-ecPublicKey AlgorithmIdentifier,
/// restricted to the `namedCurve` CHOICE (a bare OBJECT IDENTIFIER).
pub struct ECParametersSpki {
  // OID identifying the named curve (e.g. id-secp256r1).
  pub named_curve_alg: spki::der::asn1::ObjectIdentifier,
}
impl<'a> TryFrom<spki::der::asn1::AnyRef<'a>> for ECParametersSpki {
  type Error = spki::der::Error;
  fn try_from(
    any: spki::der::asn1::AnyRef<'a>,
  ) -> spki::der::Result<ECParametersSpki> {
    // Fails when the ANY value is not an OID (e.g. explicit ECParameters).
    let x = any.try_into()?;
    Ok(Self { named_curve_alg: x })
  }
}
/// Imports an EC key from raw point, PKCS#8, SPKI or JWK data, validating
/// that the key is well-formed and actually belongs to `named_curve`.
fn import_key_ec(
  key_data: KeyData,
  named_curve: EcNamedCurve,
) -> Result<ImportKeyResult, ImportKeyError> {
  match key_data {
    KeyData::Raw(data) => {
      // The point is parsed and validated, ultimately the original data is
      // returned though.
      match named_curve {
        EcNamedCurve::P256 => {
          // 1-2.
          let point = p256::EncodedPoint::from_bytes(&data)
            .map_err(|_| ImportKeyError::InvalidP256ECPoint)?;
          // 3. Reject the point at infinity.
          if point.is_identity() {
            return Err(ImportKeyError::InvalidP256ECPoint);
          }
        }
        EcNamedCurve::P384 => {
          // 1-2.
          let point = p384::EncodedPoint::from_bytes(&data)
            .map_err(|_| ImportKeyError::InvalidP384ECPoint)?;
          // 3.
          if point.is_identity() {
            return Err(ImportKeyError::InvalidP384ECPoint);
          }
        }
        EcNamedCurve::P521 => {
          // 1-2.
          let point = p521::EncodedPoint::from_bytes(&data)
            .map_err(|_| ImportKeyError::InvalidP521ECPoint)?;
          // 3.
          if point.is_identity() {
            return Err(ImportKeyError::InvalidP521ECPoint);
          }
        }
      };
      Ok(ImportKeyResult::Ec {
        raw_data: RustRawKeyData::Public(data.to_vec().into()),
      })
    }
    KeyData::Pkcs8(data) => {
      let pk = PrivateKeyInfo::from_der(data.as_ref())
        .map_err(|_| ImportKeyError::ExpectedValidPkcs8Data)?;
      // Fix: previously this used `.unwrap()`, which panicked when the
      // (caller-supplied) `parameters` field was present but not a valid
      // OID; malformed DER now yields `MalformedParameters` instead.
      let named_curve_alg = pk
        .algorithm
        .parameters
        .ok_or(ImportKeyError::MalformedParameters)?
        .try_into()
        .map_err(|_| ImportKeyError::MalformedParameters)?;
      let pk_named_curve = match named_curve_alg {
        // id-secp256r1
        ID_SECP256R1_OID => Some(EcNamedCurve::P256),
        // id-secp384r1
        ID_SECP384R1_OID => Some(EcNamedCurve::P384),
        // id-secp521r1
        ID_SECP521R1_OID => Some(EcNamedCurve::P521),
        _ => None,
      };
      // The embedded curve must match the algorithm requested by the caller.
      if pk_named_curve != Some(named_curve) {
        return Err(ImportKeyError::CurveMismatch);
      }
      Ok(ImportKeyResult::Ec {
        raw_data: RustRawKeyData::Private(data.to_vec().into()),
      })
    }
    KeyData::Spki(data) => {
      // 2-3.
      let pk_info = spki::SubjectPublicKeyInfoRef::try_from(&*data)?;
      // 4.
      let alg = pk_info.algorithm.oid;
      // id-ecPublicKey
      if alg != elliptic_curve::ALGORITHM_OID {
        return Err(ImportKeyError::UnsupportedAlgorithm);
      }
      // 5-7. Parse the namedCurve OID out of the parameters field.
      let params = ECParametersSpki::try_from(
        pk_info
          .algorithm
          .parameters
          .ok_or(ImportKeyError::MalformedParameters)?,
      )
      .map_err(|_| ImportKeyError::MalformedParameters)?;
      // 8-9.
      let named_curve_alg = params.named_curve_alg;
      let pk_named_curve = match named_curve_alg {
        // id-secp256r1
        ID_SECP256R1_OID => Some(EcNamedCurve::P256),
        // id-secp384r1
        ID_SECP384R1_OID => Some(EcNamedCurve::P384),
        // id-secp521r1
        ID_SECP521R1_OID => Some(EcNamedCurve::P521),
        _ => None,
      };
      // 10. Parse the point (on the *requested* curve) to validate it and
      // to measure how many bytes a well-formed encoding consumes.
      let encoded_key;
      if let Some(pk_named_curve) = pk_named_curve {
        let pk = pk_info.subject_public_key;
        encoded_key = pk.raw_bytes().to_vec();
        let bytes_consumed = match named_curve {
          EcNamedCurve::P256 => {
            let point = p256::EncodedPoint::from_bytes(&*encoded_key)
              .map_err(|_| ImportKeyError::InvalidP256ECSPKIData)?;
            if point.is_identity() {
              return Err(ImportKeyError::InvalidP256ECPoint);
            }
            point.as_bytes().len()
          }
          EcNamedCurve::P384 => {
            let point = p384::EncodedPoint::from_bytes(&*encoded_key)
              .map_err(|_| ImportKeyError::InvalidP384ECSPKIData)?;
            if point.is_identity() {
              return Err(ImportKeyError::InvalidP384ECPoint);
            }
            point.as_bytes().len()
          }
          EcNamedCurve::P521 => {
            let point = p521::EncodedPoint::from_bytes(&*encoded_key)
              .map_err(|_| ImportKeyError::InvalidP521ECSPKIData)?;
            if point.is_identity() {
              return Err(ImportKeyError::InvalidP521ECPoint);
            }
            point.as_bytes().len()
          }
        };
        // Reject trailing bytes after the point encoding.
        if bytes_consumed != pk_info.subject_public_key.raw_bytes().len() {
          return Err(ImportKeyError::PublicKeyTooLong);
        }
        // 11.
        if named_curve != pk_named_curve {
          return Err(ImportKeyError::CurveMismatch);
        }
      } else {
        return Err(ImportKeyError::UnsupportedNamedCurve);
      }
      Ok(ImportKeyResult::Ec {
        raw_data: RustRawKeyData::Public(encoded_key.into()),
      })
    }
    KeyData::JwkPublicEc { .. } | KeyData::JwkPrivateEc { .. } => {
      import_key_ec_jwk(key_data, named_curve)
    }
    _ => Err(SharedError::UnsupportedFormat.into()),
  }
}
/// Imports an AES key from JWK `"oct"` (secret) data.
///
/// NOTE(review): the result is wrapped in `ImportKeyResult::Hmac` — the
/// variant appears to be shared for all raw secret-key material; confirm
/// against the consumers of `ImportKeyResult` before renaming.
fn import_key_aes(
  key_data: KeyData,
) -> Result<ImportKeyResult, ImportKeyError> {
  Ok(match key_data {
    KeyData::JwkSecret { k } => {
      // Forgiving decoder tolerates padded and unpadded base64url.
      let data = BASE64_URL_SAFE_FORGIVING
        .decode(k)
        .map_err(|_| ImportKeyError::InvalidKeyData)?;
      ImportKeyResult::Hmac {
        raw_data: RustRawKeyData::Secret(data.into()),
      }
    }
    _ => return Err(SharedError::UnsupportedFormat.into()),
  })
}
/// Imports an HMAC key from JWK `"oct"` (secret) data.
///
/// Any other key-data format is rejected with `UnsupportedFormat`.
fn import_key_hmac(
  key_data: KeyData,
) -> Result<ImportKeyResult, ImportKeyError> {
  match key_data {
    KeyData::JwkSecret { k } => {
      // Forgiving decoder tolerates padded and unpadded base64url.
      let raw = BASE64_URL_SAFE_FORGIVING
        .decode(k)
        .map_err(|_| ImportKeyError::InvalidKeyData)?;
      Ok(ImportKeyResult::Hmac {
        raw_data: RustRawKeyData::Secret(raw.into()),
      })
    }
    _ => Err(SharedError::UnsupportedFormat.into()),
  }
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/ext/crypto/generate_key.rs | ext/crypto/generate_key.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use aws_lc_rs::rand::SecureRandom;
use aws_lc_rs::signature::EcdsaKeyPair;
use deno_core::ToJsBuffer;
use deno_core::op2;
use deno_core::unsync::spawn_blocking;
use elliptic_curve::rand_core::OsRng;
use num_traits::FromPrimitive;
use once_cell::sync::Lazy;
use rsa::BigUint;
use rsa::RsaPrivateKey;
use rsa::pkcs1::EncodeRsaPrivateKey;
use serde::Deserialize;
use crate::shared::*;
/// Errors surfaced by `op_crypto_generate_key`.
///
/// Every variant maps to a DOM `OperationError` on the JS side except
/// `General`, which inherits the error class of the wrapped `SharedError`.
#[derive(Debug, thiserror::Error, deno_error::JsError)]
#[class("DOMExceptionOperationError")]
pub enum GenerateKeyError {
  #[class(inherit)]
  #[error(transparent)]
  General(
    #[from]
    #[inherit]
    SharedError,
  ),
  #[error("Bad public exponent")]
  BadPublicExponent,
  #[error("Invalid HMAC key length")]
  InvalidHMACKeyLength,
  #[error("Failed to serialize RSA key")]
  FailedRSAKeySerialization,
  #[error("Invalid AES key length")]
  InvalidAESKeyLength,
  #[error("Failed to generate RSA key")]
  FailedRSAKeyGeneration,
  #[error("Failed to generate EC key")]
  FailedECKeyGeneration,
  #[error("Failed to generate key")]
  FailedKeyGeneration,
}
// Allowlist for RSA public exponents.
// Only e = 3 and e = 65537 (F4) are accepted; enforced in `generate_key_rsa`.
static PUB_EXPONENT_1: Lazy<BigUint> =
  Lazy::new(|| BigUint::from_u64(3).unwrap());
static PUB_EXPONENT_2: Lazy<BigUint> =
  Lazy::new(|| BigUint::from_u64(65537).unwrap());
/// Algorithm-tagged options for `op_crypto_generate_key`, deserialized
/// from the JS object (`algorithm` field selects the variant).
#[derive(Deserialize)]
#[serde(rename_all = "camelCase", tag = "algorithm")]
pub enum GenerateKeyOptions {
  #[serde(rename = "RSA", rename_all = "camelCase")]
  Rsa {
    // Modulus size in bits.
    modulus_length: u32,
    // Big-endian public exponent bytes.
    #[serde(with = "serde_bytes")]
    public_exponent: Vec<u8>,
  },
  #[serde(rename = "EC", rename_all = "camelCase")]
  Ec { named_curve: EcNamedCurve },
  #[serde(rename = "AES", rename_all = "camelCase")]
  Aes { length: usize },
  #[serde(rename = "HMAC", rename_all = "camelCase")]
  Hmac {
    hash: ShaHash,
    // Requested key length in bits; defaults to the hash block size.
    length: Option<usize>,
  },
}
/// Generates key material for the requested algorithm.
///
/// The work runs on the blocking thread pool because key generation
/// (RSA in particular) is CPU-intensive.
#[op2(async)]
#[serde]
pub async fn op_crypto_generate_key(
  #[serde] opts: GenerateKeyOptions,
) -> Result<ToJsBuffer, GenerateKeyError> {
  let fun = || match opts {
    GenerateKeyOptions::Rsa {
      modulus_length,
      public_exponent,
    } => generate_key_rsa(modulus_length, &public_exponent),
    GenerateKeyOptions::Ec { named_curve } => generate_key_ec(named_curve),
    GenerateKeyOptions::Aes { length } => generate_key_aes(length),
    GenerateKeyOptions::Hmac { hash, length } => {
      generate_key_hmac(hash, length)
    }
  };
  // `unwrap` propagates a panic from the blocking task (a bug, not an
  // expected failure); generation errors flow through the `?`.
  let buf = spawn_blocking(fun).await.unwrap()?;
  Ok(buf.into())
}
/// Generates an RSA private key and returns it as PKCS#1 DER bytes.
///
/// The public exponent must be 3 or 65537 (see the allowlist statics);
/// anything else is rejected with `BadPublicExponent`.
fn generate_key_rsa(
  modulus_length: u32,
  public_exponent: &[u8],
) -> Result<Vec<u8>, GenerateKeyError> {
  let exponent = BigUint::from_bytes_be(public_exponent);
  if exponent != *PUB_EXPONENT_1 && exponent != *PUB_EXPONENT_2 {
    return Err(GenerateKeyError::BadPublicExponent);
  }
  let mut rng = OsRng;
  let private_key =
    RsaPrivateKey::new_with_exp(&mut rng, modulus_length as usize, &exponent)
      .map_err(|_| GenerateKeyError::FailedRSAKeyGeneration)?;
  let private_key = private_key
    .to_pkcs1_der()
    .map_err(|_| GenerateKeyError::FailedRSAKeySerialization)?;
  Ok(private_key.as_bytes().to_vec())
}
/// Generates a P-521 private scalar with the pure-Rust `p521` crate and
/// returns its raw big-endian bytes (P-521 is handled separately from the
/// aws-lc-rs path used for P-256/P-384 in `generate_key_ec`).
fn generate_key_ec_p521() -> Vec<u8> {
  let mut rng = OsRng;
  let key = p521::SecretKey::random(&mut rng);
  key.to_nonzero_scalar().to_bytes().to_vec()
}
/// Generates an EC key pair for the given curve.
///
/// P-256/P-384 use aws-lc-rs and return PKCS#8 DER; P-521 falls back to
/// `generate_key_ec_p521`, which returns the raw private scalar instead.
/// NOTE(review): the two paths return different encodings — presumably the
/// JS layer distinguishes by curve; confirm at the call site.
fn generate_key_ec(
  named_curve: EcNamedCurve,
) -> Result<Vec<u8>, GenerateKeyError> {
  let curve = match named_curve {
    EcNamedCurve::P256 => {
      &aws_lc_rs::signature::ECDSA_P256_SHA256_FIXED_SIGNING
    }
    EcNamedCurve::P384 => {
      &aws_lc_rs::signature::ECDSA_P384_SHA384_FIXED_SIGNING
    }
    EcNamedCurve::P521 => return Ok(generate_key_ec_p521()),
  };
  let rng = aws_lc_rs::rand::SystemRandom::new();
  let pkcs8 = EcdsaKeyPair::generate_pkcs8(curve, &rng)
    .map_err(|_| GenerateKeyError::FailedECKeyGeneration)?;
  Ok(pkcs8.as_ref().to_vec())
}
/// Generates `length / 8` random bytes for an AES key.
///
/// `length` is in bits and must be a multiple of 8, at most 256.
/// NOTE(review): a length of 0 passes this check and yields an empty key;
/// presumably the JS layer restricts lengths to 128/192/256 — confirm.
fn generate_key_aes(length: usize) -> Result<Vec<u8>, GenerateKeyError> {
  if !length.is_multiple_of(8) || length > 256 {
    return Err(GenerateKeyError::InvalidAESKeyLength);
  }
  let mut key = vec![0u8; length / 8];
  let rng = aws_lc_rs::rand::SystemRandom::new();
  rng
    .fill(&mut key)
    .map_err(|_| GenerateKeyError::FailedKeyGeneration)?;
  Ok(key)
}
/// Generates random HMAC key material.
///
/// `length` is in bits; when omitted, the key defaults to the block
/// length of the chosen hash. Lengths that are not byte-aligned or that
/// exceed the maximum digest block size are rejected.
fn generate_key_hmac(
  hash: ShaHash,
  length: Option<usize>,
) -> Result<Vec<u8>, GenerateKeyError> {
  let hash = match hash {
    ShaHash::Sha1 => &aws_lc_rs::hmac::HMAC_SHA1_FOR_LEGACY_USE_ONLY,
    ShaHash::Sha256 => &aws_lc_rs::hmac::HMAC_SHA256,
    ShaHash::Sha384 => &aws_lc_rs::hmac::HMAC_SHA384,
    ShaHash::Sha512 => &aws_lc_rs::hmac::HMAC_SHA512,
  };
  let length = if let Some(length) = length {
    // Bits must be byte-aligned.
    if length % 8 != 0 {
      return Err(GenerateKeyError::InvalidHMACKeyLength);
    }
    // Convert to bytes and bound by the maximum block length.
    let length = length / 8;
    if length > aws_lc_rs::digest::MAX_BLOCK_LEN {
      return Err(GenerateKeyError::InvalidHMACKeyLength);
    }
    length
  } else {
    // Default: one hash block worth of key material.
    hash.digest_algorithm().block_len()
  };
  let rng = aws_lc_rs::rand::SystemRandom::new();
  let mut key = vec![0u8; length];
  rng
    .fill(&mut key)
    .map_err(|_| GenerateKeyError::FailedKeyGeneration)?;
  Ok(key)
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/ext/crypto/encrypt.rs | ext/crypto/encrypt.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use aes::cipher::BlockEncryptMut;
use aes::cipher::KeyIvInit;
use aes::cipher::StreamCipher;
use aes::cipher::block_padding::Pkcs7;
use aes_gcm::AeadInPlace;
use aes_gcm::KeyInit;
use aes_gcm::Nonce;
use aes_gcm::aead::generic_array::ArrayLength;
use aes_gcm::aead::generic_array::typenum::U12;
use aes_gcm::aead::generic_array::typenum::U16;
use aes_gcm::aes::Aes128;
use aes_gcm::aes::Aes192;
use aes_gcm::aes::Aes256;
use ctr::Ctr32BE;
use ctr::Ctr64BE;
use ctr::Ctr128BE;
use deno_core::JsBuffer;
use deno_core::ToJsBuffer;
use deno_core::op2;
use deno_core::unsync::spawn_blocking;
use rand::rngs::OsRng;
use rsa::pkcs1::DecodeRsaPublicKey;
use serde::Deserialize;
use sha1::Sha1;
use sha2::Sha256;
use sha2::Sha384;
use sha2::Sha512;
use crate::shared::*;
/// Options for `op_crypto_encrypt`, deserialized from JS.
///
/// `algorithm` is flattened, so the JS object carries the key and the
/// algorithm-specific fields side by side.
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct EncryptOptions {
  // Raw key material handed over from V8.
  key: V8RawKeyData,
  #[serde(flatten)]
  algorithm: EncryptAlgorithm,
}
/// Per-algorithm encryption parameters (tagged by the `algorithm` field).
/// All `length` fields are key sizes in bits.
#[derive(Deserialize)]
#[serde(rename_all = "camelCase", tag = "algorithm")]
pub enum EncryptAlgorithm {
  #[serde(rename = "RSA-OAEP")]
  RsaOaep {
    hash: ShaHash,
    // OAEP label bytes (may be empty).
    #[serde(with = "serde_bytes")]
    label: Vec<u8>,
  },
  #[serde(rename = "AES-CBC", rename_all = "camelCase")]
  AesCbc {
    #[serde(with = "serde_bytes")]
    iv: Vec<u8>,
    length: usize,
  },
  #[serde(rename = "AES-GCM", rename_all = "camelCase")]
  AesGcm {
    #[serde(with = "serde_bytes")]
    iv: Vec<u8>,
    #[serde(with = "serde_bytes")]
    additional_data: Option<Vec<u8>>,
    length: usize,
    // Authentication tag length in bits.
    tag_length: usize,
  },
  #[serde(rename = "AES-CTR", rename_all = "camelCase")]
  AesCtr {
    #[serde(with = "serde_bytes")]
    counter: Vec<u8>,
    // Counter width in bits (32/64/128 supported).
    ctr_length: usize,
    key_length: usize,
  },
}
/// Errors surfaced by `op_crypto_encrypt`; the `#[class]` attributes select
/// the JS error class each variant maps to.
#[derive(Debug, thiserror::Error, deno_error::JsError)]
pub enum EncryptError {
  #[class(inherit)]
  #[error(transparent)]
  General(
    #[from]
    #[inherit]
    SharedError,
  ),
  #[class(type)]
  #[error("invalid length")]
  InvalidLength,
  #[class("DOMExceptionOperationError")]
  #[error("invalid key or iv")]
  InvalidKeyOrIv,
  #[class(type)]
  #[error("iv length not equal to 12 or 16")]
  InvalidIvLength,
  #[class(type)]
  #[error("invalid counter length. Currently supported 32/64/128 bits")]
  InvalidCounterLength,
  #[class("DOMExceptionOperationError")]
  #[error("tried to encrypt too much data")]
  TooMuchData,
  #[class("DOMExceptionOperationError")]
  #[error("Encryption failed")]
  Failed,
}
/// Encrypts `data` with the algorithm and key described by `opts`.
///
/// The work runs on the blocking thread pool; the returned buffer holds
/// the ciphertext (for AES-GCM, the truncated tag is appended).
#[op2(async)]
#[serde]
pub async fn op_crypto_encrypt(
  #[serde] opts: EncryptOptions,
  #[buffer] data: JsBuffer,
) -> Result<ToJsBuffer, EncryptError> {
  let key = opts.key;
  let fun = move || match opts.algorithm {
    EncryptAlgorithm::RsaOaep { hash, label } => {
      encrypt_rsa_oaep(key, hash, label, &data)
    }
    EncryptAlgorithm::AesCbc { iv, length } => {
      encrypt_aes_cbc(key, length, iv, &data)
    }
    EncryptAlgorithm::AesGcm {
      iv,
      additional_data,
      length,
      tag_length,
    } => encrypt_aes_gcm(key, length, tag_length, iv, additional_data, &data),
    EncryptAlgorithm::AesCtr {
      counter,
      ctr_length,
      key_length,
    } => encrypt_aes_ctr(key, key_length, &counter, ctr_length, &data),
  };
  // `unwrap` propagates a panic from the blocking task (a bug, not an
  // expected failure); encryption errors flow through the `?`.
  let buf = spawn_blocking(fun).await.unwrap()?;
  Ok(buf.into())
}
/// Encrypts `data` with RSA-OAEP using the given hash for both the digest
/// and the MGF1 mask generation function.
///
/// NOTE(review): the label bytes go through `from_utf8_lossy`, so invalid
/// UTF-8 label bytes are replaced before hashing — confirm this matches the
/// intended WebCrypto label semantics.
fn encrypt_rsa_oaep(
  key: V8RawKeyData,
  hash: ShaHash,
  label: Vec<u8>,
  data: &[u8],
) -> Result<Vec<u8>, EncryptError> {
  let label = String::from_utf8_lossy(&label).to_string();
  let public_key = key.as_rsa_public_key()?;
  let public_key = rsa::RsaPublicKey::from_pkcs1_der(&public_key)
    .map_err(|_| SharedError::FailedDecodePublicKey)?;
  let mut rng = OsRng;
  // Same hash is used for the OAEP digest and the MGF1 digest.
  let padding = match hash {
    ShaHash::Sha1 => rsa::Oaep {
      digest: Box::<Sha1>::default(),
      mgf_digest: Box::<Sha1>::default(),
      label: Some(label),
    },
    ShaHash::Sha256 => rsa::Oaep {
      digest: Box::<Sha256>::default(),
      mgf_digest: Box::<Sha256>::default(),
      label: Some(label),
    },
    ShaHash::Sha384 => rsa::Oaep {
      digest: Box::<Sha384>::default(),
      mgf_digest: Box::<Sha384>::default(),
      label: Some(label),
    },
    ShaHash::Sha512 => rsa::Oaep {
      digest: Box::<Sha512>::default(),
      mgf_digest: Box::<Sha512>::default(),
      label: Some(label),
    },
  };
  let encrypted = public_key
    .encrypt(&mut rng, padding, data)
    .map_err(|_| EncryptError::Failed)?;
  Ok(encrypted)
}
/// Encrypts `data` with AES-CBC and PKCS#7 padding.
///
/// `length` is the key size in bits and selects the AES variant; any other
/// value is rejected with `InvalidLength`.
fn encrypt_aes_cbc(
  key: V8RawKeyData,
  length: usize,
  iv: Vec<u8>,
  data: &[u8],
) -> Result<Vec<u8>, EncryptError> {
  let key = key.as_secret_key()?;
  let ciphertext = match length {
    128 => {
      // Section 10.3 Step 2 of RFC 2315 https://www.rfc-editor.org/rfc/rfc2315
      type Aes128CbcEnc = cbc::Encryptor<aes::Aes128>;
      let cipher = Aes128CbcEnc::new_from_slices(key, &iv)
        .map_err(|_| EncryptError::InvalidKeyOrIv)?;
      cipher.encrypt_padded_vec_mut::<Pkcs7>(data)
    }
    192 => {
      // Section 10.3 Step 2 of RFC 2315 https://www.rfc-editor.org/rfc/rfc2315
      type Aes192CbcEnc = cbc::Encryptor<aes::Aes192>;
      let cipher = Aes192CbcEnc::new_from_slices(key, &iv)
        .map_err(|_| EncryptError::InvalidKeyOrIv)?;
      cipher.encrypt_padded_vec_mut::<Pkcs7>(data)
    }
    256 => {
      // Section 10.3 Step 2 of RFC 2315 https://www.rfc-editor.org/rfc/rfc2315
      type Aes256CbcEnc = cbc::Encryptor<aes::Aes256>;
      let cipher = Aes256CbcEnc::new_from_slices(key, &iv)
        .map_err(|_| EncryptError::InvalidKeyOrIv)?;
      cipher.encrypt_padded_vec_mut::<Pkcs7>(data)
    }
    _ => return Err(EncryptError::InvalidLength),
  };
  Ok(ciphertext)
}
/// AES-GCM encryption generic over the nonce length `N` (a typenum).
///
/// Encrypts `ciphertext` in place and returns the detached 16-byte
/// authentication tag. `length` is the key size in bits.
fn encrypt_aes_gcm_general<N: ArrayLength<u8>>(
  key: &[u8],
  iv: Vec<u8>,
  length: usize,
  ciphertext: &mut [u8],
  additional_data: Vec<u8>,
) -> Result<aes_gcm::Tag, EncryptError> {
  let nonce = Nonce::<N>::from_slice(&iv);
  let tag = match length {
    128 => {
      let cipher = aes_gcm::AesGcm::<Aes128, N>::new_from_slice(key)
        .map_err(|_| EncryptError::Failed)?;
      cipher
        .encrypt_in_place_detached(nonce, &additional_data, ciphertext)
        .map_err(|_| EncryptError::Failed)?
    }
    192 => {
      let cipher = aes_gcm::AesGcm::<Aes192, N>::new_from_slice(key)
        .map_err(|_| EncryptError::Failed)?;
      cipher
        .encrypt_in_place_detached(nonce, &additional_data, ciphertext)
        .map_err(|_| EncryptError::Failed)?
    }
    256 => {
      let cipher = aes_gcm::AesGcm::<Aes256, N>::new_from_slice(key)
        .map_err(|_| EncryptError::Failed)?;
      cipher
        .encrypt_in_place_detached(nonce, &additional_data, ciphertext)
        .map_err(|_| EncryptError::Failed)?
    }
    _ => return Err(EncryptError::InvalidLength),
  };
  Ok(tag)
}
/// Encrypts `data` with AES-GCM and returns `ciphertext || tag`, where the
/// tag is truncated to `tag_length` bits.
///
/// Only 96-bit (12-byte) and 128-bit (16-byte) nonces are accepted.
fn encrypt_aes_gcm(
  key: V8RawKeyData,
  length: usize,
  tag_length: usize,
  iv: Vec<u8>,
  additional_data: Option<Vec<u8>>,
  data: &[u8],
) -> Result<Vec<u8>, EncryptError> {
  let key = key.as_secret_key()?;
  let additional_data = additional_data.unwrap_or_default();
  // Encrypt in place over a copy of the plaintext.
  let mut ciphertext = data.to_vec();
  // Fixed 96-bit OR 128-bit nonce
  let tag = match iv.len() {
    12 => encrypt_aes_gcm_general::<U12>(
      key,
      iv,
      length,
      &mut ciphertext,
      additional_data,
    )?,
    16 => encrypt_aes_gcm_general::<U16>(
      key,
      iv,
      length,
      &mut ciphertext,
      additional_data,
    )?,
    _ => return Err(EncryptError::InvalidIvLength),
  };
  // Truncated tag to the specified tag length.
  // `tag` is fixed to be 16 bytes long and (tag_length / 8) is always <= 16
  let tag = &tag[..(tag_length / 8)];
  // C | T
  ciphertext.extend_from_slice(tag);
  Ok(ciphertext)
}
/// Applies the CTR-mode keystream of cipher `B` to `data`.
///
/// Returns `TooMuchData` if the counter space of `B` would be exhausted.
fn encrypt_aes_ctr_gen<B>(
  key: &[u8],
  counter: &[u8],
  data: &[u8],
) -> Result<Vec<u8>, EncryptError>
where
  B: KeyIvInit + StreamCipher,
{
  // CTR mode XORs the keystream over the plaintext in place.
  let mut out = data.to_vec();
  B::new(key.into(), counter.into())
    .try_apply_keystream(&mut out)
    .map_err(|_| EncryptError::TooMuchData)?;
  Ok(out)
}
/// Encrypts `data` with AES-CTR, dispatching on the counter width
/// (32/64/128 bits) and the key size in bits (128/192/256).
fn encrypt_aes_ctr(
  key: V8RawKeyData,
  key_length: usize,
  counter: &[u8],
  ctr_length: usize,
  data: &[u8],
) -> Result<Vec<u8>, EncryptError> {
  let key = key.as_secret_key()?;
  match ctr_length {
    32 => match key_length {
      128 => encrypt_aes_ctr_gen::<Ctr32BE<aes::Aes128>>(key, counter, data),
      192 => encrypt_aes_ctr_gen::<Ctr32BE<aes::Aes192>>(key, counter, data),
      256 => encrypt_aes_ctr_gen::<Ctr32BE<aes::Aes256>>(key, counter, data),
      _ => Err(EncryptError::InvalidLength),
    },
    64 => match key_length {
      128 => encrypt_aes_ctr_gen::<Ctr64BE<aes::Aes128>>(key, counter, data),
      192 => encrypt_aes_ctr_gen::<Ctr64BE<aes::Aes192>>(key, counter, data),
      256 => encrypt_aes_ctr_gen::<Ctr64BE<aes::Aes256>>(key, counter, data),
      _ => Err(EncryptError::InvalidLength),
    },
    128 => match key_length {
      128 => encrypt_aes_ctr_gen::<Ctr128BE<aes::Aes128>>(key, counter, data),
      192 => encrypt_aes_ctr_gen::<Ctr128BE<aes::Aes192>>(key, counter, data),
      256 => encrypt_aes_ctr_gen::<Ctr128BE<aes::Aes256>>(key, counter, data),
      _ => Err(EncryptError::InvalidLength),
    },
    _ => Err(EncryptError::InvalidCounterLength),
  }
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/ext/crypto/x25519.rs | ext/crypto/x25519.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use base64::prelude::BASE64_URL_SAFE_NO_PAD;
use curve25519_dalek::montgomery::MontgomeryPoint;
use deno_core::ToJsBuffer;
use deno_core::op2;
use elliptic_curve::pkcs8::PrivateKeyInfo;
use elliptic_curve::subtle::ConstantTimeEq;
use rand::RngCore;
use rand::rngs::OsRng;
use spki::der::Decode;
use spki::der::Encode;
use spki::der::asn1::BitString;
/// Errors surfaced by the X25519 export ops.
#[derive(Debug, thiserror::Error, deno_error::JsError)]
pub enum X25519Error {
  #[class("DOMExceptionOperationError")]
  #[error("Failed to export key")]
  FailedExport,
  // DER encode/decode failure from the `spki` crate.
  #[class(generic)]
  #[error(transparent)]
  Der(#[from] spki::der::Error),
}
// u-coordinate of the base point.
// Little-endian encoding of u = 9, per RFC 7748 section 4.1.
const X25519_BASEPOINT_BYTES: [u8; 32] = [
  9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
  0, 0, 0, 0, 0, 0,
];
/// Fills `pkey` with 32 random bytes and derives the matching public key
/// into `pubkey`.
///
/// Both buffers must be exactly 32 bytes; the `expect` below panics
/// otherwise (the JS caller is responsible for allocating them).
#[op2(fast)]
pub fn op_crypto_generate_x25519_keypair(
  #[buffer] pkey: &mut [u8],
  #[buffer] pubkey: &mut [u8],
) {
  let mut rng = OsRng;
  rng.fill_bytes(pkey);
  // https://www.rfc-editor.org/rfc/rfc7748#section-6.1
  // pubkey = x25519(a, 9) which is constant-time Montgomery ladder.
  // https://eprint.iacr.org/2014/140.pdf page 4
  // https://eprint.iacr.org/2017/212.pdf algorithm 8
  // pubkey is in LE order.
  let pkey: [u8; 32] = pkey.try_into().expect("Expected byteLength 32");
  pubkey.copy_from_slice(&x25519_dalek::x25519(pkey, X25519_BASEPOINT_BYTES));
}
/// Derives the X25519 public key for a 32-byte private key and returns it
/// base64url-encoded (unpadded), as used in JWK `x` fields.
#[op2]
#[string]
pub fn op_crypto_x25519_public_key(#[buffer] private_key: &[u8]) -> String {
  use base64::Engine;
  // Panics if the caller hands over a buffer that is not 32 bytes.
  let private_key: [u8; 32] =
    private_key.try_into().expect("Expected byteLength 32");
  BASE64_URL_SAFE_NO_PAD
    .encode(x25519_dalek::x25519(private_key, X25519_BASEPOINT_BYTES))
}
// The all-zero point; an all-zero shared secret marks a low-order input.
const MONTGOMERY_IDENTITY: MontgomeryPoint = MontgomeryPoint([0; 32]);
/// Computes the X25519 shared secret `x25519(k, u)` into `secret`.
///
/// Returns `true` on error (shared secret is the identity / all zeros, per
/// the RFC 7748 section 6.1 check), `false` on success. The identity
/// comparison is constant-time.
#[op2(fast)]
pub fn op_crypto_derive_bits_x25519(
  #[buffer] k: &[u8],
  #[buffer] u: &[u8],
  #[buffer] secret: &mut [u8],
) -> bool {
  let k: [u8; 32] = k.try_into().expect("Expected byteLength 32");
  let u: [u8; 32] = u.try_into().expect("Expected byteLength 32");
  let sh_sec = x25519_dalek::x25519(k, u);
  let point = MontgomeryPoint(sh_sec);
  if point.ct_eq(&MONTGOMERY_IDENTITY).unwrap_u8() == 1 {
    return true;
  }
  secret.copy_from_slice(&sh_sec);
  false
}
// id-X25519 OBJECT IDENTIFIER ::= { 1 3 101 110 }
pub const X25519_OID: const_oid::ObjectIdentifier =
  const_oid::ObjectIdentifier::new_unwrap("1.3.101.110");
/// Imports an X25519 public key from DER SubjectPublicKeyInfo bytes,
/// copying the raw public key into `out`.
///
/// Returns `false` (import failure) when the DER does not parse, the
/// algorithm is not id-X25519, parameters are present, or the embedded key
/// is not exactly `out.len()` bytes.
#[op2(fast)]
pub fn op_crypto_import_spki_x25519(
  #[buffer] key_data: &[u8],
  #[buffer] out: &mut [u8],
) -> bool {
  // 2-3. Parse the DER-encoded SubjectPublicKeyInfo.
  let pk_info = match spki::SubjectPublicKeyInfoRef::try_from(key_data) {
    Ok(pk_info) => pk_info,
    Err(_) => return false,
  };
  // 4. Algorithm must be id-X25519.
  let alg = pk_info.algorithm.oid;
  if alg != X25519_OID {
    return false;
  }
  // 5. id-X25519 takes no parameters.
  if pk_info.algorithm.parameters.is_some() {
    return false;
  }
  // Fix: reject keys whose BIT STRING payload is not exactly the expected
  // size instead of panicking in `copy_from_slice` on malformed input.
  let raw = pk_info.subject_public_key.raw_bytes();
  if raw.len() != out.len() {
    return false;
  }
  out.copy_from_slice(raw);
  true
}
/// Imports an X25519 private key from DER PKCS#8 bytes, copying the 32-byte
/// scalar into `out`. Returns `false` on any validation failure.
#[op2(fast)]
pub fn op_crypto_import_pkcs8_x25519(
  #[buffer] key_data: &[u8],
  #[buffer] out: &mut [u8],
) -> bool {
  // 2-3.
  // This should probably use OneAsymmetricKey instead
  let pk_info = match PrivateKeyInfo::from_der(key_data) {
    Ok(pk_info) => pk_info,
    Err(_) => return false,
  };
  // 4. Algorithm must be id-X25519.
  let alg = pk_info.algorithm.oid;
  if alg != X25519_OID {
    return false;
  }
  // 5. id-X25519 takes no parameters.
  if pk_info.algorithm.parameters.is_some() {
    return false;
  }
  // 6.
  // CurvePrivateKey ::= OCTET STRING
  // 34 = 2-byte OCTET STRING header + 32-byte scalar.
  // NOTE(review): the header bytes (expected 0x04 0x20) are skipped but not
  // verified — confirm whether stricter validation is wanted here.
  if pk_info.private_key.len() != 34 {
    return false;
  }
  out.copy_from_slice(&pk_info.private_key[2..]);
  true
}
/// Exports a raw X25519 public key as DER SubjectPublicKeyInfo bytes.
#[op2]
#[serde]
pub fn op_crypto_export_spki_x25519(
  #[buffer] pubkey: &[u8],
) -> Result<ToJsBuffer, X25519Error> {
  let key_info = spki::SubjectPublicKeyInfo {
    algorithm: spki::AlgorithmIdentifierRef {
      // id-X25519
      oid: X25519_OID,
      // id-X25519 takes no parameters.
      parameters: None,
    },
    subject_public_key: BitString::from_bytes(pubkey)?,
  };
  Ok(
    key_info
      .to_der()
      .map_err(|_| X25519Error::FailedExport)?
      .into(),
  )
}
/// Exports a raw X25519 private key as DER PKCS#8 (PrivateKeyInfo) bytes.
///
/// NOTE(review): `pkey` is embedded directly as the privateKey OCTET STRING
/// content — presumably the caller passes the already-wrapped
/// CurvePrivateKey bytes; confirm against the JS side.
#[op2]
#[serde]
pub fn op_crypto_export_pkcs8_x25519(
  #[buffer] pkey: &[u8],
) -> Result<ToJsBuffer, X25519Error> {
  use rsa::pkcs1::der::Encode;
  // This should probably use OneAsymmetricKey instead
  let pk_info = rsa::pkcs8::PrivateKeyInfo {
    public_key: None,
    algorithm: rsa::pkcs8::AlgorithmIdentifierRef {
      // id-X25519
      oid: X25519_OID,
      parameters: None,
    },
    private_key: pkey, // OCTET STRING
  };
  let mut buf = Vec::new();
  pk_info.encode_to_vec(&mut buf)?;
  Ok(buf.into())
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/ext/webidl/lib.rs | ext/webidl/lib.rs | // Copyright 2018-2025 the Deno authors. MIT license.
// Registers the `deno_webidl` extension: no native ops, only the WebIDL
// conversion routines shipped as an ES module.
deno_core::extension!(deno_webidl, esm = ["00_webidl.js"],);
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/ext/webidl/benches/dict.rs | ext/webidl/benches/dict.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use deno_bench_util::bench_js_sync;
use deno_bench_util::bench_or_profile;
use deno_bench_util::bencher::Bencher;
use deno_bench_util::bencher::benchmark_group;
use deno_core::Extension;
/// Builds the extension list for the benchmarks: the real `deno_webidl`
/// extension plus a bench-only extension that loads `benches/dict.js` as
/// its entry point.
fn setup() -> Vec<Extension> {
  deno_core::extension!(
    deno_webidl_bench,
    esm_entry_point = "ext:deno_webidl_bench/setup.js",
    esm = ["ext:deno_webidl_bench/setup.js" = "benches/dict.js"]
  );
  vec![deno_webidl::deno_webidl::init(), deno_webidl_bench::init()]
}
// Benchmarks the generated WebIDL dictionary converter on `undefined`.
fn converter_undefined(b: &mut Bencher) {
  bench_js_sync(b, r#"TextDecodeOptions(undefined);"#, setup);
}
// Baseline: a hand-written converter on `undefined`, for comparison.
fn handwritten_baseline_undefined(b: &mut Bencher) {
  bench_js_sync(b, r#"handwrittenConverter(undefined)"#, setup);
}
// Benchmarks the generated WebIDL dictionary converter on an empty object.
fn converter_object(b: &mut Bencher) {
  bench_js_sync(b, r#"TextDecodeOptions({});"#, setup);
}
// Baseline: a hand-written converter on an empty object, for comparison.
fn handwritten_baseline_object(b: &mut Bencher) {
  bench_js_sync(b, r#"handwrittenConverter({})"#, setup);
}
// Group the four benchmarks and emit the bench/profile entry point.
benchmark_group!(
  benches,
  converter_undefined,
  handwritten_baseline_undefined,
  converter_object,
  handwritten_baseline_object,
);
bench_or_profile!(benches);
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/ext/telemetry/lib.rs | ext/telemetry/lib.rs | // Copyright 2018-2025 the Deno authors. MIT license.
#![allow(clippy::too_many_arguments)]
#![expect(unexpected_cfgs)]
use std::borrow::Cow;
use std::cell::RefCell;
use std::collections::HashMap;
use std::env;
use std::ffi::c_void;
use std::fmt::Debug;
use std::pin::Pin;
use std::rc::Rc;
use std::sync::Arc;
use std::sync::Mutex;
use std::sync::atomic::AtomicU64;
use std::task::Context;
use std::task::Poll;
use std::thread;
use std::time::Duration;
use std::time::Instant;
use std::time::SystemTime;
use deno_core::GarbageCollected;
use deno_core::OpState;
use deno_core::futures::FutureExt;
use deno_core::futures::Stream;
use deno_core::futures::StreamExt;
use deno_core::futures::channel::mpsc;
use deno_core::futures::channel::mpsc::UnboundedSender;
use deno_core::futures::future::BoxFuture;
use deno_core::futures::stream;
use deno_core::op2;
use deno_core::v8;
use deno_core::v8::DataError;
use deno_error::JsError;
use deno_error::JsErrorBox;
use once_cell::sync::Lazy;
use once_cell::sync::OnceCell;
use opentelemetry::InstrumentationScope;
pub use opentelemetry::Key;
pub use opentelemetry::KeyValue;
pub use opentelemetry::StringValue;
pub use opentelemetry::Value;
use opentelemetry::logs::AnyValue;
use opentelemetry::logs::LogRecord as LogRecordTrait;
use opentelemetry::logs::Severity;
use opentelemetry::metrics::AsyncInstrumentBuilder;
pub use opentelemetry::metrics::Gauge;
pub use opentelemetry::metrics::Histogram;
use opentelemetry::metrics::InstrumentBuilder;
pub use opentelemetry::metrics::MeterProvider;
pub use opentelemetry::metrics::UpDownCounter;
use opentelemetry::otel_debug;
use opentelemetry::otel_error;
use opentelemetry::trace::Event;
use opentelemetry::trace::Link;
use opentelemetry::trace::SpanContext;
use opentelemetry::trace::SpanId;
use opentelemetry::trace::SpanKind;
use opentelemetry::trace::Status as SpanStatus;
use opentelemetry::trace::TraceFlags;
use opentelemetry::trace::TraceId;
use opentelemetry::trace::TraceState;
use opentelemetry_otlp::HttpExporterBuilder;
use opentelemetry_otlp::Protocol;
use opentelemetry_otlp::WithExportConfig;
use opentelemetry_otlp::WithHttpConfig;
use opentelemetry_sdk::Resource;
use opentelemetry_sdk::export::trace::SpanData;
use opentelemetry_sdk::logs::BatchLogProcessor;
use opentelemetry_sdk::logs::LogProcessor;
use opentelemetry_sdk::logs::LogRecord;
use opentelemetry_sdk::metrics::ManualReader;
use opentelemetry_sdk::metrics::MetricResult;
use opentelemetry_sdk::metrics::SdkMeterProvider;
use opentelemetry_sdk::metrics::Temporality;
use opentelemetry_sdk::metrics::exporter::PushMetricExporter;
use opentelemetry_sdk::metrics::reader::MetricReader;
use opentelemetry_sdk::trace::BatchSpanProcessor;
use opentelemetry_sdk::trace::IdGenerator;
use opentelemetry_sdk::trace::RandomIdGenerator;
use opentelemetry_sdk::trace::SpanEvents;
use opentelemetry_sdk::trace::SpanLinks;
use opentelemetry_sdk::trace::SpanProcessor as _;
use opentelemetry_semantic_conventions::resource::PROCESS_RUNTIME_NAME;
use opentelemetry_semantic_conventions::resource::PROCESS_RUNTIME_VERSION;
use opentelemetry_semantic_conventions::resource::TELEMETRY_SDK_LANGUAGE;
use opentelemetry_semantic_conventions::resource::TELEMETRY_SDK_NAME;
use opentelemetry_semantic_conventions::resource::TELEMETRY_SDK_VERSION;
use serde::Deserialize;
use serde::Serialize;
use thiserror::Error;
use tokio::sync::oneshot;
use tokio::task::JoinSet;
// Registers the `deno_telemetry` extension: the op set backing the
// JS OpenTelemetry bindings plus the TypeScript glue shipped as ESM.
deno_core::extension!(
  deno_telemetry,
  ops = [
    op_otel_collect_isolate_metrics,
    op_otel_enable_isolate_metrics,
    op_otel_log,
    op_otel_log_foreign,
    op_otel_span_attribute1,
    op_otel_span_attribute2,
    op_otel_span_attribute3,
    op_otel_span_add_link,
    op_otel_span_update_name,
    op_otel_metric_attribute3,
    op_otel_metric_record0,
    op_otel_metric_record1,
    op_otel_metric_record2,
    op_otel_metric_record3,
    op_otel_metric_observable_record0,
    op_otel_metric_observable_record1,
    op_otel_metric_observable_record2,
    op_otel_metric_observable_record3,
    op_otel_metric_wait_to_observe,
    op_otel_metric_observation_done,
  ],
  // op2 object wrappers exposed to JS as garbage-collected handles.
  objects = [OtelTracer, OtelMeter, OtelSpan],
  esm = ["telemetry.ts", "util.ts"],
);
/// Identity of the embedding runtime; recorded on the OTLP resource
/// attributes attached to all exported telemetry (see `init`).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct OtelRuntimeConfig {
  /// Value for the `process.runtime.name` resource attribute.
  pub runtime_name: Cow<'static, str>,
  /// Value for the `process.runtime.version` resource attribute.
  pub runtime_version: Cow<'static, str>,
}
/// Which OpenTelemetry features are enabled for this process.
#[derive(Default, Debug, Clone, Serialize, Deserialize)]
pub struct OtelConfig {
  pub tracing_enabled: bool,
  pub metrics_enabled: bool,
  /// How `console.*` calls interact with otel log export.
  pub console: OtelConsoleConfig,
  /// When set, span/trace ids are generated deterministically with this
  /// prefix byte (see `DenoIdGenerator`) instead of randomly.
  pub deterministic_prefix: Option<u8>,
  /// Context propagators to enable on the JS side.
  pub propagators: std::collections::HashSet<OtelPropagators>,
}
impl OtelConfig {
  /// Serialize the config into the flat byte layout the JS bootstrap
  /// reads: three flag bytes (tracing, metrics, console mode) followed
  /// by one byte per enabled propagator. Propagator order follows the
  /// `HashSet` iterator and is therefore unspecified.
  pub fn as_v8(&self) -> Box<[u8]> {
    let flags = [
      self.tracing_enabled as u8,
      self.metrics_enabled as u8,
      self.console as u8,
    ];
    let propagators = self.propagators.iter().map(|propagator| *propagator as u8);
    flags.into_iter().chain(propagators).collect()
  }
}
/// W3C context propagators that can be enabled on the JS side.
/// Discriminant values are part of the byte protocol in
/// `OtelConfig::as_v8`.
#[derive(
  Default, Debug, Clone, Copy, Serialize, Deserialize, Eq, PartialEq, Hash,
)]
#[repr(u8)]
pub enum OtelPropagators {
  TraceContext = 0,
  Baggage = 1,
  #[default]
  None = 2,
}
/// How `console.*` output interacts with otel log export. Discriminant
/// values are part of the byte protocol in `OtelConfig::as_v8`.
#[derive(
  Debug, Default, Clone, Copy, PartialEq, Eq, Serialize, Deserialize,
)]
#[repr(u8)]
pub enum OtelConsoleConfig {
  /// Leave console output alone; do not export it.
  #[default]
  Ignore = 0,
  /// Export console output as logs in addition to printing it.
  Capture = 1,
  /// Export console output as logs instead of printing it.
  Replace = 2,
}
/// Channel for pushing futures onto the dedicated OpenTelemetry runtime
/// thread; the thread is lazily spawned on first use.
static OTEL_SHARED_RUNTIME_SPAWN_TASK_TX: Lazy<
  UnboundedSender<BoxFuture<'static, ()>>,
> = Lazy::new(otel_create_shared_runtime);
/// Callbacks to signal (and await) before each observable-metric
/// collection pass; drained by the periodic reader's worker task.
static OTEL_PRE_COLLECT_CALLBACKS: Lazy<
  Mutex<Vec<oneshot::Sender<oneshot::Sender<()>>>>,
> = Lazy::new(Default::default);
/// Spawns the dedicated single-threaded tokio runtime that all
/// OpenTelemetry background work (batch processors, exporters) runs on,
/// and returns the channel used to submit futures to it.
fn otel_create_shared_runtime() -> UnboundedSender<BoxFuture<'static, ()>> {
  let (spawn_task_tx, mut spawn_task_rx) =
    mpsc::unbounded::<BoxFuture<'static, ()>>();
  thread::spawn(move || {
    let rt = tokio::runtime::Builder::new_current_thread()
      .enable_io()
      .enable_time()
      // Cap tokio's blocking-thread pool for this runtime.
      // The default value is 512, which is an unhelpfully large thread
      // pool for a runtime that only drives telemetry export; we don't
      // ever want more than a couple dozen threads here.
      .max_blocking_threads(if cfg!(windows) {
        // on windows, tokio uses blocking tasks for child process IO, make sure
        // we have enough available threads for other tasks to run
        4 * std::thread::available_parallelism()
          .map(|n| n.get())
          .unwrap_or(8)
      } else {
        32
      })
      .build()
      .unwrap();
    rt.block_on(async move {
      // Forward every submitted future onto this runtime until all
      // senders are dropped.
      while let Some(task) = spawn_task_rx.next().await {
        tokio::spawn(task);
      }
    });
  });
  spawn_task_tx
}
/// Zero-sized handle to the shared OpenTelemetry runtime thread;
/// implements the executor/runtime traits the otel crates require.
#[derive(Clone, Copy)]
pub struct OtelSharedRuntime;

impl hyper::rt::Executor<BoxFuture<'static, ()>> for OtelSharedRuntime {
  fn execute(&self, fut: BoxFuture<'static, ()>) {
    (*OTEL_SHARED_RUNTIME_SPAWN_TASK_TX)
      .unbounded_send(fut)
      .expect("failed to send task to shared OpenTelemetry runtime");
  }
}
impl opentelemetry_sdk::runtime::Runtime for OtelSharedRuntime {
  type Interval = Pin<Box<dyn Stream<Item = ()> + Send + 'static>>;
  type Delay = Pin<Box<tokio::time::Sleep>>;

  /// Tick stream used by batch processors: yields after each `period`.
  fn interval(&self, period: Duration) -> Self::Interval {
    stream::repeat(())
      .then(move |_| tokio::time::sleep(period))
      .boxed()
  }

  /// Run `future` on the shared OpenTelemetry runtime thread.
  fn spawn(&self, future: BoxFuture<'static, ()>) {
    (*OTEL_SHARED_RUNTIME_SPAWN_TASK_TX)
      .unbounded_send(future)
      .expect("failed to send task to shared OpenTelemetry runtime");
  }

  fn delay(&self, duration: Duration) -> Self::Delay {
    Box::pin(tokio::time::sleep(duration))
  }
}
/// Channel factory used by the batch span/log processors; bridges
/// tokio's bounded mpsc into the otel-sdk channel traits.
impl opentelemetry_sdk::runtime::RuntimeChannel for OtelSharedRuntime {
  type Receiver<T: Debug + Send> = BatchMessageChannelReceiver<T>;
  type Sender<T: Debug + Send> = BatchMessageChannelSender<T>;

  fn batch_message_channel<T: Debug + Send>(
    &self,
    capacity: usize,
  ) -> (Self::Sender<T>, Self::Receiver<T>) {
    let (batch_tx, batch_rx) = tokio::sync::mpsc::channel::<T>(capacity);
    (batch_tx.into(), batch_rx.into())
  }
}
/// Newtype over `tokio::sync::mpsc::Sender` implementing the otel-sdk
/// `TrySend` trait.
#[derive(Debug)]
pub struct BatchMessageChannelSender<T: Send> {
  sender: tokio::sync::mpsc::Sender<T>,
}

impl<T: Send> From<tokio::sync::mpsc::Sender<T>>
  for BatchMessageChannelSender<T>
{
  fn from(sender: tokio::sync::mpsc::Sender<T>) -> Self {
    Self { sender }
  }
}

impl<T: Send> opentelemetry_sdk::runtime::TrySend
  for BatchMessageChannelSender<T>
{
  type Message = T;

  fn try_send(
    &self,
    item: Self::Message,
  ) -> Result<(), opentelemetry_sdk::runtime::TrySendError> {
    // Translate tokio's try_send errors into their otel-sdk equivalents.
    self.sender.try_send(item).map_err(|err| match err {
      tokio::sync::mpsc::error::TrySendError::Full(_) => {
        opentelemetry_sdk::runtime::TrySendError::ChannelFull
      }
      tokio::sync::mpsc::error::TrySendError::Closed(_) => {
        opentelemetry_sdk::runtime::TrySendError::ChannelClosed
      }
    })
  }
}
/// Newtype over `tokio::sync::mpsc::Receiver` exposing it as a
/// `futures::Stream`, as the otel-sdk batch workers expect.
pub struct BatchMessageChannelReceiver<T> {
  receiver: tokio::sync::mpsc::Receiver<T>,
}

impl<T> From<tokio::sync::mpsc::Receiver<T>>
  for BatchMessageChannelReceiver<T>
{
  fn from(receiver: tokio::sync::mpsc::Receiver<T>) -> Self {
    Self { receiver }
  }
}

impl<T> Stream for BatchMessageChannelReceiver<T> {
  type Item = T;
  fn poll_next(
    mut self: Pin<&mut Self>,
    cx: &mut Context<'_>,
  ) -> Poll<Option<Self::Item>> {
    self.receiver.poll_recv(cx)
  }
}
/// Control messages handled by the periodic metric reader's worker task.
enum DenoPeriodicReaderMessage {
  /// Attach a metrics pipeline to the inner `ManualReader`.
  Register(std::sync::Weak<opentelemetry_sdk::metrics::Pipeline>),
  /// Collect and export now (driven by the interval ticker).
  Export,
  /// Collect+export, then report the result to the caller.
  ForceFlush(oneshot::Sender<MetricResult<()>>),
  /// Final collect+export, shut the exporter down, then report back.
  Shutdown(oneshot::Sender<MetricResult<()>>),
}

/// A `MetricReader` that exports on a fixed interval from the shared
/// otel runtime thread (see `DenoPeriodicReader::new`).
#[derive(Debug)]
struct DenoPeriodicReader {
  // Handle to the worker task spawned in `new`.
  tx: tokio::sync::mpsc::Sender<DenoPeriodicReaderMessage>,
  temporality: Temporality,
}
impl MetricReader for DenoPeriodicReader {
  fn register_pipeline(
    &self,
    pipeline: std::sync::Weak<opentelemetry_sdk::metrics::Pipeline>,
  ) {
    // Best effort: if the worker is gone the registration is dropped.
    let _ = self
      .tx
      .try_send(DenoPeriodicReaderMessage::Register(pipeline));
  }

  fn collect(
    &self,
    _rm: &mut opentelemetry_sdk::metrics::data::ResourceMetrics,
  ) -> opentelemetry_sdk::metrics::MetricResult<()> {
    // Collection happens on the worker task; nothing should call this.
    unreachable!("collect should not be called on DenoPeriodicReader");
  }

  fn force_flush(&self) -> opentelemetry_sdk::metrics::MetricResult<()> {
    let (tx, rx) = oneshot::channel();
    let _ = self.tx.try_send(DenoPeriodicReaderMessage::ForceFlush(tx));
    // NOTE(review): the unwrap panics if the worker task has already
    // exited (e.g. after shutdown) and dropped the reply sender —
    // consider mapping that to an error instead.
    deno_core::futures::executor::block_on(rx).unwrap()?;
    Ok(())
  }

  fn shutdown(&self) -> opentelemetry_sdk::metrics::MetricResult<()> {
    let (tx, rx) = oneshot::channel();
    let _ = self.tx.try_send(DenoPeriodicReaderMessage::Shutdown(tx));
    // NOTE(review): same unwrap-on-recv caveat as `force_flush`.
    deno_core::futures::executor::block_on(rx).unwrap()?;
    Ok(())
  }

  fn temporality(
    &self,
    _kind: opentelemetry_sdk::metrics::InstrumentKind,
  ) -> Temporality {
    // The same temporality is used for every instrument kind.
    self.temporality
  }
}
// Env var (milliseconds) controlling how often metrics are exported.
const METRIC_EXPORT_INTERVAL_NAME: &str = "OTEL_METRIC_EXPORT_INTERVAL";
// Export interval used when the env var is unset or unparsable.
const DEFAULT_INTERVAL: Duration = Duration::from_secs(60);
impl DenoPeriodicReader {
  /// Spawns the reader's worker task on the shared otel runtime and
  /// returns the handle used to message it.
  fn new(exporter: opentelemetry_otlp::MetricExporter) -> Self {
    // Export interval from the env, falling back to the default; a
    // non-numeric value is silently ignored.
    let interval = env::var(METRIC_EXPORT_INTERVAL_NAME)
      .ok()
      .and_then(|v| v.parse().map(Duration::from_millis).ok())
      .unwrap_or(DEFAULT_INTERVAL);
    let (tx, mut rx) = tokio::sync::mpsc::channel(256);
    let temporality = PushMetricExporter::temporality(&exporter);
    let worker = async move {
      // A ManualReader does the actual aggregation; this task drives it
      // on a timer and on explicit flush/shutdown requests.
      let inner = ManualReader::builder()
        .with_temporality(PushMetricExporter::temporality(&exporter))
        .build();
      let collect_and_export = |collect_observed: bool| {
        let inner = &inner;
        let exporter = &exporter;
        async move {
          let mut resource_metrics =
            opentelemetry_sdk::metrics::data::ResourceMetrics {
              resource: Default::default(),
              scope_metrics: Default::default(),
            };
          if collect_observed {
            // Give registered pre-collect callbacks a chance to record
            // observable instruments before collecting: signal each one
            // and wait until every reply channel resolves (or drops).
            let callbacks = {
              let mut callbacks = OTEL_PRE_COLLECT_CALLBACKS.lock().unwrap();
              std::mem::take(&mut *callbacks)
            };
            let mut futures = JoinSet::new();
            for callback in callbacks {
              let (tx, rx) = oneshot::channel();
              if let Ok(()) = callback.send(tx) {
                futures.spawn(rx);
              }
            }
            while futures.join_next().await.is_some() {}
          }
          inner.collect(&mut resource_metrics)?;
          // Skip the export round-trip when nothing was recorded.
          if resource_metrics.scope_metrics.is_empty() {
            return Ok(());
          }
          exporter.export(&mut resource_metrics).await?;
          Ok(())
        }
      };
      let mut ticker = tokio::time::interval(interval);
      ticker.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Delay);
      // The first tick completes immediately; consume it so the loop
      // waits a full interval before the first export.
      ticker.tick().await;
      loop {
        let message = tokio::select! {
          _ = ticker.tick() => DenoPeriodicReaderMessage::Export,
          message = rx.recv() => if let Some(message) = message {
            message
          } else {
            // All senders dropped: the reader itself is gone.
            break;
          },
        };
        match message {
          DenoPeriodicReaderMessage::Register(new_pipeline) => {
            inner.register_pipeline(new_pipeline);
          }
          DenoPeriodicReaderMessage::Export => {
            otel_debug!(
              name: "DenoPeriodicReader.ExportTriggered",
              message = "Export message received.",
            );
            if let Err(err) = collect_and_export(true).await {
              otel_error!(
                name: "DenoPeriodicReader.ExportFailed",
                message = "Failed to export metrics",
                reason = format!("{}", err));
            }
          }
          DenoPeriodicReaderMessage::ForceFlush(sender) => {
            otel_debug!(
              name: "DenoPeriodicReader.ForceFlushCalled",
              message = "Flush message received.",
            );
            let res = collect_and_export(false).await;
            if let Err(send_error) = sender.send(res) {
              otel_debug!(
                name: "DenoPeriodicReader.Flush.SendResultError",
                message = "Failed to send flush result.",
                reason = format!("{:?}", send_error),
              );
            }
          }
          DenoPeriodicReaderMessage::Shutdown(sender) => {
            otel_debug!(
              name: "DenoPeriodicReader.ShutdownCalled",
              message = "Shutdown message received",
            );
            let res = collect_and_export(false).await;
            let _ = exporter.shutdown();
            if let Err(send_error) = sender.send(res) {
              otel_debug!(
                name: "DenoPeriodicReader.Shutdown.SendResultError",
                message = "Failed to send shutdown result",
                reason = format!("{:?}", send_error),
              );
            }
            break;
          }
        }
      }
    };
    (*OTEL_SHARED_RUNTIME_SPAWN_TASK_TX)
      .unbounded_send(worker.boxed())
      .expect("failed to send task to shared OpenTelemetry runtime");
    DenoPeriodicReader { tx, temporality }
  }
}
mod hyper_client {
use std::fmt::Debug;
use std::pin::Pin;
use std::task::Poll;
use deno_net::tunnel::TunnelConnection;
use deno_net::tunnel::TunnelStream;
use deno_net::tunnel::get_tunnel;
use deno_tls::SocketUse;
use deno_tls::TlsKey;
use deno_tls::TlsKeys;
use deno_tls::create_client_config;
use deno_tls::load_certs;
use deno_tls::load_private_keys;
use http_body_util::BodyExt;
use http_body_util::Full;
use hyper::Uri;
use hyper_rustls::HttpsConnector;
use hyper_rustls::MaybeHttpsStream;
use hyper_util::client::legacy::Client;
use hyper_util::client::legacy::connect::Connected;
use hyper_util::client::legacy::connect::HttpConnector;
use hyper_util::rt::TokioIo;
use opentelemetry_http::Bytes;
use opentelemetry_http::HttpError;
use opentelemetry_http::Request;
use opentelemetry_http::Response;
use opentelemetry_http::ResponseExt;
use tokio::net::TcpStream;
#[cfg(any(
target_os = "android",
target_os = "linux",
target_os = "macos"
))]
use tokio_vsock::VsockAddr;
#[cfg(any(
target_os = "android",
target_os = "linux",
target_os = "macos"
))]
use tokio_vsock::VsockStream;
use super::OtelSharedRuntime;
  /// Connection-level errors the OTLP transport can produce.
  #[derive(Debug, thiserror::Error)]
  enum Error {
    #[error(transparent)]
    StdIo(#[from] std::io::Error),
    #[error(transparent)]
    Box(#[from] Box<dyn std::error::Error + Send + Sync>),
    #[error(transparent)]
    Tunnel(#[from] deno_net::tunnel::Error),
  }
  /// Transport selector for the OTLP exporter connection.
  #[derive(Debug, Clone)]
  enum Connector {
    /// Plain HTTP/HTTPS via hyper's connector stack.
    Http(HttpsConnector<HttpConnector>),
    /// Streams opened over an established tunnel connection.
    Tunnel(TunnelConnection),
    /// AF_VSOCK socket address (unix-family targets only).
    #[cfg(any(
      target_os = "android",
      target_os = "linux",
      target_os = "macos"
    ))]
    Vsock(VsockAddr),
  }

  /// The concrete stream types a [`Connector`] can yield.
  #[allow(clippy::large_enum_variant)]
  #[pin_project::pin_project(project = IOProj)]
  enum IO {
    Tls(#[pin] TokioIo<MaybeHttpsStream<TokioIo<TcpStream>>>),
    Tunnel(#[pin] TunnelStream),
    #[cfg(any(
      target_os = "android",
      target_os = "linux",
      target_os = "macos"
    ))]
    Vsock(#[pin] VsockStream),
  }
  // Delegate reads to whichever stream variant is active.
  impl tokio::io::AsyncRead for IO {
    fn poll_read(
      self: std::pin::Pin<&mut Self>,
      cx: &mut std::task::Context<'_>,
      buf: &mut tokio::io::ReadBuf<'_>,
    ) -> Poll<std::io::Result<()>> {
      match self.project() {
        IOProj::Tls(stream) => stream.poll_read(cx, buf),
        IOProj::Tunnel(stream) => stream.poll_read(cx, buf),
        #[cfg(any(
          target_os = "android",
          target_os = "linux",
          target_os = "macos"
        ))]
        IOProj::Vsock(stream) => stream.poll_read(cx, buf),
      }
    }
  }
  // Delegate every write operation to whichever stream variant is
  // active, including the vectored-write fast path.
  impl tokio::io::AsyncWrite for IO {
    fn poll_write(
      self: std::pin::Pin<&mut Self>,
      cx: &mut std::task::Context<'_>,
      buf: &[u8],
    ) -> Poll<Result<usize, std::io::Error>> {
      match self.project() {
        IOProj::Tls(stream) => stream.poll_write(cx, buf),
        IOProj::Tunnel(stream) => stream.poll_write(cx, buf),
        #[cfg(any(
          target_os = "android",
          target_os = "linux",
          target_os = "macos"
        ))]
        IOProj::Vsock(stream) => stream.poll_write(cx, buf),
      }
    }

    fn poll_flush(
      self: std::pin::Pin<&mut Self>,
      cx: &mut std::task::Context<'_>,
    ) -> Poll<Result<(), std::io::Error>> {
      match self.project() {
        IOProj::Tls(stream) => stream.poll_flush(cx),
        IOProj::Tunnel(stream) => stream.poll_flush(cx),
        #[cfg(any(
          target_os = "android",
          target_os = "linux",
          target_os = "macos"
        ))]
        IOProj::Vsock(stream) => stream.poll_flush(cx),
      }
    }

    fn poll_shutdown(
      self: std::pin::Pin<&mut Self>,
      cx: &mut std::task::Context<'_>,
    ) -> Poll<Result<(), std::io::Error>> {
      match self.project() {
        IOProj::Tls(stream) => stream.poll_shutdown(cx),
        IOProj::Tunnel(stream) => stream.poll_shutdown(cx),
        #[cfg(any(
          target_os = "android",
          target_os = "linux",
          target_os = "macos"
        ))]
        IOProj::Vsock(stream) => stream.poll_shutdown(cx),
      }
    }

    fn is_write_vectored(&self) -> bool {
      match self {
        IO::Tls(stream) => stream.is_write_vectored(),
        IO::Tunnel(stream) => stream.is_write_vectored(),
        #[cfg(any(
          target_os = "android",
          target_os = "linux",
          target_os = "macos"
        ))]
        IO::Vsock(stream) => stream.is_write_vectored(),
      }
    }

    fn poll_write_vectored(
      self: std::pin::Pin<&mut Self>,
      cx: &mut std::task::Context<'_>,
      bufs: &[std::io::IoSlice<'_>],
    ) -> Poll<Result<usize, std::io::Error>> {
      match self.project() {
        IOProj::Tls(stream) => stream.poll_write_vectored(cx, bufs),
        IOProj::Tunnel(stream) => stream.poll_write_vectored(cx, bufs),
        #[cfg(any(
          target_os = "android",
          target_os = "linux",
          target_os = "macos"
        ))]
        IOProj::Vsock(stream) => stream.poll_write_vectored(cx, bufs),
      }
    }
  }
  impl hyper_util::client::legacy::connect::Connection for IO {
    fn connected(&self) -> Connected {
      match self {
        Self::Tls(stream) => stream.connected(),
        // Tunnel/vsock transports report themselves as proxied so hyper
        // uses absolute-form requests.
        Self::Tunnel(_) => Connected::new().proxy(true),
        #[cfg(any(
          target_os = "android",
          target_os = "linux",
          target_os = "macos"
        ))]
        Self::Vsock(_) => Connected::new().proxy(true),
      }
    }
  }
  // The tower service hyper uses to open a connection per request URI.
  impl tower_service::Service<Uri> for Connector {
    type Response = TokioIo<IO>;
    type Error = Error;
    type Future = Pin<
      Box<
        dyn std::future::Future<Output = Result<Self::Response, Self::Error>>
          + Send,
      >,
    >;

    fn poll_ready(
      &mut self,
      cx: &mut std::task::Context<'_>,
    ) -> Poll<Result<(), Self::Error>> {
      match self {
        Self::Http(c) => c.poll_ready(cx).map_err(Into::into),
        // Tunnel/vsock connectors have no readiness state.
        Self::Tunnel(_) => Poll::Ready(Ok(())),
        #[cfg(any(
          target_os = "android",
          target_os = "linux",
          target_os = "macos"
        ))]
        Self::Vsock(_) => Poll::Ready(Ok(())),
      }
    }

    fn call(&mut self, dst: Uri) -> Self::Future {
      // Clone so the returned future is 'static and Send.
      let this = self.clone();
      Box::pin(async move {
        match this {
          Self::Http(mut connector) => {
            let stream = connector.call(dst).await?;
            Ok(TokioIo::new(IO::Tls(TokioIo::new(stream))))
          }
          Self::Tunnel(listener) => {
            // `dst` is ignored: the tunnel decides where traffic goes.
            let stream = listener.create_agent_stream().await?;
            Ok(TokioIo::new(IO::Tunnel(stream)))
          }
          #[cfg(any(
            target_os = "android",
            target_os = "linux",
            target_os = "macos"
          ))]
          Self::Vsock(addr) => {
            let stream = VsockStream::connect(addr).await?;
            Ok(TokioIo::new(IO::Vsock(stream)))
          }
        }
      })
    }
  }
  /// HTTP client used for all OTLP exports; runs on [`OtelSharedRuntime`].
  #[derive(Debug, Clone)]
  pub struct HyperClient {
    inner: Client<Connector, Full<Bytes>>,
  }
  impl HyperClient {
    /// Build the exporter client, choosing the transport in priority
    /// order: active tunnel, then `OTEL_DENO_VSOCK`, then HTTP(S) with
    /// TLS material from the `OTEL_EXPORTER_OTLP_*` env vars.
    pub fn new() -> deno_core::anyhow::Result<Self> {
      let connector = if let Some(tunnel) = get_tunnel() {
        Connector::Tunnel(tunnel.clone())
      } else if let Ok(addr) = std::env::var("OTEL_DENO_VSOCK") {
        #[cfg(not(any(
          target_os = "android",
          target_os = "linux",
          target_os = "macos"
        )))]
        {
          let _ = addr;
          deno_core::anyhow::bail!("vsock is not supported on this platform")
        }
        #[cfg(any(
          target_os = "android",
          target_os = "linux",
          target_os = "macos"
        ))]
        {
          // Address format is "<cid>:<port>"; cid -1 means "any/host".
          let Some((cid, port)) = addr.split_once(':') else {
            deno_core::anyhow::bail!("invalid vsock addr");
          };
          let cid = if cid == "-1" { u32::MAX } else { cid.parse()? };
          let port = port.parse()?;
          let addr = VsockAddr::new(cid, port);
          Connector::Vsock(addr)
        }
      } else {
        // Optional custom CA bundle for the collector endpoint.
        let ca_certs = match std::env::var("OTEL_EXPORTER_OTLP_CERTIFICATE") {
          Ok(path) => vec![std::fs::read(path)?],
          _ => vec![],
        };
        // Optional mTLS client identity; both vars must be set.
        let keys = match (
          std::env::var("OTEL_EXPORTER_OTLP_CLIENT_KEY"),
          std::env::var("OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE"),
        ) {
          (Ok(key_path), Ok(cert_path)) => {
            let key = std::fs::read(key_path)?;
            let cert = std::fs::read(cert_path)?;
            let certs = load_certs(&mut std::io::Cursor::new(cert))?;
            let key = load_private_keys(&key)?.into_iter().next().unwrap();
            TlsKeys::Static(TlsKey(certs, key))
          }
          _ => TlsKeys::Null,
        };
        let tls_config =
          create_client_config(deno_tls::TlsClientConfigOptions {
            root_cert_store: None,
            ca_certs,
            unsafely_ignore_certificate_errors: None,
            unsafely_disable_hostname_verification: false,
            cert_chain_and_key: keys,
            socket_use: SocketUse::Http,
          })?;
        let mut http_connector = HttpConnector::new();
        // Allow plain-http endpoints too; TLS is layered on demand.
        http_connector.enforce_http(false);
        let connector = HttpsConnector::from((http_connector, tls_config));
        Connector::Http(connector)
      };
      Ok(Self {
        inner: Client::builder(OtelSharedRuntime).build(connector),
      })
    }
  }
  #[async_trait::async_trait]
  impl opentelemetry_http::HttpClient for HyperClient {
    /// Send one OTLP request, buffering the full response body and
    /// turning non-success statuses into errors.
    async fn send(
      &self,
      request: Request<Vec<u8>>,
    ) -> Result<Response<Bytes>, HttpError> {
      let (parts, body) = request.into_parts();
      let request = Request::from_parts(parts, Full::from(body));
      let response = self.inner.request(request).await?;
      let (parts, body) = response.into_parts();
      let body = body.collect().await?.to_bytes();
      let response = Response::from_parts(parts, body);
      Ok(response.error_for_status()?)
    }
  }
}
/// Process-wide OpenTelemetry state, set once by [`init`].
#[derive(Debug)]
pub struct OtelGlobals {
  pub span_processor: BatchSpanProcessor<OtelSharedRuntime>,
  pub log_processor: BatchLogProcessor<OtelSharedRuntime>,
  pub id_generator: DenoIdGenerator,
  pub meter_provider: SdkMeterProvider,
  /// Scope attached to telemetry emitted by the runtime itself.
  pub builtin_instrumentation_scope: InstrumentationScope,
  pub config: OtelConfig,
}

impl OtelGlobals {
  /// Whether span collection was requested at startup.
  pub fn has_tracing(&self) -> bool {
    self.config.tracing_enabled
  }

  /// Whether metric collection was requested at startup.
  pub fn has_metrics(&self) -> bool {
    self.config.metrics_enabled
  }
}
/// Set exactly once by [`init`]; stays unset when telemetry is disabled.
pub static OTEL_GLOBALS: OnceCell<OtelGlobals> = OnceCell::new();
/// Initialize process-wide OpenTelemetry export (spans, logs, metrics)
/// from the environment and the given configs. No-op when everything is
/// disabled. Errors on unsupported/unreadable OTLP env settings.
pub fn init(
  rt_config: OtelRuntimeConfig,
  config: OtelConfig,
) -> deno_core::anyhow::Result<()> {
  if !config.metrics_enabled
    && !config.tracing_enabled
    && config.console == OtelConsoleConfig::Ignore
  {
    return Ok(());
  }
  // Parse the `OTEL_EXPORTER_OTLP_PROTOCOL` variable. The opentelemetry_*
  // crates don't do this automatically.
  // TODO(piscisaureus): enable GRPC support.
  let protocol = match env::var("OTEL_EXPORTER_OTLP_PROTOCOL").as_deref() {
    Ok("http/protobuf") => Protocol::HttpBinary,
    Ok("http/json") => Protocol::HttpJson,
    Ok("") | Err(env::VarError::NotPresent) => Protocol::HttpBinary,
    Ok(protocol) => {
      return Err(deno_core::anyhow::anyhow!(
        "Env var OTEL_EXPORTER_OTLP_PROTOCOL specifies an unsupported protocol: {}",
        protocol
      ));
    }
    Err(err) => {
      return Err(deno_core::anyhow::anyhow!(
        "Failed to read env var OTEL_EXPORTER_OTLP_PROTOCOL: {}",
        err
      ));
    }
  };
  // Define the resource attributes that will be attached to all log records.
  // These attributes are sourced as follows (in order of precedence):
  // * The `service.name` attribute from the `OTEL_SERVICE_NAME` env var.
  // * Additional attributes from the `OTEL_RESOURCE_ATTRIBUTES` env var.
  // * Default attribute values defined here.
  // TODO(piscisaureus): add more default attributes (e.g. script path).
  let mut resource = Resource::default();
  // Add the runtime name and version to the resource attributes. Also override
  // the `telemetry.sdk` attributes to include the Deno runtime.
  resource = resource.merge(&Resource::new(vec![
    KeyValue::new(PROCESS_RUNTIME_NAME, rt_config.runtime_name),
    KeyValue::new(PROCESS_RUNTIME_VERSION, rt_config.runtime_version.clone()),
    KeyValue::new(
      TELEMETRY_SDK_LANGUAGE,
      format!(
        "deno-{}",
        resource.get(Key::new(TELEMETRY_SDK_LANGUAGE)).unwrap()
      ),
    ),
    KeyValue::new(
      TELEMETRY_SDK_NAME,
      format!(
        "deno-{}",
        resource.get(Key::new(TELEMETRY_SDK_NAME)).unwrap()
      ),
    ),
    KeyValue::new(
      TELEMETRY_SDK_VERSION,
      format!(
        "{}-{}",
        rt_config.runtime_version,
        resource.get(Key::new(TELEMETRY_SDK_VERSION)).unwrap()
      ),
    ),
  ]));
  // The OTLP endpoint is automatically picked up from the
  // `OTEL_EXPORTER_OTLP_ENDPOINT` environment variable. Additional headers can
  // be specified using `OTEL_EXPORTER_OTLP_HEADERS`.
  let client = hyper_client::HyperClient::new()?;
  let span_exporter = HttpExporterBuilder::default()
    .with_http_client(client.clone())
    .with_protocol(protocol)
    .build_span_exporter()?;
  let mut span_processor =
    BatchSpanProcessor::builder(span_exporter, OtelSharedRuntime).build();
  span_processor.set_resource(&resource);
  // Metric temporality preference per the corresponding OTLP env var.
  let temporality_preference =
    env::var("OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE")
      .ok()
      .map(|s| s.to_lowercase());
  let temporality = match temporality_preference.as_deref() {
    None | Some("cumulative") => Temporality::Cumulative,
    Some("delta") => Temporality::Delta,
    Some("lowmemory") => Temporality::LowMemory,
    Some(other) => {
      return Err(deno_core::anyhow::anyhow!(
        "Invalid value for OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE: {}",
        other
      ));
    }
  };
  let metric_exporter = HttpExporterBuilder::default()
    .with_http_client(client.clone())
    .with_protocol(protocol)
    .build_metrics_exporter(temporality)?;
  let metric_reader = DenoPeriodicReader::new(metric_exporter);
  let meter_provider = SdkMeterProvider::builder()
    .with_reader(metric_reader)
    .with_resource(resource.clone())
    .build();
  let log_exporter = HttpExporterBuilder::default()
    .with_http_client(client)
    .with_protocol(protocol)
    .build_log_exporter()?;
  let log_processor =
    BatchLogProcessor::builder(log_exporter, OtelSharedRuntime).build();
  log_processor.set_resource(&resource);
  // Scope for telemetry the runtime itself emits (e.g. console logs).
  let builtin_instrumentation_scope =
    opentelemetry::InstrumentationScope::builder("deno")
      .with_version(rt_config.runtime_version.clone())
      .build();
  let id_generator = if let Some(prefix) = config.deterministic_prefix {
    DenoIdGenerator::deterministic(prefix)
  } else {
    DenoIdGenerator::random()
  };
  OTEL_GLOBALS
    .set(OtelGlobals {
      log_processor,
      span_processor,
      id_generator,
      meter_provider,
      builtin_instrumentation_scope,
      config,
    })
    .map_err(|_| deno_core::anyhow::anyhow!("failed to set otel globals"))?;
  // Make sure buffered telemetry is flushed when the process exits.
  deno_signals::before_exit(before_exit);
  deno_net::tunnel::disable_before_exit();
  Ok(())
}
/// Exit hook: flush and shut down all otel processors so buffered
/// telemetry is exported before the process terminates.
fn before_exit() {
  log::trace!("deno_telemetry::before_exit");
  let Some(OtelGlobals {
    span_processor: spans,
    log_processor: logs,
    meter_provider,
    ..
  }) = OTEL_GLOBALS.get()
  else {
    return;
  };
  // Shutdown failures are only traced — exiting must not be blocked.
  let r = spans.shutdown();
  log::trace!("spans={:?}", r);
  let r = logs.shutdown();
  log::trace!("logs={:?}", r);
  let r = meter_provider.shutdown();
  log::trace!("meters={:?}", r);
  deno_net::tunnel::before_exit();
}
/// Forward a `log` crate record to the OTLP log processor. No-op when
/// otel was never initialized.
pub fn handle_log(record: &log::Record) {
  use log::Level;

  let Some(OtelGlobals {
    log_processor: logs,
    builtin_instrumentation_scope,
    ..
  }) = OTEL_GLOBALS.get()
  else {
    return;
  };

  let mut log_record = LogRecord::default();

  let now = SystemTime::now();
  log_record.set_timestamp(now);
  log_record.set_observed_timestamp(now);
  log_record.set_severity_number(match record.level() {
    Level::Error => Severity::Error,
    Level::Warn => Severity::Warn,
    Level::Info => Severity::Info,
    Level::Debug => Severity::Debug,
    Level::Trace => Severity::Trace,
  });
  log_record.set_severity_text(record.level().as_str());
  log_record.set_body(record.args().to_string().into());
  log_record.set_target(record.metadata().target().to_string());

  // Copy the record's structured key/value pairs into otel attributes,
  // for the value types that have a direct AnyValue mapping.
  struct Visitor<'s>(&'s mut LogRecord);

  impl<'kvs> log::kv::VisitSource<'kvs> for Visitor<'_> {
    fn visit_pair(
      &mut self,
      key: log::kv::Key<'kvs>,
      value: log::kv::Value<'kvs>,
    ) -> Result<(), log::kv::Error> {
      #[allow(clippy::manual_map)]
      let value = if let Some(v) = value.to_bool() {
        Some(AnyValue::Boolean(v))
      } else if let Some(v) = value.to_borrowed_str() {
        Some(AnyValue::String(v.to_owned().into()))
      } else if let Some(v) = value.to_f64() {
        Some(AnyValue::Double(v))
      } else if let Some(v) = value.to_i64() {
        Some(AnyValue::Int(v))
      } else {
        // Values of any other type are dropped.
        None
      };
      if let Some(value) = value {
        let key = Key::from(key.as_str().to_owned());
        self.0.add_attribute(key, value);
      }
      Ok(())
    }
  }

  let _ = record.key_values().visit(&mut Visitor(&mut log_record));

  logs.emit(&mut log_record, builtin_instrumentation_scope);
}
#[derive(Debug)]
pub enum DenoIdGenerator {
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | true |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/ext/fetch/lib.rs | ext/fetch/lib.rs | // Copyright 2018-2025 the Deno authors. MIT license.
pub mod dns;
mod fs_fetch_handler;
mod proxy;
#[cfg(test)]
mod tests;
use std::borrow::Cow;
use std::cell::RefCell;
use std::cmp::min;
use std::convert::From;
use std::future;
use std::future::Future;
use std::net::IpAddr;
use std::path::Path;
#[cfg(not(windows))]
use std::path::PathBuf;
use std::pin::Pin;
use std::rc::Rc;
use std::sync::Arc;
use std::task::Context;
use std::task::Poll;
use bytes::Bytes;
// Re-export data_url
pub use data_url;
use data_url::DataUrl;
use deno_core::AsyncRefCell;
use deno_core::AsyncResult;
use deno_core::BufView;
use deno_core::ByteString;
use deno_core::CancelFuture;
use deno_core::CancelHandle;
use deno_core::CancelTryFuture;
use deno_core::Canceled;
use deno_core::JsBuffer;
use deno_core::OpState;
use deno_core::RcRef;
use deno_core::Resource;
use deno_core::ResourceId;
use deno_core::futures::FutureExt;
use deno_core::futures::Stream;
use deno_core::futures::StreamExt;
use deno_core::futures::TryFutureExt;
use deno_core::futures::stream::Peekable;
use deno_core::op2;
use deno_core::url;
use deno_core::url::Url;
use deno_core::v8;
use deno_error::JsErrorBox;
pub use deno_fs::FsError;
use deno_path_util::PathToUrlError;
use deno_permissions::OpenAccessKind;
use deno_permissions::PermissionCheckError;
use deno_permissions::PermissionsContainer;
use deno_tls::Proxy;
use deno_tls::RootCertStoreProvider;
use deno_tls::SocketUse;
use deno_tls::TlsKey;
use deno_tls::TlsKeys;
use deno_tls::TlsKeysHolder;
use deno_tls::rustls::RootCertStore;
pub use fs_fetch_handler::FsFetchHandler;
use http::Extensions;
use http::HeaderMap;
use http::Method;
use http::Uri;
use http::header::ACCEPT;
use http::header::ACCEPT_ENCODING;
use http::header::AUTHORIZATION;
use http::header::CONTENT_LENGTH;
use http::header::HOST;
use http::header::HeaderName;
use http::header::HeaderValue;
use http::header::PROXY_AUTHORIZATION;
use http::header::RANGE;
use http::header::USER_AGENT;
use http_body_util::BodyExt;
use http_body_util::combinators::BoxBody;
use hyper::body::Frame;
use hyper_util::client::legacy::Builder as HyperClientBuilder;
use hyper_util::client::legacy::connect::Connection;
use hyper_util::client::legacy::connect::HttpConnector;
use hyper_util::client::legacy::connect::HttpInfo;
use hyper_util::rt::TokioExecutor;
use hyper_util::rt::TokioIo;
use hyper_util::rt::TokioTimer;
pub use proxy::basic_auth;
use serde::Deserialize;
use serde::Serialize;
use tower::BoxError;
use tower::Service;
use tower::ServiceExt;
use tower::retry;
use tower_http::decompression::Decompression;
/// Configuration for the `deno_fetch` extension.
#[derive(Clone)]
pub struct Options {
  /// Value sent in the `User-Agent` header of outgoing requests.
  pub user_agent: String,
  /// Lazily-initialized root CA store for TLS verification.
  pub root_cert_store_provider: Option<Arc<dyn RootCertStoreProvider>>,
  /// Optional proxy applied to all requests.
  pub proxy: Option<Proxy>,
  /// A callback to customize HTTP client configuration.
  ///
  /// The settings applied with this hook may be overridden by the options
  /// provided through `Deno.createHttpClient()` API. For instance, if the hook
  /// calls [`hyper_util::client::legacy::Builder::pool_max_idle_per_host`] with
  /// a value of 99, and a user calls `Deno.createHttpClient({ poolMaxIdlePerHost: 42 })`,
  /// the value that will take effect is 42.
  ///
  /// For more info on what can be configured, see [`hyper_util::client::legacy::Builder`].
  pub client_builder_hook: Option<fn(HyperClientBuilder) -> HyperClientBuilder>,
  /// Hook that may mutate (or reject) every outgoing request.
  #[allow(clippy::type_complexity)]
  pub request_builder_hook:
    Option<fn(&mut http::Request<ReqBody>) -> Result<(), JsErrorBox>>,
  pub unsafely_ignore_certificate_errors: Option<Vec<String>>,
  /// Client certificate/key used for mutual TLS.
  pub client_cert_chain_and_key: TlsKeys,
  /// Handler for `file://` fetches; errors by default.
  pub file_fetch_handler: Rc<dyn FetchHandler>,
  /// DNS resolver used for outgoing connections.
  pub resolver: dns::Resolver,
}
impl Options {
  /// Resolve the root certificate store, lazily initializing the
  /// provider if one was configured. `Ok(None)` when no provider is set.
  pub fn root_cert_store(&self) -> Result<Option<RootCertStore>, JsErrorBox> {
    self
      .root_cert_store_provider
      .as_ref()
      .map(|provider| provider.get_or_try_init().cloned())
      .transpose()
  }
}
impl Default for Options {
fn default() -> Self {
Self {
user_agent: "".to_string(),
root_cert_store_provider: None,
proxy: None,
client_builder_hook: None,
request_builder_hook: None,
unsafely_ignore_certificate_errors: None,
client_cert_chain_and_key: TlsKeys::Null,
file_fetch_handler: Rc::new(DefaultFileFetchHandler),
resolver: dns::Resolver::default(),
}
}
}
// Registers the `deno_fetch` extension: ops plus the web-API ESM glue
// (Headers, FormData, Body, Request, Response, fetch, EventSource).
deno_core::extension!(deno_fetch,
  deps = [ deno_webidl, deno_web ],
  ops = [
    op_fetch,
    op_fetch_send,
    op_utf8_to_byte_string,
    op_fetch_custom_client,
    op_fetch_promise_is_settled,
  ],
  esm = [
    "20_headers.js",
    "21_formdata.js",
    "22_body.js",
    "22_http_client.js",
    "23_request.js",
    "23_response.js",
    "26_fetch.js",
    "27_eventsource.js"
  ],
  options = {
    options: Options,
  },
  // Stash the options in the op state for the ops to read.
  state = |state, options| {
    state.put::<Options>(options.options);
  },
);
/// Everything that can go wrong while performing a fetch; the
/// `#[class]` attributes pick the JS error class surfaced to users.
#[derive(Debug, thiserror::Error, deno_error::JsError)]
pub enum FetchError {
  #[class(inherit)]
  #[error(transparent)]
  Resource(#[from] deno_core::error::ResourceError),
  #[class(inherit)]
  #[error(transparent)]
  Permission(#[from] PermissionCheckError),
  // Generic catch-all mandated by the fetch spec for network failures.
  #[class(type)]
  #[error("NetworkError when attempting to fetch resource")]
  NetworkError,
  #[class(type)]
  #[error("Fetching files only supports the GET method: received {0}")]
  FsNotGet(Method),
  #[class(inherit)]
  #[error(transparent)]
  PathToUrl(#[from] PathToUrlError),
  #[class(type)]
  #[error("Invalid URL {0}")]
  InvalidUrl(Url),
  #[class(type)]
  #[error(transparent)]
  InvalidHeaderName(#[from] http::header::InvalidHeaderName),
  #[class(type)]
  #[error(transparent)]
  InvalidHeaderValue(#[from] http::header::InvalidHeaderValue),
  #[class(type)]
  #[error("{0:?}")]
  DataUrl(data_url::DataUrlError),
  #[class(type)]
  #[error("{0:?}")]
  Base64(data_url::forgiving_base64::InvalidBase64),
  #[class(type)]
  #[error("Blob for the given URL not found.")]
  BlobNotFound,
  #[class(type)]
  #[error("Url scheme '{0}' not supported")]
  SchemeNotSupported(String),
  #[class(type)]
  #[error("Request was cancelled")]
  RequestCanceled,
  #[class(generic)]
  #[error(transparent)]
  Http(#[from] http::Error),
  #[class(inherit)]
  #[error(transparent)]
  ClientCreate(#[from] HttpClientCreateError),
  #[class(inherit)]
  #[error(transparent)]
  Url(#[from] url::ParseError),
  #[class(type)]
  #[error(transparent)]
  Method(#[from] http::method::InvalidMethod),
  #[class(inherit)]
  #[error(transparent)]
  ClientSend(#[from] ClientSendError),
  #[class(inherit)]
  #[error(transparent)]
  RequestBuilderHook(JsErrorBox),
  #[class(inherit)]
  #[error(transparent)]
  Io(#[from] std::io::Error),
  #[class(generic)]
  #[error(transparent)]
  Dns(hickory_resolver::ResolveError),
  #[class(generic)]
  #[error(transparent)]
  PermissionCheck(PermissionCheckError),
}
impl From<deno_fs::FsError> for FetchError {
  /// Map file-system failures onto fetch errors. Permission failures are
  /// forwarded as-is; every other file-system error is collapsed into the
  /// opaque `NetworkError` so the JS side sees no file-system detail.
  fn from(value: deno_fs::FsError) -> Self {
    match value {
      deno_fs::FsError::PermissionCheck(err) => Self::PermissionCheck(err),
      deno_fs::FsError::Io(_)
      | deno_fs::FsError::FileBusy
      | deno_fs::FsError::NotSupported => Self::NetworkError,
    }
  }
}
/// A boxed, non-`Send` future resolving to a (possibly canceled) response.
pub type CancelableResponseFuture =
  Pin<Box<dyn Future<Output = CancelableResponseResult>>>;
/// Strategy for fetching `file:` URLs; `DynClone` keeps trait objects clonable.
pub trait FetchHandler: dyn_clone::DynClone {
  /// Return the result of the fetch request consisting of a tuple of the
  /// cancelable response result, the optional fetch body resource and the
  /// optional cancel handle.
  fn fetch_file(
    &self,
    state: &mut OpState,
    url: &Url,
  ) -> (CancelableResponseFuture, Option<Rc<CancelHandle>>);
}
dyn_clone::clone_trait_object!(FetchHandler);
/// A default implementation which will error for every request.
#[derive(Clone)]
pub struct DefaultFileFetchHandler;
impl FetchHandler for DefaultFileFetchHandler {
  /// Always resolve immediately with `NetworkError`; there is nothing to
  /// cancel, so no cancel handle is returned.
  fn fetch_file(
    &self,
    _state: &mut OpState,
    _url: &Url,
  ) -> (CancelableResponseFuture, Option<Rc<CancelHandle>>) {
    (
      Box::pin(async { Ok(Err(FetchError::NetworkError)) }),
      None,
    )
  }
}
/// Result of `op_fetch`, handed back to JS: the resource id of the
/// in-flight request and, when the request is cancelable, the resource id
/// of its cancel handle.
#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
pub struct FetchReturn {
  pub request_rid: ResourceId,
  pub cancel_handle_rid: Option<ResourceId>,
}
/// Return the HTTP client cached in `state`, creating and caching one from
/// the extension `Options` on first use.
pub fn get_or_create_client_from_state(
  state: &mut OpState,
) -> Result<Client, HttpClientCreateError> {
  // Fast path: a client was already built for this state.
  if let Some(existing) = state.try_borrow::<Client>() {
    return Ok(existing.clone());
  }
  let fresh = create_client_from_options(state.borrow::<Options>())?;
  state.put::<Client>(fresh.clone());
  Ok(fresh)
}
/// Build the default HTTP client from the extension `Options`, using the
/// embedder's root cert store, proxy, resolver and builder hook, and
/// defaults everywhere else (both HTTP/1 and HTTP/2 enabled, no pooling
/// overrides, no extra CA certs).
pub fn create_client_from_options(
  options: &Options,
) -> Result<Client, HttpClientCreateError> {
  create_http_client(
    &options.user_agent,
    CreateHttpClientOptions {
      root_cert_store: options
        .root_cert_store()
        .map_err(HttpClientCreateError::RootCertStore)?,
      ca_certs: vec![],
      proxy: options.proxy.clone(),
      dns_resolver: options.resolver.clone(),
      unsafely_ignore_certificate_errors: options
        .unsafely_ignore_certificate_errors
        .clone(),
      // Fall back to "no client cert" if the configured chain/key can't be
      // converted — TODO confirm this silent fallback is intended.
      client_cert_chain_and_key: options
        .client_cert_chain_and_key
        .clone()
        .try_into()
        .unwrap_or_default(),
      pool_max_idle_per_host: None,
      pool_idle_timeout: None,
      http1: true,
      http2: true,
      local_address: None,
      client_builder_hook: options.client_builder_hook,
    },
  )
}
/// Adapts a deno_core `Resource` into an HTTP request body by repeatedly
/// issuing 64 KiB reads. Field 1 holds the in-flight read future, if any.
#[allow(clippy::type_complexity)]
pub struct ResourceToBodyAdapter(
  Rc<dyn Resource>,
  Option<Pin<Box<dyn Future<Output = Result<BufView, JsErrorBox>>>>>,
);
impl ResourceToBodyAdapter {
  /// Wrap `resource` and eagerly start the first 64 KiB read.
  pub fn new(resource: Rc<dyn Resource>) -> Self {
    let future = resource.clone().read(64 * 1024);
    Self(resource, Some(future))
  }
}
// SAFETY: we only use this on a single-threaded executor
unsafe impl Send for ResourceToBodyAdapter {}
// SAFETY: we only use this on a single-threaded executor
unsafe impl Sync for ResourceToBodyAdapter {}
impl Stream for ResourceToBodyAdapter {
  type Item = Result<Bytes, JsErrorBox>;
  /// Drive the pending read future; on a non-empty chunk, queue the next
  /// read and yield the chunk. An empty read marks end-of-stream. Once the
  /// future slot is `None` the stream stays terminated.
  fn poll_next(
    self: Pin<&mut Self>,
    cx: &mut Context<'_>,
  ) -> Poll<Option<Self::Item>> {
    let this = self.get_mut();
    match this.1.take() {
      Some(mut fut) => match fut.poll_unpin(cx) {
        Poll::Pending => {
          // Not ready yet: put the future back so the next poll resumes it.
          this.1 = Some(fut);
          Poll::Pending
        }
        Poll::Ready(res) => match res {
          // Empty read => resource is exhausted; leave slot empty (EOF).
          Ok(buf) if buf.is_empty() => Poll::Ready(None),
          Ok(buf) => {
            // Kick off the next read before yielding this chunk.
            this.1 = Some(this.0.clone().read(64 * 1024));
            Poll::Ready(Some(Ok(buf.to_vec().into())))
          }
          Err(err) => Poll::Ready(Some(Err(err))),
        },
      },
      _ => Poll::Ready(None),
    }
  }
}
impl hyper::body::Body for ResourceToBodyAdapter {
  type Data = Bytes;
  type Error = JsErrorBox;
  /// Bridge the `Stream` implementation into hyper's `Body`: each yielded
  /// chunk becomes a data frame; readiness is forwarded unchanged.
  fn poll_frame(
    self: Pin<&mut Self>,
    cx: &mut Context<'_>,
  ) -> Poll<Option<Result<Frame<Self::Data>, Self::Error>>> {
    self
      .poll_next(cx)
      .map(|item| item.map(|chunk| chunk.map(Frame::data)))
  }
}
impl Drop for ResourceToBodyAdapter {
fn drop(&mut self) {
self.0.clone().close()
}
}
/// Start a `fetch()` request and register it in the resource table.
///
/// Dispatches on the URL scheme:
/// - `file:` — delegated to the configured `file_fetch_handler` (GET only).
/// - `http:`/`https:` — sent through the default or custom HTTP client
///   after a net-permission check.
/// - `data:` — decoded inline into an immediately-ready response.
/// - `blob:` — resolved on the JS side; reaching here means the blob URL
///   was not an object URL, so this errors.
///
/// Returns the request resource id plus an optional cancel-handle id.
#[op2(stack_trace)]
#[serde]
#[allow(clippy::too_many_arguments)]
#[allow(clippy::large_enum_variant)]
#[allow(clippy::result_large_err)]
pub fn op_fetch(
  state: &mut OpState,
  #[serde] method: ByteString,
  #[string] url: String,
  #[serde] headers: Vec<(ByteString, ByteString)>,
  #[smi] client_rid: Option<u32>,
  has_body: bool,
  #[buffer] data: Option<JsBuffer>,
  #[smi] resource: Option<ResourceId>,
) -> Result<FetchReturn, FetchError> {
  // A custom client (Deno.createHttpClient) may opt into allowing a
  // caller-set Host header; the default client never does.
  let (client, allow_host) = if let Some(rid) = client_rid {
    let r = state.resource_table.get::<HttpClientResource>(rid)?;
    (r.client.clone(), r.allow_host)
  } else {
    (get_or_create_client_from_state(state)?, false)
  };
  let method = Method::from_bytes(&method)?;
  let mut url = Url::parse(&url)?;
  // Check scheme before asking for net permission
  let scheme = url.scheme();
  let (request_rid, cancel_handle_rid) = match scheme {
    "file" => {
      if method != Method::GET {
        return Err(FetchError::FsNotGet(method));
      }
      // Clone the handler out of Options so `state` can be re-borrowed
      // mutably by the handler itself.
      let Options {
        file_fetch_handler, ..
      } = state.borrow_mut::<Options>();
      let file_fetch_handler = file_fetch_handler.clone();
      let (future, maybe_cancel_handle) =
        file_fetch_handler.fetch_file(state, &url);
      let request_rid = state
        .resource_table
        .add(FetchRequestResource { future, url });
      let maybe_cancel_handle_rid = maybe_cancel_handle
        .map(|ch| state.resource_table.add(FetchCancelHandle(ch)));
      (request_rid, maybe_cancel_handle_rid)
    }
    "http" | "https" => {
      let permissions = state.borrow_mut::<PermissionsContainer>();
      permissions.check_net_url(&url, "fetch()")?;
      // Strip userinfo from the URL; it becomes an Authorization header
      // further below.
      let maybe_authority = extract_authority(&mut url);
      let uri = url
        .as_str()
        .parse::<Uri>()
        .map_err(|_| FetchError::InvalidUrl(url.clone()))?;
      let mut con_len = None;
      let body = if has_body {
        match (data, resource) {
          (Some(data), _) => {
            // If a body is passed, we use it, and don't return a body for streaming.
            con_len = Some(data.len() as u64);
            ReqBody::full(data.to_vec().into())
          }
          (_, Some(resource)) => {
            let resource = state.resource_table.take_any(resource)?;
            // Only set Content-Length when the resource reports an exact
            // non-zero size (lower bound == upper bound).
            match resource.size_hint() {
              (body_size, Some(n)) if body_size == n && body_size > 0 => {
                con_len = Some(body_size);
              }
              _ => {}
            }
            ReqBody::streaming(ResourceToBodyAdapter::new(resource))
          }
          // `has_body` guarantees one of `data`/`resource` is set.
          (None, None) => unreachable!(),
        }
      } else {
        // POST and PUT requests should always have a 0 length content-length,
        // if there is no body. https://fetch.spec.whatwg.org/#http-network-or-cache-fetch
        if matches!(method, Method::POST | Method::PUT) {
          con_len = Some(0);
        }
        ReqBody::empty()
      };
      let mut request = http::Request::new(body);
      *request.method_mut() = method.clone();
      *request.uri_mut() = uri.clone();
      if let Some((username, password)) = maybe_authority {
        request.headers_mut().insert(
          AUTHORIZATION,
          proxy::basic_auth(&username, password.as_deref()),
        );
      }
      if let Some(len) = con_len {
        request.headers_mut().insert(CONTENT_LENGTH, len.into());
      }
      // Copy caller headers, refusing Host (unless allowed by the custom
      // client) and Content-Length (computed above).
      for (key, value) in headers {
        let name = HeaderName::from_bytes(&key)?;
        let v = HeaderValue::from_bytes(&value)?;
        if (name != HOST || allow_host) && name != CONTENT_LENGTH {
          request.headers_mut().append(name, v);
        }
      }
      if request.headers().contains_key(RANGE) {
        // https://fetch.spec.whatwg.org/#http-network-or-cache-fetch step 18
        // If httpRequest's header list contains `Range`, then append (`Accept-Encoding`, `identity`)
        request
          .headers_mut()
          .insert(ACCEPT_ENCODING, HeaderValue::from_static("identity"));
      }
      // Give the embedder a last chance to mutate the outgoing request.
      let options = state.borrow::<Options>();
      if let Some(request_builder_hook) = options.request_builder_hook {
        request_builder_hook(&mut request)
          .map_err(FetchError::RequestBuilderHook)?;
      }
      let cancel_handle = CancelHandle::new_rc();
      let cancel_handle_ = cancel_handle.clone();
      let fut = async move {
        client
          .send(request)
          .map_err(Into::into)
          .or_cancel(cancel_handle_)
          .await
      };
      let request_rid = state.resource_table.add(FetchRequestResource {
        future: Box::pin(fut),
        url,
      });
      let cancel_handle_rid =
        state.resource_table.add(FetchCancelHandle(cancel_handle));
      (request_rid, Some(cancel_handle_rid))
    }
    "data" => {
      // Decode the data URL now and store an already-resolved response.
      let data_url =
        DataUrl::process(url.as_str()).map_err(FetchError::DataUrl)?;
      let (body, _) = data_url.decode_to_vec().map_err(FetchError::Base64)?;
      let body = http_body_util::Full::new(body.into())
        .map_err(|never| match never {})
        .boxed();
      let response = http::Response::builder()
        .status(http::StatusCode::OK)
        .header(http::header::CONTENT_TYPE, data_url.mime_type().to_string())
        .body(body)?;
      let fut = async move { Ok(Ok(response)) };
      let request_rid = state.resource_table.add(FetchRequestResource {
        future: Box::pin(fut),
        url,
      });
      (request_rid, None)
    }
    "blob" => {
      // Blob URL resolution happens in the JS side of fetch. If we got here is
      // because the URL isn't an object URL.
      return Err(FetchError::BlobNotFound);
    }
    _ => return Err(FetchError::SchemeNotSupported(scheme.to_string())),
  };
  Ok(FetchReturn {
    request_rid,
    cancel_handle_rid,
  })
}
/// Response metadata handed back to JS by `op_fetch_send`; the body itself
/// is streamed separately through `response_rid`.
#[derive(Default, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct FetchResponse {
  pub status: u16,
  pub status_text: String,
  pub headers: Vec<(ByteString, ByteString)>,
  pub url: String,
  pub response_rid: ResourceId,
  pub content_length: Option<u64>,
  /// This field is populated if some error occurred which needs to be
  /// reconstructed in the JS side to set the error _cause_.
  /// In the tuple, the first element is an error message and the second one is
  /// an error cause.
  pub error: Option<(String, String)>,
}
/// Await the in-flight request registered by `op_fetch` and convert the
/// resulting response into a `FetchResponse`, registering the body as a
/// `FetchResponseResource` for streaming reads.
#[op2(async)]
#[serde]
pub async fn op_fetch_send(
  state: Rc<RefCell<OpState>>,
  #[smi] rid: ResourceId,
) -> Result<FetchResponse, FetchError> {
  let request = state
    .borrow_mut()
    .resource_table
    .take::<FetchRequestResource>(rid)?;
  // `take` removed the table's reference, so ours must be the only one;
  // a second concurrent send on the same rid is a caller bug.
  let request = Rc::try_unwrap(request)
    .ok()
    .expect("multiple op_fetch_send ongoing");
  let res = match request.future.await {
    Ok(Ok(res)) => res,
    Ok(Err(err)) => {
      // We're going to try and rescue the error cause from a stream and return it from this fetch.
      // If any error in the chain is a hyper body error, return that as a special result we can use to
      // reconstruct an error chain (eg: `new TypeError(..., { cause: new Error(...) })`).
      // TODO(mmastrac): it would be a lot easier if we just passed a v8::Global through here instead
      if let FetchError::ClientSend(err_src) = &err
        && let Some(client_err) = std::error::Error::source(&err_src.source)
        && let Some(err_src) = client_err.downcast_ref::<hyper::Error>()
        && let Some(err_src) = std::error::Error::source(err_src)
      {
        return Ok(FetchResponse {
          error: Some((err.to_string(), err_src.to_string())),
          ..Default::default()
        });
      }
      return Err(err);
    }
    // Outer Err means the cancel handle fired.
    Err(_) => return Err(FetchError::RequestCanceled),
  };
  let status = res.status();
  let url = request.url.into();
  let mut res_headers = Vec::new();
  for (key, val) in res.headers().iter() {
    res_headers.push((key.as_str().into(), val.as_bytes().into()));
  }
  // Exact body size, when hyper knows it (e.g. from Content-Length).
  let content_length = hyper::body::Body::size_hint(res.body()).exact();
  let response_rid = state
    .borrow_mut()
    .resource_table
    .add(FetchResponseResource::new(res, content_length));
  Ok(FetchResponse {
    status: status.as_u16(),
    status_text: status.canonical_reason().unwrap_or("").to_string(),
    headers: res_headers,
    url,
    response_rid,
    content_length,
    error: None,
  })
}
/// Outer `Result` carries cancellation; inner carries the fetch outcome.
type CancelableResponseResult =
  Result<Result<http::Response<ResBody>, FetchError>, Canceled>;
/// An in-flight fetch request: the pending response future plus the final
/// request URL (reported back to JS after redirects/authority stripping).
pub struct FetchRequestResource {
  pub future: Pin<Box<dyn Future<Output = CancelableResponseResult>>>,
  pub url: Url,
}
impl Resource for FetchRequestResource {
  fn name(&self) -> Cow<'_, str> {
    "fetchRequest".into()
  }
}
/// Resource wrapper around a request's cancel handle; closing the resource
/// (e.g. via `AbortSignal` on the JS side) cancels the request future.
pub struct FetchCancelHandle(pub Rc<CancelHandle>);
impl Resource for FetchCancelHandle {
  fn name(&self) -> Cow<'_, str> {
    "fetchCancelHandle".into()
  }
  fn close(self: Rc<Self>) {
    self.0.cancel()
  }
}
/// Boxed stream of response body chunks.
type BytesStream =
  Pin<Box<dyn Stream<Item = Result<bytes::Bytes, std::io::Error>> + Unpin>>;
/// State of a response body: either the untouched response (so it can still
/// be upgraded) or an already-started peekable chunk stream.
pub enum FetchResponseReader {
  Start(http::Response<ResBody>),
  BodyReader(Peekable<BytesStream>),
}
impl Default for FetchResponseReader {
  /// An already-exhausted body reader; used by `mem::take` during the
  /// Start -> BodyReader transition.
  fn default() -> Self {
    let stream: BytesStream = Box::pin(deno_core::futures::stream::empty());
    Self::BodyReader(stream.peekable())
  }
}
/// Resource exposing a fetch response body to JS as a readable stream.
#[derive(Debug)]
pub struct FetchResponseResource {
  pub response_reader: AsyncRefCell<FetchResponseReader>,
  pub cancel: CancelHandle,
  /// Exact body size if known up front (drives `size_hint`).
  pub size: Option<u64>,
}
impl FetchResponseResource {
  pub fn new(response: http::Response<ResBody>, size: Option<u64>) -> Self {
    Self {
      response_reader: AsyncRefCell::new(FetchResponseReader::Start(response)),
      cancel: CancelHandle::default(),
      size,
    }
  }
  /// Upgrade the connection (e.g. for WebSocket). Only valid before any
  /// body read has happened — once in `BodyReader` state this is a bug.
  pub async fn upgrade(self) -> Result<hyper::upgrade::Upgraded, hyper::Error> {
    let reader = self.response_reader.into_inner();
    match reader {
      FetchResponseReader::Start(resp) => Ok(hyper::upgrade::on(resp).await?),
      _ => unreachable!(),
    }
  }
}
impl Resource for FetchResponseResource {
  fn name(&self) -> Cow<'_, str> {
    "fetchResponse".into()
  }
  /// Read up to `limit` bytes from the response body, lazily converting the
  /// stored response into a chunk stream on first read. Cancelable via the
  /// resource's cancel handle.
  fn read(self: Rc<Self>, limit: usize) -> AsyncResult<BufView> {
    Box::pin(async move {
      let mut reader =
        RcRef::map(&self, |r| &r.response_reader).borrow_mut().await;
      // Transition Start -> BodyReader exactly once, then borrow the reader.
      let body = loop {
        match &mut *reader {
          FetchResponseReader::BodyReader(reader) => break reader,
          FetchResponseReader::Start(_) => {}
        }
        match std::mem::take(&mut *reader) {
          FetchResponseReader::Start(resp) => {
            let stream: BytesStream = Box::pin(
              resp
                .into_body()
                .into_data_stream()
                .map(|r| r.map_err(std::io::Error::other)),
            );
            *reader = FetchResponseReader::BodyReader(stream.peekable());
          }
          FetchResponseReader::BodyReader(_) => unreachable!(),
        }
      };
      let fut = async move {
        let mut reader = Pin::new(body);
        loop {
          match reader.as_mut().peek_mut().await {
            Some(Ok(chunk)) if !chunk.is_empty() => {
              // Take at most `limit` bytes, leaving the remainder peeked
              // for the next read.
              let len = min(limit, chunk.len());
              let chunk = chunk.split_to(len);
              break Ok(chunk.into());
            }
            // This unwrap is safe because `peek_mut()` returned `Some`, and thus
            // currently has a peeked value that can be synchronously returned
            // from `next()`.
            //
            // The future returned from `next()` is always ready, so we can
            // safely call `await` on it without creating a race condition.
            Some(_) => match reader.as_mut().next().await.unwrap() {
              // Skip empty chunks; surface stream errors as TypeError.
              Ok(chunk) => assert!(chunk.is_empty()),
              Err(err) => break Err(JsErrorBox::type_error(err.to_string())),
            },
            None => break Ok(BufView::empty()),
          }
        }
      };
      let cancel_handle = RcRef::map(self, |r| &r.cancel);
      fut
        .try_or_cancel(cancel_handle)
        .await
        .map_err(JsErrorBox::from_err)
    })
  }
  fn size_hint(&self) -> (u64, Option<u64>) {
    (self.size.unwrap_or(0), self.size)
  }
  fn close(self: Rc<Self>) {
    self.cancel.cancel()
  }
}
/// Resource for a custom HTTP client created via `Deno.createHttpClient`.
/// `allow_host` lets requests through this client set the Host header.
pub struct HttpClientResource {
  pub client: Client,
  pub allow_host: bool,
}
impl Resource for HttpClientResource {
  fn name(&self) -> Cow<'_, str> {
    "httpClient".into()
  }
}
impl HttpClientResource {
  fn new(client: Client, allow_host: bool) -> Self {
    Self { client, allow_host }
  }
}
/// JS-supplied options for `Deno.createHttpClient`.
#[derive(Deserialize, Debug)]
#[serde(rename_all = "camelCase")]
pub struct CreateHttpClientArgs {
  ca_certs: Vec<String>,
  proxy: Option<Proxy>,
  pool_max_idle_per_host: Option<usize>,
  // Accepts true (default timeout), false (no timeout) or a number of
  // milliseconds; see the mapping in `op_fetch_custom_client`.
  pool_idle_timeout: Option<serde_json::Value>,
  #[serde(default = "default_true")]
  http1: bool,
  #[serde(default = "default_true")]
  http2: bool,
  #[serde(default)]
  allow_host: bool,
  local_address: Option<String>,
}
// serde default helper: both HTTP versions are enabled unless disabled.
fn default_true() -> bool {
  true
}
/// Create a custom HTTP client (`Deno.createHttpClient`) and register it as
/// a resource; returns its resource id.
///
/// Performs the permission checks appropriate for the configured proxy kind
/// (net for HTTP/TCP/vsock proxies, read-write open for unix sockets)
/// before building the client.
#[op2(stack_trace)]
#[smi]
#[allow(clippy::result_large_err)]
pub fn op_fetch_custom_client(
  state: &mut OpState,
  #[serde] mut args: CreateHttpClientArgs,
  #[cppgc] tls_keys: &TlsKeysHolder,
) -> Result<ResourceId, FetchError> {
  if let Some(proxy) = &mut args.proxy {
    let permissions = state.borrow_mut::<PermissionsContainer>();
    match proxy {
      Proxy::Http { url, .. } => {
        let url = Url::parse(url)?;
        permissions.check_net_url(&url, "Deno.createHttpClient()")?;
      }
      Proxy::Tcp { hostname, port } => {
        permissions
          .check_net(&(hostname, Some(*port)), "Deno.createHttpClient()")?;
      }
      Proxy::Unix {
        path: original_path,
      } => {
        let path = Path::new(original_path);
        let resolved_path = permissions
          .check_open(
            Cow::Borrowed(path),
            OpenAccessKind::ReadWriteNoFollow,
            Some("Deno.createHttpClient()"),
          )?
          .into_path();
        // The permission check may canonicalize the socket path; keep the
        // resolved form so the client dials the checked path.
        if path != resolved_path {
          *original_path = resolved_path.to_string_lossy().into_owned();
        }
      }
      Proxy::Vsock { cid, port } => {
        let permissions = state.borrow_mut::<PermissionsContainer>();
        permissions.check_net_vsock(*cid, *port, "Deno.createHttpClient()")?;
      }
    }
  }
  let options = state.borrow::<Options>();
  let ca_certs = args
    .ca_certs
    .into_iter()
    .map(|cert| cert.into_bytes())
    .collect::<Vec<_>>();
  let client = create_http_client(
    &options.user_agent,
    CreateHttpClientOptions {
      root_cert_store: options
        .root_cert_store()
        .map_err(HttpClientCreateError::RootCertStore)?,
      ca_certs,
      proxy: args.proxy,
      dns_resolver: dns::Resolver::default(),
      unsafely_ignore_certificate_errors: options
        .unsafely_ignore_certificate_errors
        .clone(),
      client_cert_chain_and_key: tls_keys.take().try_into().unwrap(),
      pool_max_idle_per_host: args.pool_max_idle_per_host,
      // Map the JSON value: true => keep default timeout (None),
      // false => disable the timeout (Some(None)),
      // number => timeout in milliseconds (Some(Some(ms))).
      pool_idle_timeout: args.pool_idle_timeout.and_then(
        |timeout| match timeout {
          serde_json::Value::Bool(true) => None,
          serde_json::Value::Bool(false) => Some(None),
          serde_json::Value::Number(specify) => {
            Some(Some(specify.as_u64().unwrap_or_default()))
          }
          _ => Some(None),
        },
      ),
      http1: args.http1,
      http2: args.http2,
      local_address: args.local_address,
      client_builder_hook: options.client_builder_hook,
    },
  )?;
  let rid = state
    .resource_table
    .add(HttpClientResource::new(client, args.allow_host));
  Ok(rid)
}
/// Options consumed by `create_http_client`.
#[derive(Debug, Clone)]
pub struct CreateHttpClientOptions {
  pub root_cert_store: Option<RootCertStore>,
  pub ca_certs: Vec<Vec<u8>>,
  pub proxy: Option<Proxy>,
  pub dns_resolver: dns::Resolver,
  pub unsafely_ignore_certificate_errors: Option<Vec<String>>,
  pub client_cert_chain_and_key: Option<TlsKey>,
  pub pool_max_idle_per_host: Option<usize>,
  /// None => library default; Some(None) => disabled; Some(Some(ms)).
  pub pool_idle_timeout: Option<Option<u64>>,
  pub http1: bool,
  pub http2: bool,
  pub local_address: Option<String>,
  pub client_builder_hook: Option<fn(HyperClientBuilder) -> HyperClientBuilder>,
}
impl Default for CreateHttpClientOptions {
  /// Both HTTP versions enabled; no proxy, certs, pooling overrides or hook.
  fn default() -> Self {
    CreateHttpClientOptions {
      root_cert_store: None,
      ca_certs: vec![],
      proxy: None,
      dns_resolver: dns::Resolver::default(),
      unsafely_ignore_certificate_errors: None,
      client_cert_chain_and_key: None,
      pool_max_idle_per_host: None,
      pool_idle_timeout: None,
      http1: true,
      http2: true,
      local_address: None,
      client_builder_hook: None,
    }
  }
}
/// Errors from building an HTTP client; surfaced to JS as TypeError except
/// `RootCertStore`, which inherits the wrapped error's class.
#[derive(Debug, thiserror::Error, deno_error::JsError)]
#[class(type)]
pub enum HttpClientCreateError {
  #[error(transparent)]
  Tls(deno_tls::TlsError),
  #[error("Illegal characters in User-Agent: received {0}")]
  InvalidUserAgent(String),
  #[error("Invalid address: {0}")]
  InvalidAddress(String),
  #[error("invalid proxy url")]
  InvalidProxyUrl,
  #[error(
    "Cannot create Http Client: either `http1` or `http2` needs to be set to true"
  )]
  HttpVersionSelectionInvalid,
  #[class(inherit)]
  #[error(transparent)]
  RootCertStore(JsErrorBox),
  #[error("Unix proxy is not supported on Windows")]
  UnixProxyNotSupportedOnWindows,
  #[error("Vsock proxy is not supported on this platform")]
  VsockProxyNotSupported,
}
/// Create new instance of async Client. This client supports
/// proxies and doesn't follow redirects.
pub fn create_http_client(
user_agent: &str,
options: CreateHttpClientOptions,
) -> Result<Client, HttpClientCreateError> {
let mut tls_config =
deno_tls::create_client_config(deno_tls::TlsClientConfigOptions {
root_cert_store: options.root_cert_store,
ca_certs: options.ca_certs,
unsafely_ignore_certificate_errors: options
.unsafely_ignore_certificate_errors,
unsafely_disable_hostname_verification: false,
cert_chain_and_key: options.client_cert_chain_and_key.into(),
socket_use: deno_tls::SocketUse::Http,
})
.map_err(HttpClientCreateError::Tls)?;
// Proxy TLS should not send ALPN
tls_config.alpn_protocols.clear();
let proxy_tls_config = Arc::from(tls_config.clone());
let mut alpn_protocols = vec![];
if options.http2 {
alpn_protocols.push("h2".into());
}
if options.http1 {
alpn_protocols.push("http/1.1".into());
}
tls_config.alpn_protocols = alpn_protocols;
let tls_config = Arc::from(tls_config);
let mut http_connector =
HttpConnector::new_with_resolver(options.dns_resolver.clone());
http_connector.enforce_http(false);
if let Some(local_address) = options.local_address {
let local_addr = local_address
.parse::<IpAddr>()
.map_err(|_| HttpClientCreateError::InvalidAddress(local_address))?;
http_connector.set_local_address(Some(local_addr));
}
let user_agent = user_agent.parse::<HeaderValue>().map_err(|_| {
HttpClientCreateError::InvalidUserAgent(user_agent.to_string())
})?;
let mut builder = HyperClientBuilder::new(TokioExecutor::new());
builder.timer(TokioTimer::new());
builder.pool_timer(TokioTimer::new());
if let Some(client_builder_hook) = options.client_builder_hook {
builder = client_builder_hook(builder);
}
let mut proxies = proxy::from_env();
if let Some(proxy) = options.proxy {
let intercept = match proxy {
Proxy::Http { url, basic_auth } => {
let target = proxy::Target::parse(&url)
.ok_or_else(|| HttpClientCreateError::InvalidProxyUrl)?;
let mut intercept = proxy::Intercept::all(target);
if let Some(basic_auth) = &basic_auth {
intercept.set_auth(&basic_auth.username, &basic_auth.password);
}
intercept
}
Proxy::Tcp {
hostname: host,
port,
} => {
let target = proxy::Target::new_tcp(host, port);
proxy::Intercept::all(target)
}
#[cfg(not(windows))]
Proxy::Unix { path } => {
let target = proxy::Target::new_unix(PathBuf::from(path));
proxy::Intercept::all(target)
}
#[cfg(windows)]
Proxy::Unix { .. } => {
return Err(HttpClientCreateError::UnixProxyNotSupportedOnWindows);
}
#[cfg(any(
target_os = "android",
target_os = "linux",
target_os = "macos"
))]
Proxy::Vsock { cid, port } => {
let target = proxy::Target::new_vsock(cid, port);
proxy::Intercept::all(target)
}
#[cfg(not(any(
target_os = "android",
target_os = "linux",
target_os = "macos"
)))]
Proxy::Vsock { .. } => {
return Err(HttpClientCreateError::VsockProxyNotSupported);
}
};
proxies.prepend(intercept);
}
let proxies = Arc::new(proxies);
let connector = proxy::ProxyConnector {
http: http_connector,
proxies,
tls: tls_config,
tls_proxy: proxy_tls_config,
user_agent: Some(user_agent.clone()),
};
if let Some(pool_max_idle_per_host) = options.pool_max_idle_per_host {
builder.pool_max_idle_per_host(pool_max_idle_per_host);
}
if let Some(pool_idle_timeout) = options.pool_idle_timeout {
builder.pool_idle_timeout(
pool_idle_timeout.map(std::time::Duration::from_millis),
);
}
match (options.http1, options.http2) {
(true, false) => {} // noop, handled by ALPN above
(false, true) => {
builder.http2_only(true);
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | true |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/ext/fetch/tests.rs | ext/fetch/tests.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::net::SocketAddr;
use std::sync::Arc;
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering::SeqCst;
use bytes::Bytes;
use fast_socks5::server::Config as Socks5Config;
use fast_socks5::server::Socks5Socket;
use http_body_util::BodyExt;
use tokio::io::AsyncReadExt;
use tokio::io::AsyncWriteExt;
use super::CreateHttpClientOptions;
use super::create_http_client;
use crate::dns;
static EXAMPLE_CRT: &[u8] = include_bytes!("../tls/testdata/example1_cert.der");
static EXAMPLE_KEY: &[u8] =
include_bytes!("../tls/testdata/example1_prikey.der");
/// Verifies that the hickory (userspace) resolver performs DNS resolution
/// without spawning blocking threads, while the `gai` resolver does spawn
/// one (getaddrinfo runs in `spawn_blocking`). Thread creation is observed
/// via a counter incremented from the runtime's `on_thread_start` hook.
#[test]
fn test_userspace_resolver() {
  let thread_counter = Arc::new(AtomicUsize::new(0));
  let thread_counter_ref = thread_counter.clone();
  let rt = tokio::runtime::Builder::new_current_thread()
    .enable_all()
    .on_thread_start(move || {
      thread_counter_ref.fetch_add(1, SeqCst);
    })
    .build()
    .unwrap();
  rt.block_on(async move {
    assert_eq!(thread_counter.load(SeqCst), 0);
    let src_addr = create_https_server(true).await;
    assert_eq!(src_addr.ip().to_string(), "127.0.0.1");
    // use `localhost` to ensure dns step happens.
    let addr = format!("localhost:{}", src_addr.port());
    let hickory = hickory_resolver::Resolver::builder_tokio().unwrap().build();
    assert_eq!(thread_counter.load(SeqCst), 0);
    rust_test_client_with_resolver(
      None,
      addr.clone(),
      "https",
      http::Version::HTTP_2,
      dns::Resolver::hickory_from_resolver(hickory),
    )
    .await;
    assert_eq!(thread_counter.load(SeqCst), 0, "userspace resolver shouldn't spawn new threads.");
    rust_test_client_with_resolver(
      None,
      addr.clone(),
      "https",
      http::Version::HTTP_2,
      dns::Resolver::gai(),
    )
    .await;
    assert_eq!(thread_counter.load(SeqCst), 1, "getaddrinfo is called inside spawn_blocking, so tokio spawn a new worker thread for it.");
  });
}
/// HTTP/1.1 fetch through a plaintext CONNECT proxy.
#[tokio::test]
async fn test_https_proxy_http11() {
  let src_addr = create_https_server(false).await;
  let prx_addr = create_http_proxy(src_addr).await;
  run_test_client(prx_addr, src_addr, "http", http::Version::HTTP_11).await;
}
/// HTTP/2 fetch through a plaintext CONNECT proxy.
#[tokio::test]
async fn test_https_proxy_h2() {
  let src_addr = create_https_server(true).await;
  let prx_addr = create_http_proxy(src_addr).await;
  run_test_client(prx_addr, src_addr, "http", http::Version::HTTP_2).await;
}
/// HTTP/2 fetch through a TLS-wrapped CONNECT proxy.
#[tokio::test]
async fn test_https_proxy_https_h2() {
  let src_addr = create_https_server(true).await;
  let prx_addr = create_https_proxy(src_addr).await;
  run_test_client(prx_addr, src_addr, "https", http::Version::HTTP_2).await;
}
/// HTTP/1.1 fetch through a SOCKS5 proxy.
#[tokio::test]
async fn test_socks_proxy_http11() {
  let src_addr = create_https_server(false).await;
  let prx_addr = create_socks_proxy(src_addr).await;
  run_test_client(prx_addr, src_addr, "socks5", http::Version::HTTP_11).await;
}
/// HTTP/2 fetch through a SOCKS5 proxy.
#[tokio::test]
async fn test_socks_proxy_h2() {
  let src_addr = create_https_server(true).await;
  let prx_addr = create_socks_proxy(src_addr).await;
  run_test_client(prx_addr, src_addr, "socks5", http::Version::HTTP_2).await;
}
/// Build a client with the given resolver (and optional proxy of the given
/// scheme), fetch `https://{src_addr}/foo`, and assert a 200 response with
/// the expected HTTP version and the test server's fixed body.
async fn rust_test_client_with_resolver(
  prx_addr: Option<SocketAddr>,
  src_addr: String,
  proto: &str,
  ver: http::Version,
  resolver: dns::Resolver,
) {
  let client = create_http_client(
    "fetch/test",
    CreateHttpClientOptions {
      root_cert_store: None,
      ca_certs: vec![],
      proxy: prx_addr.map(|p| deno_tls::Proxy::Http {
        url: format!("{}://{}", proto, p),
        basic_auth: None,
      }),
      // Test server uses a self-signed cert.
      unsafely_ignore_certificate_errors: Some(vec![]),
      client_cert_chain_and_key: None,
      pool_max_idle_per_host: None,
      pool_idle_timeout: None,
      dns_resolver: resolver,
      http1: true,
      http2: true,
      local_address: None,
      client_builder_hook: None,
    },
  )
  .unwrap();
  let req = http::Request::builder()
    .uri(format!("https://{}/foo", src_addr))
    .body(crate::ReqBody::empty())
    .unwrap();
  let resp = client.send(req).await.unwrap();
  assert_eq!(resp.status(), http::StatusCode::OK);
  assert_eq!(resp.version(), ver);
  let hello = resp.collect().await.unwrap().to_bytes();
  assert_eq!(hello, "hello from server");
}
async fn run_test_client(
prx_addr: SocketAddr,
src_addr: SocketAddr,
proto: &str,
ver: http::Version,
) {
rust_test_client_with_resolver(
Some(prx_addr),
src_addr.to_string(),
proto,
ver,
Default::default(),
)
.await
}
/// Spawn a TLS test server on an ephemeral localhost port that answers
/// every request with "hello from server". Negotiates h2 via ALPN only
/// when `allow_h2` is set; otherwise serves HTTP/1.1. Returns its address.
async fn create_https_server(allow_h2: bool) -> SocketAddr {
  let _ = rustls::crypto::aws_lc_rs::default_provider().install_default();
  let mut tls_config = deno_tls::rustls::server::ServerConfig::builder()
    .with_no_client_auth()
    .with_single_cert(
      vec![EXAMPLE_CRT.into()],
      webpki::types::PrivateKeyDer::try_from(EXAMPLE_KEY).unwrap(),
    )
    .unwrap();
  if allow_h2 {
    tls_config.alpn_protocols.push("h2".into());
  }
  tls_config.alpn_protocols.push("http/1.1".into());
  let tls_acceptor = tokio_rustls::TlsAcceptor::from(Arc::from(tls_config));
  let src_tcp = tokio::net::TcpListener::bind("127.0.0.1:0").await.unwrap();
  let src_addr = src_tcp.local_addr().unwrap();
  tokio::spawn(async move {
    while let Ok((sock, _)) = src_tcp.accept().await {
      let conn = tls_acceptor.accept(sock).await.unwrap();
      // Pick the hyper connection builder matching the negotiated protocol.
      if conn.get_ref().1.alpn_protocol() == Some(b"h2") {
        let fut = hyper::server::conn::http2::Builder::new(
          hyper_util::rt::TokioExecutor::new(),
        )
        .serve_connection(
          hyper_util::rt::TokioIo::new(conn),
          hyper::service::service_fn(|_req| async {
            Ok::<_, std::convert::Infallible>(http::Response::new(
              http_body_util::Full::<Bytes>::new("hello from server".into()),
            ))
          }),
        );
        tokio::spawn(fut);
      } else {
        let fut = hyper::server::conn::http1::Builder::new().serve_connection(
          hyper_util::rt::TokioIo::new(conn),
          hyper::service::service_fn(|_req| async {
            Ok::<_, std::convert::Infallible>(http::Response::new(
              http_body_util::Full::<Bytes>::new("hello from server".into()),
            ))
          }),
        );
        tokio::spawn(fut);
      }
    }
  });
  src_addr
}
/// Spawn a minimal plaintext CONNECT proxy that tunnels every connection to
/// `src_addr`. Asserts the first bytes are a CONNECT request, replies
/// `200 OK`, then copies bytes in both directions. Returns its address.
async fn create_http_proxy(src_addr: SocketAddr) -> SocketAddr {
  let prx_tcp = tokio::net::TcpListener::bind("127.0.0.1:0").await.unwrap();
  let prx_addr = prx_tcp.local_addr().unwrap();
  tokio::spawn(async move {
    while let Ok((mut sock, _)) = prx_tcp.accept().await {
      let fut = async move {
        let mut buf = [0u8; 4096];
        let _n = sock.read(&mut buf).await.unwrap();
        assert_eq!(&buf[..7], b"CONNECT");
        let mut dst_tcp =
          tokio::net::TcpStream::connect(src_addr).await.unwrap();
        sock.write_all(b"HTTP/1.1 200 OK\r\n\r\n").await.unwrap();
        tokio::io::copy_bidirectional(&mut sock, &mut dst_tcp)
          .await
          .unwrap();
      };
      tokio::spawn(fut);
    }
  });
  prx_addr
}
/// Spawn a TLS-wrapped CONNECT proxy tunneling to `src_addr`. The proxy
/// advertises ALPN protocols but asserts the client negotiated none —
/// exercising that our proxy connector strips ALPN on the proxy hop.
async fn create_https_proxy(src_addr: SocketAddr) -> SocketAddr {
  let mut tls_config = deno_tls::rustls::server::ServerConfig::builder()
    .with_no_client_auth()
    .with_single_cert(
      vec![EXAMPLE_CRT.into()],
      webpki::types::PrivateKeyDer::try_from(EXAMPLE_KEY).unwrap(),
    )
    .unwrap();
  // Set ALPN, to check our proxy connector. But we shouldn't receive anything.
  tls_config.alpn_protocols.push("h2".into());
  tls_config.alpn_protocols.push("http/1.1".into());
  let tls_acceptor = tokio_rustls::TlsAcceptor::from(Arc::from(tls_config));
  let prx_tcp = tokio::net::TcpListener::bind("127.0.0.1:0").await.unwrap();
  let prx_addr = prx_tcp.local_addr().unwrap();
  tokio::spawn(async move {
    while let Ok((sock, _)) = prx_tcp.accept().await {
      let mut sock = tls_acceptor.accept(sock).await.unwrap();
      assert_eq!(sock.get_ref().1.alpn_protocol(), None);
      let fut = async move {
        let mut buf = [0u8; 4096];
        let _n = sock.read(&mut buf).await.unwrap();
        assert_eq!(&buf[..7], b"CONNECT");
        let mut dst_tcp =
          tokio::net::TcpStream::connect(src_addr).await.unwrap();
        sock.write_all(b"HTTP/1.1 200 OK\r\n\r\n").await.unwrap();
        tokio::io::copy_bidirectional(&mut sock, &mut dst_tcp)
          .await
          .unwrap();
      };
      tokio::spawn(fut);
    }
  });
  prx_addr
}
/// Spawn a SOCKS5 proxy (via fast_socks5) that forwards every upgraded
/// connection to `src_addr`. Returns its address.
async fn create_socks_proxy(src_addr: SocketAddr) -> SocketAddr {
  let prx_tcp = tokio::net::TcpListener::bind("127.0.0.1:0").await.unwrap();
  let prx_addr = prx_tcp.local_addr().unwrap();
  tokio::spawn(async move {
    while let Ok((sock, _)) = prx_tcp.accept().await {
      let cfg: Socks5Config = Default::default();
      // Complete the SOCKS5 handshake before tunneling bytes.
      let mut socks_conn = Socks5Socket::new(sock, cfg.into())
        .upgrade_to_socks5()
        .await
        .unwrap();
      let fut = async move {
        let mut dst_tcp =
          tokio::net::TcpStream::connect(src_addr).await.unwrap();
        tokio::io::copy_bidirectional(&mut socks_conn, &mut dst_tcp)
          .await
          .unwrap();
      };
      tokio::spawn(fut);
    }
  });
  prx_addr
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/ext/fetch/fs_fetch_handler.rs | ext/fetch/fs_fetch_handler.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::borrow::Cow;
use std::rc::Rc;
use deno_core::CancelFuture;
use deno_core::OpState;
use deno_core::futures::FutureExt;
use deno_core::futures::TryFutureExt;
use deno_core::futures::TryStreamExt;
use deno_core::url::Url;
use deno_error::JsErrorBox;
use deno_fs::OpenOptions;
use deno_fs::open_options_for_checked_path;
use deno_permissions::OpenAccessKind;
use deno_permissions::PermissionsContainer;
use http::StatusCode;
use http_body_util::BodyExt;
use tokio_util::io::ReaderStream;
use crate::CancelHandle;
use crate::CancelableResponseFuture;
use crate::FetchHandler;
/// An implementation which tries to read file URLs from the file system via
/// tokio::fs.
#[derive(Clone)]
pub struct FsFetchHandler;
impl FetchHandler for FsFetchHandler {
  /// Serve a `file:` URL: convert it to a path, run the read-permission
  /// check synchronously (while `state` is available), then asynchronously
  /// open the file and stream it as a 200 response body. All failures are
  /// collapsed into the opaque `NetworkError`.
  fn fetch_file(
    &self,
    state: &mut OpState,
    url: &Url,
  ) -> (CancelableResponseFuture, Option<Rc<CancelHandle>>) {
    let cancel_handle = CancelHandle::new_rc();
    let path = match url.to_file_path() {
      Ok(path) => path,
      Err(_) => {
        // Not a valid file path: return an immediately-failing (but still
        // cancelable) future.
        let fut = async move { Err::<_, _>(()) };
        return (
          fut
            .map_err(move |_| super::FetchError::NetworkError)
            .or_cancel(&cancel_handle)
            .boxed_local(),
          Some(cancel_handle),
        );
      }
    };
    // Permission check happens here, outside the future, because it needs
    // `&mut OpState`.
    let path_and_opts_result = {
      state
        .borrow::<PermissionsContainer>()
        .check_open(Cow::Owned(path), OpenAccessKind::Read, Some("fetch()"))
        .map(|path| {
          (
            open_options_for_checked_path(
              OpenOptions {
                read: true,
                ..Default::default()
              },
              &path,
            ),
            path.into_owned(),
          )
        })
    };
    let response_fut = async move {
      let (opts, path) = path_and_opts_result?;
      let file = tokio::fs::OpenOptions::from(opts)
        .open(path)
        .await
        .map_err(|_| super::FetchError::NetworkError)?;
      // Stream the file contents as hyper data frames.
      let stream = ReaderStream::new(file)
        .map_ok(hyper::body::Frame::data)
        .map_err(JsErrorBox::from_err);
      let body = http_body_util::StreamBody::new(stream).boxed();
      let response = http::Response::builder()
        .status(StatusCode::OK)
        .body(body)
        .map_err(move |_| super::FetchError::NetworkError)?;
      Ok::<_, _>(response)
    }
    .or_cancel(&cancel_handle)
    .boxed_local();
    (response_fut, Some(cancel_handle))
  }
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/ext/fetch/dns.rs | ext/fetch/dns.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::future::Future;
use std::io;
use std::net::SocketAddr;
use std::pin::Pin;
use std::sync::Arc;
use std::task::Poll;
use std::task::{self};
use std::vec;
use hickory_resolver::name_server::TokioConnectionProvider;
use hyper_util::client::legacy::connect::dns::GaiResolver;
use hyper_util::client::legacy::connect::dns::Name;
use tokio::task::JoinHandle;
use tower::Service;
#[allow(clippy::large_enum_variant)]
#[derive(Clone, Debug)]
pub enum Resolver {
  /// A resolver using blocking `getaddrinfo` calls in a threadpool.
  Gai(GaiResolver),
  /// hickory-resolver's userspace resolver.
  Hickory(hickory_resolver::Resolver<TokioConnectionProvider>),
  /// A custom resolver that implements `Resolve`.
  Custom(Arc<dyn Resolve>),
}

/// Alias for the `Future` type returned by a custom DNS resolver.
// The future has to be `Send` as `tokio::spawn` is used to execute the future.
pub type Resolving =
  Pin<Box<dyn Future<Output = Result<SocketAddrs, io::Error>> + Send>>;

/// A trait for customizing DNS resolution in ext/fetch.
// The resolver needs to be `Send` and `Sync` for two reasons. One is it is
// wrapped inside an `Arc` and will be cloned and moved to an async block to
// perform DNS resolution. That async block will be executed by `tokio::spawn`,
// so to make that async block `Send`, `Arc<dyn Resolve>` needs to be
// `Send`. The other is `Resolver` needs to be `Send` to make the wrapping
// `HttpConnector` `Send`.
pub trait Resolve: Send + Sync + std::fmt::Debug {
  /// Resolves `name` to a set of socket addresses.
  fn resolve(&self, name: Name) -> Resolving;
}
impl Default for Resolver {
fn default() -> Self {
Self::gai()
}
}
impl Resolver {
  /// Creates the default `getaddrinfo`-based resolver.
  pub fn gai() -> Self {
    Self::Gai(GaiResolver::new())
  }

  /// Create a [`AsyncResolver`] from system conf.
  pub fn hickory() -> Result<Self, hickory_resolver::ResolveError> {
    Ok(Self::Hickory(
      hickory_resolver::Resolver::builder_tokio()?.build(),
    ))
  }

  /// Wraps an already-configured hickory resolver.
  pub fn hickory_from_resolver(
    resolver: hickory_resolver::Resolver<TokioConnectionProvider>,
  ) -> Self {
    Self::Hickory(resolver)
  }
}
/// The set of socket addresses a single lookup resolves to.
type SocketAddrs = vec::IntoIter<SocketAddr>;

/// Future returned by [`Resolver`]'s `Service::call`; wraps the join handle
/// of the tokio task that performs the actual lookup.
pub struct ResolveFut {
  inner: JoinHandle<Result<SocketAddrs, io::Error>>,
}
impl Future for ResolveFut {
  type Output = Result<SocketAddrs, io::Error>;

  /// Polls the spawned lookup task, flattening the `JoinError` layer into an
  /// `io::Error`.
  fn poll(
    mut self: Pin<&mut Self>,
    cx: &mut task::Context<'_>,
  ) -> Poll<Self::Output> {
    match Pin::new(&mut self.inner).poll(cx) {
      Poll::Pending => Poll::Pending,
      Poll::Ready(Ok(Ok(addrs))) => Poll::Ready(Ok(addrs)),
      Poll::Ready(Ok(Err(e))) => Poll::Ready(Err(e)),
      Poll::Ready(Err(join_err)) => {
        // A cancelled task maps to `Interrupted`; any other join failure
        // (e.g. a panic inside the task) becomes a generic I/O error.
        let err = if join_err.is_cancelled() {
          io::Error::new(io::ErrorKind::Interrupted, join_err)
        } else {
          io::Error::other(join_err)
        };
        Poll::Ready(Err(err))
      }
    }
  }
}
impl Service<Name> for Resolver {
  type Response = SocketAddrs;
  type Error = io::Error;
  type Future = ResolveFut;

  // Lookups are spawned onto the runtime, so this service is always ready.
  fn poll_ready(
    &mut self,
    _cx: &mut task::Context<'_>,
  ) -> Poll<Result<(), io::Error>> {
    Poll::Ready(Ok(()))
  }

  /// Starts a DNS lookup for `name` on a spawned tokio task and returns a
  /// future for its result.
  fn call(&mut self, name: Name) -> Self::Future {
    let task = match self {
      Resolver::Gai(gai_resolver) => {
        let mut resolver = gai_resolver.clone();
        tokio::spawn(async move {
          let result = resolver.call(name).await?;
          let x: Vec<_> = result.into_iter().collect();
          let iter: SocketAddrs = x.into_iter();
          Ok(iter)
        })
      }
      Resolver::Hickory(async_resolver) => {
        let resolver = async_resolver.clone();
        tokio::spawn(async move {
          let result = resolver.lookup_ip(name.as_str()).await?;
          // hickory only yields IPs; port 0 is a placeholder the caller
          // replaces with the real destination port.
          let x: Vec<_> =
            result.into_iter().map(|x| SocketAddr::new(x, 0)).collect();
          let iter: SocketAddrs = x.into_iter();
          Ok(iter)
        })
      }
      Resolver::Custom(resolver) => {
        let resolver = resolver.clone();
        tokio::spawn(async move { resolver.resolve(name).await })
      }
    };
    ResolveFut { inner: task }
  }
}
#[cfg(test)]
mod tests {
  use std::str::FromStr;

  use super::*;

  // A resolver that resolves any name into the same address.
  #[derive(Debug)]
  struct DebugResolver(SocketAddr);

  impl Resolve for DebugResolver {
    fn resolve(&self, _name: Name) -> Resolving {
      let addr = self.0;
      Box::pin(async move { Ok(vec![addr].into_iter()) })
    }
  }

  #[tokio::test]
  async fn custom_dns_resolver() {
    // Any hostname must resolve to the fixed address configured above.
    let mut resolver = Resolver::Custom(Arc::new(DebugResolver(
      "127.0.0.1:8080".parse().unwrap(),
    )));
    let mut addr = resolver
      .call(Name::from_str("foo.com").unwrap())
      .await
      .unwrap();
    let addr = addr.next().unwrap();
    assert_eq!(addr, "127.0.0.1:8080".parse().unwrap());
  }
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/ext/fetch/proxy.rs | ext/fetch/proxy.rs | // Copyright 2018-2025 the Deno authors. MIT license.
//! Parts of this module should be able to be replaced with other crates
//! eventually, once generic versions appear in hyper-util, et al.
use std::env;
use std::future::Future;
use std::net::IpAddr;
#[cfg(not(windows))]
use std::path::PathBuf;
use std::pin::Pin;
use std::sync::Arc;
use std::task::Context;
use std::task::Poll;
use deno_core::futures::TryFutureExt;
use deno_tls::rustls::ClientConfig as TlsConfig;
use http::Uri;
use http::header::HeaderValue;
use http::uri::Scheme;
use hyper_rustls::HttpsConnector;
use hyper_rustls::MaybeHttpsStream;
use hyper_util::client::legacy::connect::Connected;
use hyper_util::client::legacy::connect::Connection;
use hyper_util::rt::TokioIo;
use ipnet::IpNet;
use percent_encoding::percent_decode_str;
use tokio::net::TcpStream;
#[cfg(not(windows))]
use tokio::net::UnixStream;
use tokio_rustls::TlsConnector;
use tokio_rustls::client::TlsStream;
use tokio_socks::tcp::Socks5Stream;
#[cfg(any(
target_os = "android",
target_os = "linux",
target_os = "macos"
))]
use tokio_vsock::VsockStream;
use tower_service::Service;
/// A connector that routes requests either directly or through a configured
/// proxy (HTTP(S) CONNECT, SOCKS5, raw TCP forward, Unix socket, or vsock).
#[derive(Debug, Clone)]
pub(crate) struct ProxyConnector<C> {
  pub(crate) http: C,
  pub(crate) proxies: Arc<Proxies>,
  /// TLS config when destination is not a proxy
  pub(crate) tls: Arc<TlsConfig>,
  /// TLS config when destination is a proxy
  /// Notably, does not include ALPN
  pub(crate) tls_proxy: Arc<TlsConfig>,
  pub(crate) user_agent: Option<HeaderValue>,
}

impl<C> ProxyConnector<C> {
  /// Returns a copy of this connector restricted to HTTP/1.1 via ALPN, or
  /// `None` if the current ALPN list is non-empty and excludes `http/1.1`.
  pub(crate) fn h1_only(self) -> Option<ProxyConnector<C>>
  where
    C: Service<Uri>,
  {
    if !self.tls.alpn_protocols.is_empty() {
      // `?` bails out when http/1.1 was explicitly not offered.
      self.tls.alpn_protocols.iter().find(|p| *p == b"http/1.1")?;
    }
    let mut tls = (*self.tls).clone();
    tls.alpn_protocols = vec![b"http/1.1".to_vec()];
    Some(ProxyConnector {
      http: self.http,
      proxies: self.proxies,
      tls: Arc::new(tls),
      tls_proxy: self.tls_proxy,
      user_agent: self.user_agent,
    })
  }

  /// Same as [`Self::h1_only`] but restricting to HTTP/2 (`h2`).
  pub(crate) fn h2_only(self) -> Option<ProxyConnector<C>>
  where
    C: Service<Uri>,
  {
    if !self.tls.alpn_protocols.is_empty() {
      self.tls.alpn_protocols.iter().find(|p| *p == b"h2")?;
    }
    let mut tls = (*self.tls).clone();
    tls.alpn_protocols = vec![b"h2".to_vec()];
    Some(ProxyConnector {
      http: self.http,
      proxies: self.proxies,
      tls: Arc::new(tls),
      tls_proxy: self.tls_proxy,
      user_agent: self.user_agent,
    })
  }
}
/// The full proxy configuration: an ordered list of intercept rules plus an
/// optional NO_PROXY exclusion list.
#[derive(Debug)]
pub(crate) struct Proxies {
  no: Option<NoProxy>,
  intercepts: Vec<Intercept>,
}

/// One proxy rule: which destination schemes it applies to (`filter`) and
/// where matching traffic is sent (`target`).
#[derive(Clone)]
pub(crate) struct Intercept {
  filter: Filter,
  target: Target,
}

/// Where intercepted traffic is forwarded.
#[derive(Clone)]
pub(crate) enum Target {
  Http {
    dst: Uri,
    auth: Option<HeaderValue>,
  },
  Https {
    dst: Uri,
    auth: Option<HeaderValue>,
  },
  Socks {
    dst: Uri,
    // SOCKS auth is (username, password) rather than a header value.
    auth: Option<(String, String)>,
  },
  Tcp {
    hostname: String,
    port: u16,
  },
  #[cfg(not(windows))]
  Unix {
    path: PathBuf,
  },
  #[cfg(any(target_os = "android", target_os = "linux", target_os = "macos"))]
  Vsock {
    cid: u32,
    port: u32,
  },
}

/// Which destination schemes an [`Intercept`] applies to.
#[derive(Debug, Clone, Copy)]
enum Filter {
  Http,
  Https,
  All,
}
pub(crate) fn from_env() -> Proxies {
let mut intercepts = Vec::new();
match parse_env_var("ALL_PROXY", Filter::All) {
Some(proxy) => {
intercepts.push(proxy);
}
_ => {
if let Some(proxy) = parse_env_var("all_proxy", Filter::All) {
intercepts.push(proxy);
}
}
}
match parse_env_var("HTTPS_PROXY", Filter::Https) {
Some(proxy) => {
intercepts.push(proxy);
}
_ => {
if let Some(proxy) = parse_env_var("https_proxy", Filter::Https) {
intercepts.push(proxy);
}
}
}
// In a CGI context, headers become environment variables. So, "Proxy:" becomes HTTP_PROXY.
// To prevent an attacker from injecting a proxy, check if we are in CGI.
if env::var_os("REQUEST_METHOD").is_none() {
match parse_env_var("HTTP_PROXY", Filter::Http) {
Some(proxy) => {
intercepts.push(proxy);
}
_ => {
if let Some(proxy) = parse_env_var("http_proxy", Filter::Http) {
intercepts.push(proxy);
}
}
}
}
let no = NoProxy::from_env();
Proxies { intercepts, no }
}
/// Builds a `Basic` authorization header value from a username and optional
/// password, base64-encoding `user:pass` per RFC 7617.
pub fn basic_auth(user: &str, pass: Option<&str>) -> HeaderValue {
  use std::io::Write;

  use base64::prelude::BASE64_STANDARD;
  use base64::write::EncoderWriter;

  let mut buf = b"Basic ".to_vec();
  {
    // The encoder writes base64 directly into `buf`; scoped so it flushes
    // (on drop) before the buffer is read below.
    let mut encoder = EncoderWriter::new(&mut buf, &BASE64_STANDARD);
    let _ = write!(encoder, "{user}:");
    if let Some(password) = pass {
      let _ = write!(encoder, "{password}");
    }
  }
  let mut header =
    HeaderValue::from_bytes(&buf).expect("base64 is always valid HeaderValue");
  // Mark sensitive so the credentials are not logged/compressed naively.
  header.set_sensitive(true);
  header
}
/// Reads `name` from the environment and parses it as a proxy [`Target`],
/// tagging the result with the given scheme `filter`.
fn parse_env_var(name: &str, filter: Filter) -> Option<Intercept> {
  env::var(name)
    .ok()
    .as_deref()
    .and_then(Target::parse)
    .map(|target| Intercept { filter, target })
}
impl Intercept {
  /// Creates an intercept that applies to every destination scheme.
  pub(crate) fn all(target: Target) -> Self {
    Intercept {
      filter: Filter::All,
      target,
    }
  }

  /// Attaches credentials to the target, using the auth mechanism
  /// appropriate for its protocol (basic auth header vs. SOCKS user/pass).
  pub(crate) fn set_auth(&mut self, user: &str, pass: &str) {
    match self.target {
      Target::Http { ref mut auth, .. } => {
        *auth = Some(basic_auth(user, Some(pass)));
      }
      Target::Https { ref mut auth, .. } => {
        *auth = Some(basic_auth(user, Some(pass)));
      }
      Target::Socks { ref mut auth, .. } => {
        *auth = Some((user.into(), pass.into()));
      }
      Target::Tcp { .. } => {
        // Auth not supported for Tcp sockets
      }
      #[cfg(not(windows))]
      Target::Unix { .. } => {
        // Auth not supported for Unix sockets
      }
      #[cfg(any(
        target_os = "android",
        target_os = "linux",
        target_os = "macos"
      ))]
      Target::Vsock { .. } => {
        // Auth not supported for Vsock sockets
      }
    }
  }
}

impl std::fmt::Debug for Intercept {
  // Hand-written so only the filter is printed; the target may carry
  // credentials, which should not end up in debug output.
  fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
    f.debug_struct("Intercept")
      .field("filter", &self.filter)
      .finish()
  }
}
impl Target {
  /// Parses a proxy specification string (typically from an environment
  /// variable or a CLI flag) into a [`Target`].
  ///
  /// Supported forms: `http://`/`https://` URLs (optionally with userinfo
  /// for basic auth), `socks5://`/`socks5h://` URLs, `unix:<path>` on
  /// non-Windows platforms, and `vsock:<cid>:<port>` where vsock is
  /// available. A bare `host:port` is treated as `http`. Returns `None`
  /// for anything else.
  pub(crate) fn parse(val: &str) -> Option<Self> {
    // unix:<path> is valid RFC3986 but not as an http::Uri
    #[cfg(not(windows))]
    if let Some(encoded_path) = val.strip_prefix("unix:") {
      use std::os::unix::ffi::OsStringExt;
      let decoded = std::ffi::OsString::from_vec(
        percent_decode_str(encoded_path).collect::<Vec<u8>>(),
      );
      return Some(Target::Unix {
        path: decoded.into(),
      });
    }

    // vsock:<cid>:<port> is valid RFC3986 but not as an http::Uri.
    // Includes `android` so parsing is available on every platform where
    // `Target::Vsock` exists (matches `new_vsock` and the connector).
    #[cfg(any(
      target_os = "android",
      target_os = "linux",
      target_os = "macos"
    ))]
    if let Some(cid_port) = val.strip_prefix("vsock:") {
      let (left, right) = cid_port.split_once(":")?;
      let cid = left.parse::<u32>().ok()?;
      let port = right.parse::<u32>().ok()?;
      return Some(Target::Vsock { cid, port });
    }

    let uri = val.parse::<Uri>().ok()?;

    let mut builder = Uri::builder();
    let mut is_socks = false;
    let mut http_auth = None;
    let mut socks_auth = None;

    builder = builder.scheme(match uri.scheme() {
      Some(s) => {
        if s == &Scheme::HTTP || s == &Scheme::HTTPS {
          s.clone()
        } else if s.as_str() == "socks5" || s.as_str() == "socks5h" {
          is_socks = true;
          s.clone()
        } else {
          // can't use this proxy scheme
          return None;
        }
      }
      // if no scheme provided, assume they meant 'http'
      None => Scheme::HTTP,
    });

    let authority = uri.authority()?;

    // Peel off `user:pass@` credentials; they become auth material and are
    // stripped from the URI stored in the target.
    if let Some((userinfo, host_port)) = authority.as_str().split_once('@') {
      let (user, pass) = userinfo.split_once(':')?;
      let user = percent_decode_str(user).decode_utf8_lossy();
      let pass = percent_decode_str(pass).decode_utf8_lossy();
      if is_socks {
        socks_auth = Some((user.into(), pass.into()));
      } else {
        http_auth = Some(basic_auth(&user, Some(&pass)));
      }
      builder = builder.authority(host_port);
    } else {
      builder = builder.authority(authority.clone());
    }

    // removing any path, but we MUST specify one or the builder errors
    builder = builder.path_and_query("/");

    let dst = builder.build().ok()?;

    let target = match dst.scheme().unwrap().as_str() {
      "https" => Target::Https {
        dst,
        auth: http_auth,
      },
      "http" => Target::Http {
        dst,
        auth: http_auth,
      },
      "socks5" | "socks5h" => Target::Socks {
        dst,
        auth: socks_auth,
      },
      // shouldn't happen
      _ => return None,
    };

    Some(target)
  }

  /// Creates a raw TCP forward target.
  pub(crate) fn new_tcp(hostname: String, port: u16) -> Self {
    Target::Tcp { hostname, port }
  }

  /// Creates a Unix-domain-socket forward target.
  #[cfg(not(windows))]
  pub(crate) fn new_unix(path: PathBuf) -> Self {
    Target::Unix { path }
  }

  /// Creates a vsock forward target.
  #[cfg(any(target_os = "android", target_os = "linux", target_os = "macos"))]
  pub(crate) fn new_vsock(cid: u32, port: u32) -> Self {
    Target::Vsock { cid, port }
  }
}
/// Parsed NO_PROXY configuration: hosts matching either list bypass proxies.
#[derive(Debug)]
struct NoProxy {
  domains: DomainMatcher,
  ips: IpMatcher,
}

/// Represents a possible matching entry for an IP address
#[derive(Clone, Debug)]
enum Ip {
  Address(IpAddr),
  Network(IpNet),
}

/// A wrapper around a list of IP cidr blocks or addresses with a [IpMatcher::contains] method for
/// checking if an IP address is contained within the matcher
#[derive(Clone, Debug, Default)]
struct IpMatcher(Vec<Ip>);

/// A wrapper around a list of domains with a [DomainMatcher::contains] method for checking if a
/// domain is contained within the matcher
#[derive(Clone, Debug, Default)]
struct DomainMatcher(Vec<String>);
impl NoProxy {
  /// Returns a new no-proxy configuration based on environment variables (or `None` if no variables are set)
  /// see [self::NoProxy::from_string()] for the string format
  fn from_env() -> Option<NoProxy> {
    // Uppercase takes precedence; an unset/invalid var reads as "".
    let raw = env::var("NO_PROXY")
      .or_else(|_| env::var("no_proxy"))
      .unwrap_or_default();

    Self::from_string(&raw)
  }

  /// Returns a new no-proxy configuration based on a `no_proxy` string (or `None` if no variables
  /// are set)
  /// The rules are as follows:
  /// * The environment variable `NO_PROXY` is checked, if it is not set, `no_proxy` is checked
  /// * If neither environment variable is set, `None` is returned
  /// * Entries are expected to be comma-separated (whitespace between entries is ignored)
  /// * IP addresses (both IPv4 and IPv6) are allowed, as are optional subnet masks (by adding /size,
  ///   for example "`192.168.1.0/24`").
  /// * An entry "`*`" matches all hostnames (this is the only wildcard allowed)
  /// * Any other entry is considered a domain name (and may contain a leading dot, for example `google.com`
  ///   and `.google.com` are equivalent) and would match both that domain AND all subdomains.
  ///
  /// For example, if `"NO_PROXY=google.com, 192.168.1.0/24"` was set, all of the following would match
  /// (and therefore would bypass the proxy):
  /// * `http://google.com/`
  /// * `http://www.google.com/`
  /// * `http://192.168.1.42/`
  ///
  /// The URL `http://notgoogle.com/` would not match.
  fn from_string(no_proxy_list: &str) -> Option<Self> {
    if no_proxy_list.is_empty() {
      return None;
    }
    let mut ips = Vec::new();
    let mut domains = Vec::new();
    let parts = no_proxy_list.split(',').map(str::trim);
    for part in parts {
      match part.parse::<IpNet>() {
        // If we can parse an IP net or address, then use it, otherwise, assume it is a domain
        Ok(ip) => ips.push(Ip::Network(ip)),
        Err(_) => match part.parse::<IpAddr>() {
          Ok(addr) => ips.push(Ip::Address(addr)),
          Err(_) => domains.push(part.to_owned()),
        },
      }
    }
    Some(NoProxy {
      ips: IpMatcher(ips),
      domains: DomainMatcher(domains),
    })
  }

  /// Returns true if `host` (a hostname or IP literal) should bypass the
  /// proxy according to this configuration.
  fn contains(&self, host: &str) -> bool {
    // According to RFC3986, raw IPv6 hosts will be wrapped in []. So we need to strip those off
    // the end in order to parse correctly
    let host = if host.starts_with('[') {
      let x: &[_] = &['[', ']'];
      host.trim_matches(x)
    } else {
      host
    };
    match host.parse::<IpAddr>() {
      // If we can parse an IP addr, then use it, otherwise, assume it is a domain
      Ok(ip) => self.ips.contains(ip),
      Err(_) => self.domains.contains(host),
    }
  }
}
impl IpMatcher {
  /// Returns true when `addr` equals any listed address or falls inside any
  /// listed CIDR block.
  fn contains(&self, addr: IpAddr) -> bool {
    self.0.iter().any(|entry| match entry {
      Ip::Address(address) => addr == *address,
      Ip::Network(net) => net.contains(&addr),
    })
  }
}
impl DomainMatcher {
  // The following links may be useful to understand the origin of these rules:
  // * https://curl.se/libcurl/c/CURLOPT_NOPROXY.html
  // * https://github.com/curl/curl/issues/1208
  /// Returns true when `domain` matches any listed entry: an exact match
  /// (with or without the entry's leading dot), a subdomain of the entry,
  /// or the `*` wildcard.
  fn contains(&self, domain: &str) -> bool {
    self.0.iter().any(|d| {
      if d == domain || d.strip_prefix('.') == Some(domain) {
        // Exact match, optionally ignoring the entry's leading dot.
        true
      } else if let Some(prefix) = domain.strip_suffix(d.as_str()) {
        // `domain` ends with `d`; it is a subdomain match only when the
        // boundary falls on a dot — either the entry starts with one, or
        // the remaining prefix of `domain` ends with one.
        d.starts_with('.') || prefix.ends_with('.')
      } else {
        d == "*"
      }
    })
  }
}
impl<C> ProxyConnector<C> {
  /// Looks up the first proxy intercept matching `dst`, if any.
  fn intercept(&self, dst: &Uri) -> Option<&Intercept> {
    self.proxies.intercept(dst)
  }
}

impl Proxies {
  /// Inserts an intercept with the highest priority (checked first).
  pub(crate) fn prepend(&mut self, intercept: Intercept) {
    self.intercepts.insert(0, intercept);
  }

  /// Returns the auth header to forward on plain requests when the matched
  /// proxy target is plain HTTP (other targets carry auth differently).
  pub(crate) fn http_forward_auth(&self, dst: &Uri) -> Option<&HeaderValue> {
    let intercept = self.intercept(dst)?;
    match intercept.target {
      // Only if the proxy target is http
      Target::Http { ref auth, .. } => auth.as_ref(),
      _ => None,
    }
  }

  fn intercept(&self, dst: &Uri) -> Option<&Intercept> {
    // NO_PROXY exclusions take precedence over every configured intercept.
    if let Some(no_proxy) = self.no.as_ref()
      && no_proxy.contains(dst.host()?)
    {
      return None;
    }
    // First matching intercept wins (hence `prepend` = highest priority);
    // `continue` skips entries whose filter doesn't cover dst's scheme.
    for intercept in &self.intercepts {
      return match (
        intercept.filter,
        dst.scheme().map(Scheme::as_str).unwrap_or(""),
      ) {
        (Filter::All, _) => Some(intercept),
        (Filter::Https, "https") => Some(intercept),
        (Filter::Http, "http") => Some(intercept),
        _ => continue,
      };
    }
    None
  }
}
type BoxFuture<T> = Pin<Box<dyn Future<Output = T> + Send>>;
type BoxError = Box<dyn std::error::Error + Send + Sync>;

/// The established connection, tagged by how it was routed.
// These variants are not to be inspected.
#[allow(clippy::large_enum_variant)]
pub enum Proxied<T> {
  /// Not proxied
  PassThrough(T),
  /// Forwarded via TCP socket
  Tcp(T),
  /// Tunneled through HTTP CONNECT
  HttpTunneled(Box<TokioIo<TlsStream<TokioIo<T>>>>),
  /// Tunneled through SOCKS
  Socks(TokioIo<TcpStream>),
  /// Tunneled through SOCKS and TLS
  SocksTls(TokioIo<TlsStream<TokioIo<TokioIo<TcpStream>>>>),
  /// Forwarded via Unix socket
  #[cfg(not(windows))]
  Unix(TokioIo<UnixStream>),
  /// Forwarded via Vsock socket
  #[cfg(any(target_os = "android", target_os = "linux", target_os = "macos"))]
  Vsock(TokioIo<VsockStream>),
}
impl<C> Service<Uri> for ProxyConnector<C>
where
  C: Service<Uri> + Clone,
  C::Response:
    hyper::rt::Read + hyper::rt::Write + Connection + Unpin + Send + 'static,
  C::Future: Send + 'static,
  C::Error: Into<BoxError> + 'static,
{
  type Response = Proxied<MaybeHttpsStream<C::Response>>;
  type Error = BoxError;
  type Future = BoxFuture<Result<Self::Response, Self::Error>>;

  fn poll_ready(
    &mut self,
    cx: &mut Context<'_>,
  ) -> Poll<Result<(), Self::Error>> {
    self.http.poll_ready(cx).map_err(Into::into)
  }

  /// Connects to `orig_dst`, routing through a proxy when an intercept
  /// matches; otherwise falls through to a direct (possibly TLS) connection.
  fn call(&mut self, orig_dst: Uri) -> Self::Future {
    if let Some(intercept) = self.intercept(&orig_dst).cloned() {
      let is_https = orig_dst.scheme() == Some(&Scheme::HTTPS);
      let user_agent = self.user_agent.clone();
      return match intercept.target {
        // HTTP(S) proxy: connect to the proxy itself; for https
        // destinations establish a CONNECT tunnel and wrap it in TLS.
        Target::Http {
          dst: proxy_dst,
          auth,
        }
        | Target::Https {
          dst: proxy_dst,
          auth,
        } => {
          let mut connector =
            HttpsConnector::from((self.http.clone(), self.tls_proxy.clone()));
          let connecting = connector.call(proxy_dst);
          let tls = TlsConnector::from(self.tls.clone());
          Box::pin(async move {
            let mut io = connecting.await?;
            if is_https {
              tunnel(&mut io, &orig_dst, user_agent, auth).await?;
              let tokio_io = TokioIo::new(io);
              let io = tls
                .connect(
                  TryFrom::try_from(orig_dst.host().unwrap().to_owned())?,
                  tokio_io,
                )
                .await?;
              Ok(Proxied::HttpTunneled(Box::new(TokioIo::new(io))))
            } else {
              Ok(Proxied::Tcp(io))
            }
          })
        }
        // SOCKS5 proxy: tunnel the stream through the proxy (default port
        // 1080), then add TLS for https destinations.
        Target::Socks {
          dst: proxy_dst,
          auth,
        } => {
          let tls = TlsConnector::from(self.tls.clone());
          Box::pin(async move {
            let socks_addr = (
              proxy_dst.host().unwrap(),
              proxy_dst.port().map(|p| p.as_u16()).unwrap_or(1080),
            );
            let host = orig_dst.host().ok_or("no host in url")?;
            let port = match orig_dst.port() {
              Some(p) => p.as_u16(),
              None if is_https => 443,
              _ => 80,
            };
            let io = if let Some((user, pass)) = auth {
              Socks5Stream::connect_with_password(
                socks_addr,
                (host, port),
                &user,
                &pass,
              )
              .await?
            } else {
              Socks5Stream::connect(socks_addr, (host, port)).await?
            };
            let io = TokioIo::new(io.into_inner());
            if is_https {
              let tokio_io = TokioIo::new(io);
              let io = tls
                .connect(TryFrom::try_from(host.to_owned())?, tokio_io)
                .await?;
              Ok(Proxied::SocksTls(TokioIo::new(io)))
            } else {
              Ok(Proxied::Socks(io))
            }
          })
        }
        // Raw TCP forward: ignore orig_dst and connect to host:port.
        Target::Tcp {
          hostname: host,
          port,
        } => {
          let mut connector =
            HttpsConnector::from((self.http.clone(), self.tls_proxy.clone()));
          let Ok(uri) = format!("http://{}:{}", host, port).parse() else {
            return Box::pin(async {
              Err("failed to parse tcp proxy uri".into())
            });
          };
          let connecting = connector.call(uri);
          Box::pin(async move {
            let io = connecting.await?;
            Ok(Proxied::Tcp(io))
          })
        }
        // Unix-domain-socket forward.
        #[cfg(not(windows))]
        Target::Unix { path } => {
          let path = path.clone();
          Box::pin(async move {
            let io = UnixStream::connect(&path).await?;
            Ok(Proxied::Unix(TokioIo::new(io)))
          })
        }
        // vsock forward.
        #[cfg(any(
          target_os = "android",
          target_os = "linux",
          target_os = "macos"
        ))]
        Target::Vsock { cid, port } => Box::pin(async move {
          let addr = tokio_vsock::VsockAddr::new(cid, port);
          let io = VsockStream::connect(addr).await?;
          Ok(Proxied::Vsock(TokioIo::new(io)))
        }),
      };
    }
    // No intercept matched: direct connection (TLS handled by the
    // HttpsConnector when the scheme is https).
    let mut connector =
      HttpsConnector::from((self.http.clone(), self.tls.clone()));
    Box::pin(
      connector
        .call(orig_dst)
        .map_ok(Proxied::PassThrough)
        .map_err(Into::into),
    )
  }
}
/// Issues an HTTP `CONNECT` request over `io` to open a tunnel to `dst`
/// through an already-connected HTTP proxy.
///
/// Returns `Ok(())` once the proxy answers `200`; errors on a `407`
/// (authentication required), any other status, EOF, or an over-long
/// response head.
async fn tunnel<T>(
  io: &mut T,
  dst: &Uri,
  user_agent: Option<HeaderValue>,
  auth: Option<HeaderValue>,
) -> Result<(), BoxError>
where
  T: hyper::rt::Read + hyper::rt::Write + Unpin,
{
  use tokio::io::AsyncReadExt;
  use tokio::io::AsyncWriteExt;

  let host = dst.host().expect("proxy dst has host");
  let port = match dst.port() {
    Some(p) => p.as_u16(),
    None => match dst.scheme().map(Scheme::as_str).unwrap_or("") {
      "https" => 443,
      "http" => 80,
      _ => return Err("proxy dst unexpected scheme".into()),
    },
  };

  let mut buf = format!(
    "\
     CONNECT {host}:{port} HTTP/1.1\r\n\
     Host: {host}:{port}\r\n\
     "
  )
  .into_bytes();

  // user-agent
  if let Some(user_agent) = user_agent {
    buf.extend_from_slice(b"User-Agent: ");
    buf.extend_from_slice(user_agent.as_bytes());
    buf.extend_from_slice(b"\r\n");
  }

  // proxy-authorization
  if let Some(value) = auth {
    buf.extend_from_slice(b"Proxy-Authorization: ");
    buf.extend_from_slice(value.as_bytes());
    buf.extend_from_slice(b"\r\n");
  }

  // headers end
  buf.extend_from_slice(b"\r\n");

  let mut tokio_conn = TokioIo::new(io);

  tokio_conn.write_all(&buf).await?;

  // Read the proxy's response head; it must fit in this fixed buffer.
  let mut buf = [0; 8192];
  let mut pos = 0;

  loop {
    let n = tokio_conn.read(&mut buf[pos..]).await?;

    if n == 0 {
      return Err("unexpected eof while tunneling".into());
    }
    pos += n;

    let recvd = &buf[..pos];
    if recvd.starts_with(b"HTTP/1.1 200") || recvd.starts_with(b"HTTP/1.0 200")
    {
      // Success is only complete once the blank line ending the headers
      // (`\r\n\r\n`) has arrived.
      if recvd.ends_with(b"\r\n\r\n") {
        return Ok(());
      }
      if pos == buf.len() {
        return Err("proxy headers too long for tunnel".into());
      }
      // else read more
    } else if recvd.starts_with(b"HTTP/1.1 407") {
      return Err("proxy authentication required".into());
    } else {
      return Err("unsuccessful tunnel".into());
    }
  }
}
// All variants simply delegate reads to the wrapped stream.
impl<T> hyper::rt::Read for Proxied<T>
where
  T: hyper::rt::Read + hyper::rt::Write + Unpin,
{
  fn poll_read(
    mut self: Pin<&mut Self>,
    cx: &mut Context<'_>,
    buf: hyper::rt::ReadBufCursor<'_>,
  ) -> Poll<Result<(), std::io::Error>> {
    match *self {
      Proxied::PassThrough(ref mut p) => Pin::new(p).poll_read(cx, buf),
      Proxied::Tcp(ref mut p) => Pin::new(p).poll_read(cx, buf),
      Proxied::HttpTunneled(ref mut p) => Pin::new(p).poll_read(cx, buf),
      Proxied::Socks(ref mut p) => Pin::new(p).poll_read(cx, buf),
      Proxied::SocksTls(ref mut p) => Pin::new(p).poll_read(cx, buf),
      #[cfg(not(windows))]
      Proxied::Unix(ref mut p) => Pin::new(p).poll_read(cx, buf),
      #[cfg(any(
        target_os = "android",
        target_os = "linux",
        target_os = "macos"
      ))]
      Proxied::Vsock(ref mut p) => Pin::new(p).poll_read(cx, buf),
    }
  }
}

// Likewise, writes (and vectored writes/flush/shutdown) delegate verbatim.
impl<T> hyper::rt::Write for Proxied<T>
where
  T: hyper::rt::Read + hyper::rt::Write + Unpin,
{
  fn poll_write(
    mut self: Pin<&mut Self>,
    cx: &mut Context<'_>,
    buf: &[u8],
  ) -> Poll<Result<usize, std::io::Error>> {
    match *self {
      Proxied::PassThrough(ref mut p) => Pin::new(p).poll_write(cx, buf),
      Proxied::Tcp(ref mut p) => Pin::new(p).poll_write(cx, buf),
      Proxied::HttpTunneled(ref mut p) => Pin::new(p).poll_write(cx, buf),
      Proxied::Socks(ref mut p) => Pin::new(p).poll_write(cx, buf),
      Proxied::SocksTls(ref mut p) => Pin::new(p).poll_write(cx, buf),
      #[cfg(not(windows))]
      Proxied::Unix(ref mut p) => Pin::new(p).poll_write(cx, buf),
      #[cfg(any(
        target_os = "android",
        target_os = "linux",
        target_os = "macos"
      ))]
      Proxied::Vsock(ref mut p) => Pin::new(p).poll_write(cx, buf),
    }
  }

  fn poll_flush(
    mut self: Pin<&mut Self>,
    cx: &mut Context<'_>,
  ) -> Poll<Result<(), std::io::Error>> {
    match *self {
      Proxied::PassThrough(ref mut p) => Pin::new(p).poll_flush(cx),
      Proxied::Tcp(ref mut p) => Pin::new(p).poll_flush(cx),
      Proxied::HttpTunneled(ref mut p) => Pin::new(p).poll_flush(cx),
      Proxied::Socks(ref mut p) => Pin::new(p).poll_flush(cx),
      Proxied::SocksTls(ref mut p) => Pin::new(p).poll_flush(cx),
      #[cfg(not(windows))]
      Proxied::Unix(ref mut p) => Pin::new(p).poll_flush(cx),
      #[cfg(any(
        target_os = "android",
        target_os = "linux",
        target_os = "macos"
      ))]
      Proxied::Vsock(ref mut p) => Pin::new(p).poll_flush(cx),
    }
  }

  fn poll_shutdown(
    mut self: Pin<&mut Self>,
    cx: &mut Context<'_>,
  ) -> Poll<Result<(), std::io::Error>> {
    match *self {
      Proxied::PassThrough(ref mut p) => Pin::new(p).poll_shutdown(cx),
      Proxied::Tcp(ref mut p) => Pin::new(p).poll_shutdown(cx),
      Proxied::HttpTunneled(ref mut p) => Pin::new(p).poll_shutdown(cx),
      Proxied::Socks(ref mut p) => Pin::new(p).poll_shutdown(cx),
      Proxied::SocksTls(ref mut p) => Pin::new(p).poll_shutdown(cx),
      #[cfg(not(windows))]
      Proxied::Unix(ref mut p) => Pin::new(p).poll_shutdown(cx),
      #[cfg(any(
        target_os = "android",
        target_os = "linux",
        target_os = "macos"
      ))]
      Proxied::Vsock(ref mut p) => Pin::new(p).poll_shutdown(cx),
    }
  }

  fn is_write_vectored(&self) -> bool {
    match *self {
      Proxied::PassThrough(ref p) => p.is_write_vectored(),
      Proxied::Tcp(ref p) => p.is_write_vectored(),
      Proxied::HttpTunneled(ref p) => p.is_write_vectored(),
      Proxied::Socks(ref p) => p.is_write_vectored(),
      Proxied::SocksTls(ref p) => p.is_write_vectored(),
      #[cfg(not(windows))]
      Proxied::Unix(ref p) => p.is_write_vectored(),
      #[cfg(any(
        target_os = "android",
        target_os = "linux",
        target_os = "macos"
      ))]
      Proxied::Vsock(ref p) => p.is_write_vectored(),
    }
  }

  fn poll_write_vectored(
    mut self: Pin<&mut Self>,
    cx: &mut Context<'_>,
    bufs: &[std::io::IoSlice<'_>],
  ) -> Poll<Result<usize, std::io::Error>> {
    match *self {
      Proxied::PassThrough(ref mut p) => {
        Pin::new(p).poll_write_vectored(cx, bufs)
      }
      Proxied::Tcp(ref mut p) => Pin::new(p).poll_write_vectored(cx, bufs),
      Proxied::HttpTunneled(ref mut p) => {
        Pin::new(p).poll_write_vectored(cx, bufs)
      }
      Proxied::Socks(ref mut p) => Pin::new(p).poll_write_vectored(cx, bufs),
      Proxied::SocksTls(ref mut p) => Pin::new(p).poll_write_vectored(cx, bufs),
      #[cfg(not(windows))]
      Proxied::Unix(ref mut p) => Pin::new(p).poll_write_vectored(cx, bufs),
      #[cfg(any(
        target_os = "android",
        target_os = "linux",
        target_os = "macos"
      ))]
      Proxied::Vsock(ref mut p) => Pin::new(p).poll_write_vectored(cx, bufs),
    }
  }
}

impl<T> Connection for Proxied<T>
where
  T: Connection,
{
  /// Reports connection metadata; proxied variants are flagged `proxy(true)`
  /// and TLS-tunneled variants report h2 when negotiated via ALPN.
  fn connected(&self) -> Connected {
    match self {
      Proxied::PassThrough(p) => p.connected(),
      Proxied::Tcp(p) => p.connected().proxy(true),
      Proxied::HttpTunneled(p) => {
        let tunneled_tls = p.inner().get_ref();
        if tunneled_tls.1.alpn_protocol() == Some(b"h2") {
          tunneled_tls.0.connected().negotiated_h2()
        } else {
          tunneled_tls.0.connected()
        }
      }
      Proxied::Socks(p) => p.connected(),
      Proxied::SocksTls(p) => {
        let tunneled_tls = p.inner().get_ref();
        if tunneled_tls.1.alpn_protocol() == Some(b"h2") {
          tunneled_tls.0.connected().negotiated_h2()
        } else {
          tunneled_tls.0.connected()
        }
      }
      #[cfg(not(windows))]
      Proxied::Unix(_) => Connected::new().proxy(true),
      #[cfg(any(
        target_os = "android",
        target_os = "linux",
        target_os = "macos"
      ))]
      Proxied::Vsock(_) => Connected::new().proxy(true),
    }
  }
}
#[test]
fn test_proxy_parse_from_env() {
  fn parse(s: &str) -> Target {
    Target::parse(s).unwrap()
  }

  // normal
  match parse("http://127.0.0.1:6666") {
    Target::Http { dst, auth } => {
      assert_eq!(dst, "http://127.0.0.1:6666");
      assert!(auth.is_none());
    }
    _ => panic!("bad target"),
  }

  // without scheme
  match parse("127.0.0.1:6666") {
    Target::Http { dst, auth } => {
      assert_eq!(dst, "http://127.0.0.1:6666");
      assert!(auth.is_none());
    }
    _ => panic!("bad target"),
  }

  // with userinfo
  match parse("user:pass@127.0.0.1:6666") {
    Target::Http { dst, auth } => {
      assert_eq!(dst, "http://127.0.0.1:6666");
      assert!(auth.is_some());
      assert!(auth.unwrap().is_sensitive());
    }
    _ => panic!("bad target"),
  }

  // percent encoded user info
  match parse("us%2Fer:p%2Fass@127.0.0.1:6666") {
    Target::Http { dst, auth } => {
      assert_eq!(dst, "http://127.0.0.1:6666");
      let auth = auth.unwrap();
      assert_eq!(auth.to_str().unwrap(), "Basic dXMvZXI6cC9hc3M=");
    }
    _ => panic!("bad target"),
  }

  // socks
  match parse("socks5://user:pass@127.0.0.1:6666") {
    Target::Socks { dst, auth } => {
      assert_eq!(dst, "socks5://127.0.0.1:6666");
      assert!(auth.is_some());
    }
    _ => panic!("bad target"),
  }

  // socks5h
  match parse("socks5h://localhost:6666") {
    Target::Socks { dst, auth } => {
      assert_eq!(dst, "socks5h://localhost:6666");
      assert!(auth.is_none());
    }
    _ => panic!("bad target"),
  }

  // unix
  #[cfg(not(windows))]
  match parse("unix:foo%20bar/baz") {
    Target::Unix { path } => {
      assert_eq!(path.to_str(), Some("foo bar/baz"));
    }
    _ => panic!("bad target"),
  }

  // vsock
  // NOTE(review): this cfg omits `target_os = "android"` even though the
  // Vsock variant exists there — confirm whether android should be covered.
  #[cfg(any(target_os = "linux", target_os = "macos"))]
  match parse("vsock:1234:5678") {
    Target::Vsock { cid, port } => {
      assert_eq!(cid, 1234);
      assert_eq!(port, 5678);
    }
    _ => panic!("bad target"),
  }
}

#[test]
fn test_domain_matcher() {
  let domains = vec![".foo.bar".into(), "bar.foo".into()];
  let matcher = DomainMatcher(domains);

  // domains match with leading `.`
  assert!(matcher.contains("foo.bar"));
  // subdomains match with leading `.`
  assert!(matcher.contains("www.foo.bar"));

  // domains match with no leading `.`
  assert!(matcher.contains("bar.foo"));
  // subdomains match with no leading `.`
  assert!(matcher.contains("www.bar.foo"));

  // non-subdomain string prefixes don't match
  assert!(!matcher.contains("notfoo.bar"));
  assert!(!matcher.contains("notbar.foo"));
}

#[test]
fn test_no_proxy_wildcard() {
  let no_proxy = NoProxy::from_string("*").unwrap();
  assert!(no_proxy.contains("any.where"));
}

#[test]
fn test_no_proxy_ip_ranges() {
  let no_proxy = NoProxy::from_string(
    ".foo.bar, bar.baz,10.42.1.1/24,::1,10.124.7.8,2001::/17",
  )
  .unwrap();

  let should_not_match = [
    // random url, not in no_proxy
    "deno.com",
    // make sure that random non-subdomain string prefixes don't match
    "notfoo.bar",
    // make sure that random non-subdomain string prefixes don't match
    "notbar.baz",
    // ipv4 address out of range
    "10.43.1.1",
    // ipv4 address out of range
    "10.124.7.7",
    // ipv6 address out of range
    "[ffff:db8:a0b:12f0::1]",
    // ipv6 address out of range
    "[2005:db8:a0b:12f0::1]",
  ];

  for host in &should_not_match {
    assert!(!no_proxy.contains(host), "should not contain {:?}", host);
  }

  let should_match = [
    // make sure subdomains (with leading .) match
    "hello.foo.bar",
    // make sure exact matches (without leading .) match (also makes sure spaces between entries work)
    "bar.baz",
    // make sure subdomains (without leading . in no_proxy) match
    "foo.bar.baz",
    // make sure subdomains (without leading . in no_proxy) match - this differs from cURL
    "foo.bar",
    // ipv4 address match within range
    "10.42.1.100",
    // ipv6 address exact match
    "[::1]",
    // ipv6 address match within range
    "[2001:db8:a0b:12f0::1]",
    // ipv4 address exact match
    "10.124.7.8",
  ];

  for host in &should_match {
    assert!(no_proxy.contains(host), "should contain {:?}", host);
  }
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/ext/webgpu/webidl.rs | ext/webgpu/webidl.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::borrow::Cow;
#[allow(clippy::disallowed_types)]
use std::collections::HashSet;
use deno_core::WebIDL;
use deno_core::cppgc::Ref;
use deno_core::v8;
use deno_core::webidl::ContextFn;
use deno_core::webidl::IntOptions;
use deno_core::webidl::WebIdlConverter;
use deno_core::webidl::WebIdlError;
use deno_core::webidl::WebIdlErrorKind;
use deno_error::JsErrorBox;
#[derive(WebIDL)]
#[webidl(dictionary)]
pub(crate) struct GPUExtent3DDict {
#[options(enforce_range = true)]
width: u32,
#[webidl(default = 1)]
#[options(enforce_range = true)]
height: u32,
#[webidl(default = 1)]
#[options(enforce_range = true)]
depth_or_array_layers: u32,
}
pub(crate) enum GPUExtent3D {
Dict(GPUExtent3DDict),
Sequence((u32, u32, u32)),
}
impl<'a> WebIdlConverter<'a> for GPUExtent3D {
type Options = ();
fn convert<'b>(
scope: &mut v8::PinScope<'a, '_>,
value: v8::Local<'a, v8::Value>,
prefix: Cow<'static, str>,
context: ContextFn<'b>,
options: &Self::Options,
) -> Result<Self, WebIdlError> {
if value.is_null_or_undefined() {
return Ok(GPUExtent3D::Dict(GPUExtent3DDict::convert(
scope,
value,
prefix,
context.borrowed(),
options,
)?));
}
if let Ok(obj) = value.try_cast::<v8::Object>() {
let iter = v8::Symbol::get_iterator(scope);
if let Some(iter) = obj.get(scope, iter.into())
&& !iter.is_undefined()
{
let conv = <Vec<u32>>::convert(
scope,
value,
prefix.clone(),
context.borrowed(),
&IntOptions {
clamp: false,
enforce_range: true,
},
)?;
if conv.is_empty() || conv.len() > 3 {
return Err(WebIdlError::other(
prefix,
context,
JsErrorBox::type_error(format!(
"A sequence of number used as a GPUExtent3D must have between 1 and 3 elements, received {} elements",
conv.len()
)),
));
}
let mut iter = conv.into_iter();
return Ok(GPUExtent3D::Sequence((
iter.next().unwrap(),
iter.next().unwrap_or(1),
iter.next().unwrap_or(1),
)));
}
return Ok(GPUExtent3D::Dict(GPUExtent3DDict::convert(
scope, value, prefix, context, options,
)?));
}
Err(WebIdlError::new(
prefix,
context,
WebIdlErrorKind::ConvertToConverterType(
"sequence<GPUIntegerCoordinate> or GPUExtent3DDict",
),
))
}
}
impl From<GPUExtent3D> for wgpu_types::Extent3d {
fn from(value: GPUExtent3D) -> Self {
match value {
GPUExtent3D::Dict(dict) => Self {
width: dict.width,
height: dict.height,
depth_or_array_layers: dict.depth_or_array_layers,
},
GPUExtent3D::Sequence((width, height, depth)) => Self {
width,
height,
depth_or_array_layers: depth,
},
}
}
}
#[derive(WebIDL)]
#[webidl(dictionary)]
pub(crate) struct GPUOrigin3DDict {
#[webidl(default = 0)]
#[options(enforce_range = true)]
x: u32,
#[webidl(default = 0)]
#[options(enforce_range = true)]
y: u32,
#[webidl(default = 0)]
#[options(enforce_range = true)]
z: u32,
}
pub(crate) enum GPUOrigin3D {
Dict(GPUOrigin3DDict),
Sequence((u32, u32, u32)),
}
impl Default for GPUOrigin3D {
fn default() -> Self {
GPUOrigin3D::Sequence((0, 0, 0))
}
}
impl<'a> WebIdlConverter<'a> for GPUOrigin3D {
type Options = ();
fn convert<'b>(
scope: &mut v8::PinScope<'a, '_>,
value: v8::Local<'a, v8::Value>,
prefix: Cow<'static, str>,
context: ContextFn<'b>,
options: &Self::Options,
) -> Result<Self, WebIdlError> {
if value.is_null_or_undefined() {
return Ok(GPUOrigin3D::Dict(GPUOrigin3DDict::convert(
scope,
value,
prefix,
context.borrowed(),
options,
)?));
}
if let Ok(obj) = value.try_cast::<v8::Object>() {
let iter = v8::Symbol::get_iterator(scope);
if let Some(iter) = obj.get(scope, iter.into())
&& !iter.is_undefined()
{
let conv = <Vec<u32>>::convert(
scope,
value,
prefix.clone(),
context.borrowed(),
&IntOptions {
clamp: false,
enforce_range: true,
},
)?;
if conv.len() > 3 {
return Err(WebIdlError::other(
prefix,
context,
JsErrorBox::type_error(format!(
"A sequence of number used as a GPUOrigin3D must have at most 3 elements, received {} elements",
conv.len()
)),
));
}
let mut iter = conv.into_iter();
return Ok(GPUOrigin3D::Sequence((
iter.next().unwrap_or(0),
iter.next().unwrap_or(0),
iter.next().unwrap_or(0),
)));
}
return Ok(GPUOrigin3D::Dict(GPUOrigin3DDict::convert(
scope, value, prefix, context, options,
)?));
}
Err(WebIdlError::new(
prefix,
context,
WebIdlErrorKind::ConvertToConverterType(
"sequence<GPUIntegerCoordinate> or GPUOrigin3DDict",
),
))
}
}
impl From<GPUOrigin3D> for wgpu_types::Origin3d {
fn from(value: GPUOrigin3D) -> Self {
match value {
GPUOrigin3D::Dict(dict) => Self {
x: dict.x,
y: dict.y,
z: dict.z,
},
GPUOrigin3D::Sequence((x, y, z)) => Self { x, y, z },
}
}
}
#[derive(WebIDL)]
#[webidl(dictionary)]
pub(crate) struct GPUColorDict {
r: f64,
g: f64,
b: f64,
a: f64,
}
pub(crate) enum GPUColor {
Dict(GPUColorDict),
Sequence((f64, f64, f64, f64)),
}
impl<'a> WebIdlConverter<'a> for GPUColor {
type Options = ();
fn convert<'b>(
scope: &mut v8::PinScope<'a, '_>,
value: v8::Local<'a, v8::Value>,
prefix: Cow<'static, str>,
context: ContextFn<'b>,
options: &Self::Options,
) -> Result<Self, WebIdlError> {
if value.is_null_or_undefined() {
return Ok(GPUColor::Dict(GPUColorDict::convert(
scope,
value,
prefix,
context.borrowed(),
options,
)?));
}
if let Ok(obj) = value.try_cast::<v8::Object>() {
let iter = v8::Symbol::get_iterator(scope);
if let Some(iter) = obj.get(scope, iter.into())
&& !iter.is_undefined()
{
let conv = <Vec<f64>>::convert(
scope,
value,
prefix.clone(),
context.borrowed(),
options,
)?;
if conv.len() != 4 {
return Err(WebIdlError::other(
prefix,
context,
JsErrorBox::type_error(format!(
"A sequence of number used as a GPUColor must have exactly 4 elements, received {} elements",
conv.len()
)),
));
}
let mut iter = conv.into_iter();
return Ok(GPUColor::Sequence((
iter.next().unwrap(),
iter.next().unwrap(),
iter.next().unwrap(),
iter.next().unwrap(),
)));
}
return Ok(GPUColor::Dict(GPUColorDict::convert(
scope, value, prefix, context, options,
)?));
}
Err(WebIdlError::new(
prefix,
context,
WebIdlErrorKind::ConvertToConverterType(
"sequence<GPUIntegerCoordinate> or GPUOrigin3DDict",
),
))
}
}
impl From<GPUColor> for wgpu_types::Color {
fn from(value: GPUColor) -> Self {
match value {
GPUColor::Dict(dict) => Self {
r: dict.r,
g: dict.g,
b: dict.b,
a: dict.a,
},
GPUColor::Sequence((r, g, b, a)) => Self { r, g, b, a },
}
}
}
#[derive(WebIDL)]
#[webidl(enum)]
pub(crate) enum GPUAutoLayoutMode {
Auto,
}
pub(crate) enum GPUPipelineLayoutOrGPUAutoLayoutMode {
PipelineLayout(Ref<crate::pipeline_layout::GPUPipelineLayout>),
AutoLayoutMode(GPUAutoLayoutMode),
}
impl From<GPUPipelineLayoutOrGPUAutoLayoutMode>
for Option<wgpu_core::id::PipelineLayoutId>
{
fn from(value: GPUPipelineLayoutOrGPUAutoLayoutMode) -> Self {
match value {
GPUPipelineLayoutOrGPUAutoLayoutMode::PipelineLayout(layout) => {
Some(layout.id)
}
GPUPipelineLayoutOrGPUAutoLayoutMode::AutoLayoutMode(
GPUAutoLayoutMode::Auto,
) => None,
}
}
}
impl<'a> WebIdlConverter<'a> for GPUPipelineLayoutOrGPUAutoLayoutMode {
type Options = ();
fn convert<'b>(
scope: &mut v8::PinScope<'a, '_>,
value: v8::Local<'a, v8::Value>,
prefix: Cow<'static, str>,
context: ContextFn<'b>,
options: &Self::Options,
) -> Result<Self, WebIdlError> {
if value.is_object() {
Ok(Self::PipelineLayout(WebIdlConverter::convert(
scope, value, prefix, context, options,
)?))
} else {
Ok(Self::AutoLayoutMode(WebIdlConverter::convert(
scope, value, prefix, context, options,
)?))
}
}
}
#[derive(WebIDL, Clone, Hash, Eq, PartialEq)]
#[webidl(enum)]
pub enum GPUFeatureName {
#[webidl(rename = "depth-clip-control")]
DepthClipControl,
#[webidl(rename = "timestamp-query")]
TimestampQuery,
#[webidl(rename = "indirect-first-instance")]
IndirectFirstInstance,
#[webidl(rename = "shader-f16")]
ShaderF16,
#[webidl(rename = "depth32float-stencil8")]
Depth32floatStencil8,
#[webidl(rename = "texture-compression-bc")]
TextureCompressionBc,
#[webidl(rename = "texture-compression-bc-sliced-3d")]
TextureCompressionBcSliced3d,
#[webidl(rename = "texture-compression-etc2")]
TextureCompressionEtc2,
#[webidl(rename = "texture-compression-astc")]
TextureCompressionAstc,
#[webidl(rename = "texture-compression-astc-sliced-3d")]
TextureCompressionAstcSliced3d,
#[webidl(rename = "rg11b10ufloat-renderable")]
Rg11b10ufloatRenderable,
#[webidl(rename = "bgra8unorm-storage")]
Bgra8unormStorage,
#[webidl(rename = "float32-filterable")]
Float32Filterable,
#[webidl(rename = "dual-source-blending")]
DualSourceBlending,
#[webidl(rename = "subgroups")]
Subgroups,
// extended from spec
#[webidl(rename = "texture-format-16-bit-norm")]
TextureFormat16BitNorm,
#[webidl(rename = "texture-compression-astc-hdr")]
TextureCompressionAstcHdr,
#[webidl(rename = "texture-adapter-specific-format-features")]
TextureAdapterSpecificFormatFeatures,
#[webidl(rename = "pipeline-statistics-query")]
PipelineStatisticsQuery,
#[webidl(rename = "timestamp-query-inside-passes")]
TimestampQueryInsidePasses,
#[webidl(rename = "mappable-primary-buffers")]
MappablePrimaryBuffers,
#[webidl(rename = "texture-binding-array")]
TextureBindingArray,
#[webidl(rename = "buffer-binding-array")]
BufferBindingArray,
#[webidl(rename = "storage-resource-binding-array")]
StorageResourceBindingArray,
#[webidl(
rename = "sampled-texture-and-storage-buffer-array-non-uniform-indexing"
)]
SampledTextureAndStorageBufferArrayNonUniformIndexing,
#[webidl(rename = "storage-texture-array-non-uniform-indexing")]
StorageTextureArrayNonUniformIndexing,
#[webidl(rename = "uniform-buffer-binding-arrays")]
UniformBufferBindingArrays,
#[webidl(rename = "partially-bound-binding-array")]
PartiallyBoundBindingArray,
#[webidl(rename = "multi-draw-indirect-count")]
MultiDrawIndirectCount,
#[webidl(rename = "immediate-data")]
ImmediateData,
#[webidl(rename = "address-mode-clamp-to-zero")]
AddressModeClampToZero,
#[webidl(rename = "address-mode-clamp-to-border")]
AddressModeClampToBorder,
#[webidl(rename = "polygon-mode-line")]
PolygonModeLine,
#[webidl(rename = "polygon-mode-point")]
PolygonModePoint,
#[webidl(rename = "conservative-rasterization")]
ConservativeRasterization,
#[webidl(rename = "vertex-writable-storage")]
VertexWritableStorage,
#[webidl(rename = "clear-texture")]
ClearTexture,
#[webidl(rename = "multiview")]
Multiview,
#[webidl(rename = "vertex-attribute-64-bit")]
VertexAttribute64Bit,
#[webidl(rename = "shader-f64")]
ShaderF64,
#[webidl(rename = "shader-i16")]
ShaderI16,
#[webidl(rename = "shader-primitive-index")]
ShaderPrimitiveIndex,
#[webidl(rename = "shader-early-depth-test")]
ShaderEarlyDepthTest,
#[webidl(rename = "passthrough-shaders")]
PassthroughShaders,
}
pub fn feature_names_to_features(
names: Vec<GPUFeatureName>,
) -> wgpu_types::Features {
use wgpu_types::Features;
let mut features = Features::empty();
for name in names {
#[rustfmt::skip]
let feature = match name {
GPUFeatureName::DepthClipControl => Features::DEPTH_CLIP_CONTROL,
GPUFeatureName::TimestampQuery => Features::TIMESTAMP_QUERY,
GPUFeatureName::IndirectFirstInstance => Features::INDIRECT_FIRST_INSTANCE,
GPUFeatureName::ShaderF16 => Features::SHADER_F16,
GPUFeatureName::Depth32floatStencil8 => Features::DEPTH32FLOAT_STENCIL8,
GPUFeatureName::TextureCompressionBc => Features::TEXTURE_COMPRESSION_BC,
GPUFeatureName::TextureCompressionBcSliced3d => Features::TEXTURE_COMPRESSION_BC_SLICED_3D,
GPUFeatureName::TextureCompressionEtc2 => Features::TEXTURE_COMPRESSION_ETC2,
GPUFeatureName::TextureCompressionAstc => Features::TEXTURE_COMPRESSION_ASTC,
GPUFeatureName::TextureCompressionAstcSliced3d => Features::TEXTURE_COMPRESSION_ASTC_SLICED_3D,
GPUFeatureName::Rg11b10ufloatRenderable => Features::RG11B10UFLOAT_RENDERABLE,
GPUFeatureName::Bgra8unormStorage => Features::BGRA8UNORM_STORAGE,
GPUFeatureName::Float32Filterable => Features::FLOAT32_FILTERABLE,
GPUFeatureName::DualSourceBlending => Features::DUAL_SOURCE_BLENDING,
GPUFeatureName::Subgroups => Features::SUBGROUP,
GPUFeatureName::TextureFormat16BitNorm => Features::TEXTURE_FORMAT_16BIT_NORM,
GPUFeatureName::TextureCompressionAstcHdr => Features::TEXTURE_COMPRESSION_ASTC_HDR,
GPUFeatureName::TextureAdapterSpecificFormatFeatures => Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES,
GPUFeatureName::PipelineStatisticsQuery => Features::PIPELINE_STATISTICS_QUERY,
GPUFeatureName::TimestampQueryInsidePasses => Features::TIMESTAMP_QUERY_INSIDE_PASSES,
GPUFeatureName::MappablePrimaryBuffers => Features::MAPPABLE_PRIMARY_BUFFERS,
GPUFeatureName::TextureBindingArray => Features::TEXTURE_BINDING_ARRAY,
GPUFeatureName::BufferBindingArray => Features::BUFFER_BINDING_ARRAY,
GPUFeatureName::StorageResourceBindingArray => Features::STORAGE_RESOURCE_BINDING_ARRAY,
GPUFeatureName::SampledTextureAndStorageBufferArrayNonUniformIndexing => Features::SAMPLED_TEXTURE_AND_STORAGE_BUFFER_ARRAY_NON_UNIFORM_INDEXING,
GPUFeatureName::StorageTextureArrayNonUniformIndexing => Features::STORAGE_TEXTURE_ARRAY_NON_UNIFORM_INDEXING,
GPUFeatureName::UniformBufferBindingArrays => Features::UNIFORM_BUFFER_BINDING_ARRAYS,
GPUFeatureName::PartiallyBoundBindingArray => Features::PARTIALLY_BOUND_BINDING_ARRAY,
GPUFeatureName::MultiDrawIndirectCount => Features::MULTI_DRAW_INDIRECT_COUNT,
GPUFeatureName::ImmediateData => Features::IMMEDIATES,
GPUFeatureName::AddressModeClampToZero => Features::ADDRESS_MODE_CLAMP_TO_ZERO,
GPUFeatureName::AddressModeClampToBorder => Features::ADDRESS_MODE_CLAMP_TO_BORDER,
GPUFeatureName::PolygonModeLine => Features::POLYGON_MODE_LINE,
GPUFeatureName::PolygonModePoint => Features::POLYGON_MODE_POINT,
GPUFeatureName::ConservativeRasterization => Features::CONSERVATIVE_RASTERIZATION,
GPUFeatureName::VertexWritableStorage => Features::VERTEX_WRITABLE_STORAGE,
GPUFeatureName::ClearTexture => Features::CLEAR_TEXTURE,
GPUFeatureName::Multiview => Features::MULTIVIEW,
GPUFeatureName::VertexAttribute64Bit => Features::VERTEX_ATTRIBUTE_64BIT,
GPUFeatureName::ShaderF64 => Features::SHADER_F64,
GPUFeatureName::ShaderI16 => Features::SHADER_I16,
GPUFeatureName::ShaderPrimitiveIndex => Features::SHADER_PRIMITIVE_INDEX,
GPUFeatureName::ShaderEarlyDepthTest => Features::SHADER_EARLY_DEPTH_TEST,
GPUFeatureName::PassthroughShaders => Features::EXPERIMENTAL_PASSTHROUGH_SHADERS,
};
features.set(feature, true);
}
features
}
#[allow(clippy::disallowed_types)]
pub fn features_to_feature_names(
features: wgpu_types::Features,
) -> HashSet<GPUFeatureName> {
use GPUFeatureName::*;
let mut return_features = HashSet::new();
// api
if features.contains(wgpu_types::Features::DEPTH_CLIP_CONTROL) {
return_features.insert(DepthClipControl);
}
if features.contains(wgpu_types::Features::TIMESTAMP_QUERY) {
return_features.insert(TimestampQuery);
}
if features.contains(wgpu_types::Features::INDIRECT_FIRST_INSTANCE) {
return_features.insert(IndirectFirstInstance);
}
// shader
if features.contains(wgpu_types::Features::SHADER_F16) {
return_features.insert(ShaderF16);
}
// texture formats
if features.contains(wgpu_types::Features::DEPTH32FLOAT_STENCIL8) {
return_features.insert(Depth32floatStencil8);
}
if features.contains(wgpu_types::Features::TEXTURE_COMPRESSION_BC) {
return_features.insert(TextureCompressionBc);
}
if features.contains(wgpu_types::Features::TEXTURE_COMPRESSION_BC_SLICED_3D) {
return_features.insert(TextureCompressionBcSliced3d);
}
if features.contains(wgpu_types::Features::TEXTURE_COMPRESSION_ETC2) {
return_features.insert(TextureCompressionEtc2);
}
if features.contains(wgpu_types::Features::TEXTURE_COMPRESSION_ASTC) {
return_features.insert(TextureCompressionAstc);
}
if features.contains(wgpu_types::Features::TEXTURE_COMPRESSION_ASTC_SLICED_3D)
{
return_features.insert(TextureCompressionAstcSliced3d);
}
if features.contains(wgpu_types::Features::RG11B10UFLOAT_RENDERABLE) {
return_features.insert(Rg11b10ufloatRenderable);
}
if features.contains(wgpu_types::Features::BGRA8UNORM_STORAGE) {
return_features.insert(Bgra8unormStorage);
}
if features.contains(wgpu_types::Features::FLOAT32_FILTERABLE) {
return_features.insert(Float32Filterable);
}
if features.contains(wgpu_types::Features::DUAL_SOURCE_BLENDING) {
return_features.insert(DualSourceBlending);
}
if features.contains(wgpu_types::Features::SUBGROUP) {
return_features.insert(Subgroups);
}
// extended from spec
// texture formats
if features.contains(wgpu_types::Features::TEXTURE_FORMAT_16BIT_NORM) {
return_features.insert(TextureFormat16BitNorm);
}
if features.contains(wgpu_types::Features::TEXTURE_COMPRESSION_ASTC_HDR) {
return_features.insert(TextureCompressionAstcHdr);
}
if features
.contains(wgpu_types::Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES)
{
return_features.insert(TextureAdapterSpecificFormatFeatures);
}
// api
if features.contains(wgpu_types::Features::PIPELINE_STATISTICS_QUERY) {
return_features.insert(PipelineStatisticsQuery);
}
if features.contains(wgpu_types::Features::TIMESTAMP_QUERY_INSIDE_PASSES) {
return_features.insert(TimestampQueryInsidePasses);
}
if features.contains(wgpu_types::Features::MAPPABLE_PRIMARY_BUFFERS) {
return_features.insert(MappablePrimaryBuffers);
}
if features.contains(wgpu_types::Features::TEXTURE_BINDING_ARRAY) {
return_features.insert(TextureBindingArray);
}
if features.contains(wgpu_types::Features::BUFFER_BINDING_ARRAY) {
return_features.insert(BufferBindingArray);
}
if features.contains(wgpu_types::Features::STORAGE_RESOURCE_BINDING_ARRAY) {
return_features.insert(StorageResourceBindingArray);
}
if features.contains(
wgpu_types::Features::SAMPLED_TEXTURE_AND_STORAGE_BUFFER_ARRAY_NON_UNIFORM_INDEXING,
) {
return_features.insert(SampledTextureAndStorageBufferArrayNonUniformIndexing);
}
if features
.contains(wgpu_types::Features::STORAGE_TEXTURE_ARRAY_NON_UNIFORM_INDEXING)
{
return_features.insert(StorageTextureArrayNonUniformIndexing);
}
if features.contains(wgpu_types::Features::UNIFORM_BUFFER_BINDING_ARRAYS) {
return_features.insert(UniformBufferBindingArrays);
}
if features.contains(wgpu_types::Features::PARTIALLY_BOUND_BINDING_ARRAY) {
return_features.insert(PartiallyBoundBindingArray);
}
if features.contains(wgpu_types::Features::MULTI_DRAW_INDIRECT_COUNT) {
return_features.insert(MultiDrawIndirectCount);
}
if features.contains(wgpu_types::Features::IMMEDIATES) {
return_features.insert(ImmediateData);
}
if features.contains(wgpu_types::Features::ADDRESS_MODE_CLAMP_TO_ZERO) {
return_features.insert(AddressModeClampToZero);
}
if features.contains(wgpu_types::Features::ADDRESS_MODE_CLAMP_TO_BORDER) {
return_features.insert(AddressModeClampToBorder);
}
if features.contains(wgpu_types::Features::POLYGON_MODE_LINE) {
return_features.insert(PolygonModeLine);
}
if features.contains(wgpu_types::Features::POLYGON_MODE_POINT) {
return_features.insert(PolygonModePoint);
}
if features.contains(wgpu_types::Features::CONSERVATIVE_RASTERIZATION) {
return_features.insert(ConservativeRasterization);
}
if features.contains(wgpu_types::Features::VERTEX_WRITABLE_STORAGE) {
return_features.insert(VertexWritableStorage);
}
if features.contains(wgpu_types::Features::CLEAR_TEXTURE) {
return_features.insert(ClearTexture);
}
if features.contains(wgpu_types::Features::MULTIVIEW) {
return_features.insert(Multiview);
}
if features.contains(wgpu_types::Features::VERTEX_ATTRIBUTE_64BIT) {
return_features.insert(VertexAttribute64Bit);
}
// shader
if features.contains(wgpu_types::Features::SHADER_F64) {
return_features.insert(ShaderF64);
}
if features.contains(wgpu_types::Features::SHADER_I16) {
return_features.insert(ShaderI16);
}
if features.contains(wgpu_types::Features::SHADER_PRIMITIVE_INDEX) {
return_features.insert(ShaderPrimitiveIndex);
}
if features.contains(wgpu_types::Features::SHADER_EARLY_DEPTH_TEST) {
return_features.insert(ShaderEarlyDepthTest);
}
if features.contains(wgpu_types::Features::EXPERIMENTAL_PASSTHROUGH_SHADERS) {
return_features.insert(PassthroughShaders);
}
return_features
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/ext/webgpu/lib.rs | ext/webgpu/lib.rs | // Copyright 2018-2025 the Deno authors. MIT license.
#![cfg(not(target_arch = "wasm32"))]
#![warn(unsafe_op_in_unsafe_fn)]
use std::cell::RefCell;
use std::rc::Rc;
use std::sync::Arc;
use deno_core::GarbageCollected;
use deno_core::OpState;
use deno_core::cppgc::SameObject;
use deno_core::op2;
use deno_core::v8;
pub use wgpu_core;
pub use wgpu_types;
use wgpu_types::PowerPreference;
use crate::error::GPUGenericError;
mod adapter;
mod bind_group;
mod bind_group_layout;
pub mod buffer;
mod byow;
mod command_buffer;
mod command_encoder;
mod compute_pass;
mod compute_pipeline;
mod device;
pub mod error;
mod pipeline_layout;
mod query_set;
mod queue;
mod render_bundle;
mod render_pass;
mod render_pipeline;
mod sampler;
mod shader;
mod surface;
pub mod texture;
mod webidl;
pub const UNSTABLE_FEATURE_NAME: &str = "webgpu";
#[allow(clippy::print_stdout)]
pub fn print_linker_flags(name: &str) {
if cfg!(windows) {
// these dls load slowly, so delay loading them
let dlls = [
// webgpu
"d3dcompiler_47",
"OPENGL32",
// network related functions
"iphlpapi",
];
for dll in dlls {
println!("cargo:rustc-link-arg-bin={name}=/delayload:{dll}.dll");
}
// enable delay loading
println!("cargo:rustc-link-arg-bin={name}=delayimp.lib");
}
}
pub type Instance = Arc<wgpu_core::global::Global>;
deno_core::extension!(
deno_webgpu,
deps = [deno_webidl, deno_web],
ops = [
op_create_gpu,
device::op_webgpu_device_start_capture,
device::op_webgpu_device_stop_capture,
],
objects = [
GPU,
adapter::GPUAdapter,
adapter::GPUAdapterInfo,
bind_group::GPUBindGroup,
bind_group_layout::GPUBindGroupLayout,
buffer::GPUBuffer,
command_buffer::GPUCommandBuffer,
command_encoder::GPUCommandEncoder,
compute_pass::GPUComputePassEncoder,
compute_pipeline::GPUComputePipeline,
device::GPUDevice,
device::GPUDeviceLostInfo,
pipeline_layout::GPUPipelineLayout,
query_set::GPUQuerySet,
queue::GPUQueue,
render_bundle::GPURenderBundle,
render_bundle::GPURenderBundleEncoder,
render_pass::GPURenderPassEncoder,
render_pipeline::GPURenderPipeline,
sampler::GPUSampler,
shader::GPUCompilationInfo,
shader::GPUCompilationMessage,
shader::GPUShaderModule,
adapter::GPUSupportedFeatures,
adapter::GPUSupportedLimits,
texture::GPUTexture,
texture::GPUTextureView,
texture::GPUExternalTexture,
byow::UnsafeWindowSurface,
surface::GPUCanvasContext,
],
esm = ["00_init.js", "02_surface.js"],
lazy_loaded_esm = ["01_webgpu.js"],
);
#[op2]
#[cppgc]
pub fn op_create_gpu(
state: &mut OpState,
scope: &mut v8::PinScope<'_, '_>,
webidl_brand: v8::Local<v8::Value>,
set_event_target_data: v8::Local<v8::Value>,
error_event_class: v8::Local<v8::Value>,
) -> GPU {
state.put(EventTargetSetup {
brand: v8::Global::new(scope, webidl_brand),
set_event_target_data: v8::Global::new(scope, set_event_target_data),
});
state.put(ErrorEventClass(v8::Global::new(scope, error_event_class)));
GPU
}
struct EventTargetSetup {
brand: v8::Global<v8::Value>,
set_event_target_data: v8::Global<v8::Value>,
}
struct ErrorEventClass(v8::Global<v8::Value>);
pub struct GPU;
// SAFETY: we're sure this can be GCed
unsafe impl GarbageCollected for GPU {
fn trace(&self, _visitor: &mut v8::cppgc::Visitor) {}
fn get_name(&self) -> &'static std::ffi::CStr {
c"GPU"
}
}
#[op2]
impl GPU {
#[constructor]
#[cppgc]
fn constructor(_: bool) -> Result<GPU, GPUGenericError> {
Err(GPUGenericError::InvalidConstructor)
}
#[async_method]
#[cppgc]
async fn request_adapter(
&self,
state: Rc<RefCell<OpState>>,
#[webidl] options: adapter::GPURequestAdapterOptions,
) -> Option<adapter::GPUAdapter> {
let mut state = state.borrow_mut();
let backends = std::env::var("DENO_WEBGPU_BACKEND").map_or_else(
|_| wgpu_types::Backends::all(),
|s| wgpu_types::Backends::from_comma_list(&s),
);
let instance = if let Some(instance) = state.try_borrow::<Instance>() {
instance
} else {
state.put(Arc::new(wgpu_core::global::Global::new(
"webgpu",
&wgpu_types::InstanceDescriptor {
backends,
flags: wgpu_types::InstanceFlags::from_build_config(),
memory_budget_thresholds: wgpu_types::MemoryBudgetThresholds {
for_resource_creation: Some(97),
for_device_loss: Some(99),
},
backend_options: wgpu_types::BackendOptions {
dx12: wgpu_types::Dx12BackendOptions {
shader_compiler: wgpu_types::Dx12Compiler::Fxc,
..Default::default()
},
gl: wgpu_types::GlBackendOptions::default(),
noop: wgpu_types::NoopBackendOptions::default(),
},
},
None,
)));
state.borrow::<Instance>()
};
let descriptor = wgpu_core::instance::RequestAdapterOptions {
power_preference: options
.power_preference
.map(|pp| match pp {
adapter::GPUPowerPreference::LowPower => PowerPreference::LowPower,
adapter::GPUPowerPreference::HighPerformance => {
PowerPreference::HighPerformance
}
})
.unwrap_or_default(),
force_fallback_adapter: options.force_fallback_adapter,
compatible_surface: None, // windowless
};
let id = instance.request_adapter(&descriptor, backends, None).ok()?;
Some(adapter::GPUAdapter {
instance: instance.clone(),
features: SameObject::new(),
limits: SameObject::new(),
info: Rc::new(SameObject::new()),
id,
})
}
#[string]
fn getPreferredCanvasFormat(&self) -> &'static str {
// https://github.com/mozilla/gecko-dev/blob/b75080bb8b11844d18cb5f9ac6e68a866ef8e243/dom/webgpu/Instance.h#L42-L47
if cfg!(target_os = "android") {
texture::GPUTextureFormat::Rgba8unorm.as_str()
} else {
texture::GPUTextureFormat::Bgra8unorm.as_str()
}
}
}
fn transform_label<'a>(label: String) -> Option<std::borrow::Cow<'a, str>> {
if label.is_empty() {
None
} else {
Some(std::borrow::Cow::Owned(label))
}
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/ext/webgpu/device.rs | ext/webgpu/device.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::borrow::Cow;
use std::cell::RefCell;
use std::num::NonZeroU64;
use std::rc::Rc;
use deno_core::GarbageCollected;
use deno_core::cppgc::SameObject;
use deno_core::cppgc::make_cppgc_object;
use deno_core::op2;
use deno_core::v8;
use deno_core::webidl::WebIdlInterfaceConverter;
use deno_error::JsErrorBox;
use wgpu_core::binding_model::BindingResource;
use wgpu_core::pipeline::ProgrammableStageDescriptor;
use wgpu_types::BindingType;
use super::bind_group::GPUBindGroup;
use super::bind_group::GPUBindingResource;
use super::bind_group_layout::GPUBindGroupLayout;
use super::buffer::GPUBuffer;
use super::compute_pipeline::GPUComputePipeline;
use super::pipeline_layout::GPUPipelineLayout;
use super::queue::GPUQueue;
use super::sampler::GPUSampler;
use super::shader::GPUShaderModule;
use super::texture::GPUTexture;
use crate::Instance;
use crate::adapter::GPUAdapterInfo;
use crate::adapter::GPUSupportedFeatures;
use crate::adapter::GPUSupportedLimits;
use crate::command_encoder::GPUCommandEncoder;
use crate::error::GPUError;
use crate::error::GPUGenericError;
use crate::query_set::GPUQuerySet;
use crate::render_bundle::GPURenderBundleEncoder;
use crate::render_pipeline::GPURenderPipeline;
use crate::shader::GPUCompilationInfo;
use crate::webidl::features_to_feature_names;
pub struct GPUDevice {
pub instance: Instance,
pub id: wgpu_core::id::DeviceId,
pub adapter: wgpu_core::id::AdapterId,
pub queue: wgpu_core::id::QueueId,
pub label: String,
pub features: SameObject<GPUSupportedFeatures>,
pub limits: SameObject<GPUSupportedLimits>,
pub adapter_info: Rc<SameObject<GPUAdapterInfo>>,
pub queue_obj: SameObject<GPUQueue>,
pub error_handler: super::error::ErrorHandler,
pub lost_promise: v8::Global<v8::Promise>,
pub has_active_capture: RefCell<bool>,
}
impl Drop for GPUDevice {
fn drop(&mut self) {
self.instance.device_drop(self.id);
}
}
impl WebIdlInterfaceConverter for GPUDevice {
const NAME: &'static str = "GPUDevice";
}
// SAFETY: we're sure this can be GCed
unsafe impl GarbageCollected for GPUDevice {
fn trace(&self, _visitor: &mut deno_core::v8::cppgc::Visitor) {}
fn get_name(&self) -> &'static std::ffi::CStr {
c"GPUDevice"
}
}
// EventTarget is extended in JS
#[op2]
impl GPUDevice {
#[constructor]
#[cppgc]
fn constructor(_: bool) -> Result<GPUDevice, GPUGenericError> {
Err(GPUGenericError::InvalidConstructor)
}
#[getter]
#[string]
fn label(&self) -> String {
self.label.clone()
}
#[setter]
#[string]
fn label(&self, #[webidl] _label: String) {
// TODO(@crowlKats): no-op, needs wpgu to implement changing the label
}
#[getter]
#[global]
fn features(
&self,
scope: &mut v8::PinScope<'_, '_>,
) -> v8::Global<v8::Object> {
self.features.get(scope, |scope| {
let features = self.instance.device_features(self.id);
let features = features_to_feature_names(features);
GPUSupportedFeatures::new(scope, features)
})
}
#[getter]
#[global]
fn limits(&self, scope: &mut v8::PinScope<'_, '_>) -> v8::Global<v8::Object> {
self.limits.get(scope, |_| {
let limits = self.instance.device_limits(self.id);
GPUSupportedLimits(limits)
})
}
#[getter]
#[global]
fn adapter_info(
&self,
scope: &mut v8::PinScope<'_, '_>,
) -> v8::Global<v8::Object> {
self.adapter_info.get(scope, |_| {
let info = self.instance.adapter_get_info(self.adapter);
GPUAdapterInfo { info }
})
}
#[getter]
#[global]
fn queue(&self, scope: &mut v8::PinScope<'_, '_>) -> v8::Global<v8::Object> {
self.queue_obj.get(scope, |_| GPUQueue {
id: self.queue,
device: self.id,
error_handler: self.error_handler.clone(),
instance: self.instance.clone(),
label: self.label.clone(),
})
}
#[fast]
#[undefined]
fn destroy(&self) {
self.instance.device_destroy(self.id);
self
.error_handler
.push_error(Some(GPUError::Lost(GPUDeviceLostReason::Destroyed)));
}
#[required(1)]
#[cppgc]
fn create_buffer(
&self,
#[webidl] descriptor: super::buffer::GPUBufferDescriptor,
) -> Result<GPUBuffer, JsErrorBox> {
// wgpu-core would also check this, but it needs to be reported via a JS
// error, not a validation error. (WebGPU specifies this check on the
// content timeline.)
if descriptor.mapped_at_creation
&& !descriptor
.size
.is_multiple_of(wgpu_types::COPY_BUFFER_ALIGNMENT)
{
return Err(JsErrorBox::range_error(format!(
"The size of a buffer that is mapped at creation must be a multiple of {}",
wgpu_types::COPY_BUFFER_ALIGNMENT,
)));
}
// Validation of the usage needs to happen on the device timeline, so
// don't raise an error immediately if it isn't valid. wgpu will
// reject `BufferUsages::empty()`.
let usage = wgpu_types::BufferUsages::from_bits(descriptor.usage)
.unwrap_or(wgpu_types::BufferUsages::empty());
let wgpu_descriptor = wgpu_core::resource::BufferDescriptor {
label: crate::transform_label(descriptor.label.clone()),
size: descriptor.size,
usage,
mapped_at_creation: descriptor.mapped_at_creation,
};
let (id, err) =
self
.instance
.device_create_buffer(self.id, &wgpu_descriptor, None);
self.error_handler.push_error(err);
Ok(GPUBuffer {
instance: self.instance.clone(),
error_handler: self.error_handler.clone(),
id,
device: self.id,
label: descriptor.label,
size: descriptor.size,
usage: descriptor.usage,
map_state: RefCell::new(if descriptor.mapped_at_creation {
"mapped"
} else {
"unmapped"
}),
map_mode: RefCell::new(if descriptor.mapped_at_creation {
Some(wgpu_core::device::HostMap::Write)
} else {
None
}),
mapped_js_buffers: RefCell::new(vec![]),
})
}
#[required(1)]
#[cppgc]
// Implements GPUDevice.createTexture(): translates the WebIDL descriptor
// into a wgpu texture descriptor. Unlike buffer usage, invalid texture
// usage bits are rejected immediately with a TypeError.
fn create_texture(
&self,
#[webidl] descriptor: super::texture::GPUTextureDescriptor,
) -> Result<GPUTexture, JsErrorBox> {
let wgpu_descriptor = wgpu_core::resource::TextureDescriptor {
label: crate::transform_label(descriptor.label.clone()),
size: descriptor.size.into(),
mip_level_count: descriptor.mip_level_count,
sample_count: descriptor.sample_count,
dimension: descriptor.dimension.clone().into(),
format: descriptor.format.clone().into(),
usage: wgpu_types::TextureUsages::from_bits(descriptor.usage)
.ok_or_else(|| JsErrorBox::type_error("usage is not valid"))?,
view_formats: descriptor
.view_formats
.into_iter()
.map(Into::into)
.collect(),
};
let (id, err) =
self
.instance
.device_create_texture(self.id, &wgpu_descriptor, None);
// Device-timeline validation errors are routed to the error handler.
self.error_handler.push_error(err);
Ok(GPUTexture {
instance: self.instance.clone(),
error_handler: self.error_handler.clone(),
id,
device_id: self.id,
queue_id: self.queue,
// The default view is created lazily on first use.
default_view_id: Default::default(),
label: descriptor.label,
size: wgpu_descriptor.size,
mip_level_count: wgpu_descriptor.mip_level_count,
sample_count: wgpu_descriptor.sample_count,
dimension: descriptor.dimension,
format: descriptor.format,
usage: descriptor.usage,
})
}
#[cppgc]
// Implements GPUDevice.createSampler(). Invalid parameter combinations
// are reported by wgpu as device-timeline validation errors.
fn create_sampler(
  &self,
  #[webidl] descriptor: super::sampler::GPUSamplerDescriptor,
) -> Result<GPUSampler, JsErrorBox> {
  let sampler_desc = wgpu_core::resource::SamplerDescriptor {
    label: crate::transform_label(descriptor.label.clone()),
    // Address modes in u, v, w order.
    address_modes: [
      descriptor.address_mode_u.into(),
      descriptor.address_mode_v.into(),
      descriptor.address_mode_w.into(),
    ],
    mag_filter: descriptor.mag_filter.into(),
    min_filter: descriptor.min_filter.into(),
    mipmap_filter: descriptor.mipmap_filter.into(),
    lod_min_clamp: descriptor.lod_min_clamp,
    lod_max_clamp: descriptor.lod_max_clamp,
    compare: descriptor.compare.map(Into::into),
    anisotropy_clamp: descriptor.max_anisotropy,
    // Border colors are not exposed through the WebGPU API surface here.
    border_color: None,
  };
  let (id, err) = self
    .instance
    .device_create_sampler(self.id, &sampler_desc, None);
  self.error_handler.push_error(err);
  Ok(GPUSampler {
    instance: self.instance.clone(),
    id,
    label: descriptor.label,
  })
}
#[required(1)]
#[cppgc]
// Implements GPUDevice.createBindGroupLayout().
//
// Each entry must set exactly one of `buffer`, `sampler`, `texture`,
// `storageTexture` — violating that is a content-timeline TypeError.
// Remaining validation is performed by wgpu on the device timeline and
// reported through the device error handler.
fn create_bind_group_layout(
  &self,
  #[webidl]
  descriptor: super::bind_group_layout::GPUBindGroupLayoutDescriptor,
) -> Result<GPUBindGroupLayout, JsErrorBox> {
  let mut entries = Vec::with_capacity(descriptor.entries.len());
  for entry in descriptor.entries {
    // Count how many of the mutually exclusive binding kinds are set.
    let n_entries = [
      entry.buffer.is_some(),
      entry.sampler.is_some(),
      entry.texture.is_some(),
      entry.storage_texture.is_some(),
    ]
    .into_iter()
    .filter(|t| *t)
    .count();
    if n_entries != 1 {
      return Err(JsErrorBox::type_error(
        "Only one of 'buffer', 'sampler', 'texture' and 'storageTexture' may be specified",
      ));
    }
    let ty = if let Some(buffer) = entry.buffer {
      BindingType::Buffer {
        ty: buffer.r#type.into(),
        has_dynamic_offset: buffer.has_dynamic_offset,
        // minBindingSize of 0 means "no minimum"; map it to None.
        min_binding_size: NonZeroU64::new(buffer.min_binding_size),
      }
    } else if let Some(sampler) = entry.sampler {
      BindingType::Sampler(sampler.r#type.into())
    } else if let Some(texture) = entry.texture {
      BindingType::Texture {
        sample_type: texture.sample_type.into(),
        view_dimension: texture.view_dimension.into(),
        multisampled: texture.multisampled,
      }
    } else if let Some(storage_texture) = entry.storage_texture {
      BindingType::StorageTexture {
        access: storage_texture.access.into(),
        format: storage_texture.format.into(),
        view_dimension: storage_texture.view_dimension.into(),
      }
    } else {
      // Exactly one variant is set, per the n_entries check above.
      unreachable!()
    };
    entries.push(wgpu_types::BindGroupLayoutEntry {
      binding: entry.binding,
      // These bits are the entry's shader-stage *visibility*, not a
      // usage bitset; the error message previously said "usage is not
      // valid" (copy/paste from resource creation).
      visibility: wgpu_types::ShaderStages::from_bits(entry.visibility)
        .ok_or_else(|| JsErrorBox::type_error("visibility is not valid"))?,
      ty,
      count: None, // native-only
    });
  }
  let wgpu_descriptor = wgpu_core::binding_model::BindGroupLayoutDescriptor {
    label: crate::transform_label(descriptor.label.clone()),
    entries: Cow::Owned(entries),
  };
  let (id, err) = self.instance.device_create_bind_group_layout(
    self.id,
    &wgpu_descriptor,
    None,
  );
  self.error_handler.push_error(err);
  Ok(GPUBindGroupLayout {
    instance: self.instance.clone(),
    id,
    label: descriptor.label,
  })
}
#[required(1)]
#[cppgc]
// Implements GPUDevice.createPipelineLayout(): collects the raw wgpu ids
// of the supplied bind group layouts and forwards them to wgpu-core.
fn create_pipeline_layout(
  &self,
  #[webidl] descriptor: super::pipeline_layout::GPUPipelineLayoutDescriptor,
) -> GPUPipelineLayout {
  let layout_ids = descriptor
    .bind_group_layouts
    .into_iter()
    .map(|layout| layout.id)
    .collect::<Vec<_>>();
  let pipeline_layout_desc =
    wgpu_core::binding_model::PipelineLayoutDescriptor {
      label: crate::transform_label(descriptor.label.clone()),
      bind_group_layouts: Cow::Owned(layout_ids),
      // Immediate (push) constants are not exposed through WebGPU.
      immediate_size: 0,
    };
  let (id, err) = self.instance.device_create_pipeline_layout(
    self.id,
    &pipeline_layout_desc,
    None,
  );
  self.error_handler.push_error(err);
  GPUPipelineLayout {
    instance: self.instance.clone(),
    id,
    label: descriptor.label,
  }
}
#[required(1)]
#[cppgc]
// Implements GPUDevice.createBindGroup(): translates each WebIDL entry
// into a wgpu binding-model entry. wgpu validates the entries against
// the layout on the device timeline.
fn create_bind_group(
&self,
#[webidl] descriptor: super::bind_group::GPUBindGroupDescriptor,
) -> GPUBindGroup {
let entries = descriptor
.entries
.into_iter()
.map(|entry| wgpu_core::binding_model::BindGroupEntry {
binding: entry.binding,
resource: match entry.resource {
GPUBindingResource::Sampler(sampler) => {
BindingResource::Sampler(sampler.id)
}
// A bare GPUTexture binds through its default view.
GPUBindingResource::Texture(texture) => {
BindingResource::TextureView(texture.default_view_id())
}
GPUBindingResource::TextureView(texture_view) => {
BindingResource::TextureView(texture_view.id)
}
// A bare GPUBuffer binds from offset 0 over its full size.
GPUBindingResource::Buffer(buffer) => {
BindingResource::Buffer(wgpu_core::binding_model::BufferBinding {
buffer: buffer.id,
offset: 0,
size: Some(buffer.size),
})
}
// An explicit GPUBufferBinding dictionary carries its own
// offset and (optional) size.
GPUBindingResource::BufferBinding(buffer_binding) => {
BindingResource::Buffer(wgpu_core::binding_model::BufferBinding {
buffer: buffer_binding.buffer.id,
offset: buffer_binding.offset,
size: buffer_binding.size,
})
}
},
})
.collect::<Vec<_>>();
let wgpu_descriptor = wgpu_core::binding_model::BindGroupDescriptor {
label: crate::transform_label(descriptor.label.clone()),
layout: descriptor.layout.id,
entries: Cow::Owned(entries),
};
let (id, err) =
self
.instance
.device_create_bind_group(self.id, &wgpu_descriptor, None);
self.error_handler.push_error(err);
GPUBindGroup {
instance: self.instance.clone(),
id,
label: descriptor.label,
}
}
#[required(1)]
#[cppgc]
// Implements GPUDevice.createShaderModule(). Only WGSL source is
// accepted. Compilation diagnostics are captured eagerly into a
// GPUCompilationInfo object held by the returned module, and any error
// is additionally pushed onto the device error handler.
fn create_shader_module(
&self,
scope: &mut v8::PinScope<'_, '_>,
#[webidl] descriptor: super::shader::GPUShaderModuleDescriptor,
) -> GPUShaderModule {
let wgpu_descriptor = wgpu_core::pipeline::ShaderModuleDescriptor {
label: crate::transform_label(descriptor.label.clone()),
runtime_checks: wgpu_types::ShaderRuntimeChecks::default(),
};
let (id, err) = self.instance.device_create_shader_module(
self.id,
&wgpu_descriptor,
wgpu_core::pipeline::ShaderModuleSource::Wgsl(Cow::Borrowed(
&descriptor.code,
)),
None,
);
// Build the compilation info (messages reference positions in the
// original source) before the error is handed to the error handler.
let compilation_info =
GPUCompilationInfo::new(scope, err.iter(), &descriptor.code);
let compilation_info = make_cppgc_object(scope, compilation_info);
let compilation_info = v8::Global::new(scope, compilation_info);
self.error_handler.push_error(err);
GPUShaderModule {
instance: self.instance.clone(),
id,
label: descriptor.label,
compilation_info,
}
}
#[required(1)]
#[cppgc]
// Implements GPUDevice.createComputePipeline(); delegates to the shared
// helper in the plain impl block.
fn create_compute_pipeline(
&self,
#[webidl] descriptor: super::compute_pipeline::GPUComputePipelineDescriptor,
) -> GPUComputePipeline {
self.new_compute_pipeline(descriptor)
}
#[required(1)]
#[cppgc]
// Implements GPUDevice.createRenderPipeline(); delegates to the shared
// helper in the plain impl block.
fn create_render_pipeline(
&self,
#[webidl] descriptor: super::render_pipeline::GPURenderPipelineDescriptor,
) -> Result<GPURenderPipeline, JsErrorBox> {
self.new_render_pipeline(descriptor)
}
#[async_method]
#[required(1)]
#[cppgc]
// Implements GPUDevice.createComputePipelineAsync(). Same synchronous
// implementation as createComputePipeline; the promise wrapping is
// provided by the #[async_method] attribute.
async fn create_compute_pipeline_async(
&self,
#[webidl] descriptor: super::compute_pipeline::GPUComputePipelineDescriptor,
) -> GPUComputePipeline {
self.new_compute_pipeline(descriptor)
}
#[async_method]
#[required(1)]
#[cppgc]
// Implements GPUDevice.createRenderPipelineAsync(). Same synchronous
// implementation as createRenderPipeline; the promise wrapping is
// provided by the #[async_method] attribute.
async fn create_render_pipeline_async(
&self,
#[webidl] descriptor: super::render_pipeline::GPURenderPipelineDescriptor,
) -> Result<GPURenderPipeline, JsErrorBox> {
self.new_render_pipeline(descriptor)
}
// Implements GPUDevice.createCommandEncoder(). Returns the raw v8 object
// (rather than a cppgc handle) so that, on Apple platforms, a weak
// finalizer can be attached to undo the external-memory accounting.
fn create_command_encoder<'a>(
&self,
scope: &mut v8::PinScope<'a, '_>,
#[webidl] descriptor: Option<
super::command_encoder::GPUCommandEncoderDescriptor,
>,
) -> v8::Local<'a, v8::Object> {
// Metal imposes a limit on the number of outstanding command buffers.
// Attempting to create another command buffer after reaching that limit
// will block, which can result in a deadlock if GC is required to
// recover old command buffers. To encourage V8 to garbage collect
// command buffers before that happens, we associate some external
// memory with each command buffer.
#[cfg(target_vendor = "apple")]
const EXTERNAL_MEMORY_AMOUNT: i64 = 1 << 16;
// The descriptor (and thus the label) is optional for this method.
let label = descriptor.map(|d| d.label).unwrap_or_default();
let wgpu_descriptor = wgpu_types::CommandEncoderDescriptor {
label: Some(Cow::Owned(label.clone())),
};
#[cfg(target_vendor = "apple")]
scope.adjust_amount_of_external_allocated_memory(EXTERNAL_MEMORY_AMOUNT);
let (id, err) = self.instance.device_create_command_encoder(
self.id,
&wgpu_descriptor,
None,
);
self.error_handler.push_error(err);
let encoder = GPUCommandEncoder {
instance: self.instance.clone(),
error_handler: self.error_handler.clone(),
id,
label,
#[cfg(target_vendor = "apple")]
weak: std::sync::OnceLock::new(),
};
let obj = make_cppgc_object(scope, encoder);
#[cfg(target_vendor = "apple")]
{
// The finalizer releases the external-memory credit taken above when
// V8 collects the encoder object.
let finalizer = v8::Weak::with_finalizer(
scope,
obj,
Box::new(|isolate: &mut v8::Isolate| {
isolate.adjust_amount_of_external_allocated_memory(
-EXTERNAL_MEMORY_AMOUNT,
);
}),
);
// The unwrap is fine: `obj` was just created from a GPUCommandEncoder,
// and `weak` was freshly initialized (empty) above.
deno_core::cppgc::try_unwrap_cppgc_object::<GPUCommandEncoder>(
scope,
obj.into(),
)
.unwrap()
.weak
.set(finalizer)
.unwrap();
}
obj
}
#[required(1)]
#[cppgc]
// Implements GPUDevice.createRenderBundleEncoder(). When wgpu rejects
// the descriptor, a dummy encoder is handed out so the returned object
// is still usable, and the error is pushed to the device error handler.
fn create_render_bundle_encoder(
  &self,
  #[webidl]
  descriptor: super::render_bundle::GPURenderBundleEncoderDescriptor,
) -> GPURenderBundleEncoder {
  let color_formats = descriptor
    .color_formats
    .into_iter()
    .map(|format| format.into_option().map(Into::into))
    .collect::<Vec<_>>();
  let depth_stencil = descriptor.depth_stencil_format.map(|format| {
    wgpu_types::RenderBundleDepthStencil {
      format: format.into(),
      depth_read_only: descriptor.depth_read_only,
      stencil_read_only: descriptor.stencil_read_only,
    }
  });
  let encoder_desc = wgpu_core::command::RenderBundleEncoderDescriptor {
    label: crate::transform_label(descriptor.label.clone()),
    color_formats: Cow::Owned(color_formats),
    depth_stencil,
    sample_count: descriptor.sample_count,
    multiview: None,
  };
  let (encoder, err) =
    wgpu_core::command::RenderBundleEncoder::new(&encoder_desc, self.id)
      .map_or_else(
        |e| {
          (
            wgpu_core::command::RenderBundleEncoder::dummy(self.id),
            Some(e),
          )
        },
        |encoder| (encoder, None),
      );
  self.error_handler.push_error(err);
  GPURenderBundleEncoder {
    instance: self.instance.clone(),
    error_handler: self.error_handler.clone(),
    encoder: RefCell::new(Some(encoder)),
    label: descriptor.label,
  }
}
#[required(1)]
#[cppgc]
// Implements GPUDevice.createQuerySet(). Count/type validation happens
// inside wgpu and is reported on the device timeline.
fn create_query_set(
  &self,
  #[webidl] descriptor: crate::query_set::GPUQuerySetDescriptor,
) -> GPUQuerySet {
  let query_set_desc = wgpu_core::resource::QuerySetDescriptor {
    label: crate::transform_label(descriptor.label.clone()),
    ty: descriptor.r#type.clone().into(),
    count: descriptor.count,
  };
  let (id, err) = self.instance.device_create_query_set(
    self.id,
    &query_set_desc,
    None,
  );
  self.error_handler.push_error(err);
  GPUQuerySet {
    instance: self.instance.clone(),
    id,
    r#type: descriptor.r#type,
    count: descriptor.count,
    label: descriptor.label,
  }
}
#[getter]
#[global]
// GPUDevice.lost: the promise that settles when the device is lost.
fn lost(&self) -> v8::Global<v8::Promise> {
self.lost_promise.clone()
}
#[required(1)]
#[undefined]
// Implements GPUDevice.pushErrorScope(): pushes a new (filter, errors)
// pair onto the shared error-scope stack.
fn push_error_scope(&self, #[webidl] filter: super::error::GPUErrorFilter) {
self
.error_handler
.scopes
.lock()
.unwrap()
.push((filter, vec![]));
}
#[async_method(fake)]
#[global]
fn pop_error_scope(
&self,
scope: &mut v8::PinScope<'_, '_>,
) -> Result<v8::Global<v8::Value>, JsErrorBox> {
if self.error_handler.is_lost.get().is_some() {
let val = v8::null(scope).cast::<v8::Value>();
return Ok(v8::Global::new(scope, val));
}
let Some((_, errors)) = self.error_handler.scopes.lock().unwrap().pop()
else {
return Err(JsErrorBox::new(
"DOMExceptionOperationError",
"There are no error scopes on the error scope stack",
));
};
let val = if let Some(err) = errors.into_iter().next() {
deno_core::error::to_v8_error(scope, &err)
} else {
v8::null(scope).into()
};
Ok(v8::Global::new(scope, val))
}
}
impl GPUDevice {
// Shared implementation behind createComputePipeline and
// createComputePipelineAsync.
fn new_compute_pipeline(
&self,
descriptor: super::compute_pipeline::GPUComputePipelineDescriptor,
) -> GPUComputePipeline {
let wgpu_descriptor = wgpu_core::pipeline::ComputePipelineDescriptor {
label: crate::transform_label(descriptor.label.clone()),
layout: descriptor.layout.into(),
stage: ProgrammableStageDescriptor {
module: descriptor.compute.module.id,
entry_point: descriptor.compute.entry_point.map(Into::into),
constants: descriptor.compute.constants.into_iter().collect(),
// Workgroup memory is always zero-initialized here.
zero_initialize_workgroup_memory: true,
},
cache: None,
};
let (id, err) = self.instance.device_create_compute_pipeline(
self.id,
&wgpu_descriptor,
None,
);
// Creation errors surface on the device timeline.
self.error_handler.push_error(err);
GPUComputePipeline {
instance: self.instance.clone(),
error_handler: self.error_handler.clone(),
id,
label: descriptor.label.clone(),
}
}
// Shared implementation behind createRenderPipeline and
// createRenderPipelineAsync. Translates the full WebIDL render pipeline
// descriptor (vertex, primitive, depth/stencil, multisample, fragment)
// into wgpu's representation. Only invalid `writeMask` bits produce an
// immediate TypeError; everything else is device-timeline validation.
fn new_render_pipeline(
&self,
descriptor: super::render_pipeline::GPURenderPipelineDescriptor,
) -> Result<GPURenderPipeline, JsErrorBox> {
let vertex = wgpu_core::pipeline::VertexState {
stage: ProgrammableStageDescriptor {
module: descriptor.vertex.module.id,
entry_point: descriptor.vertex.entry_point.map(Into::into),
constants: descriptor.vertex.constants.into_iter().collect(),
zero_initialize_workgroup_memory: true,
},
buffers: Cow::Owned(
descriptor
.vertex
.buffers
.into_iter()
.map(|b| {
// Nullable slots in the buffers sequence become default
// (empty) vertex buffer layouts.
b.into_option().map_or_else(
wgpu_core::pipeline::VertexBufferLayout::default,
|layout| wgpu_core::pipeline::VertexBufferLayout {
array_stride: layout.array_stride,
step_mode: layout.step_mode.into(),
attributes: Cow::Owned(
layout
.attributes
.into_iter()
.map(|attr| wgpu_types::VertexAttribute {
format: attr.format.into(),
offset: attr.offset,
shader_location: attr.shader_location,
})
.collect(),
),
},
)
})
.collect(),
),
};
let primitive = wgpu_types::PrimitiveState {
topology: descriptor.primitive.topology.into(),
strip_index_format: descriptor
.primitive
.strip_index_format
.map(Into::into),
front_face: descriptor.primitive.front_face.into(),
cull_mode: descriptor.primitive.cull_mode.into(),
unclipped_depth: descriptor.primitive.unclipped_depth,
// Polygon mode / conservative raster are not exposed via WebGPU.
polygon_mode: Default::default(),
conservative: false,
};
let depth_stencil = descriptor.depth_stencil.map(|depth_stencil| {
let front = wgpu_types::StencilFaceState {
compare: depth_stencil.stencil_front.compare.into(),
fail_op: depth_stencil.stencil_front.fail_op.into(),
depth_fail_op: depth_stencil.stencil_front.depth_fail_op.into(),
pass_op: depth_stencil.stencil_front.pass_op.into(),
};
let back = wgpu_types::StencilFaceState {
compare: depth_stencil.stencil_back.compare.into(),
fail_op: depth_stencil.stencil_back.fail_op.into(),
depth_fail_op: depth_stencil.stencil_back.depth_fail_op.into(),
pass_op: depth_stencil.stencil_back.pass_op.into(),
};
wgpu_types::DepthStencilState {
format: depth_stencil.format.into(),
depth_write_enabled: depth_stencil
.depth_write_enabled
.unwrap_or_default(),
depth_compare: depth_stencil
.depth_compare
.map(Into::into)
.unwrap_or(wgpu_types::CompareFunction::Never), // TODO(wgpu): should be optional here
stencil: wgpu_types::StencilState {
front,
back,
read_mask: depth_stencil.stencil_read_mask,
write_mask: depth_stencil.stencil_write_mask,
},
bias: wgpu_types::DepthBiasState {
constant: depth_stencil.depth_bias,
slope_scale: depth_stencil.depth_bias_slope_scale,
clamp: depth_stencil.depth_bias_clamp,
},
}
});
let multisample = wgpu_types::MultisampleState {
count: descriptor.multisample.count,
mask: descriptor.multisample.mask as u64,
alpha_to_coverage_enabled: descriptor
.multisample
.alpha_to_coverage_enabled,
};
let fragment = descriptor
.fragment
.map(|fragment| {
Ok::<_, JsErrorBox>(wgpu_core::pipeline::FragmentState {
stage: ProgrammableStageDescriptor {
module: fragment.module.id,
entry_point: fragment.entry_point.map(Into::into),
constants: fragment.constants.into_iter().collect(),
zero_initialize_workgroup_memory: true,
},
targets: Cow::Owned(
fragment
.targets
.into_iter()
.map(|target| {
target
.into_option()
.map(|target| {
Ok(wgpu_types::ColorTargetState {
format: target.format.into(),
blend: target.blend.map(|blend| wgpu_types::BlendState {
color: wgpu_types::BlendComponent {
src_factor: blend.color.src_factor.into(),
dst_factor: blend.color.dst_factor.into(),
operation: blend.color.operation.into(),
},
alpha: wgpu_types::BlendComponent {
src_factor: blend.alpha.src_factor.into(),
dst_factor: blend.alpha.dst_factor.into(),
operation: blend.alpha.operation.into(),
},
}),
write_mask: wgpu_types::ColorWrites::from_bits(
target.write_mask,
)
.ok_or_else(|| {
// NOTE(review): this validates `writeMask` bits but
// reports "usage is not valid" — confirm whether the
// message is intentional.
JsErrorBox::type_error("usage is not valid")
})?,
})
})
.transpose()
})
.collect::<Result<_, JsErrorBox>>()?,
),
})
})
.transpose()?;
let wgpu_descriptor = wgpu_core::pipeline::RenderPipelineDescriptor {
label: crate::transform_label(descriptor.label.clone()),
layout: descriptor.layout.into(),
vertex,
primitive,
depth_stencil,
multisample,
fragment,
cache: None,
multiview_mask: None,
};
let (id, err) = self.instance.device_create_render_pipeline(
self.id,
&wgpu_descriptor,
None,
);
self.error_handler.push_error(err);
Ok(GPURenderPipeline {
instance: self.instance.clone(),
error_handler: self.error_handler.clone(),
id,
label: descriptor.label,
})
}
}
// Reason associated with a device loss, mirrored from wgpu.
#[derive(Clone, Debug, Default, Hash, Eq, PartialEq)]
pub enum GPUDeviceLostReason {
#[default]
Unknown,
Destroyed,
}
impl From<wgpu_types::DeviceLostReason> for GPUDeviceLostReason {
fn from(value: wgpu_types::DeviceLostReason) -> Self {
match value {
wgpu_types::DeviceLostReason::Unknown => Self::Unknown,
wgpu_types::DeviceLostReason::Destroyed => Self::Destroyed,
}
}
}
// Backing state for the JS GPUDeviceLostInfo interface.
#[derive(Default)]
pub struct GPUDeviceLostInfo {
pub reason: GPUDeviceLostReason,
}
// SAFETY: we're sure this can be GCed
unsafe impl GarbageCollected for GPUDeviceLostInfo {
fn trace(&self, _visitor: &mut deno_core::v8::cppgc::Visitor) {}
fn get_name(&self) -> &'static std::ffi::CStr {
c"GPUDeviceLostInfo"
}
}
#[op2]
impl GPUDeviceLostInfo {
#[constructor]
#[cppgc]
fn constructor(_: bool) -> Result<GPUDeviceLostInfo, GPUGenericError> {
Err(GPUGenericError::InvalidConstructor)
}
#[getter]
#[string]
fn reason(&self) -> &'static str {
use GPUDeviceLostReason::*;
match self.reason {
Unknown => "unknown",
Destroyed => "destroyed",
}
}
#[getter]
#[string]
fn message(&self) -> &'static str {
"device was lost"
}
}
#[op2(fast)]
// Starts a graphics-debugger capture for the device; errors with a
// TypeError if one is already in progress.
pub fn op_webgpu_device_start_capture(
  #[cppgc] device: &GPUDevice,
) -> Result<(), JsErrorBox> {
  let mut active = device.has_active_capture.borrow_mut();
  if *active {
    return Err(JsErrorBox::type_error("capture already started"));
  }
  // safety: active check is above, other safety concerns are related to the debugger itself
  unsafe {
    device
      .instance
      .device_start_graphics_debugger_capture(device.id);
  }
  *active = true;
  Ok(())
}
#[op2(fast)]
// Stops an in-progress graphics-debugger capture; errors with a
// TypeError if none is active.
pub fn op_webgpu_device_stop_capture(
  #[cppgc] device: &GPUDevice,
) -> Result<(), JsErrorBox> {
  let mut active = device.has_active_capture.borrow_mut();
  if !*active {
    return Err(JsErrorBox::type_error("No capture active"));
  }
  // safety: active check is above, other safety concerns are related to the debugger itself
  unsafe {
    device
      .instance
      .device_stop_graphics_debugger_capture(device.id);
  }
  *active = false;
  Ok(())
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/ext/webgpu/bind_group.rs | ext/webgpu/bind_group.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::borrow::Cow;
use deno_core::GarbageCollected;
use deno_core::WebIDL;
use deno_core::cppgc::Ref;
use deno_core::op2;
use deno_core::v8::Local;
use deno_core::v8::PinScope;
use deno_core::v8::Value;
use deno_core::webidl::ContextFn;
use deno_core::webidl::WebIdlConverter;
use deno_core::webidl::WebIdlError;
use deno_core::webidl::WebIdlInterfaceConverter;
use crate::Instance;
use crate::buffer::GPUBuffer;
use crate::error::GPUGenericError;
use crate::sampler::GPUSampler;
use crate::texture::GPUTexture;
use crate::texture::GPUTextureView;
// Backing state for the JS GPUBindGroup interface; owns its wgpu id and
// releases it on drop.
pub struct GPUBindGroup {
pub instance: Instance,
pub id: wgpu_core::id::BindGroupId,
pub label: String,
}
impl Drop for GPUBindGroup {
fn drop(&mut self) {
// Release the wgpu-side resource when the JS wrapper is collected.
self.instance.bind_group_drop(self.id);
}
}
impl WebIdlInterfaceConverter for GPUBindGroup {
const NAME: &'static str = "GPUBindGroup";
}
// SAFETY: we're sure this can be GCed
unsafe impl GarbageCollected for GPUBindGroup {
fn trace(&self, _visitor: &mut deno_core::v8::cppgc::Visitor) {}
fn get_name(&self) -> &'static std::ffi::CStr {
c"GPUBindGroup"
}
}
#[op2]
impl GPUBindGroup {
  #[constructor]
  #[cppgc]
  // GPUBindGroup is not constructible from JS.
  fn constructor(_: bool) -> Result<GPUBindGroup, GPUGenericError> {
    Err(GPUGenericError::InvalidConstructor)
  }

  #[getter]
  #[string]
  // Returns a copy of the label given at creation time.
  fn label(&self) -> String {
    self.label.to_owned()
  }

  #[setter]
  #[string]
  fn label(&self, #[webidl] _label: String) {
    // TODO(@crowlKats): no-op, needs wpgu to implement changing the label
  }
}
// WebIDL dictionary for GPUDevice.createBindGroup().
#[derive(WebIDL)]
#[webidl(dictionary)]
pub(crate) struct GPUBindGroupDescriptor {
#[webidl(default = String::new())]
pub label: String,
pub layout: Ref<super::bind_group_layout::GPUBindGroupLayout>,
pub entries: Vec<GPUBindGroupEntry>,
}
// One (binding slot, resource) pair inside a bind group descriptor.
#[derive(WebIDL)]
#[webidl(dictionary)]
pub(crate) struct GPUBindGroupEntry {
#[options(enforce_range = true)]
pub binding: u32,
pub resource: GPUBindingResource,
}
// WebIDL GPUBufferBinding dictionary: a buffer plus an offset and an
// optional size (None meaning "to the end of the buffer" per wgpu).
#[derive(WebIDL)]
#[webidl(dictionary)]
pub(crate) struct GPUBufferBinding {
pub buffer: Ref<GPUBuffer>,
#[webidl(default = 0)]
#[options(enforce_range = true)]
pub offset: u64,
#[options(enforce_range = true)]
pub size: Option<u64>,
}
// The GPUBindingResource WebIDL union. `Texture` is a non-standard
// convenience that binds a texture's default view.
pub(crate) enum GPUBindingResource {
Sampler(Ref<GPUSampler>),
Texture(Ref<GPUTexture>),
TextureView(Ref<GPUTextureView>),
Buffer(Ref<GPUBuffer>),
BufferBinding(GPUBufferBinding),
}
impl<'a> WebIdlConverter<'a> for GPUBindingResource {
  type Options = ();

  // GPUBindingResource is a WebIDL union: conversion is attempted
  // interface by interface in declaration order (sampler, texture,
  // texture view, buffer), finally falling back to the GPUBufferBinding
  // dictionary, whose error is the one reported on total failure.
  fn convert<'b>(
    scope: &mut PinScope<'a, '_>,
    value: Local<'a, Value>,
    prefix: Cow<'static, str>,
    context: ContextFn<'b>,
    options: &Self::Options,
  ) -> Result<Self, WebIdlError> {
    if let Ok(sampler) = <Ref<GPUSampler>>::convert(
      scope,
      value,
      prefix.clone(),
      context.borrowed(),
      options,
    ) {
      return Ok(Self::Sampler(sampler));
    }
    if let Ok(texture) = <Ref<GPUTexture>>::convert(
      scope,
      value,
      prefix.clone(),
      context.borrowed(),
      options,
    ) {
      return Ok(Self::Texture(texture));
    }
    if let Ok(view) = <Ref<GPUTextureView>>::convert(
      scope,
      value,
      prefix.clone(),
      context.borrowed(),
      options,
    ) {
      return Ok(Self::TextureView(view));
    }
    if let Ok(buffer) = <Ref<GPUBuffer>>::convert(
      scope,
      value,
      prefix.clone(),
      context.borrowed(),
      options,
    ) {
      return Ok(Self::Buffer(buffer));
    }
    GPUBufferBinding::convert(scope, value, prefix, context, options)
      .map(Self::BufferBinding)
  }
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/ext/webgpu/bind_group_layout.rs | ext/webgpu/bind_group_layout.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use deno_core::GarbageCollected;
use deno_core::WebIDL;
use deno_core::op2;
use crate::Instance;
use crate::error::GPUGenericError;
use crate::texture::GPUTextureViewDimension;
// Backing state for the JS GPUBindGroupLayout interface; owns its wgpu id
// and releases it on drop.
pub struct GPUBindGroupLayout {
pub instance: Instance,
pub id: wgpu_core::id::BindGroupLayoutId,
pub label: String,
}
impl Drop for GPUBindGroupLayout {
fn drop(&mut self) {
// Release the wgpu-side resource when the JS wrapper is collected.
self.instance.bind_group_layout_drop(self.id);
}
}
impl deno_core::webidl::WebIdlInterfaceConverter for GPUBindGroupLayout {
const NAME: &'static str = "GPUBindGroupLayout";
}
// SAFETY: we're sure this can be GCed
unsafe impl GarbageCollected for GPUBindGroupLayout {
fn trace(&self, _visitor: &mut deno_core::v8::cppgc::Visitor) {}
fn get_name(&self) -> &'static std::ffi::CStr {
c"GPUBindGroupLayout"
}
}
#[op2]
impl GPUBindGroupLayout {
  #[constructor]
  #[cppgc]
  // GPUBindGroupLayout is not constructible from JS.
  fn constructor(_: bool) -> Result<GPUBindGroupLayout, GPUGenericError> {
    Err(GPUGenericError::InvalidConstructor)
  }

  #[getter]
  #[string]
  // Returns a copy of the label given at creation time.
  fn label(&self) -> String {
    self.label.to_owned()
  }

  #[setter]
  #[string]
  fn label(&self, #[webidl] _label: String) {
    // TODO(@crowlKats): no-op, needs wpgu to implement changing the label
  }
}
// WebIDL dictionary for GPUDevice.createBindGroupLayout().
#[derive(WebIDL)]
#[webidl(dictionary)]
pub(crate) struct GPUBindGroupLayoutDescriptor {
#[webidl(default = String::new())]
pub label: String,
pub entries: Vec<GPUBindGroupLayoutEntry>,
}
// One layout entry. Exactly one of `buffer`, `sampler`, `texture`,
// `storage_texture` must be set; the op implementation enforces this.
#[derive(WebIDL)]
#[webidl(dictionary)]
pub(crate) struct GPUBindGroupLayoutEntry {
#[options(enforce_range = true)]
pub binding: u32,
// Shader-stage visibility bitset (GPUShaderStage flags).
#[options(enforce_range = true)]
pub visibility: u32,
pub buffer: Option<GPUBufferBindingLayout>,
pub sampler: Option<GPUSamplerBindingLayout>,
pub texture: Option<GPUTextureBindingLayout>,
pub storage_texture: Option<GPUStorageTextureBindingLayout>,
}
// WebIDL GPUBufferBindingLayout dictionary.
#[derive(WebIDL)]
#[webidl(dictionary)]
pub(crate) struct GPUBufferBindingLayout {
#[webidl(default = GPUBufferBindingType::Uniform)]
pub r#type: GPUBufferBindingType,
#[webidl(default = false)]
pub has_dynamic_offset: bool,
// 0 means "no minimum binding size".
#[webidl(default = 0)]
pub min_binding_size: u64,
}
#[derive(WebIDL)]
#[webidl(enum)]
pub(crate) enum GPUBufferBindingType {
Uniform,
Storage,
ReadOnlyStorage,
}
// Maps the WebIDL enum onto wgpu's buffer binding type, where read-only
// storage is expressed as a flag on the Storage variant.
impl From<GPUBufferBindingType> for wgpu_types::BufferBindingType {
fn from(value: GPUBufferBindingType) -> Self {
match value {
GPUBufferBindingType::Uniform => Self::Uniform,
GPUBufferBindingType::Storage => Self::Storage { read_only: false },
GPUBufferBindingType::ReadOnlyStorage => {
Self::Storage { read_only: true }
}
}
}
}
// WebIDL GPUSamplerBindingLayout dictionary.
#[derive(WebIDL)]
#[webidl(dictionary)]
pub(crate) struct GPUSamplerBindingLayout {
#[webidl(default = GPUSamplerBindingType::Filtering)]
pub r#type: GPUSamplerBindingType,
}
#[derive(WebIDL)]
#[webidl(enum)]
pub(crate) enum GPUSamplerBindingType {
Filtering,
NonFiltering,
Comparison,
}
// One-to-one mapping onto wgpu's sampler binding type.
impl From<GPUSamplerBindingType> for wgpu_types::SamplerBindingType {
fn from(value: GPUSamplerBindingType) -> Self {
match value {
GPUSamplerBindingType::Filtering => Self::Filtering,
GPUSamplerBindingType::NonFiltering => Self::NonFiltering,
GPUSamplerBindingType::Comparison => Self::Comparison,
}
}
}
// WebIDL GPUTextureBindingLayout dictionary.
#[derive(WebIDL)]
#[webidl(dictionary)]
pub(crate) struct GPUTextureBindingLayout {
#[webidl(default = GPUTextureSampleType::Float)]
pub sample_type: GPUTextureSampleType,
#[webidl(default = GPUTextureViewDimension::D2)]
pub view_dimension: GPUTextureViewDimension,
#[webidl(default = false)]
pub multisampled: bool,
}
#[derive(WebIDL)]
#[webidl(enum)]
pub(crate) enum GPUTextureSampleType {
Float,
UnfilterableFloat,
Depth,
Sint,
Uint,
}
// Maps onto wgpu's sample type; filterability is a flag on Float.
impl From<GPUTextureSampleType> for wgpu_types::TextureSampleType {
fn from(value: GPUTextureSampleType) -> Self {
match value {
GPUTextureSampleType::Float => Self::Float { filterable: true },
GPUTextureSampleType::UnfilterableFloat => {
Self::Float { filterable: false }
}
GPUTextureSampleType::Depth => Self::Depth,
GPUTextureSampleType::Sint => Self::Sint,
GPUTextureSampleType::Uint => Self::Uint,
}
}
}
// WebIDL GPUStorageTextureBindingLayout dictionary. `format` is required
// (no default).
#[derive(WebIDL)]
#[webidl(dictionary)]
pub(crate) struct GPUStorageTextureBindingLayout {
#[webidl(default = GPUStorageTextureAccess::WriteOnly)]
pub access: GPUStorageTextureAccess,
pub format: super::texture::GPUTextureFormat,
#[webidl(default = GPUTextureViewDimension::D2)]
pub view_dimension: GPUTextureViewDimension,
}
#[derive(WebIDL)]
#[webidl(enum)]
pub(crate) enum GPUStorageTextureAccess {
WriteOnly,
ReadOnly,
ReadWrite,
}
// One-to-one mapping onto wgpu's storage texture access mode.
impl From<GPUStorageTextureAccess> for wgpu_types::StorageTextureAccess {
fn from(value: GPUStorageTextureAccess) -> Self {
match value {
GPUStorageTextureAccess::WriteOnly => Self::WriteOnly,
GPUStorageTextureAccess::ReadOnly => Self::ReadOnly,
GPUStorageTextureAccess::ReadWrite => Self::ReadWrite,
}
}
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/ext/webgpu/byow.rs | ext/webgpu/byow.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::cell::RefCell;
use std::ffi::c_void;
#[cfg(any(
target_os = "linux",
target_os = "macos",
target_os = "freebsd",
target_os = "openbsd"
))]
use std::ptr::NonNull;
use deno_core::FromV8;
use deno_core::GarbageCollected;
use deno_core::OpState;
use deno_core::cppgc::SameObject;
use deno_core::op2;
use deno_core::v8;
use deno_core::v8::Local;
use deno_core::v8::Value;
use deno_error::JsErrorBox;
use crate::surface::GPUCanvasContext;
// Errors raised while creating a surface from a raw ("bring your own
// window") native handle. Several variants are cfg-gated so each target
// only compiles the messages relevant to it.
#[derive(Debug, thiserror::Error, deno_error::JsError)]
pub enum ByowError {
#[cfg(not(any(
target_os = "macos",
target_os = "windows",
target_os = "linux",
target_os = "freebsd",
target_os = "openbsd",
)))]
#[class(type)]
#[error("Unsupported platform")]
Unsupported,
// The wgpu Instance is only placed in OpState once an adapter has been
// requested; constructing a surface before that fails with this.
#[class(type)]
#[error(
"Cannot create surface outside of WebGPU context. Did you forget to call `navigator.gpu.requestAdapter()`?"
)]
WebGPUNotInitiated,
#[class(type)]
#[error("Invalid parameters")]
InvalidParameters,
#[class(generic)]
#[error(transparent)]
CreateSurface(wgpu_core::instance::CreateSurfaceError),
// "system" (the windowing-system discriminator) did not match the
// current platform.
#[cfg(target_os = "windows")]
#[class(type)]
#[error("Invalid system on Windows")]
InvalidSystem,
#[cfg(target_os = "macos")]
#[class(type)]
#[error("Invalid system on macOS")]
InvalidSystem,
#[cfg(any(
target_os = "linux",
target_os = "freebsd",
target_os = "openbsd"
))]
#[class(type)]
#[error("Invalid system on Linux/BSD")]
InvalidSystem,
#[cfg(any(
target_os = "windows",
target_os = "linux",
target_os = "freebsd",
target_os = "openbsd"
))]
#[class(type)]
#[error("window is null")]
NullWindow,
#[cfg(any(
target_os = "linux",
target_os = "freebsd",
target_os = "openbsd"
))]
#[class(type)]
#[error("display is null")]
NullDisplay,
#[cfg(target_os = "macos")]
#[class(type)]
#[error("ns_view is null")]
NSViewDisplay,
}
// TODO(@littledivy): This will extend `OffscreenCanvas` when we add it.
// A surface created from a raw native window handle. Holds the canvas
// context lazily (created on first getContext call) plus the current
// width/height, which `resize` updates.
pub struct UnsafeWindowSurface {
pub id: wgpu_core::id::SurfaceId,
pub width: RefCell<u32>,
pub height: RefCell<u32>,
pub context: SameObject<GPUCanvasContext>,
}
// SAFETY: we're sure this can be GCed
unsafe impl GarbageCollected for UnsafeWindowSurface {
fn trace(&self, _visitor: &mut deno_core::v8::cppgc::Visitor) {}
fn get_name(&self) -> &'static std::ffi::CStr {
c"UnsafeWindowSurface"
}
}
#[op2]
impl UnsafeWindowSurface {
#[constructor]
#[cppgc]
// Creates a wgpu surface from raw platform handles. Requires that
// WebGPU was already initialized (requestAdapter placed the Instance in
// OpState).
fn new(
state: &mut OpState,
#[from_v8] options: UnsafeWindowSurfaceOptions,
) -> Result<UnsafeWindowSurface, ByowError> {
let instance = state
.try_borrow::<super::Instance>()
.ok_or(ByowError::WebGPUNotInitiated)?;
// Security note:
//
// The `window_handle` and `display_handle` options are pointers to
// platform-specific window handles.
//
// The code below works under the assumption that:
//
// - handles can only be created by the FFI interface which
// enforces --allow-ffi.
//
// - `*const c_void` deserizalizes null and v8::External.
//
// - Only FFI can export v8::External to user code.
if options.window_handle.is_null() {
return Err(ByowError::InvalidParameters);
}
// Translate the (system, pointers) triple into raw-window-handle
// values; platform mismatches error here.
let (win_handle, display_handle) = raw_window(
options.system,
options.window_handle,
options.display_handle,
)?;
// SAFETY: see above comment
let id = unsafe {
instance
.instance_create_surface(display_handle, win_handle, None)
.map_err(ByowError::CreateSurface)?
};
Ok(UnsafeWindowSurface {
id,
width: RefCell::new(options.width),
height: RefCell::new(options.height),
context: SameObject::new(),
})
}
#[global]
// Returns the (lazily created, cached) GPUCanvasContext for this
// surface; repeated calls yield the same object.
fn get_context(
&self,
#[this] this: v8::Global<v8::Object>,
scope: &mut v8::PinScope<'_, '_>,
) -> v8::Global<v8::Object> {
self.context.get(scope, |_| GPUCanvasContext {
surface_id: self.id,
width: self.width.clone(),
height: self.height.clone(),
config: RefCell::new(None),
texture: RefCell::new(v8::TracedReference::empty()),
canvas: this,
})
}
#[nofast]
// Presents the current texture; fails if getContext was never called.
fn present(
&self,
scope: &mut v8::PinScope<'_, '_>,
) -> Result<(), JsErrorBox> {
let Some(context) = self.context.try_unwrap(scope) else {
return Err(JsErrorBox::type_error("getContext was never called"));
};
context.present(scope).map_err(JsErrorBox::from_err)
}
#[fast]
// Updates the stored dimensions and, if a context exists, reconfigures
// it to the new size. A no-op on the context if getContext was never
// called.
fn resize(&self, width: u32, height: u32, scope: &mut v8::PinScope<'_, '_>) {
self.width.replace(width);
self.height.replace(height);
let Some(context) = self.context.try_unwrap(scope) else {
return;
};
context.resize_configure(width, height);
}
}
struct UnsafeWindowSurfaceOptions {
system: UnsafeWindowSurfaceSystem,
window_handle: *const c_void,
display_handle: *const c_void,
width: u32,
height: u32,
}
#[derive(Eq, PartialEq)]
enum UnsafeWindowSurfaceSystem {
Cocoa,
Win32,
X11,
Wayland,
}
impl<'a> FromV8<'a> for UnsafeWindowSurfaceOptions {
type Error = JsErrorBox;
fn from_v8(
scope: &mut v8::PinScope<'a, '_>,
value: Local<'a, Value>,
) -> Result<Self, Self::Error> {
let obj = value
.try_cast::<v8::Object>()
.map_err(|_| JsErrorBox::type_error("is not an object"))?;
let key = v8::String::new(scope, "system").unwrap();
let val = obj
.get(scope, key.into())
.ok_or_else(|| JsErrorBox::type_error("missing field 'system'"))?;
let s = String::from_v8(scope, val).unwrap();
let system = match s.as_str() {
"cocoa" => UnsafeWindowSurfaceSystem::Cocoa,
"win32" => UnsafeWindowSurfaceSystem::Win32,
"x11" => UnsafeWindowSurfaceSystem::X11,
"wayland" => UnsafeWindowSurfaceSystem::Wayland,
_ => {
return Err(JsErrorBox::type_error(format!(
"Invalid system kind '{s}'"
)));
}
};
let key = v8::String::new(scope, "windowHandle").unwrap();
let val = obj
.get(scope, key.into())
.ok_or_else(|| JsErrorBox::type_error("missing field 'windowHandle'"))?;
let Some(window_handle) = deno_core::_ops::to_external_option(&val) else {
return Err(JsErrorBox::type_error("expected external"));
};
let key = v8::String::new(scope, "displayHandle").unwrap();
let val = obj
.get(scope, key.into())
.ok_or_else(|| JsErrorBox::type_error("missing field 'displayHandle'"))?;
let Some(display_handle) = deno_core::_ops::to_external_option(&val) else {
return Err(JsErrorBox::type_error("expected external"));
};
let key = v8::String::new(scope, "width").unwrap();
let val = obj
.get(scope, key.into())
.ok_or_else(|| JsErrorBox::type_error("missing field 'width'"))?;
let width = deno_core::convert::Number::<u32>::from_v8(scope, val)
.map_err(JsErrorBox::from_err)?
.0;
let key = v8::String::new(scope, "height").unwrap();
let val = obj
.get(scope, key.into())
.ok_or_else(|| JsErrorBox::type_error("missing field 'height'"))?;
let height = deno_core::convert::Number::<u32>::from_v8(scope, val)
.map_err(JsErrorBox::from_err)?
.0;
Ok(Self {
system,
window_handle,
display_handle,
width,
height,
})
}
}
type RawHandles = (
raw_window_handle::RawWindowHandle,
raw_window_handle::RawDisplayHandle,
);
#[cfg(target_os = "macos")]
fn raw_window(
system: UnsafeWindowSurfaceSystem,
_ns_window: *const c_void,
ns_view: *const c_void,
) -> Result<RawHandles, ByowError> {
if system != UnsafeWindowSurfaceSystem::Cocoa {
return Err(ByowError::InvalidSystem);
}
let win_handle = raw_window_handle::RawWindowHandle::AppKit(
raw_window_handle::AppKitWindowHandle::new(
NonNull::new(ns_view as *mut c_void).ok_or(ByowError::NSViewDisplay)?,
),
);
let display_handle = raw_window_handle::RawDisplayHandle::AppKit(
raw_window_handle::AppKitDisplayHandle::new(),
);
Ok((win_handle, display_handle))
}
#[cfg(target_os = "windows")]
fn raw_window(
system: UnsafeWindowSurfaceSystem,
window: *const c_void,
hinstance: *const c_void,
) -> Result<RawHandles, ByowError> {
use raw_window_handle::WindowsDisplayHandle;
if system != UnsafeWindowSurfaceSystem::Win32 {
return Err(ByowError::InvalidSystem);
}
let win_handle = {
let mut handle = raw_window_handle::Win32WindowHandle::new(
std::num::NonZeroIsize::new(window as isize)
.ok_or(ByowError::NullWindow)?,
);
handle.hinstance = std::num::NonZeroIsize::new(hinstance as isize);
raw_window_handle::RawWindowHandle::Win32(handle)
};
let display_handle =
raw_window_handle::RawDisplayHandle::Windows(WindowsDisplayHandle::new());
Ok((win_handle, display_handle))
}
#[cfg(any(target_os = "linux", target_os = "freebsd", target_os = "openbsd"))]
fn raw_window(
system: UnsafeWindowSurfaceSystem,
window: *const c_void,
display: *const c_void,
) -> Result<RawHandles, ByowError> {
let (win_handle, display_handle);
if system == UnsafeWindowSurfaceSystem::X11 {
win_handle = raw_window_handle::RawWindowHandle::Xlib(
raw_window_handle::XlibWindowHandle::new(window as *mut c_void as _),
);
display_handle = raw_window_handle::RawDisplayHandle::Xlib(
raw_window_handle::XlibDisplayHandle::new(
NonNull::new(display as *mut c_void),
0,
),
);
} else if system == UnsafeWindowSurfaceSystem::Wayland {
win_handle = raw_window_handle::RawWindowHandle::Wayland(
raw_window_handle::WaylandWindowHandle::new(
NonNull::new(window as *mut c_void).ok_or(ByowError::NullWindow)?,
),
);
display_handle = raw_window_handle::RawDisplayHandle::Wayland(
raw_window_handle::WaylandDisplayHandle::new(
NonNull::new(display as *mut c_void).ok_or(ByowError::NullDisplay)?,
),
);
} else {
return Err(ByowError::InvalidSystem);
}
Ok((win_handle, display_handle))
}
#[cfg(not(any(
target_os = "macos",
target_os = "windows",
target_os = "linux",
target_os = "freebsd",
target_os = "openbsd",
)))]
fn raw_window(
_system: UnsafeWindowSurfaceSystem,
_window: *const c_void,
_display: *const c_void,
) -> Result<RawHandles, ByowError> {
Err(ByowError::Unsupported)
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/ext/webgpu/texture.rs | ext/webgpu/texture.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::sync::OnceLock;
use deno_core::GarbageCollected;
use deno_core::WebIDL;
use deno_core::op2;
use deno_core::webidl::WebIdlInterfaceConverter;
use deno_error::JsErrorBox;
use wgpu_types::AstcBlock;
use wgpu_types::AstcChannel;
use wgpu_types::Extent3d;
use wgpu_types::TextureAspect;
use wgpu_types::TextureDimension;
use wgpu_types::TextureFormat;
use wgpu_types::TextureViewDimension;
use crate::Instance;
use crate::error::GPUGenericError;
#[derive(WebIDL)]
#[webidl(dictionary)]
pub(crate) struct GPUTextureDescriptor {
#[webidl(default = String::new())]
pub label: String,
pub size: super::webidl::GPUExtent3D,
#[webidl(default = 1)]
#[options(enforce_range = true)]
pub mip_level_count: u32,
#[webidl(default = 1)]
#[options(enforce_range = true)]
pub sample_count: u32,
#[webidl(default = GPUTextureDimension::D2)]
pub dimension: GPUTextureDimension,
pub format: GPUTextureFormat,
#[options(enforce_range = true)]
pub usage: u32,
#[webidl(default = vec![])]
pub view_formats: Vec<GPUTextureFormat>,
}
pub struct GPUTexture {
pub instance: Instance,
pub error_handler: super::error::ErrorHandler,
pub id: wgpu_core::id::TextureId,
// needed by deno
pub device_id: wgpu_core::id::DeviceId,
// needed by deno
pub queue_id: wgpu_core::id::QueueId,
pub default_view_id: OnceLock<wgpu_core::id::TextureViewId>,
pub label: String,
pub size: Extent3d,
pub mip_level_count: u32,
pub sample_count: u32,
pub dimension: GPUTextureDimension,
pub format: GPUTextureFormat,
pub usage: u32,
}
impl GPUTexture {
pub(crate) fn default_view_id(&self) -> wgpu_core::id::TextureViewId {
*self.default_view_id.get_or_init(|| {
let (id, err) =
self
.instance
.texture_create_view(self.id, &Default::default(), None);
if let Some(err) = err {
use wgpu_types::error::WebGpuError;
assert_ne!(
err.webgpu_error_type(),
wgpu_types::error::ErrorType::Validation,
concat!(
"getting default view for a texture ",
"caused a validation error (!?)"
)
);
self.error_handler.push_error(Some(err));
}
id
})
}
}
impl Drop for GPUTexture {
fn drop(&mut self) {
if let Some(id) = self.default_view_id.take() {
self.instance.texture_view_drop(id).unwrap();
}
self.instance.texture_drop(self.id);
}
}
impl WebIdlInterfaceConverter for GPUTexture {
const NAME: &'static str = "GPUTexture";
}
// SAFETY: we're sure this can be GCed
unsafe impl GarbageCollected for GPUTexture {
fn trace(&self, _visitor: &mut deno_core::v8::cppgc::Visitor) {}
fn get_name(&self) -> &'static std::ffi::CStr {
c"GPUTexture"
}
}
#[op2]
impl GPUTexture {
#[constructor]
#[cppgc]
fn constructor(_: bool) -> Result<GPUTexture, GPUGenericError> {
Err(GPUGenericError::InvalidConstructor)
}
#[getter]
#[string]
fn label(&self) -> String {
self.label.clone()
}
#[setter]
#[string]
fn label(&self, #[webidl] _label: String) {
// TODO(@crowlKats): no-op, needs wpgu to implement changing the label
}
#[getter]
fn width(&self) -> u32 {
self.size.width
}
#[getter]
fn height(&self) -> u32 {
self.size.height
}
#[getter]
fn depth_or_array_layers(&self) -> u32 {
self.size.depth_or_array_layers
}
#[getter]
fn mip_level_count(&self) -> u32 {
self.mip_level_count
}
#[getter]
fn sample_count(&self) -> u32 {
self.sample_count
}
#[getter]
#[string]
fn dimension(&self) -> &'static str {
self.dimension.as_str()
}
#[getter]
#[string]
fn format(&self) -> &'static str {
self.format.as_str()
}
#[getter]
fn usage(&self) -> u32 {
self.usage
}
#[fast]
#[undefined]
fn destroy(&self) {
self.instance.texture_destroy(self.id);
}
#[cppgc]
fn create_view(
&self,
#[webidl] descriptor: GPUTextureViewDescriptor,
) -> Result<GPUTextureView, JsErrorBox> {
let wgpu_descriptor = wgpu_core::resource::TextureViewDescriptor {
label: crate::transform_label(descriptor.label.clone()),
format: descriptor.format.map(Into::into),
dimension: descriptor.dimension.map(Into::into),
usage: Some(
wgpu_types::TextureUsages::from_bits(descriptor.usage)
.ok_or_else(|| JsErrorBox::type_error("usage is not valid"))?,
),
range: wgpu_types::ImageSubresourceRange {
aspect: descriptor.aspect.into(),
base_mip_level: descriptor.base_mip_level,
mip_level_count: descriptor.mip_level_count,
base_array_layer: descriptor.base_array_layer,
array_layer_count: descriptor.array_layer_count,
},
};
let (id, err) =
self
.instance
.texture_create_view(self.id, &wgpu_descriptor, None);
self.error_handler.push_error(err);
Ok(GPUTextureView {
instance: self.instance.clone(),
id,
label: descriptor.label,
})
}
}
#[derive(WebIDL)]
#[webidl(dictionary)]
struct GPUTextureViewDescriptor {
#[webidl(default = String::new())]
label: String,
format: Option<GPUTextureFormat>,
dimension: Option<GPUTextureViewDimension>,
#[webidl(default = 0)]
#[options(enforce_range = true)]
usage: u32,
#[webidl(default = GPUTextureAspect::All)]
aspect: GPUTextureAspect,
#[webidl(default = 0)]
#[options(enforce_range = true)]
base_mip_level: u32,
#[options(enforce_range = true)]
mip_level_count: Option<u32>,
#[webidl(default = 0)]
#[options(enforce_range = true)]
base_array_layer: u32,
#[options(enforce_range = true)]
array_layer_count: Option<u32>,
}
#[derive(WebIDL)]
#[webidl(enum)]
pub(crate) enum GPUTextureViewDimension {
#[webidl(rename = "1d")]
D1,
#[webidl(rename = "2d")]
D2,
#[webidl(rename = "2d-array")]
D2Array,
#[webidl(rename = "cube")]
Cube,
#[webidl(rename = "cube-array")]
CubeArray,
#[webidl(rename = "3d")]
D3,
}
impl From<GPUTextureViewDimension> for TextureViewDimension {
fn from(value: GPUTextureViewDimension) -> Self {
match value {
GPUTextureViewDimension::D1 => Self::D1,
GPUTextureViewDimension::D2 => Self::D2,
GPUTextureViewDimension::D3 => Self::D3,
GPUTextureViewDimension::D2Array => Self::D2Array,
GPUTextureViewDimension::Cube => Self::Cube,
GPUTextureViewDimension::CubeArray => Self::CubeArray,
}
}
}
#[derive(WebIDL)]
#[webidl(enum)]
pub enum GPUTextureAspect {
All,
StencilOnly,
DepthOnly,
}
impl From<GPUTextureAspect> for TextureAspect {
fn from(value: GPUTextureAspect) -> Self {
match value {
GPUTextureAspect::All => Self::All,
GPUTextureAspect::StencilOnly => Self::StencilOnly,
GPUTextureAspect::DepthOnly => Self::DepthOnly,
}
}
}
pub struct GPUTextureView {
pub instance: Instance,
pub id: wgpu_core::id::TextureViewId,
pub label: String,
}
impl Drop for GPUTextureView {
fn drop(&mut self) {
let _ = self.instance.texture_view_drop(self.id);
}
}
impl WebIdlInterfaceConverter for GPUTextureView {
const NAME: &'static str = "GPUTextureView";
}
// SAFETY: we're sure this can be GCed
unsafe impl GarbageCollected for GPUTextureView {
fn trace(&self, _visitor: &mut deno_core::v8::cppgc::Visitor) {}
fn get_name(&self) -> &'static std::ffi::CStr {
c"GPUTextureView"
}
}
// TODO(@crowlKats): weakref in texture for view
#[op2]
impl GPUTextureView {
#[constructor]
#[cppgc]
fn constructor(_: bool) -> Result<GPUTextureView, GPUGenericError> {
Err(GPUGenericError::InvalidConstructor)
}
#[getter]
#[string]
fn label(&self) -> String {
self.label.clone()
}
#[setter]
#[string]
fn label(&self, #[webidl] _label: String) {
// TODO(@crowlKats): no-op, needs wpgu to implement changing the label
}
}
#[derive(WebIDL, Clone)]
#[webidl(enum)]
pub enum GPUTextureDimension {
#[webidl(rename = "1d")]
D1,
#[webidl(rename = "2d")]
D2,
#[webidl(rename = "3d")]
D3,
}
impl From<GPUTextureDimension> for TextureDimension {
fn from(value: GPUTextureDimension) -> Self {
match value {
GPUTextureDimension::D1 => Self::D1,
GPUTextureDimension::D2 => Self::D2,
GPUTextureDimension::D3 => Self::D3,
}
}
}
#[derive(WebIDL, Clone)]
#[webidl(enum)]
pub enum GPUTextureFormat {
#[webidl(rename = "r8unorm")]
R8unorm,
#[webidl(rename = "r8snorm")]
R8snorm,
#[webidl(rename = "r8uint")]
R8uint,
#[webidl(rename = "r8sint")]
R8sint,
#[webidl(rename = "r16uint")]
R16uint,
#[webidl(rename = "r16sint")]
R16sint,
#[webidl(rename = "r16float")]
R16float,
#[webidl(rename = "rg8unorm")]
Rg8unorm,
#[webidl(rename = "rg8snorm")]
Rg8snorm,
#[webidl(rename = "rg8uint")]
Rg8uint,
#[webidl(rename = "rg8sint")]
Rg8sint,
#[webidl(rename = "r32uint")]
R32uint,
#[webidl(rename = "r32sint")]
R32sint,
#[webidl(rename = "r32float")]
R32float,
#[webidl(rename = "rg16uint")]
Rg16uint,
#[webidl(rename = "rg16sint")]
Rg16sint,
#[webidl(rename = "rg16float")]
Rg16float,
#[webidl(rename = "rgba8unorm")]
Rgba8unorm,
#[webidl(rename = "rgba8unorm-srgb")]
Rgba8unormSrgb,
#[webidl(rename = "rgba8snorm")]
Rgba8snorm,
#[webidl(rename = "rgba8uint")]
Rgba8uint,
#[webidl(rename = "rgba8sint")]
Rgba8sint,
#[webidl(rename = "bgra8unorm")]
Bgra8unorm,
#[webidl(rename = "bgra8unorm-srgb")]
Bgra8unormSrgb,
#[webidl(rename = "rgb9e5ufloat")]
Rgb9e5ufloat,
#[webidl(rename = "rgb10a2uint")]
Rgb10a2uint,
#[webidl(rename = "rgb10a2unorm")]
Rgb10a2unorm,
#[webidl(rename = "rg11b10ufloat")]
Rg11b10ufloat,
#[webidl(rename = "rg32uint")]
Rg32uint,
#[webidl(rename = "rg32sint")]
Rg32sint,
#[webidl(rename = "rg32float")]
Rg32float,
#[webidl(rename = "rgba16uint")]
Rgba16uint,
#[webidl(rename = "rgba16sint")]
Rgba16sint,
#[webidl(rename = "rgba16float")]
Rgba16float,
#[webidl(rename = "rgba32uint")]
Rgba32uint,
#[webidl(rename = "rgba32sint")]
Rgba32sint,
#[webidl(rename = "rgba32float")]
Rgba32float,
#[webidl(rename = "stencil8")]
Stencil8,
#[webidl(rename = "depth16unorm")]
Depth16unorm,
#[webidl(rename = "depth24plus")]
Depth24plus,
#[webidl(rename = "depth24plus-stencil8")]
Depth24plusStencil8,
#[webidl(rename = "depth32float")]
Depth32float,
#[webidl(rename = "depth32float-stencil8")]
Depth32floatStencil8,
#[webidl(rename = "bc1-rgba-unorm")]
Bc1RgbaUnorm,
#[webidl(rename = "bc1-rgba-unorm-srgb")]
Bc1RgbaUnormSrgb,
#[webidl(rename = "bc2-rgba-unorm")]
Bc2RgbaUnorm,
#[webidl(rename = "bc2-rgba-unorm-srgb")]
Bc2RgbaUnormSrgb,
#[webidl(rename = "bc3-rgba-unorm")]
Bc3RgbaUnorm,
#[webidl(rename = "bc3-rgba-unorm-srgb")]
Bc3RgbaUnormSrgb,
#[webidl(rename = "bc4-r-unorm")]
Bc4RUnorm,
#[webidl(rename = "bc4-r-snorm")]
Bc4RSnorm,
#[webidl(rename = "bc5-rg-unorm")]
Bc5RgUnorm,
#[webidl(rename = "bc5-rg-snorm")]
Bc5RgSnorm,
#[webidl(rename = "bc6h-rgb-ufloat")]
Bc6hRgbUfloat,
#[webidl(rename = "bc6h-rgb-float")]
Bc6hRgbFloat,
#[webidl(rename = "bc7-rgba-unorm")]
Bc7RgbaUnorm,
#[webidl(rename = "bc7-rgba-unorm-srgb")]
Bc7RgbaUnormSrgb,
#[webidl(rename = "etc2-rgb8unorm")]
Etc2Rgb8unorm,
#[webidl(rename = "etc2-rgb8unorm-srgb")]
Etc2Rgb8unormSrgb,
#[webidl(rename = "etc2-rgb8a1unorm")]
Etc2Rgb8a1unorm,
#[webidl(rename = "etc2-rgb8a1unorm-srgb")]
Etc2Rgb8a1unormSrgb,
#[webidl(rename = "etc2-rgba8unorm")]
Etc2Rgba8unorm,
#[webidl(rename = "etc2-rgba8unorm-srgb")]
Etc2Rgba8unormSrgb,
#[webidl(rename = "eac-r11unorm")]
EacR11unorm,
#[webidl(rename = "eac-r11snorm")]
EacR11snorm,
#[webidl(rename = "eac-rg11unorm")]
EacRg11unorm,
#[webidl(rename = "eac-rg11snorm")]
EacRg11snorm,
#[webidl(rename = "astc-4x4-unorm")]
Astc4x4Unorm,
#[webidl(rename = "astc-4x4-unorm-srgb")]
Astc4x4UnormSrgb,
#[webidl(rename = "astc-5x4-unorm")]
Astc5x4Unorm,
#[webidl(rename = "astc-5x4-unorm-srgb")]
Astc5x4UnormSrgb,
#[webidl(rename = "astc-5x5-unorm")]
Astc5x5Unorm,
#[webidl(rename = "astc-5x5-unorm-srgb")]
Astc5x5UnormSrgb,
#[webidl(rename = "astc-6x5-unorm")]
Astc6x5Unorm,
#[webidl(rename = "astc-6x5-unorm-srgb")]
Astc6x5UnormSrgb,
#[webidl(rename = "astc-6x6-unorm")]
Astc6x6Unorm,
#[webidl(rename = "astc-6x6-unorm-srgb")]
Astc6x6UnormSrgb,
#[webidl(rename = "astc-8x5-unorm")]
Astc8x5Unorm,
#[webidl(rename = "astc-8x5-unorm-srgb")]
Astc8x5UnormSrgb,
#[webidl(rename = "astc-8x6-unorm")]
Astc8x6Unorm,
#[webidl(rename = "astc-8x6-unorm-srgb")]
Astc8x6UnormSrgb,
#[webidl(rename = "astc-8x8-unorm")]
Astc8x8Unorm,
#[webidl(rename = "astc-8x8-unorm-srgb")]
Astc8x8UnormSrgb,
#[webidl(rename = "astc-10x5-unorm")]
Astc10x5Unorm,
#[webidl(rename = "astc-10x5-unorm-srgb")]
Astc10x5UnormSrgb,
#[webidl(rename = "astc-10x6-unorm")]
Astc10x6Unorm,
#[webidl(rename = "astc-10x6-unorm-srgb")]
Astc10x6UnormSrgb,
#[webidl(rename = "astc-10x8-unorm")]
Astc10x8Unorm,
#[webidl(rename = "astc-10x8-unorm-srgb")]
Astc10x8UnormSrgb,
#[webidl(rename = "astc-10x10-unorm")]
Astc10x10Unorm,
#[webidl(rename = "astc-10x10-unorm-srgb")]
Astc10x10UnormSrgb,
#[webidl(rename = "astc-12x10-unorm")]
Astc12x10Unorm,
#[webidl(rename = "astc-12x10-unorm-srgb")]
Astc12x10UnormSrgb,
#[webidl(rename = "astc-12x12-unorm")]
Astc12x12Unorm,
#[webidl(rename = "astc-12x12-unorm-srgb")]
Astc12x12UnormSrgb,
}
impl From<GPUTextureFormat> for TextureFormat {
fn from(value: GPUTextureFormat) -> Self {
match value {
GPUTextureFormat::R8unorm => Self::R8Unorm,
GPUTextureFormat::R8snorm => Self::R8Snorm,
GPUTextureFormat::R8uint => Self::R8Uint,
GPUTextureFormat::R8sint => Self::R8Sint,
GPUTextureFormat::R16uint => Self::R16Uint,
GPUTextureFormat::R16sint => Self::R16Sint,
GPUTextureFormat::R16float => Self::R16Float,
GPUTextureFormat::Rg8unorm => Self::Rg8Unorm,
GPUTextureFormat::Rg8snorm => Self::Rg8Snorm,
GPUTextureFormat::Rg8uint => Self::Rg8Uint,
GPUTextureFormat::Rg8sint => Self::Rg8Sint,
GPUTextureFormat::R32uint => Self::R32Uint,
GPUTextureFormat::R32sint => Self::R32Sint,
GPUTextureFormat::R32float => Self::R32Float,
GPUTextureFormat::Rg16uint => Self::Rg16Uint,
GPUTextureFormat::Rg16sint => Self::Rg16Sint,
GPUTextureFormat::Rg16float => Self::Rg16Float,
GPUTextureFormat::Rgba8unorm => Self::Rgba8Unorm,
GPUTextureFormat::Rgba8unormSrgb => Self::Rgba8UnormSrgb,
GPUTextureFormat::Rgba8snorm => Self::Rgba8Snorm,
GPUTextureFormat::Rgba8uint => Self::Rgba8Uint,
GPUTextureFormat::Rgba8sint => Self::Rgba8Sint,
GPUTextureFormat::Bgra8unorm => Self::Bgra8Unorm,
GPUTextureFormat::Bgra8unormSrgb => Self::Bgra8UnormSrgb,
GPUTextureFormat::Rgb9e5ufloat => Self::Rgb9e5Ufloat,
GPUTextureFormat::Rgb10a2uint => Self::Rgb10a2Uint,
GPUTextureFormat::Rgb10a2unorm => Self::Rgb10a2Unorm,
GPUTextureFormat::Rg11b10ufloat => Self::Rg11b10Ufloat,
GPUTextureFormat::Rg32uint => Self::Rg32Uint,
GPUTextureFormat::Rg32sint => Self::Rg32Sint,
GPUTextureFormat::Rg32float => Self::Rg32Float,
GPUTextureFormat::Rgba16uint => Self::Rgba16Uint,
GPUTextureFormat::Rgba16sint => Self::Rgba16Sint,
GPUTextureFormat::Rgba16float => Self::Rgba16Float,
GPUTextureFormat::Rgba32uint => Self::Rgba32Uint,
GPUTextureFormat::Rgba32sint => Self::Rgba32Sint,
GPUTextureFormat::Rgba32float => Self::Rgba32Float,
GPUTextureFormat::Stencil8 => Self::Stencil8,
GPUTextureFormat::Depth16unorm => Self::Depth16Unorm,
GPUTextureFormat::Depth24plus => Self::Depth24Plus,
GPUTextureFormat::Depth24plusStencil8 => Self::Depth24PlusStencil8,
GPUTextureFormat::Depth32float => Self::Depth32Float,
GPUTextureFormat::Depth32floatStencil8 => Self::Depth32FloatStencil8,
GPUTextureFormat::Bc1RgbaUnorm => Self::Bc1RgbaUnorm,
GPUTextureFormat::Bc1RgbaUnormSrgb => Self::Bc1RgbaUnormSrgb,
GPUTextureFormat::Bc2RgbaUnorm => Self::Bc2RgbaUnorm,
GPUTextureFormat::Bc2RgbaUnormSrgb => Self::Bc2RgbaUnormSrgb,
GPUTextureFormat::Bc3RgbaUnorm => Self::Bc3RgbaUnorm,
GPUTextureFormat::Bc3RgbaUnormSrgb => Self::Bc3RgbaUnormSrgb,
GPUTextureFormat::Bc4RUnorm => Self::Bc4RUnorm,
GPUTextureFormat::Bc4RSnorm => Self::Bc4RSnorm,
GPUTextureFormat::Bc5RgUnorm => Self::Bc5RgUnorm,
GPUTextureFormat::Bc5RgSnorm => Self::Bc5RgSnorm,
GPUTextureFormat::Bc6hRgbUfloat => Self::Bc6hRgbUfloat,
GPUTextureFormat::Bc6hRgbFloat => Self::Bc6hRgbFloat,
GPUTextureFormat::Bc7RgbaUnorm => Self::Bc7RgbaUnorm,
GPUTextureFormat::Bc7RgbaUnormSrgb => Self::Bc7RgbaUnormSrgb,
GPUTextureFormat::Etc2Rgb8unorm => Self::Etc2Rgb8Unorm,
GPUTextureFormat::Etc2Rgb8unormSrgb => Self::Etc2Rgb8UnormSrgb,
GPUTextureFormat::Etc2Rgb8a1unorm => Self::Etc2Rgb8A1Unorm,
GPUTextureFormat::Etc2Rgb8a1unormSrgb => Self::Etc2Rgb8A1UnormSrgb,
GPUTextureFormat::Etc2Rgba8unorm => Self::Etc2Rgba8Unorm,
GPUTextureFormat::Etc2Rgba8unormSrgb => Self::Etc2Rgba8UnormSrgb,
GPUTextureFormat::EacR11unorm => Self::EacR11Unorm,
GPUTextureFormat::EacR11snorm => Self::EacR11Snorm,
GPUTextureFormat::EacRg11unorm => Self::EacRg11Unorm,
GPUTextureFormat::EacRg11snorm => Self::EacRg11Snorm,
GPUTextureFormat::Astc4x4Unorm => Self::Astc {
block: AstcBlock::B4x4,
channel: AstcChannel::Unorm,
},
GPUTextureFormat::Astc4x4UnormSrgb => Self::Astc {
block: AstcBlock::B4x4,
channel: AstcChannel::UnormSrgb,
},
GPUTextureFormat::Astc5x4Unorm => Self::Astc {
block: AstcBlock::B5x4,
channel: AstcChannel::Unorm,
},
GPUTextureFormat::Astc5x4UnormSrgb => Self::Astc {
block: AstcBlock::B5x4,
channel: AstcChannel::UnormSrgb,
},
GPUTextureFormat::Astc5x5Unorm => Self::Astc {
block: AstcBlock::B5x5,
channel: AstcChannel::Unorm,
},
GPUTextureFormat::Astc5x5UnormSrgb => Self::Astc {
block: AstcBlock::B5x5,
channel: AstcChannel::UnormSrgb,
},
GPUTextureFormat::Astc6x5Unorm => Self::Astc {
block: AstcBlock::B6x5,
channel: AstcChannel::Unorm,
},
GPUTextureFormat::Astc6x5UnormSrgb => Self::Astc {
block: AstcBlock::B6x5,
channel: AstcChannel::UnormSrgb,
},
GPUTextureFormat::Astc6x6Unorm => Self::Astc {
block: AstcBlock::B6x6,
channel: AstcChannel::Unorm,
},
GPUTextureFormat::Astc6x6UnormSrgb => Self::Astc {
block: AstcBlock::B6x6,
channel: AstcChannel::UnormSrgb,
},
GPUTextureFormat::Astc8x5Unorm => Self::Astc {
block: AstcBlock::B8x5,
channel: AstcChannel::Unorm,
},
GPUTextureFormat::Astc8x5UnormSrgb => Self::Astc {
block: AstcBlock::B8x5,
channel: AstcChannel::UnormSrgb,
},
GPUTextureFormat::Astc8x6Unorm => Self::Astc {
block: AstcBlock::B8x6,
channel: AstcChannel::Unorm,
},
GPUTextureFormat::Astc8x6UnormSrgb => Self::Astc {
block: AstcBlock::B8x6,
channel: AstcChannel::UnormSrgb,
},
GPUTextureFormat::Astc8x8Unorm => Self::Astc {
block: AstcBlock::B8x8,
channel: AstcChannel::Unorm,
},
GPUTextureFormat::Astc8x8UnormSrgb => Self::Astc {
block: AstcBlock::B8x8,
channel: AstcChannel::UnormSrgb,
},
GPUTextureFormat::Astc10x5Unorm => Self::Astc {
block: AstcBlock::B10x5,
channel: AstcChannel::Unorm,
},
GPUTextureFormat::Astc10x5UnormSrgb => Self::Astc {
block: AstcBlock::B10x5,
channel: AstcChannel::UnormSrgb,
},
GPUTextureFormat::Astc10x6Unorm => Self::Astc {
block: AstcBlock::B10x6,
channel: AstcChannel::Unorm,
},
GPUTextureFormat::Astc10x6UnormSrgb => Self::Astc {
block: AstcBlock::B10x6,
channel: AstcChannel::UnormSrgb,
},
GPUTextureFormat::Astc10x8Unorm => Self::Astc {
block: AstcBlock::B10x8,
channel: AstcChannel::Unorm,
},
GPUTextureFormat::Astc10x8UnormSrgb => Self::Astc {
block: AstcBlock::B10x8,
channel: AstcChannel::UnormSrgb,
},
GPUTextureFormat::Astc10x10Unorm => Self::Astc {
block: AstcBlock::B10x10,
channel: AstcChannel::Unorm,
},
GPUTextureFormat::Astc10x10UnormSrgb => Self::Astc {
block: AstcBlock::B10x10,
channel: AstcChannel::UnormSrgb,
},
GPUTextureFormat::Astc12x10Unorm => Self::Astc {
block: AstcBlock::B12x10,
channel: AstcChannel::Unorm,
},
GPUTextureFormat::Astc12x10UnormSrgb => Self::Astc {
block: AstcBlock::B12x10,
channel: AstcChannel::UnormSrgb,
},
GPUTextureFormat::Astc12x12Unorm => Self::Astc {
block: AstcBlock::B12x12,
channel: AstcChannel::Unorm,
},
GPUTextureFormat::Astc12x12UnormSrgb => Self::Astc {
block: AstcBlock::B12x12,
channel: AstcChannel::UnormSrgb,
},
}
}
}
pub struct GPUExternalTexture {}
impl WebIdlInterfaceConverter for GPUExternalTexture {
const NAME: &'static str = "GPUExternalTexture";
}
// SAFETY: we're sure this can be GCed
unsafe impl GarbageCollected for GPUExternalTexture {
fn trace(&self, _visitor: &mut deno_core::v8::cppgc::Visitor) {}
fn get_name(&self) -> &'static std::ffi::CStr {
c"GPUExternalTexture"
}
}
#[op2]
impl GPUExternalTexture {}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/ext/webgpu/compute_pipeline.rs | ext/webgpu/compute_pipeline.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use deno_core::GarbageCollected;
use deno_core::WebIDL;
use deno_core::cppgc::Ref;
use deno_core::op2;
use deno_core::webidl::WebIdlInterfaceConverter;
use indexmap::IndexMap;
use crate::Instance;
use crate::bind_group_layout::GPUBindGroupLayout;
use crate::error::GPUGenericError;
use crate::shader::GPUShaderModule;
use crate::webidl::GPUPipelineLayoutOrGPUAutoLayoutMode;
pub struct GPUComputePipeline {
pub instance: Instance,
pub error_handler: super::error::ErrorHandler,
pub id: wgpu_core::id::ComputePipelineId,
pub label: String,
}
impl Drop for GPUComputePipeline {
fn drop(&mut self) {
self.instance.compute_pipeline_drop(self.id);
}
}
impl WebIdlInterfaceConverter for GPUComputePipeline {
const NAME: &'static str = "GPUComputePipeline";
}
// SAFETY: we're sure this can be GCed
unsafe impl GarbageCollected for GPUComputePipeline {
fn trace(&self, _visitor: &mut deno_core::v8::cppgc::Visitor) {}
fn get_name(&self) -> &'static std::ffi::CStr {
c"GPUComputePipeline"
}
}
#[op2]
impl GPUComputePipeline {
#[constructor]
#[cppgc]
fn constructor(_: bool) -> Result<GPUComputePipeline, GPUGenericError> {
Err(GPUGenericError::InvalidConstructor)
}
#[getter]
#[string]
fn label(&self) -> String {
self.label.clone()
}
#[setter]
#[string]
fn label(&self, #[webidl] _label: String) {
// TODO(@crowlKats): no-op, needs wpgu to implement changing the label
}
#[cppgc]
fn get_bind_group_layout(&self, #[webidl] index: u32) -> GPUBindGroupLayout {
let (id, err) = self
.instance
.compute_pipeline_get_bind_group_layout(self.id, index, None);
self.error_handler.push_error(err);
// TODO(wgpu): needs to support retrieving the label
GPUBindGroupLayout {
instance: self.instance.clone(),
id,
label: "".to_string(),
}
}
}
#[derive(WebIDL)]
#[webidl(dictionary)]
pub(crate) struct GPUComputePipelineDescriptor {
#[webidl(default = String::new())]
pub label: String,
pub compute: GPUProgrammableStage,
pub layout: GPUPipelineLayoutOrGPUAutoLayoutMode,
}
#[derive(WebIDL)]
#[webidl(dictionary)]
pub(crate) struct GPUProgrammableStage {
pub module: Ref<GPUShaderModule>,
pub entry_point: Option<String>,
#[webidl(default = Default::default())]
pub constants: IndexMap<String, f64>,
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/ext/webgpu/surface.rs | ext/webgpu/surface.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::cell::RefCell;
use deno_core::_ops::make_cppgc_object;
use deno_core::GarbageCollected;
use deno_core::WebIDL;
use deno_core::cppgc::Member;
use deno_core::cppgc::Ref;
use deno_core::op2;
use deno_core::v8;
use deno_core::v8::TracedReference;
use deno_core::v8::cppgc::Visitor;
use deno_error::JsErrorBox;
use wgpu_types::SurfaceStatus;
use crate::device::GPUDevice;
use crate::error::GPUGenericError;
use crate::texture::GPUTexture;
use crate::texture::GPUTextureFormat;
#[derive(Debug, thiserror::Error, deno_error::JsError)]
pub enum SurfaceError {
#[class("DOMExceptionInvalidStateError")]
#[error("Context is not configured")]
UnconfiguredContext,
#[class(generic)]
#[error("Invalid Surface Status")]
InvalidStatus,
#[class(generic)]
#[error(transparent)]
Surface(#[from] wgpu_core::present::SurfaceError),
}
pub struct Configuration {
pub device: Member<GPUDevice>,
pub usage: u32,
pub format: GPUTextureFormat,
pub surface_config:
wgpu_types::SurfaceConfiguration<Vec<wgpu_types::TextureFormat>>,
}
// SAFETY: we're sure this can be GCed
unsafe impl GarbageCollected for Configuration {
fn trace(&self, visitor: &mut Visitor) {
visitor.trace(&self.device);
}
fn get_name(&self) -> &'static std::ffi::CStr {
c"GPUCanvasContextConfiguration"
}
}
pub struct GPUCanvasContext {
pub surface_id: wgpu_core::id::SurfaceId,
pub width: RefCell<u32>,
pub height: RefCell<u32>,
pub config: RefCell<Option<Configuration>>,
pub texture: RefCell<TracedReference<v8::Object>>,
pub canvas: v8::Global<v8::Object>,
}
// SAFETY: we're sure this can be GCed
unsafe impl GarbageCollected for GPUCanvasContext {
fn trace(&self, visitor: &mut Visitor) {
if let Some(config) = &*self.config.borrow() {
config.trace(visitor);
}
visitor.trace(&*self.texture.borrow());
}
fn get_name(&self) -> &'static std::ffi::CStr {
c"GPUCanvasContext"
}
}
#[op2]
impl GPUCanvasContext {
#[constructor]
#[cppgc]
fn constructor(_: bool) -> Result<GPUCanvasContext, GPUGenericError> {
Err(GPUGenericError::InvalidConstructor)
}
#[getter]
#[global]
fn canvas(&self) -> v8::Global<v8::Object> {
self.canvas.clone()
}
#[undefined]
fn configure(
&self,
#[webidl] configuration: GPUCanvasConfiguration,
) -> Result<(), JsErrorBox> {
let usage = wgpu_types::TextureUsages::from_bits(configuration.usage)
.ok_or_else(|| JsErrorBox::type_error("usage is not valid"))?;
let format = configuration.format.clone().into();
let conf = wgpu_types::SurfaceConfiguration {
usage,
format,
width: *self.width.borrow(),
height: *self.height.borrow(),
present_mode: configuration
.present_mode
.map(Into::into)
.unwrap_or_default(),
alpha_mode: configuration.alpha_mode.into(),
view_formats: configuration
.view_formats
.into_iter()
.map(Into::into)
.collect(),
desired_maximum_frame_latency: 2,
};
let device = configuration.device;
let err =
device
.instance
.surface_configure(self.surface_id, device.id, &conf);
device.error_handler.push_error(err);
self.config.borrow_mut().replace(Configuration {
device: device.into(),
usage: configuration.usage,
format: configuration.format,
surface_config: conf,
});
Ok(())
}
#[fast]
#[undefined]
fn unconfigure(&self) {
*self.config.borrow_mut() = None;
}
/// Implements `GPUCanvasContext.getCurrentTexture()`.
///
/// Returns the cached texture object if one was already handed out since the
/// last present/reset; otherwise acquires the surface's current texture and
/// wraps it in a `GPUTexture` cppgc object, caching the wrapper.
fn get_current_texture<'s>(
  &self,
  scope: &mut v8::PinScope<'s, '_>,
) -> Result<v8::Local<'s, v8::Object>, SurfaceError> {
  // The context must have been configured via `configure()` first.
  let config = self.config.borrow();
  let Some(config) = config.as_ref() else {
    return Err(SurfaceError::UnconfiguredContext);
  };
  // Per spec, repeated calls within one frame return the same texture object.
  {
    if let Some(obj) = self.texture.borrow().get(scope) {
      return Ok(obj);
    }
  }
  let output = config
    .device
    .instance
    .surface_get_current_texture(self.surface_id, None)?;
  match output.status {
    // Suboptimal still yields a usable texture; only hard failures error.
    SurfaceStatus::Good | SurfaceStatus::Suboptimal => {
      let id = output.texture.unwrap();
      let texture = GPUTexture {
        instance: config.device.instance.clone(),
        error_handler: config.device.error_handler.clone(),
        id,
        device_id: config.device.id,
        queue_id: config.device.queue,
        default_view_id: Default::default(),
        label: "".to_string(),
        size: wgpu_types::Extent3d {
          width: *self.width.borrow(),
          height: *self.height.borrow(),
          depth_or_array_layers: 1,
        },
        // NOTE(review): surface textures conventionally report
        // mipLevelCount == 1 and sampleCount == 1; the zeros here look
        // suspicious if these fields feed reflection getters — TODO confirm.
        mip_level_count: 0,
        sample_count: 0,
        dimension: crate::texture::GPUTextureDimension::D2,
        format: config.format.clone(),
        usage: config.usage,
      };
      // Cache the wrapper so subsequent calls return the same JS object.
      let obj = make_cppgc_object(scope, texture);
      self.texture.borrow_mut().reset(scope, Some(obj));
      Ok(obj)
    }
    _ => Err(SurfaceError::InvalidStatus),
  }
}
}
impl GPUCanvasContext {
  /// Presents the current surface texture to the screen and clears the
  /// cached texture so the next `getCurrentTexture()` acquires a fresh one.
  pub fn present(
    &self,
    scope: &mut v8::PinScope<'_, '_>,
  ) -> Result<(), SurfaceError> {
    let config = self.config.borrow();
    let Some(config) = config.as_ref() else {
      return Err(SurfaceError::UnconfiguredContext);
    };
    config.device.instance.surface_present(self.surface_id)?;
    // next `get_current_texture` call would get a new texture
    self.texture.borrow_mut().reset(scope, None);
    Ok(())
  }

  /// Records the new canvas dimensions and, if the context is configured,
  /// reconfigures the underlying surface with the new size.
  pub fn resize_configure(&self, width: u32, height: u32) {
    self.width.replace(width);
    self.height.replace(height);
    let mut config = self.config.borrow_mut();
    // Not configured yet: just remember the size for a later `configure()`.
    let Some(config) = &mut *config else {
      return;
    };
    config.surface_config.width = width;
    config.surface_config.height = height;
    let err = config.device.instance.surface_configure(
      self.surface_id,
      config.device.id,
      &config.surface_config,
    );
    // Surface errors go through the device error handler (error scopes /
    // uncapturederror) rather than being thrown here.
    config.device.error_handler.push_error(err);
  }
}
/// Dictionary argument for `GPUCanvasContext.configure()`.
#[derive(WebIDL)]
#[webidl(dictionary)]
struct GPUCanvasConfiguration {
  device: Ref<GPUDevice>,
  format: GPUTextureFormat,
  // Spec default: canvas textures are usable as render attachments only.
  #[webidl(default = wgpu_types::TextureUsages::RENDER_ATTACHMENT.bits())]
  #[options(enforce_range = true)]
  usage: u32,
  #[webidl(default = GPUCanvasAlphaMode::Opaque)]
  alpha_mode: GPUCanvasAlphaMode,
  // Extended from spec
  present_mode: Option<GPUPresentMode>,
  #[webidl(default = vec![])]
  view_formats: Vec<GPUTextureFormat>,
}
/// WebIDL enum `GPUCanvasAlphaMode` (canvas composite-alpha behavior).
#[derive(WebIDL)]
#[webidl(enum)]
enum GPUCanvasAlphaMode {
  Opaque,
  Premultiplied,
}

impl From<GPUCanvasAlphaMode> for wgpu_types::CompositeAlphaMode {
  fn from(value: GPUCanvasAlphaMode) -> Self {
    match value {
      GPUCanvasAlphaMode::Opaque => Self::Opaque,
      GPUCanvasAlphaMode::Premultiplied => Self::PreMultiplied,
    }
  }
}
// Extended from spec
/// Non-standard present-mode selection, mapped 1:1 onto wgpu's
/// `PresentMode` variants via the `From` impl below.
#[derive(WebIDL)]
#[webidl(enum)]
enum GPUPresentMode {
  #[webidl(rename = "autoVsync")]
  AutoVsync,
  #[webidl(rename = "autoNoVsync")]
  AutoNoVsync,
  #[webidl(rename = "fifo")]
  Fifo,
  #[webidl(rename = "fifoRelaxed")]
  FifoRelaxed,
  #[webidl(rename = "immediate")]
  Immediate,
  #[webidl(rename = "mailbox")]
  Mailbox,
}

impl From<GPUPresentMode> for wgpu_types::PresentMode {
  fn from(value: GPUPresentMode) -> Self {
    match value {
      GPUPresentMode::AutoVsync => Self::AutoVsync,
      GPUPresentMode::AutoNoVsync => Self::AutoNoVsync,
      GPUPresentMode::Fifo => Self::Fifo,
      GPUPresentMode::FifoRelaxed => Self::FifoRelaxed,
      GPUPresentMode::Immediate => Self::Immediate,
      GPUPresentMode::Mailbox => Self::Mailbox,
    }
  }
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/ext/webgpu/query_set.rs | ext/webgpu/query_set.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use deno_core::GarbageCollected;
use deno_core::WebIDL;
use deno_core::op2;
use deno_core::webidl::WebIdlInterfaceConverter;
use deno_error::JsErrorBox;
use crate::Instance;
use crate::error::GPUGenericError;
/// Backing data for a JS `GPUQuerySet` object.
pub struct GPUQuerySet {
  pub instance: Instance,
  pub id: wgpu_core::id::QuerySetId,
  // Cached descriptor values exposed via the reflection getters below.
  pub r#type: GPUQueryType,
  pub count: u32,
  pub label: String,
}

impl Drop for GPUQuerySet {
  fn drop(&mut self) {
    // Release the wgpu-core resource when the cppgc wrapper is collected.
    self.instance.query_set_drop(self.id);
  }
}

impl WebIdlInterfaceConverter for GPUQuerySet {
  const NAME: &'static str = "GPUQuerySet";
}

// SAFETY: we're sure this can be GCed
unsafe impl GarbageCollected for GPUQuerySet {
  fn trace(&self, _visitor: &mut deno_core::v8::cppgc::Visitor) {}

  fn get_name(&self) -> &'static std::ffi::CStr {
    c"GPUQuerySet"
  }
}
#[op2]
impl GPUQuerySet {
  // Not constructible from JS; instances come from
  // `GPUDevice.createQuerySet()`.
  #[constructor]
  #[cppgc]
  fn constructor(_: bool) -> Result<GPUQuerySet, GPUGenericError> {
    Err(GPUGenericError::InvalidConstructor)
  }

  #[getter]
  #[string]
  fn label(&self) -> String {
    self.label.clone()
  }

  #[setter]
  #[string]
  fn label(&self, #[webidl] _label: String) {
    // TODO(@crowlKats): no-op, needs wgpu to implement changing the label
  }

  #[fast]
  #[undefined]
  fn destroy(&self) -> Result<(), JsErrorBox> {
    // TODO(https://github.com/gfx-rs/wgpu/issues/6495): Destroy the query
    // set. Until that is supported, it is okay to do nothing here, the
    // query set will be garbage collected and dropped eventually.
    Ok(())
  }

  // `type` is a reserved word in Rust; exposed to JS as "type".
  #[getter]
  #[string]
  #[rename("type")]
  fn r#type(&self) -> &'static str {
    self.r#type.as_str()
  }

  #[getter]
  fn count(&self) -> u32 {
    self.count
  }
}
/// Dictionary argument for `GPUDevice.createQuerySet()`.
#[derive(WebIDL)]
#[webidl(dictionary)]
pub(crate) struct GPUQuerySetDescriptor {
  #[webidl(default = String::new())]
  pub label: String,
  pub r#type: GPUQueryType,
  #[options(enforce_range = true)]
  pub count: u32,
}

#[derive(WebIDL, Clone)]
#[webidl(enum)]
pub(crate) enum GPUQueryType {
  Occlusion,
  Timestamp,
}

impl From<GPUQueryType> for wgpu_types::QueryType {
  fn from(value: GPUQueryType) -> Self {
    match value {
      GPUQueryType::Occlusion => Self::Occlusion,
      GPUQueryType::Timestamp => Self::Timestamp,
    }
  }
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/ext/webgpu/command_buffer.rs | ext/webgpu/command_buffer.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use deno_core::GarbageCollected;
use deno_core::WebIDL;
use deno_core::op2;
use crate::Instance;
use crate::error::GPUGenericError;
/// Backing data for a JS `GPUCommandBuffer` object.
pub struct GPUCommandBuffer {
  pub instance: Instance,
  pub id: wgpu_core::id::CommandBufferId,
  pub label: String,
}

impl Drop for GPUCommandBuffer {
  fn drop(&mut self) {
    // Release the wgpu-core resource when the cppgc wrapper is collected.
    self.instance.command_buffer_drop(self.id);
  }
}

impl deno_core::webidl::WebIdlInterfaceConverter for GPUCommandBuffer {
  const NAME: &'static str = "GPUCommandBuffer";
}

// SAFETY: we're sure this can be GCed
unsafe impl GarbageCollected for GPUCommandBuffer {
  fn trace(&self, _visitor: &mut deno_core::v8::cppgc::Visitor) {}

  fn get_name(&self) -> &'static std::ffi::CStr {
    c"GPUCommandBuffer"
  }
}
#[op2]
impl GPUCommandBuffer {
  // Not constructible from JS; instances come from
  // `GPUCommandEncoder.finish()`.
  #[constructor]
  #[cppgc]
  fn constructor(_: bool) -> Result<GPUCommandBuffer, GPUGenericError> {
    Err(GPUGenericError::InvalidConstructor)
  }

  #[getter]
  #[string]
  fn label(&self) -> String {
    self.label.clone()
  }

  #[setter]
  #[string]
  fn label(&self, #[webidl] _label: String) {
    // TODO(@crowlKats): no-op, needs wgpu to implement changing the label
  }
}
/// Dictionary argument for `GPUCommandEncoder.finish()`.
#[derive(WebIDL)]
#[webidl(dictionary)]
pub(crate) struct GPUCommandBufferDescriptor {
  #[webidl(default = String::new())]
  pub label: String,
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/ext/webgpu/adapter.rs | ext/webgpu/adapter.rs | // Copyright 2018-2025 the Deno authors. MIT license.
#[allow(clippy::disallowed_types)]
use std::collections::HashSet;
use std::rc::Rc;
use deno_core::GarbageCollected;
use deno_core::OpState;
use deno_core::V8TaskSpawner;
use deno_core::WebIDL;
use deno_core::cppgc::SameObject;
use deno_core::op2;
use deno_core::v8;
use super::device::GPUDevice;
use crate::Instance;
use crate::error::GPUGenericError;
use crate::webidl::GPUFeatureName;
use crate::webidl::features_to_feature_names;
/// Options for `navigator.gpu.requestAdapter()`.
#[derive(WebIDL)]
#[webidl(dictionary)]
pub(crate) struct GPURequestAdapterOptions {
  pub power_preference: Option<GPUPowerPreference>,
  #[webidl(default = false)]
  pub force_fallback_adapter: bool,
}

#[derive(WebIDL)]
#[webidl(enum)]
pub(crate) enum GPUPowerPreference {
  LowPower,
  HighPerformance,
}

/// Dictionary argument for `GPUAdapter.requestDevice()`.
#[derive(WebIDL)]
#[webidl(dictionary)]
struct GPUDeviceDescriptor {
  #[webidl(default = String::new())]
  label: String,
  #[webidl(default = vec![])]
  required_features: Vec<GPUFeatureName>,
  // Record of limit-name -> requested value; `None` requests the default.
  #[webidl(default = Default::default())]
  #[options(enforce_range = true)]
  required_limits: indexmap::IndexMap<String, Option<u64>>,
}
/// Backing data for a JS `GPUAdapter` object.
pub struct GPUAdapter {
  pub instance: Instance,
  pub id: wgpu_core::id::AdapterId,
  // Lazily created, cached reflection objects ([SameObject] semantics).
  pub features: SameObject<GPUSupportedFeatures>,
  pub limits: SameObject<GPUSupportedLimits>,
  // Rc because the adapter-info object is shared with devices it creates.
  pub info: Rc<SameObject<GPUAdapterInfo>>,
}

impl Drop for GPUAdapter {
  fn drop(&mut self) {
    // Release the wgpu-core resource when the cppgc wrapper is collected.
    self.instance.adapter_drop(self.id);
  }
}

// SAFETY: we're sure this can be GCed
unsafe impl GarbageCollected for GPUAdapter {
  fn trace(&self, _visitor: &mut deno_core::v8::cppgc::Visitor) {}

  fn get_name(&self) -> &'static std::ffi::CStr {
    c"GPUAdapter"
  }
}
#[op2]
impl GPUAdapter {
  // Not constructible from JS; adapters come from
  // `navigator.gpu.requestAdapter()`.
  #[constructor]
  #[cppgc]
  fn constructor(_: bool) -> Result<GPUAdapter, GPUGenericError> {
    Err(GPUGenericError::InvalidConstructor)
  }

  // `GPUAdapter.info` — created on first access, then cached so JS always
  // sees the same object.
  #[getter]
  #[global]
  fn info(&self, scope: &mut v8::PinScope<'_, '_>) -> v8::Global<v8::Object> {
    self.info.get(scope, |_| {
      let info = self.instance.adapter_get_info(self.id);
      GPUAdapterInfo { info }
    })
  }

  #[getter]
  #[global]
  fn features(
    &self,
    scope: &mut v8::PinScope<'_, '_>,
  ) -> v8::Global<v8::Object> {
    self.features.get(scope, |scope| {
      let features = self.instance.adapter_features(self.id);
      // Only expose WebGPU features, not wgpu native-only features
      let features = features & wgpu_types::Features::all_webgpu_mask();
      let features = features_to_feature_names(features);
      GPUSupportedFeatures::new(scope, features)
    })
  }

  #[getter]
  #[global]
  fn limits(&self, scope: &mut v8::PinScope<'_, '_>) -> v8::Global<v8::Object> {
    self.limits.get(scope, |_| {
      let adapter_limits = self.instance.adapter_limits(self.id);
      GPUSupportedLimits(adapter_limits)
    })
  }

  /// `GPUAdapter.requestDevice()`: validates the requested features/limits
  /// against what the adapter supports, creates the wgpu device + queue,
  /// and wires up the JS-side device object (WebIDL brand, EventTarget
  /// internals, device-lost promise, uncaptured-error handler).
  #[async_method(fake)]
  #[global]
  fn request_device(
    &self,
    state: &mut OpState,
    scope: &mut v8::PinScope<'_, '_>,
    #[webidl] descriptor: GPUDeviceDescriptor,
  ) -> Result<v8::Global<v8::Value>, CreateDeviceError> {
    let features = self.instance.adapter_features(self.id);
    let supported_features = features_to_feature_names(features);
    #[allow(clippy::disallowed_types)]
    let required_features = descriptor
      .required_features
      .iter()
      .cloned()
      .collect::<HashSet<_>>();
    // Per spec, requesting an unsupported feature is a TypeError.
    if !required_features.is_subset(&supported_features) {
      return Err(CreateDeviceError::RequiredFeaturesNotASubset);
    }

    // When support for compatibility mode is added, this will need to look
    // at whether the adapter is "compatibility-defaulting" or
    // "core-defaulting", and choose the appropriate set of defaults.
    //
    // Support for compatibility mode is tracked in
    // https://github.com/gfx-rs/wgpu/issues/8124.
    //
    // The serde round-trip converts the WebIDL record of limit overrides
    // into a `wgpu_types::Limits` by matching field names.
    let required_limits = serde_json::from_value::<wgpu_types::Limits>(
      serde_json::to_value(descriptor.required_limits)?,
    )?
    .or_better_values_from(&wgpu_types::Limits::default());

    // Optional API tracing for debugging, enabled via an env var.
    let trace = std::env::var_os("DENO_WEBGPU_TRACE")
      .map(|path| wgpu_types::Trace::Directory(std::path::PathBuf::from(path)))
      .unwrap_or_default();

    let wgpu_descriptor = wgpu_types::DeviceDescriptor {
      label: crate::transform_label(descriptor.label.clone()),
      required_features: super::webidl::feature_names_to_features(
        descriptor.required_features,
      ),
      required_limits,
      experimental_features: wgpu_types::ExperimentalFeatures::disabled(),
      memory_hints: Default::default(),
      trace,
    };

    let (device, queue) = self.instance.adapter_request_device(
      self.id,
      &wgpu_descriptor,
      None,
      None,
    )?;

    let spawner = state.borrow::<V8TaskSpawner>().clone();
    // Resolver backing `GPUDevice.lost`; resolved by the error handler.
    let lost_resolver = v8::PromiseResolver::new(scope).unwrap();
    let lost_promise = lost_resolver.get_promise(scope);

    let device = GPUDevice {
      instance: self.instance.clone(),
      id: device,
      queue,
      label: descriptor.label,
      queue_obj: SameObject::new(),
      adapter_info: self.info.clone(),
      error_handler: Rc::new(super::error::DeviceErrorHandler::new(
        v8::Global::new(scope, lost_resolver),
        spawner,
      )),
      adapter: self.id,
      lost_promise: v8::Global::new(scope, lost_promise),
      limits: SameObject::new(),
      features: SameObject::new(),
      has_active_capture: std::cell::RefCell::new(false),
    };
    let device = deno_core::cppgc::make_cppgc_object(scope, device);
    let weak_device = v8::Weak::new(scope, device);

    // Brand the object and install EventTarget internals so the device can
    // dispatch `uncapturederror` events.
    let event_target_setup = state.borrow::<crate::EventTargetSetup>();
    let webidl_brand = v8::Local::new(scope, event_target_setup.brand.clone());
    device.set(scope, webidl_brand, webidl_brand);
    let set_event_target_data =
      v8::Local::new(scope, event_target_setup.set_event_target_data.clone())
        .cast::<v8::Function>();
    let null = v8::null(scope);
    set_event_target_data.call(scope, null.into(), &[device.into()]);

    // Now that the device is fully constructed, give the error handler a
    // weak reference to it.
    let device = device.cast::<v8::Value>();
    deno_core::cppgc::try_unwrap_cppgc_object::<GPUDevice>(scope, device)
      .unwrap()
      .error_handler
      .set_device(weak_device);

    Ok(v8::Global::new(scope, device))
  }
}
/// Errors produced by `GPUAdapter.requestDevice()`.
#[derive(Debug, thiserror::Error, deno_error::JsError)]
pub enum CreateDeviceError {
  #[class(type)]
  #[error("requiredFeatures must be a subset of the adapter features")]
  RequiredFeaturesNotASubset,
  // Raised by the serde round-trip that converts `requiredLimits`.
  #[class(inherit)]
  #[error(transparent)]
  Serde(#[from] serde_json::Error),
  #[class("DOMExceptionOperationError")]
  #[error(transparent)]
  Device(#[from] wgpu_core::instance::RequestDeviceError),
}
/// Newtype exposing `wgpu_types::Limits` to JS via reflection getters.
pub struct GPUSupportedLimits(pub wgpu_types::Limits);

// SAFETY: we're sure this can be GCed
unsafe impl GarbageCollected for GPUSupportedLimits {
  fn trace(&self, _visitor: &mut deno_core::v8::cppgc::Visitor) {}

  fn get_name(&self) -> &'static std::ffi::CStr {
    c"GPUSupportedLimits"
  }
}
#[op2]
impl GPUSupportedLimits {
  // Not constructible from JS; obtained via `GPUAdapter.limits` /
  // `GPUDevice.limits`. Getter names deliberately use the exact WebGPU
  // `GPUSupportedLimits` attribute spelling (hence the camelCase).
  #[constructor]
  #[cppgc]
  fn constructor(_: bool) -> Result<GPUSupportedLimits, GPUGenericError> {
    Err(GPUGenericError::InvalidConstructor)
  }

  #[getter]
  fn maxTextureDimension1D(&self) -> u32 {
    self.0.max_texture_dimension_1d
  }

  #[getter]
  fn maxTextureDimension2D(&self) -> u32 {
    self.0.max_texture_dimension_2d
  }

  #[getter]
  fn maxTextureDimension3D(&self) -> u32 {
    self.0.max_texture_dimension_3d
  }

  #[getter]
  fn maxTextureArrayLayers(&self) -> u32 {
    self.0.max_texture_array_layers
  }

  #[getter]
  fn maxBindGroups(&self) -> u32 {
    self.0.max_bind_groups
  }

  // TODO(@crowlKats): support max_bind_groups_plus_vertex_buffers

  #[getter]
  fn maxBindingsPerBindGroup(&self) -> u32 {
    self.0.max_bindings_per_bind_group
  }

  #[getter]
  fn maxDynamicUniformBuffersPerPipelineLayout(&self) -> u32 {
    self.0.max_dynamic_uniform_buffers_per_pipeline_layout
  }

  #[getter]
  fn maxDynamicStorageBuffersPerPipelineLayout(&self) -> u32 {
    self.0.max_dynamic_storage_buffers_per_pipeline_layout
  }

  #[getter]
  fn maxSampledTexturesPerShaderStage(&self) -> u32 {
    self.0.max_sampled_textures_per_shader_stage
  }

  #[getter]
  fn maxSamplersPerShaderStage(&self) -> u32 {
    self.0.max_samplers_per_shader_stage
  }

  #[getter]
  fn maxStorageBuffersPerShaderStage(&self) -> u32 {
    self.0.max_storage_buffers_per_shader_stage
  }

  #[getter]
  fn maxStorageTexturesPerShaderStage(&self) -> u32 {
    self.0.max_storage_textures_per_shader_stage
  }

  #[getter]
  fn maxUniformBuffersPerShaderStage(&self) -> u32 {
    self.0.max_uniform_buffers_per_shader_stage
  }

  #[getter]
  fn maxUniformBufferBindingSize(&self) -> u32 {
    self.0.max_uniform_buffer_binding_size
  }

  #[getter]
  fn maxStorageBufferBindingSize(&self) -> u32 {
    self.0.max_storage_buffer_binding_size
  }

  #[getter]
  fn minUniformBufferOffsetAlignment(&self) -> u32 {
    self.0.min_uniform_buffer_offset_alignment
  }

  #[getter]
  fn minStorageBufferOffsetAlignment(&self) -> u32 {
    self.0.min_storage_buffer_offset_alignment
  }

  #[getter]
  fn maxVertexBuffers(&self) -> u32 {
    self.0.max_vertex_buffers
  }

  // `#[number]`: returned as a JS number; u64 exceeds u32 op fast-path.
  #[getter]
  #[number]
  fn maxBufferSize(&self) -> u64 {
    self.0.max_buffer_size
  }

  #[getter]
  fn maxVertexAttributes(&self) -> u32 {
    self.0.max_vertex_attributes
  }

  #[getter]
  fn maxVertexBufferArrayStride(&self) -> u32 {
    self.0.max_vertex_buffer_array_stride
  }

  // TODO(@crowlKats): support max_inter_stage_shader_variables

  #[getter]
  fn maxColorAttachments(&self) -> u32 {
    self.0.max_color_attachments
  }

  #[getter]
  fn maxColorAttachmentBytesPerSample(&self) -> u32 {
    self.0.max_color_attachment_bytes_per_sample
  }

  #[getter]
  fn maxComputeWorkgroupStorageSize(&self) -> u32 {
    self.0.max_compute_workgroup_storage_size
  }

  #[getter]
  fn maxComputeInvocationsPerWorkgroup(&self) -> u32 {
    self.0.max_compute_invocations_per_workgroup
  }

  #[getter]
  fn maxComputeWorkgroupSizeX(&self) -> u32 {
    self.0.max_compute_workgroup_size_x
  }

  #[getter]
  fn maxComputeWorkgroupSizeY(&self) -> u32 {
    self.0.max_compute_workgroup_size_y
  }

  #[getter]
  fn maxComputeWorkgroupSizeZ(&self) -> u32 {
    self.0.max_compute_workgroup_size_z
  }

  #[getter]
  fn maxComputeWorkgroupsPerDimension(&self) -> u32 {
    self.0.max_compute_workgroups_per_dimension
  }
}
/// Setlike backing for `GPUSupportedFeatures`: a `v8::Set` of feature-name
/// strings created once in `new` and handed to the JS setlike wrapper.
pub struct GPUSupportedFeatures(v8::Global<v8::Value>);

// SAFETY: we're sure this can be GCed
unsafe impl GarbageCollected for GPUSupportedFeatures {
  fn trace(&self, _visitor: &mut deno_core::v8::cppgc::Visitor) {}

  fn get_name(&self) -> &'static std::ffi::CStr {
    c"GPUSupportedFeatures"
  }
}

impl GPUSupportedFeatures {
  #[allow(clippy::disallowed_types)]
  pub fn new(
    scope: &mut v8::PinScope<'_, '_>,
    features: HashSet<GPUFeatureName>,
  ) -> Self {
    let set = v8::Set::new(scope);
    for feature in features {
      let key = v8::String::new(scope, feature.as_str()).unwrap();
      set.add(scope, key.into());
    }
    Self(v8::Global::new(scope, <v8::Local<v8::Value>>::from(set)))
  }
}

#[op2]
impl GPUSupportedFeatures {
  #[constructor]
  #[cppgc]
  fn constructor(_: bool) -> Result<GPUSupportedFeatures, GPUGenericError> {
    Err(GPUGenericError::InvalidConstructor)
  }

  // Backing set consumed by the JS setlike machinery.
  #[global]
  #[symbol("setlike_set")]
  fn set(&self) -> v8::Global<v8::Value> {
    self.0.clone()
  }
}
/// Backing data for a JS `GPUAdapterInfo` object.
pub struct GPUAdapterInfo {
  pub info: wgpu_types::AdapterInfo,
}

// SAFETY: we're sure this can be GCed
unsafe impl GarbageCollected for GPUAdapterInfo {
  fn trace(&self, _visitor: &mut deno_core::v8::cppgc::Visitor) {}

  fn get_name(&self) -> &'static std::ffi::CStr {
    c"GPUAdapterInfo"
  }
}

#[op2]
impl GPUAdapterInfo {
  // Not constructible from JS; obtained via `GPUAdapter.info`.
  #[constructor]
  #[cppgc]
  fn constructor(_: bool) -> Result<GPUAdapterInfo, GPUGenericError> {
    Err(GPUGenericError::InvalidConstructor)
  }

  // Numeric vendor id rendered as a string.
  #[getter]
  #[string]
  fn vendor(&self) -> String {
    self.info.vendor.to_string()
  }

  #[getter]
  #[string]
  fn architecture(&self) -> &'static str {
    "" // TODO: wgpu#2170
  }

  // Numeric device id rendered as a string.
  #[getter]
  #[string]
  fn device(&self) -> String {
    self.info.device.to_string()
  }

  // The spec's "description" maps onto wgpu's human-readable adapter name.
  #[getter]
  #[string]
  fn description(&self) -> String {
    self.info.name.clone()
  }

  #[getter]
  fn subgroup_min_size(&self) -> u32 {
    self.info.subgroup_min_size
  }

  #[getter]
  fn subgroup_max_size(&self) -> u32 {
    self.info.subgroup_max_size
  }

  #[getter]
  fn is_fallback_adapter(&self) -> bool {
    // TODO(lucacasonato): report correctly from wgpu
    false
  }
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/ext/webgpu/error.rs | ext/webgpu/error.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::fmt::Display;
use std::fmt::Formatter;
use std::sync::Mutex;
use std::sync::OnceLock;
use deno_core::JsRuntime;
use deno_core::V8TaskSpawner;
use deno_core::cppgc::make_cppgc_object;
use deno_core::v8;
use wgpu_core::binding_model::CreateBindGroupError;
use wgpu_core::binding_model::CreateBindGroupLayoutError;
use wgpu_core::binding_model::CreatePipelineLayoutError;
use wgpu_core::binding_model::GetBindGroupLayoutError;
use wgpu_core::command::ClearError;
use wgpu_core::command::CommandEncoderError;
use wgpu_core::command::ComputePassError;
use wgpu_core::command::CreateRenderBundleError;
use wgpu_core::command::EncoderStateError;
use wgpu_core::command::PassStateError;
use wgpu_core::command::QueryError;
use wgpu_core::command::RenderBundleError;
use wgpu_core::command::RenderPassError;
use wgpu_core::device::DeviceError;
use wgpu_core::device::queue::QueueSubmitError;
use wgpu_core::device::queue::QueueWriteError;
use wgpu_core::pipeline::CreateComputePipelineError;
use wgpu_core::pipeline::CreateRenderPipelineError;
use wgpu_core::pipeline::CreateShaderModuleError;
use wgpu_core::present::ConfigureSurfaceError;
use wgpu_core::resource::BufferAccessError;
use wgpu_core::resource::CreateBufferError;
use wgpu_core::resource::CreateQuerySetError;
use wgpu_core::resource::CreateSamplerError;
use wgpu_core::resource::CreateTextureError;
use wgpu_core::resource::CreateTextureViewError;
use wgpu_types::error::ErrorType;
use wgpu_types::error::WebGpuError;
use crate::device::GPUDeviceLostInfo;
use crate::device::GPUDeviceLostReason;
/// Shared handle to the per-device error handler.
pub type ErrorHandler = std::rc::Rc<DeviceErrorHandler>;

/// Collects wgpu errors for one device and routes them to JS: an active
/// error scope, the `device.lost` promise, or an `uncapturederror` event.
pub struct DeviceErrorHandler {
  // Set once the device is lost; subsequent errors are dropped.
  pub is_lost: OnceLock<()>,
  // Stack of active error scopes (filter + captured errors), innermost last.
  pub scopes: Mutex<Vec<(GPUErrorFilter, Vec<GPUError>)>>,
  // Resolver backing `GPUDevice.lost`; consumed on first loss.
  lost_resolver: Mutex<Option<v8::Global<v8::PromiseResolver>>>,
  spawner: V8TaskSpawner,
  // The error handler is constructed before the device. A weak
  // reference to the device is placed here with `set_device`
  // after the device is constructed.
  device: OnceLock<v8::Weak<v8::Object>>,
}
impl DeviceErrorHandler {
  /// Creates a handler with no active error scopes. `set_device` must be
  /// called once the JS device object exists.
  pub fn new(
    lost_resolver: v8::Global<v8::PromiseResolver>,
    spawner: V8TaskSpawner,
  ) -> Self {
    Self {
      is_lost: Default::default(),
      scopes: Mutex::new(vec![]),
      lost_resolver: Mutex::new(Some(lost_resolver)),
      device: OnceLock::new(),
      spawner,
    }
  }

  /// Installs the weak back-reference to the JS device object.
  /// Panics if called more than once.
  pub fn set_device(&self, device: v8::Weak<v8::Object>) {
    self.device.set(device).unwrap()
  }

  /// Routes an optional wgpu error:
  /// - `Lost` resolves the `device.lost` promise (once) and marks the
  ///   device lost so all later errors are ignored;
  /// - otherwise the innermost error scope with a matching filter captures
  ///   it;
  /// - with no matching scope, an `uncapturederror` event is dispatched on
  ///   the device via a spawned V8 task.
  pub fn push_error<E: Into<GPUError>>(&self, err: Option<E>) {
    let Some(err) = err else {
      return;
    };

    if self.is_lost.get().is_some() {
      return;
    }

    let err = err.into();

    if let GPUError::Lost(reason) = err {
      let _ = self.is_lost.set(());

      if let Some(resolver) = self.lost_resolver.lock().unwrap().take() {
        self.spawner.spawn(move |scope| {
          let resolver = v8::Local::new(scope, resolver);
          let info = make_cppgc_object(scope, GPUDeviceLostInfo { reason });
          let info = v8::Local::new(scope, info);
          resolver.resolve(scope, info.into());
        });
      }
      return;
    }

    let error_filter = match err {
      // `Lost` was handled (and returned from) above.
      GPUError::Lost(_) => unreachable!(),
      GPUError::Validation(_) => GPUErrorFilter::Validation,
      GPUError::OutOfMemory => GPUErrorFilter::OutOfMemory,
      GPUError::Internal => GPUErrorFilter::Internal,
    };

    let mut scopes = self.scopes.lock().unwrap();
    // Innermost (most recently pushed) scope with a matching filter wins.
    let scope = scopes
      .iter_mut()
      .rfind(|(filter, _)| filter == &error_filter);

    if let Some(scope) = scope {
      scope.1.push(err);
    } else {
      let device = self
        .device
        .get()
        .expect("set_device was not called")
        .clone();
      self.spawner.spawn(move |scope| {
        let state = JsRuntime::op_state_from(&*scope);
        let Some(device) = device.to_local(scope) else {
          // The device has already gone away, so we don't have
          // anywhere to report the error.
          return;
        };
        // Build an ErrorEvent `{ error }` and dispatch it on the device
        // (an EventTarget) as "uncapturederror".
        let key = v8::String::new(scope, "dispatchEvent").unwrap();
        let val = device.get(scope, key.into()).unwrap();
        let func =
          v8::Global::new(scope, val.try_cast::<v8::Function>().unwrap());
        let device = v8::Global::new(scope, device.cast::<v8::Value>());
        let error_event_class =
          state.borrow().borrow::<crate::ErrorEventClass>().0.clone();
        let error = deno_core::error::to_v8_error(scope, &err);
        let error_event_class =
          v8::Local::new(scope, error_event_class.clone());
        let constructor =
          v8::Local::<v8::Function>::try_from(error_event_class).unwrap();
        let kind = v8::String::new(scope, "uncapturederror").unwrap();
        let obj = v8::Object::new(scope);
        let key = v8::String::new(scope, "error").unwrap();
        obj.set(scope, key.into(), error);
        let event = constructor
          .new_instance(scope, &[kind.into(), obj.into()])
          .unwrap();
        let recv = v8::Local::new(scope, device);
        func.open(scope).call(scope, recv, &[event.into()]);
      });
    }
  }
}
/// WebIDL `GPUErrorFilter` — selects which error class a scope captures.
#[derive(deno_core::WebIDL, Eq, PartialEq)]
#[webidl(enum)]
pub enum GPUErrorFilter {
  Validation,
  OutOfMemory,
  Internal,
}

/// Unified error representation; the `#[class]` attributes pick the JS
/// error class each variant surfaces as.
#[derive(Debug, deno_error::JsError)]
pub enum GPUError {
  // TODO(@crowlKats): consider adding an unreachable value that uses unreachable!()
  // `Lost` never reaches JS as an error object; it is intercepted by
  // `DeviceErrorHandler::push_error` to resolve `device.lost`.
  #[class("UNREACHABLE")]
  Lost(GPUDeviceLostReason),
  #[class("GPUValidationError")]
  Validation(String),
  #[class("GPUOutOfMemoryError")]
  OutOfMemory,
  #[class("GPUInternalError")]
  Internal,
}
impl Display for GPUError {
  fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
    match self {
      // Lost/Internal carry no message; the JS error class is the signal.
      GPUError::Lost(_) => Ok(()),
      GPUError::Validation(s) => f.write_str(s),
      GPUError::OutOfMemory => f.write_str("not enough memory left"),
      GPUError::Internal => Ok(()),
    }
  }
}

impl std::error::Error for GPUError {}
impl GPUError {
  /// Classifies a wgpu error by its declared WebGPU error type.
  fn from_webgpu(e: impl WebGpuError) -> Self {
    match e.webgpu_error_type() {
      ErrorType::Internal => GPUError::Internal,
      ErrorType::DeviceLost => GPUError::Lost(GPUDeviceLostReason::Unknown), // TODO: this variant should be ignored, register the lost callback instead.
      ErrorType::OutOfMemory => GPUError::OutOfMemory,
      ErrorType::Validation => GPUError::Validation(fmt_err(&e)),
    }
  }
}
/// Renders `err` together with its full `source()` chain as one
/// colon-separated string, falling back to `"validation error"` when the
/// rendered chain is empty.
fn fmt_err(err: &(dyn std::error::Error + 'static)) -> String {
  // Walk the error chain (err, err.source(), ...) and join the messages.
  let rendered = std::iter::successors(Some(err), |e| e.source())
    .map(|e| e.to_string())
    .collect::<Vec<_>>()
    .join(": ");
  if rendered.is_empty() {
    "validation error".to_string()
  } else {
    rendered
  }
}
// Conversions from wgpu-core error types into `GPUError`. All of these
// defer to `GPUError::from_webgpu`, which classifies by the error's
// declared WebGPU error type — except `PassStateError`, which is always
// surfaced as a validation error.
impl From<EncoderStateError> for GPUError {
  fn from(err: EncoderStateError) -> Self {
    GPUError::from_webgpu(err)
  }
}

impl From<PassStateError> for GPUError {
  fn from(err: PassStateError) -> Self {
    GPUError::Validation(fmt_err(&err))
  }
}

impl From<CreateBufferError> for GPUError {
  fn from(err: CreateBufferError) -> Self {
    GPUError::from_webgpu(err)
  }
}

impl From<DeviceError> for GPUError {
  fn from(err: DeviceError) -> Self {
    GPUError::from_webgpu(err)
  }
}

impl From<BufferAccessError> for GPUError {
  fn from(err: BufferAccessError) -> Self {
    GPUError::from_webgpu(err)
  }
}

impl From<CreateBindGroupLayoutError> for GPUError {
  fn from(err: CreateBindGroupLayoutError) -> Self {
    GPUError::from_webgpu(err)
  }
}

impl From<CreatePipelineLayoutError> for GPUError {
  fn from(err: CreatePipelineLayoutError) -> Self {
    GPUError::from_webgpu(err)
  }
}

impl From<CreateBindGroupError> for GPUError {
  fn from(err: CreateBindGroupError) -> Self {
    GPUError::from_webgpu(err)
  }
}

impl From<RenderBundleError> for GPUError {
  fn from(err: RenderBundleError) -> Self {
    GPUError::from_webgpu(err)
  }
}

impl From<CreateRenderBundleError> for GPUError {
  fn from(err: CreateRenderBundleError) -> Self {
    GPUError::from_webgpu(err)
  }
}

impl From<CommandEncoderError> for GPUError {
  fn from(err: CommandEncoderError) -> Self {
    GPUError::from_webgpu(err)
  }
}

impl From<QueryError> for GPUError {
  fn from(err: QueryError) -> Self {
    GPUError::from_webgpu(err)
  }
}

impl From<ComputePassError> for GPUError {
  fn from(err: ComputePassError) -> Self {
    GPUError::from_webgpu(err)
  }
}

impl From<CreateComputePipelineError> for GPUError {
  fn from(err: CreateComputePipelineError) -> Self {
    GPUError::from_webgpu(err)
  }
}

impl From<GetBindGroupLayoutError> for GPUError {
  fn from(err: GetBindGroupLayoutError) -> Self {
    GPUError::from_webgpu(err)
  }
}

impl From<CreateRenderPipelineError> for GPUError {
  fn from(err: CreateRenderPipelineError) -> Self {
    GPUError::from_webgpu(err)
  }
}

impl From<RenderPassError> for GPUError {
  fn from(err: RenderPassError) -> Self {
    GPUError::from_webgpu(err)
  }
}

impl From<CreateSamplerError> for GPUError {
  fn from(err: CreateSamplerError) -> Self {
    GPUError::from_webgpu(err)
  }
}

impl From<CreateShaderModuleError> for GPUError {
  fn from(err: CreateShaderModuleError) -> Self {
    GPUError::from_webgpu(err)
  }
}

impl From<CreateTextureError> for GPUError {
  fn from(err: CreateTextureError) -> Self {
    GPUError::from_webgpu(err)
  }
}

impl From<CreateTextureViewError> for GPUError {
  fn from(err: CreateTextureViewError) -> Self {
    GPUError::from_webgpu(err)
  }
}

impl From<CreateQuerySetError> for GPUError {
  fn from(err: CreateQuerySetError) -> Self {
    GPUError::from_webgpu(err)
  }
}

impl From<QueueSubmitError> for GPUError {
  fn from(err: QueueSubmitError) -> Self {
    GPUError::from_webgpu(err)
  }
}

impl From<QueueWriteError> for GPUError {
  fn from(err: QueueWriteError) -> Self {
    GPUError::from_webgpu(err)
  }
}

impl From<ClearError> for GPUError {
  fn from(err: ClearError) -> Self {
    GPUError::from_webgpu(err)
  }
}

impl From<ConfigureSurfaceError> for GPUError {
  fn from(err: ConfigureSurfaceError) -> Self {
    GPUError::from_webgpu(err)
  }
}
/// Errors shared across WebGPU interfaces (currently only the guard that
/// rejects direct JS construction of internal classes).
#[derive(Debug, thiserror::Error, deno_error::JsError)]
pub enum GPUGenericError {
  #[class(type)]
  #[error("Illegal constructor")]
  InvalidConstructor,
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/ext/webgpu/render_pass.rs | ext/webgpu/render_pass.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::borrow::Cow;
use std::cell::RefCell;
use std::num::NonZeroU64;
use deno_core::GarbageCollected;
use deno_core::WebIDL;
use deno_core::cppgc::Ref;
use deno_core::op2;
use deno_core::v8;
use deno_core::v8::Local;
use deno_core::v8::Value;
use deno_core::webidl::ContextFn;
use deno_core::webidl::IntOptions;
use deno_core::webidl::Nullable;
use deno_core::webidl::WebIdlConverter;
use deno_core::webidl::WebIdlError;
use crate::Instance;
use crate::buffer::GPUBuffer;
use crate::error::GPUGenericError;
use crate::render_bundle::GPURenderBundle;
use crate::texture::GPUTexture;
use crate::texture::GPUTextureView;
use crate::webidl::GPUColor;
/// Backing data for a JS `GPURenderPassEncoder` object.
pub struct GPURenderPassEncoder {
  pub instance: Instance,
  pub error_handler: super::error::ErrorHandler,
  // The recording pass; `RefCell` because op2 methods only receive `&self`.
  pub render_pass: RefCell<wgpu_core::command::RenderPass>,
  pub label: String,
}

// SAFETY: we're sure this can be GCed
unsafe impl GarbageCollected for GPURenderPassEncoder {
  fn trace(&self, _visitor: &mut deno_core::v8::cppgc::Visitor) {}

  fn get_name(&self) -> &'static std::ffi::CStr {
    c"GPURenderPassEncoder"
  }
}
#[op2]
impl GPURenderPassEncoder {
// Not constructible from JS; instances come from
// `GPUCommandEncoder.beginRenderPass()`.
#[constructor]
#[cppgc]
fn constructor(_: bool) -> Result<GPURenderPassEncoder, GPUGenericError> {
  Err(GPUGenericError::InvalidConstructor)
}

#[getter]
#[string]
fn label(&self) -> String {
  self.label.clone()
}

#[setter]
#[string]
fn label(&self, #[webidl] _label: String) {
  // TODO(@crowlKats): no-op, needs wgpu to implement changing the label
}
/// `setViewport(x, y, width, height, minDepth, maxDepth)`. Encoding errors
/// are routed to the device error handler rather than thrown, matching the
/// WebGPU pass-encoding error model.
#[required(6)]
#[undefined]
fn set_viewport(
  &self,
  #[webidl] x: f32,
  #[webidl] y: f32,
  #[webidl] width: f32,
  #[webidl] height: f32,
  #[webidl] min_depth: f32,
  #[webidl] max_depth: f32,
) {
  let err = self
    .instance
    .render_pass_set_viewport(
      &mut self.render_pass.borrow_mut(),
      x,
      y,
      width,
      height,
      min_depth,
      max_depth,
    )
    .err();
  self.error_handler.push_error(err);
}

/// `setScissorRect(x, y, width, height)`.
#[required(4)]
#[undefined]
fn set_scissor_rect(
  &self,
  #[webidl(options(enforce_range = true))] x: u32,
  #[webidl(options(enforce_range = true))] y: u32,
  #[webidl(options(enforce_range = true))] width: u32,
  #[webidl(options(enforce_range = true))] height: u32,
) {
  let err = self
    .instance
    .render_pass_set_scissor_rect(
      &mut self.render_pass.borrow_mut(),
      x,
      y,
      width,
      height,
    )
    .err();
  self.error_handler.push_error(err);
}
/// `setBlendConstant(color)`.
#[required(1)]
#[undefined]
fn set_blend_constant(&self, #[webidl] color: GPUColor) {
  let err = self
    .instance
    .render_pass_set_blend_constant(
      &mut self.render_pass.borrow_mut(),
      color.into(),
    )
    .err();
  self.error_handler.push_error(err);
}

/// `setStencilReference(reference)`.
#[required(1)]
#[undefined]
fn set_stencil_reference(
  &self,
  #[webidl(options(enforce_range = true))] reference: u32,
) {
  let err = self
    .instance
    .render_pass_set_stencil_reference(
      &mut self.render_pass.borrow_mut(),
      reference,
    )
    .err();
  self.error_handler.push_error(err);
}

/// `beginOcclusionQuery(queryIndex)`.
#[required(1)]
#[undefined]
fn begin_occlusion_query(
  &self,
  #[webidl(options(enforce_range = true))] query_index: u32,
) {
  let err = self
    .instance
    .render_pass_begin_occlusion_query(
      &mut self.render_pass.borrow_mut(),
      query_index,
    )
    .err();
  self.error_handler.push_error(err);
}

/// `endOcclusionQuery()`.
#[fast]
#[undefined]
fn end_occlusion_query(&self) {
  let err = self
    .instance
    .render_pass_end_occlusion_query(&mut self.render_pass.borrow_mut())
    .err();
  self.error_handler.push_error(err);
}
/// `executeBundles(bundles)` — replays pre-recorded render bundles.
#[required(1)]
#[undefined]
fn execute_bundles(&self, #[webidl] bundles: Vec<Ref<GPURenderBundle>>) {
  let err = self
    .instance
    .render_pass_execute_bundles(
      &mut self.render_pass.borrow_mut(),
      &bundles
        .into_iter()
        .map(|bundle| bundle.id)
        .collect::<Vec<_>>(),
    )
    .err();
  self.error_handler.push_error(err);
}

/// `end()` — finishes recording the pass.
#[fast]
#[undefined]
fn end(&self) {
  let err = self
    .instance
    .render_pass_end(&mut self.render_pass.borrow_mut())
    .err();
  self.error_handler.push_error(err);
}
/// `pushDebugGroup(groupLabel)`.
#[undefined]
fn push_debug_group(&self, #[webidl] group_label: String) {
  let err = self
    .instance
    .render_pass_push_debug_group(
      &mut self.render_pass.borrow_mut(),
      &group_label,
      0, // wgpu#975
    )
    .err();
  self.error_handler.push_error(err);
}

/// `popDebugGroup()`.
#[fast]
#[undefined]
fn pop_debug_group(&self) {
  let err = self
    .instance
    .render_pass_pop_debug_group(&mut self.render_pass.borrow_mut())
    .err();
  self.error_handler.push_error(err);
}

/// `insertDebugMarker(markerLabel)`.
#[undefined]
fn insert_debug_marker(&self, #[webidl] marker_label: String) {
  let err = self
    .instance
    .render_pass_insert_debug_marker(
      &mut self.render_pass.borrow_mut(),
      &marker_label,
      0, // wgpu#975
    )
    .err();
  self.error_handler.push_error(err);
}
#[undefined]
fn set_bind_group<'a>(
&self,
scope: &mut v8::PinScope<'a, '_>,
#[webidl(options(enforce_range = true))] index: u32,
#[webidl] bind_group: Nullable<Ref<crate::bind_group::GPUBindGroup>>,
dynamic_offsets: v8::Local<'a, v8::Value>,
dynamic_offsets_data_start: v8::Local<'a, v8::Value>,
dynamic_offsets_data_length: v8::Local<'a, v8::Value>,
) -> Result<(), WebIdlError> {
const PREFIX: &str =
"Failed to execute 'setBindGroup' on 'GPUComputePassEncoder'";
let err = if let Ok(uint_32) = dynamic_offsets.try_cast::<v8::Uint32Array>()
{
let start = u64::convert(
scope,
dynamic_offsets_data_start,
Cow::Borrowed(PREFIX),
(|| Cow::Borrowed("Argument 4")).into(),
&IntOptions {
clamp: false,
enforce_range: true,
},
)? as usize;
let len = u32::convert(
scope,
dynamic_offsets_data_length,
Cow::Borrowed(PREFIX),
(|| Cow::Borrowed("Argument 5")).into(),
&IntOptions {
clamp: false,
enforce_range: true,
},
)? as usize;
let ab = uint_32.buffer(scope).unwrap();
let ptr = ab.data().unwrap();
let ab_len = ab.byte_length() / 4;
// SAFETY: created from an array buffer, slice is dropped at end of function call
let data =
unsafe { std::slice::from_raw_parts(ptr.as_ptr() as _, ab_len) };
let offsets = &data[start..(start + len)];
self
.instance
.render_pass_set_bind_group(
&mut self.render_pass.borrow_mut(),
index,
bind_group.into_option().map(|bind_group| bind_group.id),
offsets,
)
.err()
} else {
let offsets = <Option<Vec<u32>>>::convert(
scope,
dynamic_offsets,
Cow::Borrowed(PREFIX),
(|| Cow::Borrowed("Argument 3")).into(),
&IntOptions {
clamp: false,
enforce_range: true,
},
)?
.unwrap_or_default();
self
.instance
.render_pass_set_bind_group(
&mut self.render_pass.borrow_mut(),
index,
bind_group.into_option().map(|bind_group| bind_group.id),
&offsets,
)
.err()
};
self.error_handler.push_error(err);
Ok(())
}
#[undefined]
fn set_pipeline(
&self,
#[webidl] pipeline: Ref<crate::render_pipeline::GPURenderPipeline>,
) {
let err = self
.instance
.render_pass_set_pipeline(&mut self.render_pass.borrow_mut(), pipeline.id)
.err();
self.error_handler.push_error(err);
}
#[required(2)]
#[undefined]
fn set_index_buffer(
&self,
#[webidl] buffer: Ref<GPUBuffer>,
#[webidl] index_format: crate::render_pipeline::GPUIndexFormat,
#[webidl(default = 0, options(enforce_range = true))] offset: u64,
#[webidl(options(enforce_range = true))] size: Option<u64>,
) {
let err = self
.instance
.render_pass_set_index_buffer(
&mut self.render_pass.borrow_mut(),
buffer.id,
index_format.into(),
offset,
size.and_then(NonZeroU64::new),
)
.err();
self.error_handler.push_error(err);
}
#[required(2)]
#[undefined]
fn set_vertex_buffer(
&self,
#[webidl(options(enforce_range = true))] slot: u32,
#[webidl] buffer: Ref<GPUBuffer>, // TODO(wgpu): support nullable buffer
#[webidl(default = 0, options(enforce_range = true))] offset: u64,
#[webidl(options(enforce_range = true))] size: Option<u64>,
) {
let err = self
.instance
.render_pass_set_vertex_buffer(
&mut self.render_pass.borrow_mut(),
slot,
buffer.id,
offset,
size.and_then(NonZeroU64::new),
)
.err();
self.error_handler.push_error(err);
}
#[required(1)]
#[undefined]
fn draw(
&self,
#[webidl(options(enforce_range = true))] vertex_count: u32,
#[webidl(default = 1, options(enforce_range = true))] instance_count: u32,
#[webidl(default = 0, options(enforce_range = true))] first_vertex: u32,
#[webidl(default = 0, options(enforce_range = true))] first_instance: u32,
) {
let err = self
.instance
.render_pass_draw(
&mut self.render_pass.borrow_mut(),
vertex_count,
instance_count,
first_vertex,
first_instance,
)
.err();
self.error_handler.push_error(err);
}
#[required(1)]
#[undefined]
fn draw_indexed(
&self,
#[webidl(options(enforce_range = true))] index_count: u32,
#[webidl(default = 1, options(enforce_range = true))] instance_count: u32,
#[webidl(default = 0, options(enforce_range = true))] first_index: u32,
#[webidl(default = 0, options(enforce_range = true))] base_vertex: i32,
#[webidl(default = 0, options(enforce_range = true))] first_instance: u32,
) {
let err = self
.instance
.render_pass_draw_indexed(
&mut self.render_pass.borrow_mut(),
index_count,
instance_count,
first_index,
base_vertex,
first_instance,
)
.err();
self.error_handler.push_error(err);
}
#[required(2)]
#[undefined]
fn draw_indirect(
&self,
#[webidl] indirect_buffer: Ref<GPUBuffer>,
#[webidl(options(enforce_range = true))] indirect_offset: u64,
) {
let err = self
.instance
.render_pass_draw_indirect(
&mut self.render_pass.borrow_mut(),
indirect_buffer.id,
indirect_offset,
)
.err();
self.error_handler.push_error(err);
}
#[required(2)]
#[undefined]
fn draw_indexed_indirect(
&self,
#[webidl] indirect_buffer: Ref<GPUBuffer>,
#[webidl(options(enforce_range = true))] indirect_offset: u64,
) {
let err = self
.instance
.render_pass_draw_indexed_indirect(
&mut self.render_pass.borrow_mut(),
indirect_buffer.id,
indirect_offset,
)
.err();
self.error_handler.push_error(err);
}
}
#[derive(WebIDL)]
#[webidl(dictionary)]
pub(crate) struct GPURenderPassDescriptor {
#[webidl(default = String::new())]
pub label: String,
pub color_attachments: Vec<Nullable<GPURenderPassColorAttachment>>,
pub depth_stencil_attachment: Option<GPURenderPassDepthStencilAttachment>,
pub occlusion_query_set: Option<Ref<crate::query_set::GPUQuerySet>>,
pub timestamp_writes: Option<GPURenderPassTimestampWrites>,
/*#[webidl(default = 50000000)]
#[options(enforce_range = true)]
pub max_draw_count: u64,*/
#[webidl(default = 0)]
#[options(enforce_range = true)]
pub multiview_mask: u32,
}
#[derive(WebIDL)]
#[webidl(dictionary)]
pub(crate) struct GPURenderPassColorAttachment {
pub view: GPUTextureOrView,
#[options(enforce_range = true)]
pub depth_slice: Option<u32>,
pub resolve_target: Option<GPUTextureOrView>,
pub clear_value: Option<GPUColor>,
pub load_op: GPULoadOp,
pub store_op: GPUStoreOp,
}
#[derive(WebIDL)]
#[webidl(enum)]
pub(crate) enum GPULoadOp {
Load,
Clear,
}
impl GPULoadOp {
pub fn with_default_value<V: Default>(
self,
val: Option<V>,
) -> wgpu_core::command::LoadOp<V> {
match self {
GPULoadOp::Load => wgpu_core::command::LoadOp::Load,
GPULoadOp::Clear => {
wgpu_core::command::LoadOp::Clear(val.unwrap_or_default())
}
}
}
pub fn with_value<V>(self, val: V) -> wgpu_core::command::LoadOp<V> {
match self {
GPULoadOp::Load => wgpu_core::command::LoadOp::Load,
GPULoadOp::Clear => wgpu_core::command::LoadOp::Clear(val),
}
}
}
#[derive(WebIDL)]
#[webidl(enum)]
pub(crate) enum GPUStoreOp {
Store,
Discard,
}
impl From<GPUStoreOp> for wgpu_core::command::StoreOp {
fn from(value: GPUStoreOp) -> Self {
match value {
GPUStoreOp::Store => Self::Store,
GPUStoreOp::Discard => Self::Discard,
}
}
}
#[derive(WebIDL)]
#[webidl(dictionary)]
pub(crate) struct GPURenderPassDepthStencilAttachment {
pub view: GPUTextureOrView,
pub depth_clear_value: Option<f32>,
pub depth_load_op: Option<GPULoadOp>,
pub depth_store_op: Option<GPUStoreOp>,
#[webidl(default = false)]
pub depth_read_only: bool,
#[webidl(default = 0)]
#[options(enforce_range = true)]
pub stencil_clear_value: u32,
pub stencil_load_op: Option<GPULoadOp>,
pub stencil_store_op: Option<GPUStoreOp>,
#[webidl(default = false)]
pub stencil_read_only: bool,
}
#[derive(WebIDL)]
#[webidl(dictionary)]
pub(crate) struct GPURenderPassTimestampWrites {
pub query_set: Ref<crate::query_set::GPUQuerySet>,
#[options(enforce_range = true)]
pub beginning_of_pass_write_index: Option<u32>,
#[options(enforce_range = true)]
pub end_of_pass_write_index: Option<u32>,
}
pub(crate) enum GPUTextureOrView {
Texture(Ref<GPUTexture>),
TextureView(Ref<GPUTextureView>),
}
impl GPUTextureOrView {
pub(crate) fn to_view_id(&self) -> wgpu_core::id::TextureViewId {
match self {
Self::Texture(texture) => texture.default_view_id(),
Self::TextureView(texture_view) => texture_view.id,
}
}
}
impl<'a> WebIdlConverter<'a> for GPUTextureOrView {
type Options = ();
fn convert<'b>(
scope: &mut v8::PinScope<'a, '_>,
value: Local<'a, Value>,
prefix: Cow<'static, str>,
context: ContextFn<'b>,
options: &Self::Options,
) -> Result<Self, WebIdlError> {
<Ref<GPUTexture>>::convert(
scope,
value,
prefix.clone(),
context.borrowed(),
options,
)
.map(Self::Texture)
.or_else(|_| {
<Ref<GPUTextureView>>::convert(
scope,
value,
prefix.clone(),
context.borrowed(),
options,
)
.map(Self::TextureView)
})
}
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/ext/webgpu/compute_pass.rs | ext/webgpu/compute_pass.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::borrow::Cow;
use std::cell::RefCell;
use deno_core::GarbageCollected;
use deno_core::WebIDL;
use deno_core::cppgc::Ref;
use deno_core::op2;
use deno_core::v8;
use deno_core::webidl::IntOptions;
use deno_core::webidl::Nullable;
use deno_core::webidl::WebIdlConverter;
use deno_core::webidl::WebIdlError;
use crate::Instance;
use crate::error::GPUGenericError;
pub struct GPUComputePassEncoder {
pub instance: Instance,
pub error_handler: super::error::ErrorHandler,
pub compute_pass: RefCell<wgpu_core::command::ComputePass>,
pub label: String,
}
// SAFETY: we're sure this can be GCed
unsafe impl GarbageCollected for GPUComputePassEncoder {
fn trace(&self, _visitor: &mut deno_core::v8::cppgc::Visitor) {}
fn get_name(&self) -> &'static std::ffi::CStr {
c"GPUComputePassEncoder"
}
}
#[op2]
impl GPUComputePassEncoder {
#[constructor]
#[cppgc]
fn constructor(_: bool) -> Result<GPUComputePassEncoder, GPUGenericError> {
Err(GPUGenericError::InvalidConstructor)
}
#[getter]
#[string]
fn label(&self) -> String {
self.label.clone()
}
#[setter]
#[string]
fn label(&self, #[webidl] _label: String) {
// TODO(@crowlKats): no-op, needs wpgu to implement changing the label
}
#[undefined]
fn set_pipeline(
&self,
#[webidl] pipeline: Ref<crate::compute_pipeline::GPUComputePipeline>,
) {
let err = self
.instance
.compute_pass_set_pipeline(
&mut self.compute_pass.borrow_mut(),
pipeline.id,
)
.err();
self.error_handler.push_error(err);
}
#[undefined]
fn dispatch_workgroups(
&self,
#[webidl(options(enforce_range = true))] work_group_count_x: u32,
#[webidl(default = 1, options(enforce_range = true))]
work_group_count_y: u32,
#[webidl(default = 1, options(enforce_range = true))]
work_group_count_z: u32,
) {
let err = self
.instance
.compute_pass_dispatch_workgroups(
&mut self.compute_pass.borrow_mut(),
work_group_count_x,
work_group_count_y,
work_group_count_z,
)
.err();
self.error_handler.push_error(err);
}
#[undefined]
fn dispatch_workgroups_indirect(
&self,
#[webidl] indirect_buffer: Ref<crate::buffer::GPUBuffer>,
#[webidl(options(enforce_range = true))] indirect_offset: u64,
) {
let err = self
.instance
.compute_pass_dispatch_workgroups_indirect(
&mut self.compute_pass.borrow_mut(),
indirect_buffer.id,
indirect_offset,
)
.err();
self.error_handler.push_error(err);
}
#[fast]
#[undefined]
fn end(&self) {
let err = self
.instance
.compute_pass_end(&mut self.compute_pass.borrow_mut())
.err();
self.error_handler.push_error(err);
}
#[undefined]
fn push_debug_group(&self, #[webidl] group_label: String) {
let err = self
.instance
.compute_pass_push_debug_group(
&mut self.compute_pass.borrow_mut(),
&group_label,
0, // wgpu#975
)
.err();
self.error_handler.push_error(err);
}
#[fast]
#[undefined]
fn pop_debug_group(&self) {
let err = self
.instance
.compute_pass_pop_debug_group(&mut self.compute_pass.borrow_mut())
.err();
self.error_handler.push_error(err);
}
#[undefined]
fn insert_debug_marker(&self, #[webidl] marker_label: String) {
let err = self
.instance
.compute_pass_insert_debug_marker(
&mut self.compute_pass.borrow_mut(),
&marker_label,
0, // wgpu#975
)
.err();
self.error_handler.push_error(err);
}
#[undefined]
fn set_bind_group<'a>(
&self,
scope: &mut v8::PinScope<'a, '_>,
#[webidl(options(enforce_range = true))] index: u32,
#[webidl] bind_group: Nullable<Ref<crate::bind_group::GPUBindGroup>>,
dynamic_offsets: v8::Local<'a, v8::Value>,
dynamic_offsets_data_start: v8::Local<'a, v8::Value>,
dynamic_offsets_data_length: v8::Local<'a, v8::Value>,
) -> Result<(), WebIdlError> {
const PREFIX: &str =
"Failed to execute 'setBindGroup' on 'GPUComputePassEncoder'";
let err = if let Ok(uint_32) = dynamic_offsets.try_cast::<v8::Uint32Array>()
{
let start = u64::convert(
scope,
dynamic_offsets_data_start,
Cow::Borrowed(PREFIX),
(|| Cow::Borrowed("Argument 4")).into(),
&IntOptions {
clamp: false,
enforce_range: true,
},
)? as usize;
let len = u32::convert(
scope,
dynamic_offsets_data_length,
Cow::Borrowed(PREFIX),
(|| Cow::Borrowed("Argument 5")).into(),
&IntOptions {
clamp: false,
enforce_range: true,
},
)? as usize;
let ab = uint_32.buffer(scope).unwrap();
let ptr = ab.data().unwrap();
let ab_len = ab.byte_length() / 4;
// SAFETY: compute_pass_set_bind_group internally calls extend_from_slice with this slice
let data =
unsafe { std::slice::from_raw_parts(ptr.as_ptr() as _, ab_len) };
let offsets = &data[start..(start + len)];
self
.instance
.compute_pass_set_bind_group(
&mut self.compute_pass.borrow_mut(),
index,
bind_group.into_option().map(|bind_group| bind_group.id),
offsets,
)
.err()
} else {
let offsets = <Option<Vec<u32>>>::convert(
scope,
dynamic_offsets,
Cow::Borrowed(PREFIX),
(|| Cow::Borrowed("Argument 3")).into(),
&IntOptions {
clamp: false,
enforce_range: true,
},
)?
.unwrap_or_default();
self
.instance
.compute_pass_set_bind_group(
&mut self.compute_pass.borrow_mut(),
index,
bind_group.into_option().map(|bind_group| bind_group.id),
&offsets,
)
.err()
};
self.error_handler.push_error(err);
Ok(())
}
}
#[derive(WebIDL)]
#[webidl(dictionary)]
pub(crate) struct GPUComputePassDescriptor {
#[webidl(default = String::new())]
pub label: String,
pub timestamp_writes: Option<GPUComputePassTimestampWrites>,
}
#[derive(WebIDL)]
#[webidl(dictionary)]
pub(crate) struct GPUComputePassTimestampWrites {
pub query_set: Ref<crate::query_set::GPUQuerySet>,
#[options(enforce_range = true)]
pub beginning_of_pass_write_index: Option<u32>,
#[options(enforce_range = true)]
pub end_of_pass_write_index: Option<u32>,
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/ext/webgpu/render_bundle.rs | ext/webgpu/render_bundle.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::borrow::Cow;
use std::cell::RefCell;
use std::num::NonZeroU64;
use deno_core::GarbageCollected;
use deno_core::WebIDL;
use deno_core::cppgc::Ref;
use deno_core::op2;
use deno_core::v8;
use deno_core::webidl::IntOptions;
use deno_core::webidl::Nullable;
use deno_core::webidl::WebIdlConverter;
use deno_core::webidl::WebIdlError;
use deno_core::webidl::WebIdlInterfaceConverter;
use deno_error::JsErrorBox;
use crate::Instance;
use crate::buffer::GPUBuffer;
use crate::error::GPUGenericError;
use crate::texture::GPUTextureFormat;
fn c_string_truncated_at_first_nul<T: Into<Vec<u8>>>(
src: T,
) -> std::ffi::CString {
std::ffi::CString::new(src).unwrap_or_else(|err| {
let nul_pos = err.nul_position();
std::ffi::CString::new(err.into_vec().split_at(nul_pos).0).unwrap()
})
}
pub struct GPURenderBundleEncoder {
pub instance: Instance,
pub error_handler: super::error::ErrorHandler,
pub encoder: RefCell<Option<wgpu_core::command::RenderBundleEncoder>>,
pub label: String,
}
// SAFETY: we're sure this can be GCed
unsafe impl GarbageCollected for GPURenderBundleEncoder {
fn trace(&self, _visitor: &mut deno_core::v8::cppgc::Visitor) {}
fn get_name(&self) -> &'static std::ffi::CStr {
c"GPURenderBundleEncoder"
}
}
#[op2]
impl GPURenderBundleEncoder {
#[constructor]
#[cppgc]
fn constructor(_: bool) -> Result<GPURenderBundleEncoder, GPUGenericError> {
Err(GPUGenericError::InvalidConstructor)
}
#[getter]
#[string]
fn label(&self) -> String {
self.label.clone()
}
#[setter]
#[string]
fn label(&self, #[webidl] _label: String) {
// TODO(@crowlKats): no-op, needs wpgu to implement changing the label
}
#[cppgc]
fn finish(
&self,
#[webidl] descriptor: GPURenderBundleDescriptor,
) -> GPURenderBundle {
let wgpu_descriptor = wgpu_core::command::RenderBundleDescriptor {
label: crate::transform_label(descriptor.label.clone()),
};
let (id, err) = self.instance.render_bundle_encoder_finish(
self.encoder.borrow_mut().take().unwrap(),
&wgpu_descriptor,
None,
);
self.error_handler.push_error(err);
GPURenderBundle {
instance: self.instance.clone(),
id,
label: descriptor.label.clone(),
}
}
#[undefined]
fn push_debug_group(
&self,
#[webidl] group_label: String,
) -> Result<(), JsErrorBox> {
let mut encoder = self.encoder.borrow_mut();
let encoder = encoder.as_mut().ok_or_else(|| {
JsErrorBox::generic("Encoder has already been finished")
})?;
let label = c_string_truncated_at_first_nul(group_label);
// SAFETY: the string the raw pointer points to lives longer than the below
// function invocation.
unsafe {
wgpu_core::command::bundle_ffi::wgpu_render_bundle_push_debug_group(
encoder,
label.as_ptr(),
);
}
Ok(())
}
#[fast]
#[undefined]
fn pop_debug_group(&self) -> Result<(), JsErrorBox> {
let mut encoder = self.encoder.borrow_mut();
let encoder = encoder.as_mut().ok_or_else(|| {
JsErrorBox::generic("Encoder has already been finished")
})?;
wgpu_core::command::bundle_ffi::wgpu_render_bundle_pop_debug_group(encoder);
Ok(())
}
#[undefined]
fn insert_debug_marker(
&self,
#[webidl] marker_label: String,
) -> Result<(), JsErrorBox> {
let mut encoder = self.encoder.borrow_mut();
let encoder = encoder.as_mut().ok_or_else(|| {
JsErrorBox::generic("Encoder has already been finished")
})?;
let label = c_string_truncated_at_first_nul(marker_label);
// SAFETY: the string the raw pointer points to lives longer than the below
// function invocation.
unsafe {
wgpu_core::command::bundle_ffi::wgpu_render_bundle_insert_debug_marker(
encoder,
label.as_ptr(),
);
}
Ok(())
}
#[undefined]
fn set_bind_group<'a>(
&self,
scope: &mut v8::PinScope<'a, '_>,
#[webidl(options(enforce_range = true))] index: u32,
#[webidl] bind_group: Nullable<Ref<crate::bind_group::GPUBindGroup>>,
dynamic_offsets: v8::Local<'a, v8::Value>,
dynamic_offsets_data_start: v8::Local<'a, v8::Value>,
dynamic_offsets_data_length: v8::Local<'a, v8::Value>,
) -> Result<(), SetBindGroupError> {
let mut encoder = self.encoder.borrow_mut();
let encoder = encoder.as_mut().ok_or_else(|| {
JsErrorBox::generic("Encoder has already been finished")
})?;
const PREFIX: &str =
"Failed to execute 'setBindGroup' on 'GPUComputePassEncoder'";
if let Ok(uint_32) = dynamic_offsets.try_cast::<v8::Uint32Array>() {
let start = u64::convert(
scope,
dynamic_offsets_data_start,
Cow::Borrowed(PREFIX),
(|| Cow::Borrowed("Argument 4")).into(),
&IntOptions {
clamp: false,
enforce_range: true,
},
)? as usize;
let len = u32::convert(
scope,
dynamic_offsets_data_length,
Cow::Borrowed(PREFIX),
(|| Cow::Borrowed("Argument 5")).into(),
&IntOptions {
clamp: false,
enforce_range: true,
},
)? as usize;
let ab = uint_32.buffer(scope).unwrap();
let ptr = ab.data().unwrap();
let ab_len = ab.byte_length() / 4;
// SAFETY: created from an array buffer, slice is dropped at end of function call
let data =
unsafe { std::slice::from_raw_parts(ptr.as_ptr() as _, ab_len) };
let offsets = &data[start..(start + len)];
// SAFETY: wgpu FFI call
unsafe {
wgpu_core::command::bundle_ffi::wgpu_render_bundle_set_bind_group(
encoder,
index,
bind_group.into_option().map(|bind_group| bind_group.id),
offsets.as_ptr(),
offsets.len(),
);
}
} else {
let offsets = <Option<Vec<u32>>>::convert(
scope,
dynamic_offsets,
Cow::Borrowed(PREFIX),
(|| Cow::Borrowed("Argument 3")).into(),
&IntOptions {
clamp: false,
enforce_range: true,
},
)?
.unwrap_or_default();
// SAFETY: wgpu FFI call
unsafe {
wgpu_core::command::bundle_ffi::wgpu_render_bundle_set_bind_group(
encoder,
index,
bind_group.into_option().map(|bind_group| bind_group.id),
offsets.as_ptr(),
offsets.len(),
);
}
}
Ok(())
}
#[undefined]
fn set_pipeline(
&self,
#[webidl] pipeline: Ref<crate::render_pipeline::GPURenderPipeline>,
) -> Result<(), JsErrorBox> {
let mut encoder = self.encoder.borrow_mut();
let encoder = encoder.as_mut().ok_or_else(|| {
JsErrorBox::generic("Encoder has already been finished")
})?;
wgpu_core::command::bundle_ffi::wgpu_render_bundle_set_pipeline(
encoder,
pipeline.id,
);
Ok(())
}
#[required(2)]
#[undefined]
fn set_index_buffer(
&self,
#[webidl] buffer: Ref<GPUBuffer>,
#[webidl] index_format: crate::render_pipeline::GPUIndexFormat,
#[webidl(default = 0, options(enforce_range = true))] offset: u64,
#[webidl(options(enforce_range = true))] size: Option<u64>,
) -> Result<(), JsErrorBox> {
let mut encoder = self.encoder.borrow_mut();
let encoder = encoder.as_mut().ok_or_else(|| {
JsErrorBox::generic("Encoder has already been finished")
})?;
encoder.set_index_buffer(
buffer.id,
index_format.into(),
offset,
size.and_then(NonZeroU64::new),
);
Ok(())
}
#[required(2)]
#[undefined]
fn set_vertex_buffer(
&self,
#[webidl(options(enforce_range = true))] slot: u32,
#[webidl] buffer: Ref<GPUBuffer>, // TODO(wgpu): support nullable buffer
#[webidl(default = 0, options(enforce_range = true))] offset: u64,
#[webidl(options(enforce_range = true))] size: Option<u64>,
) -> Result<(), JsErrorBox> {
let mut encoder = self.encoder.borrow_mut();
let encoder = encoder.as_mut().ok_or_else(|| {
JsErrorBox::generic("Encoder has already been finished")
})?;
wgpu_core::command::bundle_ffi::wgpu_render_bundle_set_vertex_buffer(
encoder,
slot,
buffer.id,
offset,
size.and_then(NonZeroU64::new),
);
Ok(())
}
#[required(1)]
#[undefined]
fn draw(
&self,
#[webidl(options(enforce_range = true))] vertex_count: u32,
#[webidl(default = 1, options(enforce_range = true))] instance_count: u32,
#[webidl(default = 0, options(enforce_range = true))] first_vertex: u32,
#[webidl(default = 0, options(enforce_range = true))] first_instance: u32,
) -> Result<(), JsErrorBox> {
let mut encoder = self.encoder.borrow_mut();
let encoder = encoder.as_mut().ok_or_else(|| {
JsErrorBox::generic("Encoder has already been finished")
})?;
wgpu_core::command::bundle_ffi::wgpu_render_bundle_draw(
encoder,
vertex_count,
instance_count,
first_vertex,
first_instance,
);
Ok(())
}
#[required(1)]
#[undefined]
fn draw_indexed(
&self,
#[webidl(options(enforce_range = true))] index_count: u32,
#[webidl(default = 1, options(enforce_range = true))] instance_count: u32,
#[webidl(default = 0, options(enforce_range = true))] first_index: u32,
#[webidl(default = 0, options(enforce_range = true))] base_vertex: i32,
#[webidl(default = 0, options(enforce_range = true))] first_instance: u32,
) -> Result<(), JsErrorBox> {
let mut encoder = self.encoder.borrow_mut();
let encoder = encoder.as_mut().ok_or_else(|| {
JsErrorBox::generic("Encoder has already been finished")
})?;
wgpu_core::command::bundle_ffi::wgpu_render_bundle_draw_indexed(
encoder,
index_count,
instance_count,
first_index,
base_vertex,
first_instance,
);
Ok(())
}
#[required(2)]
#[undefined]
fn draw_indirect(
&self,
#[webidl] indirect_buffer: Ref<GPUBuffer>,
#[webidl(options(enforce_range = true))] indirect_offset: u64,
) -> Result<(), JsErrorBox> {
let mut encoder = self.encoder.borrow_mut();
let encoder = encoder.as_mut().ok_or_else(|| {
JsErrorBox::generic("Encoder has already been finished")
})?;
wgpu_core::command::bundle_ffi::wgpu_render_bundle_draw_indirect(
encoder,
indirect_buffer.id,
indirect_offset,
);
Ok(())
}
#[required(2)]
#[undefined]
fn draw_indexed_indirect(
&self,
#[webidl] indirect_buffer: Ref<GPUBuffer>,
#[webidl(options(enforce_range = true))] indirect_offset: u64,
) -> Result<(), JsErrorBox> {
let mut encoder = self.encoder.borrow_mut();
let encoder = encoder.as_mut().ok_or_else(|| {
JsErrorBox::generic("Encoder has already been finished")
})?;
wgpu_core::command::bundle_ffi::wgpu_render_bundle_draw_indexed_indirect(
encoder,
indirect_buffer.id,
indirect_offset,
);
Ok(())
}
}
#[derive(WebIDL)]
#[webidl(dictionary)]
pub(crate) struct GPURenderBundleEncoderDescriptor {
#[webidl(default = String::new())]
pub label: String,
pub color_formats: Vec<Nullable<GPUTextureFormat>>,
pub depth_stencil_format: Option<GPUTextureFormat>,
#[webidl(default = 1)]
#[options(enforce_range = true)]
pub sample_count: u32,
#[webidl(default = false)]
pub depth_read_only: bool,
#[webidl(default = false)]
pub stencil_read_only: bool,
}
#[derive(Debug, thiserror::Error, deno_error::JsError)]
enum SetBindGroupError {
#[class(inherit)]
#[error(transparent)]
WebIDL(#[from] WebIdlError),
#[class(inherit)]
#[error(transparent)]
Other(#[from] JsErrorBox),
}
pub struct GPURenderBundle {
pub instance: Instance,
pub id: wgpu_core::id::RenderBundleId,
pub label: String,
}
impl Drop for GPURenderBundle {
fn drop(&mut self) {
self.instance.render_bundle_drop(self.id);
}
}
impl WebIdlInterfaceConverter for GPURenderBundle {
const NAME: &'static str = "GPURenderBundle";
}
// SAFETY: we're sure this can be GCed
unsafe impl GarbageCollected for GPURenderBundle {
fn trace(&self, _visitor: &mut deno_core::v8::cppgc::Visitor) {}
fn get_name(&self) -> &'static std::ffi::CStr {
c"GPURenderBundle"
}
}
#[op2]
impl GPURenderBundle {
#[constructor]
#[cppgc]
fn constructor(_: bool) -> Result<GPURenderBundle, GPUGenericError> {
Err(GPUGenericError::InvalidConstructor)
}
#[getter]
#[string]
fn label(&self) -> String {
self.label.clone()
}
#[setter]
#[string]
fn label(&self, #[webidl] _label: String) {
// TODO(@crowlKats): no-op, needs wpgu to implement changing the label
}
}
#[derive(WebIDL)]
#[webidl(dictionary)]
pub(crate) struct GPURenderBundleDescriptor {
#[webidl(default = String::new())]
pub label: String,
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/ext/webgpu/command_encoder.rs | ext/webgpu/command_encoder.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::borrow::Cow;
use std::cell::RefCell;
use std::num::NonZero;
#[cfg(target_vendor = "apple")]
use std::sync::OnceLock;
use deno_core::GarbageCollected;
use deno_core::WebIDL;
use deno_core::cppgc::Ref;
use deno_core::op2;
use deno_core::v8;
use deno_core::webidl::IntOptions;
use deno_core::webidl::WebIdlConverter;
use deno_core::webidl::WebIdlError;
use deno_error::JsErrorBox;
use wgpu_core::command::PassChannel;
use wgpu_types::BufferAddress;
use wgpu_types::TexelCopyBufferInfo;
use crate::Instance;
use crate::buffer::GPUBuffer;
use crate::command_buffer::GPUCommandBuffer;
use crate::compute_pass::GPUComputePassEncoder;
use crate::error::GPUGenericError;
use crate::queue::GPUTexelCopyTextureInfo;
use crate::render_pass::GPULoadOp;
use crate::render_pass::GPURenderPassEncoder;
use crate::webidl::GPUExtent3D;
pub struct GPUCommandEncoder {
pub instance: Instance,
pub error_handler: super::error::ErrorHandler,
pub id: wgpu_core::id::CommandEncoderId,
pub label: String,
// Weak reference to the JS object so we can attach a finalizer.
// See `GPUDevice::create_command_encoder`.
#[cfg(target_vendor = "apple")]
pub(crate) weak: OnceLock<v8::Weak<v8::Object>>,
}
impl Drop for GPUCommandEncoder {
fn drop(&mut self) {
self.instance.command_encoder_drop(self.id);
}
}
// SAFETY: we're sure this can be GCed
unsafe impl GarbageCollected for GPUCommandEncoder {
fn trace(&self, _visitor: &mut deno_core::v8::cppgc::Visitor) {}
fn get_name(&self) -> &'static std::ffi::CStr {
c"GPUCommandEncoder"
}
}
#[op2]
impl GPUCommandEncoder {
#[constructor]
#[cppgc]
fn constructor(_: bool) -> Result<GPUCommandEncoder, GPUGenericError> {
Err(GPUGenericError::InvalidConstructor)
}
#[getter]
#[string]
fn label(&self) -> String {
self.label.clone()
}
#[setter]
#[string]
fn label(&self, #[webidl] _label: String) {
// TODO(@crowlKats): no-op, needs wpgu to implement changing the label
}
#[required(1)]
#[cppgc]
fn begin_render_pass(
&self,
#[webidl] descriptor: crate::render_pass::GPURenderPassDescriptor,
) -> Result<GPURenderPassEncoder, JsErrorBox> {
let color_attachments = Cow::Owned(
descriptor
.color_attachments
.into_iter()
.map(|attachment| {
attachment.into_option().map(|attachment| {
wgpu_core::command::RenderPassColorAttachment {
view: attachment.view.to_view_id(),
depth_slice: attachment.depth_slice,
resolve_target: attachment
.resolve_target
.map(|target| target.to_view_id()),
load_op: attachment
.load_op
.with_default_value(attachment.clear_value.map(Into::into)),
store_op: attachment.store_op.into(),
}
})
})
.collect::<Vec<_>>(),
);
let depth_stencil_attachment = descriptor
.depth_stencil_attachment
.map(|attachment| {
if attachment
.depth_load_op
.as_ref()
.is_some_and(|op| matches!(op, GPULoadOp::Clear))
&& attachment.depth_clear_value.is_none()
{
return Err(JsErrorBox::type_error(
r#"'depthClearValue' must be specified when 'depthLoadOp' is "clear""#,
));
}
Ok(wgpu_core::command::RenderPassDepthStencilAttachment {
view: attachment.view.to_view_id(),
depth: PassChannel {
load_op: attachment
.depth_load_op
.map(|load_op| load_op.with_value(attachment.depth_clear_value)),
store_op: attachment.depth_store_op.map(Into::into),
read_only: attachment.depth_read_only,
},
stencil: PassChannel {
load_op: attachment.stencil_load_op.map(|load_op| {
load_op.with_value(Some(attachment.stencil_clear_value))
}),
store_op: attachment.stencil_store_op.map(Into::into),
read_only: attachment.stencil_read_only,
},
})
})
.transpose()?;
let timestamp_writes =
descriptor.timestamp_writes.map(|timestamp_writes| {
wgpu_core::command::PassTimestampWrites {
query_set: timestamp_writes.query_set.id,
beginning_of_pass_write_index: timestamp_writes
.beginning_of_pass_write_index,
end_of_pass_write_index: timestamp_writes.end_of_pass_write_index,
}
});
let wgpu_descriptor = wgpu_core::command::RenderPassDescriptor {
label: crate::transform_label(descriptor.label.clone()),
color_attachments,
depth_stencil_attachment: depth_stencil_attachment.as_ref(),
timestamp_writes: timestamp_writes.as_ref(),
occlusion_query_set: descriptor
.occlusion_query_set
.map(|query_set| query_set.id),
multiview_mask: NonZero::new(descriptor.multiview_mask),
};
let (render_pass, err) = self
.instance
.command_encoder_begin_render_pass(self.id, &wgpu_descriptor);
self.error_handler.push_error(err);
Ok(GPURenderPassEncoder {
instance: self.instance.clone(),
error_handler: self.error_handler.clone(),
render_pass: RefCell::new(render_pass),
label: descriptor.label,
})
}
#[cppgc]
fn begin_compute_pass(
&self,
#[webidl] descriptor: crate::compute_pass::GPUComputePassDescriptor,
) -> GPUComputePassEncoder {
let timestamp_writes =
descriptor.timestamp_writes.map(|timestamp_writes| {
wgpu_core::command::PassTimestampWrites {
query_set: timestamp_writes.query_set.id,
beginning_of_pass_write_index: timestamp_writes
.beginning_of_pass_write_index,
end_of_pass_write_index: timestamp_writes.end_of_pass_write_index,
}
});
let wgpu_descriptor = wgpu_core::command::ComputePassDescriptor {
label: crate::transform_label(descriptor.label.clone()),
timestamp_writes,
};
let (compute_pass, err) = self
.instance
.command_encoder_begin_compute_pass(self.id, &wgpu_descriptor);
self.error_handler.push_error(err);
GPUComputePassEncoder {
instance: self.instance.clone(),
error_handler: self.error_handler.clone(),
compute_pass: RefCell::new(compute_pass),
label: descriptor.label,
}
}
  // `GPUCommandEncoder.copyBufferToBuffer()`: records a buffer-to-buffer copy.
  // The WebIDL overload resolution is implemented by hand because the method
  // has two overloads:
  //   (source, destination, size?)                                   3-arg
  //   (source, sourceOffset, destination, destinationOffset, size?)  5-arg
  // so arguments 2-5 are taken as raw v8 values and converted per overload.
  #[required(2)]
  #[undefined]
  fn copy_buffer_to_buffer<'a>(
    &self,
    scope: &mut v8::PinScope<'a, '_>,
    #[webidl] source: Ref<GPUBuffer>,
    arg2: v8::Local<'a, v8::Value>,
    arg3: v8::Local<'a, v8::Value>,
    arg4: v8::Local<'a, v8::Value>,
    arg5: v8::Local<'a, v8::Value>,
  ) -> Result<(), WebIdlError> {
    let prefix = "Failed to execute 'GPUCommandEncoder.copyBufferToBuffer'";
    // Integer conversion policy for all offset/size arguments:
    // [EnforceRange] u64, no clamping.
    let int_options = IntOptions {
      clamp: false,
      enforce_range: true,
    };
    let source_offset: BufferAddress;
    let destination: Ref<GPUBuffer>;
    let destination_offset: BufferAddress;
    let size: Option<BufferAddress>;
    // Note that the last argument to either overload of `copy_buffer_to_buffer`
    // is optional, so `arg5.is_undefined()` would not work here.
    if arg4.is_undefined() {
      // 3-argument overload
      // Offsets default to 0; arg2 is `destination`, arg3 the optional `size`.
      source_offset = 0;
      destination = Ref::<GPUBuffer>::convert(
        scope,
        arg2,
        Cow::Borrowed(prefix),
        (|| Cow::Borrowed("destination")).into(),
        &(),
      )?;
      destination_offset = 0;
      size = <Option<u64>>::convert(
        scope,
        arg3,
        Cow::Borrowed(prefix),
        (|| Cow::Borrowed("size")).into(),
        &int_options,
      )?;
    } else {
      // 5-argument overload
      source_offset = u64::convert(
        scope,
        arg2,
        Cow::Borrowed(prefix),
        (|| Cow::Borrowed("sourceOffset")).into(),
        &int_options,
      )?;
      destination = Ref::<GPUBuffer>::convert(
        scope,
        arg3,
        Cow::Borrowed(prefix),
        (|| Cow::Borrowed("destination")).into(),
        &(),
      )?;
      destination_offset = u64::convert(
        scope,
        arg4,
        Cow::Borrowed(prefix),
        (|| Cow::Borrowed("destinationOffset")).into(),
        &int_options,
      )?;
      size = <Option<u64>>::convert(
        scope,
        arg5,
        Cow::Borrowed(prefix),
        (|| Cow::Borrowed("size")).into(),
        &int_options,
      )?;
    }
    // Record the copy; validation errors surface through the error handler,
    // not as a thrown exception (only WebIDL conversion errors throw above).
    let err = self
      .instance
      .command_encoder_copy_buffer_to_buffer(
        self.id,
        source.id,
        source_offset,
        destination.id,
        destination_offset,
        size,
      )
      .err();
    self.error_handler.push_error(err);
    Ok(())
  }
#[required(3)]
#[undefined]
fn copy_buffer_to_texture(
&self,
#[webidl] source: GPUTexelCopyBufferInfo,
#[webidl] destination: GPUTexelCopyTextureInfo,
#[webidl] copy_size: GPUExtent3D,
) {
let source = TexelCopyBufferInfo {
buffer: source.buffer.id,
layout: wgpu_types::TexelCopyBufferLayout {
offset: source.offset,
bytes_per_row: source.bytes_per_row,
rows_per_image: source.rows_per_image,
},
};
let destination = wgpu_types::TexelCopyTextureInfo {
texture: destination.texture.id,
mip_level: destination.mip_level,
origin: destination.origin.into(),
aspect: destination.aspect.into(),
};
let err = self
.instance
.command_encoder_copy_buffer_to_texture(
self.id,
&source,
&destination,
©_size.into(),
)
.err();
self.error_handler.push_error(err);
}
#[required(3)]
#[undefined]
fn copy_texture_to_buffer(
&self,
#[webidl] source: GPUTexelCopyTextureInfo,
#[webidl] destination: GPUTexelCopyBufferInfo,
#[webidl] copy_size: GPUExtent3D,
) {
let source = wgpu_types::TexelCopyTextureInfo {
texture: source.texture.id,
mip_level: source.mip_level,
origin: source.origin.into(),
aspect: source.aspect.into(),
};
let destination = TexelCopyBufferInfo {
buffer: destination.buffer.id,
layout: wgpu_types::TexelCopyBufferLayout {
offset: destination.offset,
bytes_per_row: destination.bytes_per_row,
rows_per_image: destination.rows_per_image,
},
};
let err = self
.instance
.command_encoder_copy_texture_to_buffer(
self.id,
&source,
&destination,
©_size.into(),
)
.err();
self.error_handler.push_error(err);
}
#[required(3)]
#[undefined]
fn copy_texture_to_texture(
&self,
#[webidl] source: GPUTexelCopyTextureInfo,
#[webidl] destination: GPUTexelCopyTextureInfo,
#[webidl] copy_size: GPUExtent3D,
) {
let source = wgpu_types::TexelCopyTextureInfo {
texture: source.texture.id,
mip_level: source.mip_level,
origin: source.origin.into(),
aspect: source.aspect.into(),
};
let destination = wgpu_types::TexelCopyTextureInfo {
texture: destination.texture.id,
mip_level: destination.mip_level,
origin: destination.origin.into(),
aspect: destination.aspect.into(),
};
let err = self
.instance
.command_encoder_copy_texture_to_texture(
self.id,
&source,
&destination,
©_size.into(),
)
.err();
self.error_handler.push_error(err);
}
  // `GPUCommandEncoder.clearBuffer()`: records zeroing of a buffer region.
  // `size = None` means "to the end of the buffer" (the WebIDL optional).
  #[required(1)]
  #[undefined]
  fn clear_buffer(
    &self,
    #[webidl] buffer: Ref<GPUBuffer>,
    #[webidl(default = 0, options(enforce_range = true))] offset: u64,
    #[webidl(options(enforce_range = true))] size: Option<u64>,
  ) {
    let err = self
      .instance
      .command_encoder_clear_buffer(self.id, buffer.id, offset, size)
      .err();
    self.error_handler.push_error(err);
  }
  // `GPUCommandEncoder.resolveQuerySet()`: records resolving `query_count`
  // query results starting at `first_query` into `destination` at
  // `destination_offset`.
  #[required(5)]
  #[undefined]
  fn resolve_query_set(
    &self,
    #[webidl] query_set: Ref<super::query_set::GPUQuerySet>,
    #[webidl(options(enforce_range = true))] first_query: u32,
    #[webidl(options(enforce_range = true))] query_count: u32,
    #[webidl] destination: Ref<GPUBuffer>,
    #[webidl(options(enforce_range = true))] destination_offset: u64,
  ) {
    let err = self
      .instance
      .command_encoder_resolve_query_set(
        self.id,
        query_set.id,
        first_query,
        query_count,
        destination.id,
        destination_offset,
      )
      .err();
    self.error_handler.push_error(err);
  }
  // `GPUCommandEncoder.finish()`: finalizes the recorded commands into a
  // `GPUCommandBuffer`. Any error produced during finish is reported through
  // the device error handler; a command-buffer wrapper is returned either way.
  #[cppgc]
  fn finish(
    &self,
    #[webidl] descriptor: crate::command_buffer::GPUCommandBufferDescriptor,
  ) -> GPUCommandBuffer {
    let wgpu_descriptor = wgpu_types::CommandBufferDescriptor {
      label: crate::transform_label(descriptor.label.clone()),
    };
    let (id, opt_label_and_err) =
      self
        .instance
        .command_encoder_finish(self.id, &wgpu_descriptor, None);
    // wgpu returns an optional (label, error) pair; only the error part is
    // forwarded to the error handler.
    self
      .error_handler
      .push_error(opt_label_and_err.map(|(_label, err)| err));
    GPUCommandBuffer {
      instance: self.instance.clone(),
      id,
      label: descriptor.label,
    }
  }
  // `GPUCommandEncoder.pushDebugGroup()`: opens a labeled debug group in the
  // command stream (for tooling/captures).
  fn push_debug_group(&self, #[webidl] group_label: String) {
    let err = self
      .instance
      .command_encoder_push_debug_group(self.id, &group_label)
      .err();
    self.error_handler.push_error(err);
  }
  // `GPUCommandEncoder.popDebugGroup()`: closes the most recent debug group.
  #[fast]
  fn pop_debug_group(&self) {
    let err = self.instance.command_encoder_pop_debug_group(self.id).err();
    self.error_handler.push_error(err);
  }
  // `GPUCommandEncoder.insertDebugMarker()`: inserts a single labeled marker
  // into the command stream.
  fn insert_debug_marker(&self, #[webidl] marker_label: String) {
    let err = self
      .instance
      .command_encoder_insert_debug_marker(self.id, &marker_label)
      .err();
    self.error_handler.push_error(err);
  }
}
/// WebIDL dictionary for `GPUCommandEncoderDescriptor` (only a label).
#[derive(WebIDL)]
#[webidl(dictionary)]
pub(crate) struct GPUCommandEncoderDescriptor {
  #[webidl(default = String::new())]
  pub label: String,
}
/// WebIDL dictionary for `GPUTexelCopyBufferInfo`: a buffer plus the texel
/// data layout (offset / bytesPerRow / rowsPerImage) used by the
/// buffer<->texture copy methods.
#[derive(WebIDL)]
#[webidl(dictionary)]
pub(crate) struct GPUTexelCopyBufferInfo {
  pub buffer: Ref<GPUBuffer>,
  #[webidl(default = 0)]
  #[options(enforce_range = true)]
  offset: u64,
  #[options(enforce_range = true)]
  bytes_per_row: Option<u32>,
  #[options(enforce_range = true)]
  rows_per_image: Option<u32>,
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/ext/webgpu/buffer.rs | ext/webgpu/buffer.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::cell::RefCell;
use std::rc::Rc;
use std::time::Duration;
use deno_core::GarbageCollected;
use deno_core::WebIDL;
use deno_core::futures::channel::oneshot;
use deno_core::op2;
use deno_core::v8;
use deno_core::webidl::WebIdlInterfaceConverter;
use deno_error::JsErrorBox;
use wgpu_core::device::HostMap as MapMode;
use crate::Instance;
use crate::error::GPUGenericError;
/// WebIDL dictionary for `GPUBufferDescriptor`.
#[derive(WebIDL)]
#[webidl(dictionary)]
pub(crate) struct GPUBufferDescriptor {
  #[webidl(default = String::new())]
  pub label: String,
  // Size in bytes.
  pub size: u64,
  // `GPUBufferUsage` bitflags.
  #[options(enforce_range = true)]
  pub usage: u32,
  #[webidl(default = false)]
  pub mapped_at_creation: bool,
}
/// Errors surfaced by `GPUBuffer` operations (mapping and friends).
#[derive(Debug, thiserror::Error, deno_error::JsError)]
pub enum BufferError {
  // The map-async oneshot channel was dropped without a result.
  #[class(generic)]
  #[error(transparent)]
  Canceled(#[from] oneshot::Canceled),
  // wgpu-side buffer access failure; exposed as DOMException OperationError.
  #[class("DOMExceptionOperationError")]
  #[error(transparent)]
  Access(#[from] wgpu_core::resource::BufferAccessError),
  // Static-message operation errors (e.g. invalid map mode).
  #[class("DOMExceptionOperationError")]
  #[error("{0}")]
  Operation(&'static str),
  #[class(inherit)]
  #[error(transparent)]
  Other(#[from] JsErrorBox),
}
/// cppgc-backed wrapper for a WebGPU `GPUBuffer`.
pub struct GPUBuffer {
  pub instance: Instance,
  pub error_handler: super::error::ErrorHandler,
  pub id: wgpu_core::id::BufferId,
  // Owning device id, needed for polling during mapAsync.
  pub device: wgpu_core::id::DeviceId,
  pub label: String,
  pub size: u64,
  pub usage: u32,
  // One of "unmapped" / "pending" / "mapped" (the JS `mapState` string).
  pub map_state: RefCell<&'static str>,
  // Mode of the current mapping, if any.
  pub map_mode: RefCell<Option<MapMode>>,
  // ArrayBuffers handed out by getMappedRange for WRITE mappings; detached
  // on unmap.
  pub mapped_js_buffers: RefCell<Vec<v8::Global<v8::ArrayBuffer>>>,
}
impl Drop for GPUBuffer {
  fn drop(&mut self) {
    // Release the wgpu-core buffer when the JS wrapper is collected.
    self.instance.buffer_drop(self.id);
  }
}
impl WebIdlInterfaceConverter for GPUBuffer {
  const NAME: &'static str = "GPUBuffer";
}
// SAFETY: we're sure this can be GCed
unsafe impl GarbageCollected for GPUBuffer {
  fn trace(&self, _visitor: &mut deno_core::v8::cppgc::Visitor) {}
  fn get_name(&self) -> &'static std::ffi::CStr {
    c"GPUBuffer"
  }
}
#[op2]
impl GPUBuffer {
  // GPUBuffer is not constructible from JS; only returned by createBuffer.
  #[constructor]
  #[cppgc]
  fn constructor(_: bool) -> Result<GPUBuffer, GPUGenericError> {
    Err(GPUGenericError::InvalidConstructor)
  }
  #[getter]
  #[string]
  fn label(&self) -> String {
    self.label.clone()
  }
  #[setter]
  #[string]
  fn label(&self, #[webidl] _label: String) {
    // TODO(@crowlKats): no-op, needs wpgu to implement changing the label
  }
  #[getter]
  #[number]
  fn size(&self) -> u64 {
    self.size
  }
  #[getter]
  fn usage(&self) -> u32 {
    self.usage
  }
  #[getter]
  #[string]
  fn map_state(&self) -> &'static str {
    *self.map_state.borrow()
  }
  // In the successful case, the promise should resolve to undefined, but
  // `#[undefined]` does not seem to work here.
  // https://github.com/denoland/deno/issues/29603
  //
  // `GPUBuffer.mapAsync()`: requests READ (0x0001) or WRITE (0x0002) mapping
  // of a buffer range and resolves once the mapping is ready.
  #[async_method]
  async fn map_async(
    &self,
    #[webidl(options(enforce_range = true))] mode: u32,
    #[webidl(default = 0)] offset: u64,
    #[webidl] size: Option<u64>,
  ) -> Result<(), BufferError> {
    // Exactly one of the READ/WRITE bits must be set.
    let read_mode = (mode & 0x0001) == 0x0001;
    let write_mode = (mode & 0x0002) == 0x0002;
    if (read_mode && write_mode) || (!read_mode && !write_mode) {
      return Err(BufferError::Operation(
        "exactly one of READ or WRITE map mode must be set",
      ));
    }
    let mode = if read_mode {
      MapMode::Read
    } else {
      assert!(write_mode);
      MapMode::Write
    };
    {
      *self.map_state.borrow_mut() = "pending";
    }
    // wgpu reports completion via a callback; bridge it to async with a
    // oneshot channel.
    let (sender, receiver) =
      oneshot::channel::<wgpu_core::resource::BufferAccessResult>();
    {
      let callback = Box::new(move |status| {
        sender.send(status).unwrap();
      });
      let err = self
        .instance
        .buffer_map_async(
          self.id,
          offset,
          size,
          wgpu_core::resource::BufferMapOperation {
            host: mode,
            callback: Some(callback),
          },
        )
        .err();
      // Immediate validation failure: report it and reject without waiting.
      if err.is_some() {
        self.error_handler.push_error(err);
        return Err(BufferError::Operation("validation error occurred"));
      }
    }
    // Poll the device in a loop so the map callback actually fires, until
    // the receiver side flips `done`.
    let done = Rc::new(RefCell::new(false));
    let done_ = done.clone();
    let device_poll_fut = async move {
      while !*done.borrow() {
        {
          self
            .instance
            .device_poll(self.device, wgpu_types::PollType::wait_indefinitely())
            .unwrap();
        }
        tokio::time::sleep(Duration::from_millis(10)).await;
      }
      Ok::<(), BufferError>(())
    };
    let receiver_fut = async move {
      // First `?`: channel canceled; second `?`: buffer access error.
      receiver.await??;
      let mut done = done_.borrow_mut();
      *done = true;
      Ok::<(), BufferError>(())
    };
    tokio::try_join!(device_poll_fut, receiver_fut)?;
    *self.map_state.borrow_mut() = "mapped";
    *self.map_mode.borrow_mut() = Some(mode);
    Ok(())
  }
  // `GPUBuffer.getMappedRange()`: returns an ArrayBuffer over the mapped
  // range. WRITE mappings alias the wgpu memory directly (zero-copy, tracked
  // for detachment on unmap); READ mappings return a copied snapshot.
  fn get_mapped_range<'s>(
    &self,
    scope: &mut v8::PinScope<'s, '_>,
    #[webidl(default = 0)] offset: u64,
    #[webidl] size: Option<u64>,
  ) -> Result<v8::Local<'s, v8::ArrayBuffer>, BufferError> {
    let (slice_pointer, range_size) = self
      .instance
      .buffer_get_mapped_range(self.id, offset, size)
      .map_err(BufferError::Access)?;
    let mode = self.map_mode.borrow();
    let mode = mode.as_ref().unwrap();
    let bs = if mode == &MapMode::Write {
      // The backing store points into memory owned by wgpu, so freeing is a
      // no-op on the V8 side.
      unsafe extern "C" fn noop_deleter_callback(
        _data: *mut std::ffi::c_void,
        _byte_length: usize,
        _deleter_data: *mut std::ffi::c_void,
      ) {
      }
      // SAFETY: creating a backing store from the pointer and length provided by wgpu
      unsafe {
        v8::ArrayBuffer::new_backing_store_from_ptr(
          slice_pointer.as_ptr() as _,
          range_size as usize,
          noop_deleter_callback,
          std::ptr::null_mut(),
        )
      }
    } else {
      // SAFETY: creating a vector from the pointer and length provided by wgpu
      let slice = unsafe {
        std::slice::from_raw_parts(slice_pointer.as_ptr(), range_size as usize)
      };
      v8::ArrayBuffer::new_backing_store_from_vec(slice.to_vec())
    };
    let shared_bs = bs.make_shared();
    let ab = v8::ArrayBuffer::with_backing_store(scope, &shared_bs);
    if mode == &MapMode::Write {
      // Remember write-mapped buffers so unmap() can detach them.
      self
        .mapped_js_buffers
        .borrow_mut()
        .push(v8::Global::new(scope, ab));
    }
    Ok(ab)
  }
  // `GPUBuffer.unmap()`: detaches all handed-out write-mapped ArrayBuffers
  // (their memory becomes invalid) and unmaps the buffer in wgpu.
  #[nofast]
  #[undefined]
  fn unmap(&self, scope: &mut v8::PinScope<'_, '_>) -> Result<(), BufferError> {
    for ab in self.mapped_js_buffers.replace(vec![]) {
      let ab = ab.open(scope);
      ab.detach(None);
    }
    self
      .instance
      .buffer_unmap(self.id)
      .map_err(BufferError::Access)?;
    *self.map_state.borrow_mut() = "unmapped";
    Ok(())
  }
  // `GPUBuffer.destroy()`: releases the GPU memory immediately.
  #[fast]
  #[undefined]
  fn destroy(&self) {
    self.instance.buffer_destroy(self.id);
  }
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/ext/webgpu/render_pipeline.rs | ext/webgpu/render_pipeline.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use deno_core::GarbageCollected;
use deno_core::WebIDL;
use deno_core::cppgc::Ref;
use deno_core::op2;
use deno_core::webidl::Nullable;
use deno_core::webidl::WebIdlInterfaceConverter;
use indexmap::IndexMap;
use crate::Instance;
use crate::bind_group_layout::GPUBindGroupLayout;
use crate::error::GPUGenericError;
use crate::sampler::GPUCompareFunction;
use crate::shader::GPUShaderModule;
use crate::texture::GPUTextureFormat;
use crate::webidl::GPUPipelineLayoutOrGPUAutoLayoutMode;
/// cppgc-backed wrapper for a WebGPU `GPURenderPipeline`.
pub struct GPURenderPipeline {
  pub instance: Instance,
  pub error_handler: super::error::ErrorHandler,
  pub id: wgpu_core::id::RenderPipelineId,
  pub label: String,
}
impl Drop for GPURenderPipeline {
  fn drop(&mut self) {
    // Release the wgpu-core pipeline when the JS wrapper is collected.
    self.instance.render_pipeline_drop(self.id);
  }
}
impl WebIdlInterfaceConverter for GPURenderPipeline {
  const NAME: &'static str = "GPURenderPipeline";
}
// SAFETY: we're sure this can be GCed
unsafe impl GarbageCollected for GPURenderPipeline {
  fn trace(&self, _visitor: &mut deno_core::v8::cppgc::Visitor) {}
  fn get_name(&self) -> &'static std::ffi::CStr {
    c"GPURenderPipeline"
  }
}
#[op2]
impl GPURenderPipeline {
  // Not constructible from JS; only returned by createRenderPipeline.
  #[constructor]
  #[cppgc]
  fn constructor(_: bool) -> Result<GPURenderPipeline, GPUGenericError> {
    Err(GPUGenericError::InvalidConstructor)
  }
  #[getter]
  #[string]
  fn label(&self) -> String {
    self.label.clone()
  }
  #[setter]
  #[string]
  fn label(&self, #[webidl] _label: String) {
    // TODO(@crowlKats): no-op, needs wpgu to implement changing the label
  }
  // `GPURenderPipeline.getBindGroupLayout()`: returns the bind group layout
  // at `index`; errors are routed through the error handler.
  #[cppgc]
  fn get_bind_group_layout(&self, #[webidl] index: u32) -> GPUBindGroupLayout {
    let (id, err) = self
      .instance
      .render_pipeline_get_bind_group_layout(self.id, index, None);
    self.error_handler.push_error(err);
    // TODO(wgpu): needs to add a way to retrieve the label
    GPUBindGroupLayout {
      instance: self.instance.clone(),
      id,
      label: "".to_string(),
    }
  }
}
/// WebIDL dictionary for `GPURenderPipelineDescriptor`.
#[derive(WebIDL)]
#[webidl(dictionary)]
pub(crate) struct GPURenderPipelineDescriptor {
  #[webidl(default = String::new())]
  pub label: String,
  pub layout: GPUPipelineLayoutOrGPUAutoLayoutMode,
  pub vertex: GPUVertexState,
  pub primitive: GPUPrimitiveState,
  pub depth_stencil: Option<GPUDepthStencilState>,
  pub multisample: GPUMultisampleState,
  pub fragment: Option<GPUFragmentState>,
}
/// WebIDL dictionary for `GPUMultisampleState`.
#[derive(WebIDL)]
#[webidl(dictionary)]
pub(crate) struct GPUMultisampleState {
  #[webidl(default = 1)]
  #[options(enforce_range = true)]
  pub count: u32,
  // Sample mask; default is all samples enabled.
  #[webidl(default = 0xFFFFFFFF)]
  #[options(enforce_range = true)]
  pub mask: u32,
  #[webidl(default = false)]
  pub alpha_to_coverage_enabled: bool,
}
/// WebIDL dictionary for `GPUDepthStencilState`.
#[derive(WebIDL)]
#[webidl(dictionary)]
pub(crate) struct GPUDepthStencilState {
  pub format: GPUTextureFormat,
  pub depth_write_enabled: Option<bool>,
  pub depth_compare: Option<GPUCompareFunction>,
  pub stencil_front: GPUStencilFaceState,
  pub stencil_back: GPUStencilFaceState,
  #[webidl(default = 0xFFFFFFFF)]
  #[options(enforce_range = true)]
  pub stencil_read_mask: u32,
  #[webidl(default = 0xFFFFFFFF)]
  #[options(enforce_range = true)]
  pub stencil_write_mask: u32,
  #[webidl(default = 0)]
  #[options(enforce_range = true)]
  pub depth_bias: i32,
  #[webidl(default = 0.0)]
  pub depth_bias_slope_scale: f32,
  #[webidl(default = 0.0)]
  pub depth_bias_clamp: f32,
}
/// WebIDL dictionary for `GPUStencilFaceState`.
#[derive(WebIDL)]
#[webidl(dictionary)]
pub(crate) struct GPUStencilFaceState {
  #[webidl(default = GPUCompareFunction::Always)]
  pub compare: GPUCompareFunction,
  #[webidl(default = GPUStencilOperation::Keep)]
  pub fail_op: GPUStencilOperation,
  #[webidl(default = GPUStencilOperation::Keep)]
  pub depth_fail_op: GPUStencilOperation,
  #[webidl(default = GPUStencilOperation::Keep)]
  pub pass_op: GPUStencilOperation,
}
/// WebIDL enum `GPUStencilOperation`.
#[derive(WebIDL)]
#[webidl(enum)]
pub(crate) enum GPUStencilOperation {
  Keep,
  Zero,
  Replace,
  Invert,
  IncrementClamp,
  DecrementClamp,
  IncrementWrap,
  DecrementWrap,
}
// One-to-one mapping of the WebIDL enum onto the wgpu type.
impl From<GPUStencilOperation> for wgpu_types::StencilOperation {
  fn from(value: GPUStencilOperation) -> Self {
    match value {
      GPUStencilOperation::Keep => Self::Keep,
      GPUStencilOperation::Zero => Self::Zero,
      GPUStencilOperation::Replace => Self::Replace,
      GPUStencilOperation::Invert => Self::Invert,
      GPUStencilOperation::IncrementClamp => Self::IncrementClamp,
      GPUStencilOperation::DecrementClamp => Self::DecrementClamp,
      GPUStencilOperation::IncrementWrap => Self::IncrementWrap,
      GPUStencilOperation::DecrementWrap => Self::DecrementWrap,
    }
  }
}
/// WebIDL dictionary for `GPUVertexState`.
#[derive(WebIDL)]
#[webidl(dictionary)]
pub(crate) struct GPUVertexState {
  pub module: Ref<GPUShaderModule>,
  pub entry_point: Option<String>,
  // Pipeline-overridable constants, keyed by name.
  #[webidl(default = Default::default())]
  pub constants: IndexMap<String, f64>,
  #[webidl(default = vec![])]
  pub buffers: Vec<Nullable<GPUVertexBufferLayout>>,
}
/// WebIDL dictionary for `GPUFragmentState`.
#[derive(WebIDL)]
#[webidl(dictionary)]
pub(crate) struct GPUFragmentState {
  pub module: Ref<GPUShaderModule>,
  pub entry_point: Option<String>,
  #[webidl(default = Default::default())]
  pub constants: IndexMap<String, f64>,
  pub targets: Vec<Nullable<GPUColorTargetState>>,
}
/// WebIDL dictionary for `GPUColorTargetState`.
#[derive(WebIDL)]
#[webidl(dictionary)]
pub(crate) struct GPUColorTargetState {
  pub format: GPUTextureFormat,
  pub blend: Option<GPUBlendState>,
  // `GPUColorWrite` bitflags; default 0xF = all channels.
  #[webidl(default = 0xF)]
  #[options(enforce_range = true)]
  pub write_mask: u32,
}
/// WebIDL dictionary for `GPUBlendState`.
#[derive(WebIDL)]
#[webidl(dictionary)]
pub(crate) struct GPUBlendState {
  pub color: GPUBlendComponent,
  pub alpha: GPUBlendComponent,
}
/// WebIDL dictionary for `GPUBlendComponent`.
#[derive(WebIDL)]
#[webidl(dictionary)]
pub(crate) struct GPUBlendComponent {
  #[webidl(default = GPUBlendOperation::Add)]
  pub operation: GPUBlendOperation,
  #[webidl(default = GPUBlendFactor::One)]
  pub src_factor: GPUBlendFactor,
  #[webidl(default = GPUBlendFactor::Zero)]
  pub dst_factor: GPUBlendFactor,
}
/// WebIDL enum `GPUBlendOperation`.
#[derive(WebIDL)]
#[webidl(enum)]
pub(crate) enum GPUBlendOperation {
  Add,
  Subtract,
  ReverseSubtract,
  Min,
  Max,
}
// One-to-one mapping of the WebIDL enum onto the wgpu type.
impl From<GPUBlendOperation> for wgpu_types::BlendOperation {
  fn from(value: GPUBlendOperation) -> Self {
    match value {
      GPUBlendOperation::Add => Self::Add,
      GPUBlendOperation::Subtract => Self::Subtract,
      GPUBlendOperation::ReverseSubtract => Self::ReverseSubtract,
      GPUBlendOperation::Min => Self::Min,
      GPUBlendOperation::Max => Self::Max,
    }
  }
}
/// WebIDL enum `GPUBlendFactor`; renames map variant names onto the spec's
/// hyphenated string values.
#[derive(WebIDL)]
#[webidl(enum)]
pub(crate) enum GPUBlendFactor {
  #[webidl(rename = "zero")]
  Zero,
  #[webidl(rename = "one")]
  One,
  #[webidl(rename = "src")]
  Src,
  #[webidl(rename = "one-minus-src")]
  OneMinusSrc,
  #[webidl(rename = "src-alpha")]
  SrcAlpha,
  #[webidl(rename = "one-minus-src-alpha")]
  OneMinusSrcAlpha,
  #[webidl(rename = "dst")]
  Dst,
  #[webidl(rename = "one-minus-dst")]
  OneMinusDst,
  #[webidl(rename = "dst-alpha")]
  DstAlpha,
  #[webidl(rename = "one-minus-dst-alpha")]
  OneMinusDstAlpha,
  #[webidl(rename = "src-alpha-saturated")]
  SrcAlphaSaturated,
  #[webidl(rename = "constant")]
  Constant,
  #[webidl(rename = "one-minus-constant")]
  OneMinusConstant,
  #[webidl(rename = "src1")]
  Src1,
  #[webidl(rename = "one-minus-src1")]
  OneMinusSrc1,
  #[webidl(rename = "src1-alpha")]
  Src1Alpha,
  #[webidl(rename = "one-minus-src1-alpha")]
  OneMinusSrc1Alpha,
}
// One-to-one mapping of the WebIDL enum onto the wgpu type.
impl From<GPUBlendFactor> for wgpu_types::BlendFactor {
  fn from(value: GPUBlendFactor) -> Self {
    match value {
      GPUBlendFactor::Zero => Self::Zero,
      GPUBlendFactor::One => Self::One,
      GPUBlendFactor::Src => Self::Src,
      GPUBlendFactor::OneMinusSrc => Self::OneMinusSrc,
      GPUBlendFactor::SrcAlpha => Self::SrcAlpha,
      GPUBlendFactor::OneMinusSrcAlpha => Self::OneMinusSrcAlpha,
      GPUBlendFactor::Dst => Self::Dst,
      GPUBlendFactor::OneMinusDst => Self::OneMinusDst,
      GPUBlendFactor::DstAlpha => Self::DstAlpha,
      GPUBlendFactor::OneMinusDstAlpha => Self::OneMinusDstAlpha,
      GPUBlendFactor::SrcAlphaSaturated => Self::SrcAlphaSaturated,
      GPUBlendFactor::Constant => Self::Constant,
      GPUBlendFactor::OneMinusConstant => Self::OneMinusConstant,
      GPUBlendFactor::Src1 => Self::Src1,
      GPUBlendFactor::OneMinusSrc1 => Self::OneMinusSrc1,
      GPUBlendFactor::Src1Alpha => Self::Src1Alpha,
      GPUBlendFactor::OneMinusSrc1Alpha => Self::OneMinusSrc1Alpha,
    }
  }
}
/// WebIDL dictionary for `GPUPrimitiveState`.
#[derive(WebIDL)]
#[webidl(dictionary)]
pub(crate) struct GPUPrimitiveState {
  #[webidl(default = GPUPrimitiveTopology::TriangleList)]
  pub topology: GPUPrimitiveTopology,
  pub strip_index_format: Option<GPUIndexFormat>,
  #[webidl(default = GPUFrontFace::Ccw)]
  pub front_face: GPUFrontFace,
  #[webidl(default = GPUCullMode::None)]
  pub cull_mode: GPUCullMode,
  #[webidl(default = false)]
  pub unclipped_depth: bool,
}
/// WebIDL enum `GPUPrimitiveTopology`.
#[derive(WebIDL)]
#[webidl(enum)]
pub(crate) enum GPUPrimitiveTopology {
  PointList,
  LineList,
  LineStrip,
  TriangleList,
  TriangleStrip,
}
// One-to-one mapping of the WebIDL enum onto the wgpu type.
impl From<GPUPrimitiveTopology> for wgpu_types::PrimitiveTopology {
  fn from(value: GPUPrimitiveTopology) -> Self {
    match value {
      GPUPrimitiveTopology::PointList => Self::PointList,
      GPUPrimitiveTopology::LineList => Self::LineList,
      GPUPrimitiveTopology::LineStrip => Self::LineStrip,
      GPUPrimitiveTopology::TriangleList => Self::TriangleList,
      GPUPrimitiveTopology::TriangleStrip => Self::TriangleStrip,
    }
  }
}
/// WebIDL enum `GPUIndexFormat`.
#[derive(WebIDL)]
#[webidl(enum)]
pub(crate) enum GPUIndexFormat {
  #[webidl(rename = "uint16")]
  Uint16,
  #[webidl(rename = "uint32")]
  Uint32,
}
impl From<GPUIndexFormat> for wgpu_types::IndexFormat {
  fn from(value: GPUIndexFormat) -> Self {
    match value {
      GPUIndexFormat::Uint16 => Self::Uint16,
      GPUIndexFormat::Uint32 => Self::Uint32,
    }
  }
}
/// WebIDL enum `GPUFrontFace`.
#[derive(WebIDL)]
#[webidl(enum)]
pub(crate) enum GPUFrontFace {
  Ccw,
  Cw,
}
impl From<GPUFrontFace> for wgpu_types::FrontFace {
  fn from(value: GPUFrontFace) -> Self {
    match value {
      GPUFrontFace::Ccw => Self::Ccw,
      GPUFrontFace::Cw => Self::Cw,
    }
  }
}
/// WebIDL enum `GPUCullMode`.
#[derive(WebIDL)]
#[webidl(enum)]
pub(crate) enum GPUCullMode {
  None,
  Front,
  Back,
}
// wgpu expresses "no culling" as `Option::None` rather than an enum variant.
impl From<GPUCullMode> for Option<wgpu_types::Face> {
  fn from(value: GPUCullMode) -> Self {
    match value {
      GPUCullMode::None => None,
      GPUCullMode::Front => Some(wgpu_types::Face::Front),
      GPUCullMode::Back => Some(wgpu_types::Face::Back),
    }
  }
}
/// WebIDL dictionary for `GPUVertexBufferLayout`.
#[derive(WebIDL)]
#[webidl(dictionary)]
pub(crate) struct GPUVertexBufferLayout {
  #[options(enforce_range = true)]
  pub array_stride: u64,
  #[webidl(default = GPUVertexStepMode::Vertex)]
  pub step_mode: GPUVertexStepMode,
  pub attributes: Vec<GPUVertexAttribute>,
}
/// WebIDL enum `GPUVertexStepMode`.
#[derive(WebIDL)]
#[webidl(enum)]
pub(crate) enum GPUVertexStepMode {
  Vertex,
  Instance,
}
impl From<GPUVertexStepMode> for wgpu_types::VertexStepMode {
  fn from(value: GPUVertexStepMode) -> Self {
    match value {
      GPUVertexStepMode::Vertex => Self::Vertex,
      GPUVertexStepMode::Instance => Self::Instance,
    }
  }
}
/// WebIDL dictionary for `GPUVertexAttribute`.
#[derive(WebIDL)]
#[webidl(dictionary)]
pub(crate) struct GPUVertexAttribute {
  pub format: GPUVertexFormat,
  #[options(enforce_range = true)]
  pub offset: u64,
  #[options(enforce_range = true)]
  pub shader_location: u32,
}
/// WebIDL enum `GPUVertexFormat`; renames map variant names onto the spec's
/// lowercase string values (e.g. "float32x4").
#[derive(WebIDL)]
#[webidl(enum)]
pub(crate) enum GPUVertexFormat {
  #[webidl(rename = "uint8")]
  Uint8,
  #[webidl(rename = "uint8x2")]
  Uint8x2,
  #[webidl(rename = "uint8x4")]
  Uint8x4,
  #[webidl(rename = "sint8")]
  Sint8,
  #[webidl(rename = "sint8x2")]
  Sint8x2,
  #[webidl(rename = "sint8x4")]
  Sint8x4,
  #[webidl(rename = "unorm8")]
  Unorm8,
  #[webidl(rename = "unorm8x2")]
  Unorm8x2,
  #[webidl(rename = "unorm8x4")]
  Unorm8x4,
  #[webidl(rename = "snorm8")]
  Snorm8,
  #[webidl(rename = "snorm8x2")]
  Snorm8x2,
  #[webidl(rename = "snorm8x4")]
  Snorm8x4,
  #[webidl(rename = "uint16")]
  Uint16,
  #[webidl(rename = "uint16x2")]
  Uint16x2,
  #[webidl(rename = "uint16x4")]
  Uint16x4,
  #[webidl(rename = "sint16")]
  Sint16,
  #[webidl(rename = "sint16x2")]
  Sint16x2,
  #[webidl(rename = "sint16x4")]
  Sint16x4,
  #[webidl(rename = "unorm16")]
  Unorm16,
  #[webidl(rename = "unorm16x2")]
  Unorm16x2,
  #[webidl(rename = "unorm16x4")]
  Unorm16x4,
  #[webidl(rename = "snorm16")]
  Snorm16,
  #[webidl(rename = "snorm16x2")]
  Snorm16x2,
  #[webidl(rename = "snorm16x4")]
  Snorm16x4,
  #[webidl(rename = "float16")]
  Float16,
  #[webidl(rename = "float16x2")]
  Float16x2,
  #[webidl(rename = "float16x4")]
  Float16x4,
  #[webidl(rename = "float32")]
  Float32,
  #[webidl(rename = "float32x2")]
  Float32x2,
  #[webidl(rename = "float32x3")]
  Float32x3,
  #[webidl(rename = "float32x4")]
  Float32x4,
  #[webidl(rename = "uint32")]
  Uint32,
  #[webidl(rename = "uint32x2")]
  Uint32x2,
  #[webidl(rename = "uint32x3")]
  Uint32x3,
  #[webidl(rename = "uint32x4")]
  Uint32x4,
  #[webidl(rename = "sint32")]
  Sint32,
  #[webidl(rename = "sint32x2")]
  Sint32x2,
  #[webidl(rename = "sint32x3")]
  Sint32x3,
  #[webidl(rename = "sint32x4")]
  Sint32x4,
  #[webidl(rename = "unorm10-10-10-2")]
  Unorm1010102,
  #[webidl(rename = "unorm8x4-bgra")]
  Unorm8x4Bgra,
}
// One-to-one mapping of the WebIDL enum onto the wgpu type.
impl From<GPUVertexFormat> for wgpu_types::VertexFormat {
  fn from(value: GPUVertexFormat) -> Self {
    match value {
      GPUVertexFormat::Uint8 => Self::Uint8,
      GPUVertexFormat::Uint8x2 => Self::Uint8x2,
      GPUVertexFormat::Uint8x4 => Self::Uint8x4,
      GPUVertexFormat::Sint8 => Self::Sint8,
      GPUVertexFormat::Sint8x2 => Self::Sint8x2,
      GPUVertexFormat::Sint8x4 => Self::Sint8x4,
      GPUVertexFormat::Unorm8 => Self::Unorm8,
      GPUVertexFormat::Unorm8x2 => Self::Unorm8x2,
      GPUVertexFormat::Unorm8x4 => Self::Unorm8x4,
      GPUVertexFormat::Snorm8 => Self::Snorm8,
      GPUVertexFormat::Snorm8x2 => Self::Snorm8x2,
      GPUVertexFormat::Snorm8x4 => Self::Snorm8x4,
      GPUVertexFormat::Uint16 => Self::Uint16,
      GPUVertexFormat::Uint16x2 => Self::Uint16x2,
      GPUVertexFormat::Uint16x4 => Self::Uint16x4,
      GPUVertexFormat::Sint16 => Self::Sint16,
      GPUVertexFormat::Sint16x2 => Self::Sint16x2,
      GPUVertexFormat::Sint16x4 => Self::Sint16x4,
      GPUVertexFormat::Unorm16 => Self::Unorm16,
      GPUVertexFormat::Unorm16x2 => Self::Unorm16x2,
      GPUVertexFormat::Unorm16x4 => Self::Unorm16x4,
      GPUVertexFormat::Snorm16 => Self::Snorm16,
      GPUVertexFormat::Snorm16x2 => Self::Snorm16x2,
      GPUVertexFormat::Snorm16x4 => Self::Snorm16x4,
      GPUVertexFormat::Float16 => Self::Float16,
      GPUVertexFormat::Float16x2 => Self::Float16x2,
      GPUVertexFormat::Float16x4 => Self::Float16x4,
      GPUVertexFormat::Float32 => Self::Float32,
      GPUVertexFormat::Float32x2 => Self::Float32x2,
      GPUVertexFormat::Float32x3 => Self::Float32x3,
      GPUVertexFormat::Float32x4 => Self::Float32x4,
      GPUVertexFormat::Uint32 => Self::Uint32,
      GPUVertexFormat::Uint32x2 => Self::Uint32x2,
      GPUVertexFormat::Uint32x3 => Self::Uint32x3,
      GPUVertexFormat::Uint32x4 => Self::Uint32x4,
      GPUVertexFormat::Sint32 => Self::Sint32,
      GPUVertexFormat::Sint32x2 => Self::Sint32x2,
      GPUVertexFormat::Sint32x3 => Self::Sint32x3,
      GPUVertexFormat::Sint32x4 => Self::Sint32x4,
      GPUVertexFormat::Unorm1010102 => Self::Unorm10_10_10_2,
      GPUVertexFormat::Unorm8x4Bgra => Self::Unorm8x4Bgra,
    }
  }
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/ext/webgpu/shader.rs | ext/webgpu/shader.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use deno_core::GarbageCollected;
use deno_core::WebIDL;
use deno_core::cppgc::make_cppgc_object;
use deno_core::op2;
use deno_core::v8;
use deno_core::webidl::WebIdlInterfaceConverter;
use wgpu_core::pipeline;
use crate::Instance;
use crate::error::GPUGenericError;
/// cppgc-backed wrapper for a WebGPU `GPUShaderModule`.
pub struct GPUShaderModule {
  pub instance: Instance,
  pub id: wgpu_core::id::ShaderModuleId,
  pub label: String,
  // Pre-built GPUCompilationInfo object, resolved by getCompilationInfo().
  pub compilation_info: v8::Global<v8::Object>,
}
impl Drop for GPUShaderModule {
  fn drop(&mut self) {
    // Release the wgpu-core module when the JS wrapper is collected.
    self.instance.shader_module_drop(self.id);
  }
}
impl WebIdlInterfaceConverter for GPUShaderModule {
  const NAME: &'static str = "GPUShaderModule";
}
// SAFETY: we're sure this can be GCed
unsafe impl GarbageCollected for GPUShaderModule {
  fn trace(&self, _visitor: &mut deno_core::v8::cppgc::Visitor) {}
  fn get_name(&self) -> &'static std::ffi::CStr {
    c"GPUShaderModule"
  }
}
#[op2]
impl GPUShaderModule {
  // Not constructible from JS; only returned by createShaderModule.
  #[constructor]
  #[cppgc]
  fn constructor(_: bool) -> Result<GPUShaderModule, GPUGenericError> {
    Err(GPUGenericError::InvalidConstructor)
  }
  #[getter]
  #[string]
  fn label(&self) -> String {
    self.label.clone()
  }
  #[setter]
  #[string]
  fn label(&self, #[webidl] _label: String) {
    // TODO(@crowlKats): no-op, needs wpgu to implement changing the label
  }
  // `GPUShaderModule.getCompilationInfo()`: returns a promise already
  // resolved with the compilation info captured at module creation.
  fn get_compilation_info<'a>(
    &self,
    scope: &mut v8::PinScope<'a, '_>,
  ) -> v8::Local<'a, v8::Promise> {
    let resolver = v8::PromiseResolver::new(scope).unwrap();
    let info = v8::Local::new(scope, self.compilation_info.clone());
    resolver.resolve(scope, info.into()).unwrap();
    resolver.get_promise(scope)
  }
}
/// WebIDL dictionary for `GPUShaderModuleDescriptor` (label + WGSL source).
#[derive(WebIDL)]
#[webidl(dictionary)]
pub(crate) struct GPUShaderModuleDescriptor {
  #[webidl(default = String::new())]
  pub label: String,
  pub code: String,
}
/// One diagnostic produced while compiling a shader module; positions are
/// expressed in UTF-16 code units as required by the WebGPU spec's JS API.
pub struct GPUCompilationMessage {
  message: String,
  r#type: GPUCompilationMessageType,
  // 1-based line number; 0 when no source location is available.
  line_num: u64,
  // 1-based UTF-16 column; 0 when no source location is available.
  line_pos: u64,
  offset: u64,
  length: u64,
}
// SAFETY: we're sure this can be GCed
unsafe impl GarbageCollected for GPUCompilationMessage {
  fn trace(&self, _visitor: &mut deno_core::v8::cppgc::Visitor) {}
  fn get_name(&self) -> &'static std::ffi::CStr {
    c"GPUCompilationMessage"
  }
}
// Read-only getters mirroring the GPUCompilationMessage interface.
#[op2]
impl GPUCompilationMessage {
  #[getter]
  #[string]
  fn message(&self) -> String {
    self.message.clone()
  }
  // `r#type` because `type` is a Rust keyword; exposed to JS as "type".
  #[getter]
  #[string]
  #[rename("type")]
  fn r#type(&self) -> &'static str {
    self.r#type.as_str()
  }
  #[getter]
  #[number]
  fn line_num(&self) -> u64 {
    self.line_num
  }
  #[getter]
  #[number]
  fn line_pos(&self) -> u64 {
    self.line_pos
  }
  #[getter]
  #[number]
  fn offset(&self) -> u64 {
    self.offset
  }
  #[getter]
  #[number]
  fn length(&self) -> u64 {
    self.length
  }
}
impl GPUCompilationMessage {
  // Builds a compilation message from a naga/wgpu shader error, converting
  // the error's byte-based source location into UTF-16 units for JS.
  // When the error carries no location, all position fields are zero.
  fn new(error: &pipeline::CreateShaderModuleError, source: &str) -> Self {
    let message = error.to_string();
    // Only parsing and validation errors carry a source span.
    let loc = match error {
      pipeline::CreateShaderModuleError::Parsing(e) => e.inner.location(source),
      pipeline::CreateShaderModuleError::Validation(e) => {
        e.inner.location(source)
      }
      _ => None,
    };
    match loc {
      Some(loc) => {
        // Length of `s` measured in UTF-16 code units.
        let len_utf16 = |s: &str| s.chars().map(|c| c.len_utf16() as u64).sum();
        let start = loc.offset as usize;
        // Naga reports a `line_pos` using UTF-8 bytes, so we cannot use it.
        let line_start =
          source[0..start].rfind('\n').map(|pos| pos + 1).unwrap_or(0);
        // 1-based column within the line, in UTF-16 units.
        let line_pos = len_utf16(&source[line_start..start]) + 1;
        Self {
          message,
          r#type: GPUCompilationMessageType::Error,
          line_num: loc.line_number.into(),
          line_pos,
          offset: len_utf16(&source[0..start]),
          length: len_utf16(&source[start..start + loc.length as usize]),
        }
      }
      _ => Self {
        message,
        r#type: GPUCompilationMessageType::Error,
        line_num: 0,
        line_pos: 0,
        offset: 0,
        length: 0,
      },
    }
  }
}
/// GPUCompilationInfo: holds the frozen JS array of compilation messages.
pub struct GPUCompilationInfo {
  messages: v8::Global<v8::Object>,
}
// SAFETY: we're sure this can be GCed
unsafe impl GarbageCollected for GPUCompilationInfo {
  fn trace(&self, _visitor: &mut deno_core::v8::cppgc::Visitor) {}
  fn get_name(&self) -> &'static std::ffi::CStr {
    c"GPUCompilationInfo"
  }
}
#[op2]
impl GPUCompilationInfo {
  #[getter]
  #[global]
  fn messages(&self) -> v8::Global<v8::Object> {
    self.messages.clone()
  }
}
impl GPUCompilationInfo {
  // Builds the info object: one GPUCompilationMessage per error, collected
  // into a frozen JS array (per-spec the messages list is immutable).
  pub fn new<'args, 'scope>(
    scope: &mut v8::PinScope<'scope, '_>,
    messages: impl ExactSizeIterator<
      Item = &'args pipeline::CreateShaderModuleError,
    >,
    source: &'args str,
  ) -> Self {
    let array = v8::Array::new(scope, messages.len().try_into().unwrap())
    for (i, message) in messages.enumerate() {
      let message_object =
        make_cppgc_object(scope, GPUCompilationMessage::new(message, source));
      array.set_index(scope, i.try_into().unwrap(), message_object.into());
    }
    let object: v8::Local<v8::Object> = array.into();
    object
      .set_integrity_level(scope, v8::IntegrityLevel::Frozen)
      .unwrap();
    Self {
      messages: v8::Global::new(scope, object),
    }
  }
}
/// WebIDL enum `GPUCompilationMessageType`.
#[derive(WebIDL, Clone)]
#[webidl(enum)]
pub(crate) enum GPUCompilationMessageType {
  Error,
  Warning,
  Info,
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/ext/webgpu/queue.rs | ext/webgpu/queue.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::cell::RefCell;
use std::rc::Rc;
use std::time::Duration;
use deno_core::GarbageCollected;
use deno_core::WebIDL;
use deno_core::cppgc::Ref;
use deno_core::futures::channel::oneshot;
use deno_core::op2;
use deno_error::JsErrorBox;
use crate::Instance;
use crate::buffer::GPUBuffer;
use crate::command_buffer::GPUCommandBuffer;
use crate::error::GPUGenericError;
use crate::texture::GPUTexture;
use crate::texture::GPUTextureAspect;
use crate::webidl::GPUExtent3D;
use crate::webidl::GPUOrigin3D;
/// `GPUQueue` interface: wraps a wgpu-core queue id plus the device it
/// belongs to, used for submitting work and writing buffers/textures.
pub struct GPUQueue {
  pub instance: Instance,
  // Sink for wgpu errors surfaced to JS instead of being thrown.
  pub error_handler: super::error::ErrorHandler,
  pub label: String,
  pub id: wgpu_core::id::QueueId,
  pub device: wgpu_core::id::DeviceId,
}
impl Drop for GPUQueue {
  fn drop(&mut self) {
    // Release the wgpu-core queue handle when the wrapper is collected.
    self.instance.queue_drop(self.id);
  }
}
// SAFETY: we're sure this can be GCed
unsafe impl GarbageCollected for GPUQueue {
  // No cppgc-managed members to visit.
  fn trace(&self, _visitor: &mut deno_core::v8::cppgc::Visitor) {}
  fn get_name(&self) -> &'static std::ffi::CStr {
    c"GPUQueue"
  }
}
#[op2]
impl GPUQueue {
  /// Illegal constructor — queues are only obtained from a `GPUDevice`.
  #[constructor]
  #[cppgc]
  fn constructor(_: bool) -> Result<GPUQueue, GPUGenericError> {
    Err(GPUGenericError::InvalidConstructor)
  }
  #[getter]
  #[string]
  fn label(&self) -> String {
    self.label.clone()
  }
  #[setter]
  #[string]
  fn label(&self, #[webidl] _label: String) {
    // TODO(@crowlKats): no-op, needs wpgu to implement changing the label
  }
  /// Submits the given command buffers for execution; submission errors
  /// are routed to the error handler rather than thrown.
  #[required(1)]
  #[undefined]
  fn submit(
    &self,
    #[webidl] command_buffers: Vec<Ref<GPUCommandBuffer>>,
  ) -> Result<(), JsErrorBox> {
    let ids = command_buffers
      .into_iter()
      .map(|cb| cb.id)
      .collect::<Vec<_>>();
    let err = self.instance.queue_submit(self.id, &ids).err();
    if let Some((_, err)) = err {
      self.error_handler.push_error(Some(err));
    }
    Ok(())
  }
  // In the successful case, the promise should resolve to undefined, but
  // `#[undefined]` does not seem to work here.
  // https://github.com/denoland/deno/issues/29603
  /// Resolves once the work-done callback registered with wgpu fires.
  #[async_method]
  async fn on_submitted_work_done(&self) -> Result<(), JsErrorBox> {
    let (sender, receiver) = oneshot::channel::<()>();
    let callback = Box::new(move || {
      sender.send(()).unwrap();
    });
    self
      .instance
      .queue_on_submitted_work_done(self.id, callback);
    // Poll the device in a loop so wgpu can deliver the callback; the
    // `done` flag is flipped by the receiver future once it has fired.
    let done = Rc::new(RefCell::new(false));
    let done_ = done.clone();
    let device_poll_fut = async move {
      while !*done.borrow() {
        {
          self
            .instance
            .device_poll(self.device, wgpu_types::PollType::wait_indefinitely())
            .unwrap();
        }
        tokio::time::sleep(Duration::from_millis(10)).await;
      }
      Ok::<(), JsErrorBox>(())
    };
    let receiver_fut = async move {
      receiver
        .await
        .map_err(|e| JsErrorBox::generic(e.to_string()))?;
      let mut done = done_.borrow_mut();
      *done = true;
      Ok::<(), JsErrorBox>(())
    };
    tokio::try_join!(device_poll_fut, receiver_fut)?;
    Ok(())
  }
  /// Writes `buf[data_offset..data_offset + size]` (or through the end of
  /// `buf` when `size` is omitted) into `buffer` at `buffer_offset`.
  // NOTE(review): the slice indexing panics if `data_offset`/`size` exceed
  // `buf.len()` — presumably validated by the JS layer; confirm.
  #[required(3)]
  #[undefined]
  fn write_buffer(
    &self,
    #[webidl] buffer: Ref<GPUBuffer>,
    #[webidl(options(enforce_range = true))] buffer_offset: u64,
    #[anybuffer] buf: &[u8],
    #[webidl(default = 0, options(enforce_range = true))] data_offset: u64,
    #[webidl(options(enforce_range = true))] size: Option<u64>,
  ) {
    let data = match size {
      Some(size) => {
        &buf[(data_offset as usize)..((data_offset + size) as usize)]
      }
      None => &buf[(data_offset as usize)..],
    };
    let err = self
      .instance
      .queue_write_buffer(self.id, buffer.id, buffer_offset, data)
      .err();
    self.error_handler.push_error(err);
  }
  /// Copies `buf` into the texture region described by `destination`,
  /// interpreting the bytes according to `data_layout` and `size`.
  #[required(4)]
  #[undefined]
  fn write_texture(
    &self,
    #[webidl] destination: GPUTexelCopyTextureInfo,
    #[anybuffer] buf: &[u8],
    #[webidl] data_layout: GPUTexelCopyBufferLayout,
    #[webidl] size: GPUExtent3D,
  ) {
    let destination = wgpu_types::TexelCopyTextureInfo {
      texture: destination.texture.id,
      mip_level: destination.mip_level,
      origin: destination.origin.into(),
      aspect: destination.aspect.into(),
    };
    let data_layout = wgpu_types::TexelCopyBufferLayout {
      offset: data_layout.offset,
      bytes_per_row: data_layout.bytes_per_row,
      rows_per_image: data_layout.rows_per_image,
    };
    let err = self
      .instance
      .queue_write_texture(
        self.id,
        &destination,
        buf,
        &data_layout,
        &size.into(),
      )
      .err();
    self.error_handler.push_error(err);
  }
}
/// WebIDL dictionary identifying a texture subresource as a copy target.
#[derive(WebIDL)]
#[webidl(dictionary)]
pub(crate) struct GPUTexelCopyTextureInfo {
  pub texture: Ref<GPUTexture>,
  #[webidl(default = 0)]
  #[options(enforce_range = true)]
  pub mip_level: u32,
  #[webidl(default = Default::default())]
  pub origin: GPUOrigin3D,
  #[webidl(default = GPUTextureAspect::All)]
  pub aspect: GPUTextureAspect,
}
/// WebIDL dictionary describing the linear byte layout of texel data.
#[derive(WebIDL)]
#[webidl(dictionary)]
struct GPUTexelCopyBufferLayout {
  #[webidl(default = 0)]
  #[options(enforce_range = true)]
  offset: u64,
  #[options(enforce_range = true)]
  bytes_per_row: Option<u32>,
  #[options(enforce_range = true)]
  rows_per_image: Option<u32>,
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/ext/webgpu/sampler.rs | ext/webgpu/sampler.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use deno_core::GarbageCollected;
use deno_core::WebIDL;
use deno_core::op2;
use deno_core::webidl::WebIdlInterfaceConverter;
use crate::Instance;
use crate::error::GPUGenericError;
/// `GPUSampler` interface: thin wrapper over a wgpu-core sampler id.
pub struct GPUSampler {
  pub instance: Instance,
  pub id: wgpu_core::id::SamplerId,
  pub label: String,
}
impl Drop for GPUSampler {
  fn drop(&mut self) {
    // Release the wgpu-core sampler handle when the wrapper is collected.
    self.instance.sampler_drop(self.id);
  }
}
// Lets WebIDL conversion accept this type by its interface name.
impl WebIdlInterfaceConverter for GPUSampler {
  const NAME: &'static str = "GPUSampler";
}
// SAFETY: we're sure this can be GCed
unsafe impl GarbageCollected for GPUSampler {
  // No cppgc-managed members to visit.
  fn trace(&self, _visitor: &mut deno_core::v8::cppgc::Visitor) {}
  fn get_name(&self) -> &'static std::ffi::CStr {
    c"GPUSampler"
  }
}
#[op2]
impl GPUSampler {
  /// Illegal constructor — samplers come from `GPUDevice.createSampler`.
  #[constructor]
  #[cppgc]
  fn constructor(_: bool) -> Result<GPUSampler, GPUGenericError> {
    Err(GPUGenericError::InvalidConstructor)
  }
  #[getter]
  #[string]
  fn label(&self) -> String {
    self.label.clone()
  }
  #[setter]
  #[string]
  fn label(&self, #[webidl] _label: String) {
    // TODO(@crowlKats): no-op, needs wpgu to implement changing the label
  }
}
/// Dictionary argument to `createSampler`; defaults mirror the WebIDL
/// declaration.
#[derive(WebIDL)]
#[webidl(dictionary)]
pub(super) struct GPUSamplerDescriptor {
  #[webidl(default = String::new())]
  pub label: String,
  #[webidl(default = GPUAddressMode::ClampToEdge)]
  pub address_mode_u: GPUAddressMode,
  #[webidl(default = GPUAddressMode::ClampToEdge)]
  pub address_mode_v: GPUAddressMode,
  #[webidl(default = GPUAddressMode::ClampToEdge)]
  pub address_mode_w: GPUAddressMode,
  #[webidl(default = GPUFilterMode::Nearest)]
  pub mag_filter: GPUFilterMode,
  #[webidl(default = GPUFilterMode::Nearest)]
  pub min_filter: GPUFilterMode,
  #[webidl(default = GPUMipmapFilterMode::Nearest)]
  pub mipmap_filter: GPUMipmapFilterMode,
  #[webidl(default = 0.0)]
  pub lod_min_clamp: f32,
  #[webidl(default = 32.0)]
  pub lod_max_clamp: f32,
  // Setting a compare function turns this into a comparison sampler.
  pub compare: Option<GPUCompareFunction>,
  #[webidl(default = 1)]
  #[options(clamp = true)]
  pub max_anisotropy: u16,
}
/// Texture addressing behavior outside [0, 1] (WebIDL enum).
#[derive(WebIDL)]
#[webidl(enum)]
pub(crate) enum GPUAddressMode {
  ClampToEdge,
  Repeat,
  MirrorRepeat,
}
impl From<GPUAddressMode> for wgpu_types::AddressMode {
fn from(value: GPUAddressMode) -> Self {
match value {
GPUAddressMode::ClampToEdge => Self::ClampToEdge,
GPUAddressMode::Repeat => Self::Repeat,
GPUAddressMode::MirrorRepeat => Self::MirrorRepeat,
}
}
}
/// Magnification/minification filtering mode (WebIDL enum).
#[derive(WebIDL)]
#[webidl(enum)]
pub(crate) enum GPUFilterMode {
  Nearest,
  Linear,
}
impl From<GPUFilterMode> for wgpu_types::FilterMode {
  // One-to-one mapping from the WebIDL enum onto the wgpu-types enum.
  fn from(mode: GPUFilterMode) -> Self {
    match mode {
      GPUFilterMode::Nearest => wgpu_types::FilterMode::Nearest,
      GPUFilterMode::Linear => wgpu_types::FilterMode::Linear,
    }
  }
}
/// Filtering mode between mipmap levels (WebIDL enum).
#[derive(WebIDL)]
#[webidl(enum)]
pub(crate) enum GPUMipmapFilterMode {
  Nearest,
  Linear,
}
impl From<GPUMipmapFilterMode> for wgpu_types::MipmapFilterMode {
  // One-to-one mapping from the WebIDL enum onto the wgpu-types enum.
  fn from(mode: GPUMipmapFilterMode) -> Self {
    match mode {
      GPUMipmapFilterMode::Nearest => wgpu_types::MipmapFilterMode::Nearest,
      GPUMipmapFilterMode::Linear => wgpu_types::MipmapFilterMode::Linear,
    }
  }
}
/// Comparison function for comparison samplers and depth tests
/// (WebIDL enum).
#[derive(WebIDL)]
#[webidl(enum)]
pub(crate) enum GPUCompareFunction {
  Never,
  Less,
  Equal,
  LessEqual,
  Greater,
  NotEqual,
  GreaterEqual,
  Always,
}
impl From<GPUCompareFunction> for wgpu_types::CompareFunction {
fn from(value: GPUCompareFunction) -> Self {
match value {
GPUCompareFunction::Never => Self::Never,
GPUCompareFunction::Less => Self::Less,
GPUCompareFunction::Equal => Self::Equal,
GPUCompareFunction::LessEqual => Self::LessEqual,
GPUCompareFunction::Greater => Self::Greater,
GPUCompareFunction::NotEqual => Self::NotEqual,
GPUCompareFunction::GreaterEqual => Self::GreaterEqual,
GPUCompareFunction::Always => Self::Always,
}
}
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/ext/webgpu/pipeline_layout.rs | ext/webgpu/pipeline_layout.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use deno_core::GarbageCollected;
use deno_core::WebIDL;
use deno_core::cppgc::Ref;
use deno_core::op2;
use deno_core::webidl::WebIdlInterfaceConverter;
use crate::Instance;
use crate::error::GPUGenericError;
/// `GPUPipelineLayout` interface: thin wrapper over a wgpu-core
/// pipeline-layout id.
pub struct GPUPipelineLayout {
  pub instance: Instance,
  pub id: wgpu_core::id::PipelineLayoutId,
  pub label: String,
}
impl Drop for GPUPipelineLayout {
  fn drop(&mut self) {
    // Release the wgpu-core layout handle when the wrapper is collected.
    self.instance.pipeline_layout_drop(self.id);
  }
}
// Lets WebIDL conversion accept this type by its interface name.
impl WebIdlInterfaceConverter for GPUPipelineLayout {
  const NAME: &'static str = "GPUPipelineLayout";
}
// SAFETY: we're sure this can be GCed
unsafe impl GarbageCollected for GPUPipelineLayout {
  // No cppgc-managed members to visit.
  fn trace(&self, _visitor: &mut deno_core::v8::cppgc::Visitor) {}
  fn get_name(&self) -> &'static std::ffi::CStr {
    c"GPUPipelineLayout"
  }
}
#[op2]
impl GPUPipelineLayout {
  /// Illegal constructor — layouts come from
  /// `GPUDevice.createPipelineLayout`.
  #[constructor]
  #[cppgc]
  fn constructor(_: bool) -> Result<GPUPipelineLayout, GPUGenericError> {
    Err(GPUGenericError::InvalidConstructor)
  }
  #[getter]
  #[string]
  fn label(&self) -> String {
    self.label.clone()
  }
  #[setter]
  #[string]
  fn label(&self, #[webidl] _label: String) {
    // TODO(@crowlKats): no-op, needs wpgu to implement changing the label
  }
}
/// Dictionary argument to `createPipelineLayout`: an ordered list of
/// bind-group layouts.
#[derive(WebIDL)]
#[webidl(dictionary)]
pub(crate) struct GPUPipelineLayoutDescriptor {
  #[webidl(default = String::new())]
  pub label: String,
  pub bind_group_layouts:
    Vec<Ref<super::bind_group_layout::GPUBindGroupLayout>>,
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/ext/fs/lib.rs | ext/fs/lib.rs | // Copyright 2018-2025 the Deno authors. MIT license.
mod interface;
mod ops;
mod std_fs;
pub use deno_io::fs::FsError;
pub use deno_maybe_sync as sync;
pub use deno_maybe_sync::MaybeSend;
pub use deno_maybe_sync::MaybeSync;
pub use crate::interface::FileSystem;
pub use crate::interface::FileSystemRc;
pub use crate::interface::FsDirEntry;
pub use crate::interface::FsFileType;
pub use crate::interface::OpenOptions;
pub use crate::ops::FsOpsError;
pub use crate::ops::FsOpsErrorKind;
pub use crate::ops::OperationError;
use crate::ops::*;
pub use crate::std_fs::RealFs;
pub use crate::std_fs::open_options_for_checked_path;
// Feature name used when gating unstable fs functionality.
pub const UNSTABLE_FEATURE_NAME: &str = "fs";
// Declares the `deno_fs` extension: registers every fs op, ships the
// `30_fs.js` ESM entry point, and stores the injected `FileSystemRc`
// into the op state for the ops to use.
deno_core::extension!(deno_fs,
  deps = [ deno_web ],
  ops = [
    op_fs_cwd,
    op_fs_umask,
    op_fs_chdir,
    op_fs_open_sync,
    op_fs_open_async,
    op_fs_mkdir_sync,
    op_fs_mkdir_async,
    op_fs_chmod_sync,
    op_fs_chmod_async,
    op_fs_chown_sync,
    op_fs_chown_async,
    op_fs_remove_sync,
    op_fs_remove_async,
    op_fs_copy_file_sync,
    op_fs_copy_file_async,
    op_fs_stat_sync,
    op_fs_stat_async,
    op_fs_lstat_sync,
    op_fs_lstat_async,
    op_fs_realpath_sync,
    op_fs_realpath_async,
    op_fs_read_dir_sync,
    op_fs_read_dir_async,
    op_fs_rename_sync,
    op_fs_rename_async,
    op_fs_link_sync,
    op_fs_link_async,
    op_fs_symlink_sync,
    op_fs_symlink_async,
    op_fs_read_link_sync,
    op_fs_read_link_async,
    op_fs_truncate_sync,
    op_fs_truncate_async,
    op_fs_utime_sync,
    op_fs_utime_async,
    op_fs_make_temp_dir_sync,
    op_fs_make_temp_dir_async,
    op_fs_make_temp_file_sync,
    op_fs_make_temp_file_async,
    op_fs_write_file_sync,
    op_fs_write_file_async,
    op_fs_read_file_sync,
    op_fs_read_file_async,
    op_fs_read_file_text_sync,
    op_fs_read_file_text_async,
    op_fs_seek_sync,
    op_fs_seek_async,
    op_fs_file_sync_data_sync,
    op_fs_file_sync_data_async,
    op_fs_file_sync_sync,
    op_fs_file_sync_async,
    op_fs_file_stat_sync,
    op_fs_file_stat_async,
    op_fs_fchmod_async,
    op_fs_fchmod_sync,
    op_fs_fchown_async,
    op_fs_fchown_sync,
    op_fs_flock_async,
    op_fs_flock_sync,
    op_fs_funlock_async,
    op_fs_funlock_sync,
    op_fs_ftruncate_sync,
    op_fs_file_truncate_async,
    op_fs_futime_sync,
    op_fs_futime_async,
  ],
  esm = [ "30_fs.js" ],
  options = {
    fs: FileSystemRc,
  },
  state = |state, options| {
    state.put(options.fs);
  },
);
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/ext/fs/interface.rs | ext/fs/interface.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use core::str;
use std::borrow::Cow;
use std::path::PathBuf;
use std::rc::Rc;
use deno_io::fs::File;
use deno_io::fs::FsResult;
use deno_io::fs::FsStat;
use deno_maybe_sync::MaybeSend;
use deno_maybe_sync::MaybeSync;
use deno_permissions::CheckedPath;
use deno_permissions::CheckedPathBuf;
use serde::Deserialize;
use serde::Serialize;
/// Options controlling how a file is opened; deserialized from the
/// JS-side options object (camelCase field names).
#[derive(Deserialize, Default, Debug, Clone, Copy)]
#[serde(rename_all = "camelCase")]
#[serde(default)]
pub struct OpenOptions {
  pub read: bool,
  pub write: bool,
  pub create: bool,
  pub truncate: bool,
  pub append: bool,
  // Fail if the file already exists (mapped from O_EXCL).
  pub create_new: bool,
  // Extra OS-specific open flags not represented by the fields above.
  pub custom_flags: Option<i32>,
  // Permission bits applied to the opened/created file (unix).
  pub mode: Option<u32>,
}
impl OpenOptions {
  /// Options for a plain read-only open: every flag off except `read`.
  pub fn read() -> Self {
    Self {
      read: true,
      ..Default::default()
    }
  }

  /// Options for a write open; `truncate` is implied unless appending.
  pub fn write(
    create: bool,
    append: bool,
    create_new: bool,
    mode: Option<u32>,
  ) -> Self {
    Self {
      write: true,
      create,
      truncate: !append,
      append,
      create_new,
      mode,
      ..Default::default()
    }
  }
}
impl From<i32> for OpenOptions {
fn from(flags: i32) -> Self {
let mut options = OpenOptions {
..Default::default()
};
let mut flags = flags;
if (flags & libc::O_APPEND) == libc::O_APPEND {
options.append = true;
flags &= !libc::O_APPEND;
}
if (flags & libc::O_CREAT) == libc::O_CREAT {
options.create = true;
flags &= !libc::O_CREAT;
}
if (flags & libc::O_EXCL) == libc::O_EXCL {
options.create_new = true;
options.write = true;
flags &= !libc::O_EXCL;
}
if (flags & libc::O_RDWR) == libc::O_RDWR {
options.read = true;
options.write = true;
flags &= !libc::O_RDWR;
}
if (flags & libc::O_TRUNC) == libc::O_TRUNC {
options.truncate = true;
flags &= !libc::O_TRUNC;
}
if (flags & libc::O_WRONLY) == libc::O_WRONLY {
options.write = true;
flags &= !libc::O_WRONLY;
}
if flags != 0 {
options.custom_flags = Some(flags);
}
if !options.append
&& !options.create
&& !options.create_new
&& !options.read
&& !options.truncate
&& !options.write
{
options.read = true;
}
Self { ..options }
}
}
/// Kind of filesystem entry a new symlink should point at; passed
/// through from JS to `symlink`.
#[derive(Deserialize)]
pub enum FsFileType {
  #[serde(rename = "file")]
  File,
  #[serde(rename = "dir")]
  Directory,
  #[serde(rename = "junction")]
  Junction,
}
/// A single directory entry as produced by `read_dir`.
///
/// WARNING: This is part of the public JS Deno API.
#[derive(Debug, Clone, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct FsDirEntry {
  pub name: String,
  pub is_file: bool,
  pub is_directory: bool,
  pub is_symlink: bool,
}
// Shared handle to the active `FileSystem` implementation.
#[allow(clippy::disallowed_types)]
pub type FileSystemRc = deno_maybe_sync::MaybeArc<dyn FileSystem>;
/// Pluggable filesystem backend used by the fs ops.
///
/// Paths arrive already permission-checked (`CheckedPath` /
/// `CheckedPathBuf`); sync variants borrow the path while async variants
/// take ownership so implementations can move it to another thread.
#[async_trait::async_trait(?Send)]
pub trait FileSystem: std::fmt::Debug + MaybeSend + MaybeSync {
  fn cwd(&self) -> FsResult<PathBuf>;
  fn tmp_dir(&self) -> FsResult<PathBuf>;
  fn chdir(&self, path: &CheckedPath) -> FsResult<()>;
  // Sets the process umask when `mask` is `Some`; returns the previous
  // (or current) mask.
  fn umask(&self, mask: Option<u32>) -> FsResult<u32>;
  fn open_sync(
    &self,
    path: &CheckedPath,
    options: OpenOptions,
  ) -> FsResult<Rc<dyn File>>;
  async fn open_async<'a>(
    &'a self,
    path: CheckedPathBuf,
    options: OpenOptions,
  ) -> FsResult<Rc<dyn File>>;
  fn mkdir_sync(
    &self,
    path: &CheckedPath,
    recursive: bool,
    mode: Option<u32>,
  ) -> FsResult<()>;
  async fn mkdir_async(
    &self,
    path: CheckedPathBuf,
    recursive: bool,
    mode: Option<u32>,
  ) -> FsResult<()>;
  // `mode` is unix permission bits; on non-unix targets it is a raw
  // `i32` passed through to the platform chmod.
  #[cfg(unix)]
  fn chmod_sync(&self, path: &CheckedPath, mode: u32) -> FsResult<()>;
  #[cfg(not(unix))]
  fn chmod_sync(&self, path: &CheckedPath, mode: i32) -> FsResult<()>;
  #[cfg(unix)]
  async fn chmod_async(&self, path: CheckedPathBuf, mode: u32) -> FsResult<()>;
  #[cfg(not(unix))]
  async fn chmod_async(&self, path: CheckedPathBuf, mode: i32) -> FsResult<()>;
  fn chown_sync(
    &self,
    path: &CheckedPath,
    uid: Option<u32>,
    gid: Option<u32>,
  ) -> FsResult<()>;
  async fn chown_async(
    &self,
    path: CheckedPathBuf,
    uid: Option<u32>,
    gid: Option<u32>,
  ) -> FsResult<()>;
  // The `l*` variants operate on a symlink itself rather than its target.
  fn lchmod_sync(&self, path: &CheckedPath, mode: u32) -> FsResult<()>;
  async fn lchmod_async(&self, path: CheckedPathBuf, mode: u32)
  -> FsResult<()>;
  fn lchown_sync(
    &self,
    path: &CheckedPath,
    uid: Option<u32>,
    gid: Option<u32>,
  ) -> FsResult<()>;
  async fn lchown_async(
    &self,
    path: CheckedPathBuf,
    uid: Option<u32>,
    gid: Option<u32>,
  ) -> FsResult<()>;
  fn remove_sync(&self, path: &CheckedPath, recursive: bool) -> FsResult<()>;
  async fn remove_async(
    &self,
    path: CheckedPathBuf,
    recursive: bool,
  ) -> FsResult<()>;
  fn copy_file_sync(
    &self,
    oldpath: &CheckedPath,
    newpath: &CheckedPath,
  ) -> FsResult<()>;
  async fn copy_file_async(
    &self,
    oldpath: CheckedPathBuf,
    newpath: CheckedPathBuf,
  ) -> FsResult<()>;
  fn cp_sync(&self, path: &CheckedPath, new_path: &CheckedPath)
  -> FsResult<()>;
  async fn cp_async(
    &self,
    path: CheckedPathBuf,
    new_path: CheckedPathBuf,
  ) -> FsResult<()>;
  fn stat_sync(&self, path: &CheckedPath) -> FsResult<FsStat>;
  async fn stat_async(&self, path: CheckedPathBuf) -> FsResult<FsStat>;
  fn lstat_sync(&self, path: &CheckedPath) -> FsResult<FsStat>;
  async fn lstat_async(&self, path: CheckedPathBuf) -> FsResult<FsStat>;
  fn realpath_sync(&self, path: &CheckedPath) -> FsResult<PathBuf>;
  async fn realpath_async(&self, path: CheckedPathBuf) -> FsResult<PathBuf>;
  fn read_dir_sync(&self, path: &CheckedPath) -> FsResult<Vec<FsDirEntry>>;
  async fn read_dir_async(
    &self,
    path: CheckedPathBuf,
  ) -> FsResult<Vec<FsDirEntry>>;
  fn rename_sync(
    &self,
    oldpath: &CheckedPath,
    newpath: &CheckedPath,
  ) -> FsResult<()>;
  async fn rename_async(
    &self,
    oldpath: CheckedPathBuf,
    newpath: CheckedPathBuf,
  ) -> FsResult<()>;
  fn link_sync(
    &self,
    oldpath: &CheckedPath,
    newpath: &CheckedPath,
  ) -> FsResult<()>;
  async fn link_async(
    &self,
    oldpath: CheckedPathBuf,
    newpath: CheckedPathBuf,
  ) -> FsResult<()>;
  // `file_type` selects the symlink flavor (see `FsFileType`).
  fn symlink_sync(
    &self,
    oldpath: &CheckedPath,
    newpath: &CheckedPath,
    file_type: Option<FsFileType>,
  ) -> FsResult<()>;
  async fn symlink_async(
    &self,
    oldpath: CheckedPathBuf,
    newpath: CheckedPathBuf,
    file_type: Option<FsFileType>,
  ) -> FsResult<()>;
  fn read_link_sync(&self, path: &CheckedPath) -> FsResult<PathBuf>;
  async fn read_link_async(&self, path: CheckedPathBuf) -> FsResult<PathBuf>;
  fn truncate_sync(&self, path: &CheckedPath, len: u64) -> FsResult<()>;
  async fn truncate_async(
    &self,
    path: CheckedPathBuf,
    len: u64,
  ) -> FsResult<()>;
  // Timestamps are split into whole seconds plus nanoseconds.
  fn utime_sync(
    &self,
    path: &CheckedPath,
    atime_secs: i64,
    atime_nanos: u32,
    mtime_secs: i64,
    mtime_nanos: u32,
  ) -> FsResult<()>;
  async fn utime_async(
    &self,
    path: CheckedPathBuf,
    atime_secs: i64,
    atime_nanos: u32,
    mtime_secs: i64,
    mtime_nanos: u32,
  ) -> FsResult<()>;
  fn lutime_sync(
    &self,
    path: &CheckedPath,
    atime_secs: i64,
    atime_nanos: u32,
    mtime_secs: i64,
    mtime_nanos: u32,
  ) -> FsResult<()>;
  async fn lutime_async(
    &self,
    path: CheckedPathBuf,
    atime_secs: i64,
    atime_nanos: u32,
    mtime_secs: i64,
    mtime_nanos: u32,
  ) -> FsResult<()>;
  // Default implementation: open, optionally chmod, then write all bytes.
  fn write_file_sync(
    &self,
    path: &CheckedPath,
    options: OpenOptions,
    data: &[u8],
  ) -> FsResult<()> {
    let file = self.open_sync(path, options)?;
    if let Some(mode) = options.mode {
      file.clone().chmod_sync(mode)?;
    }
    file.write_all_sync(data)?;
    Ok(())
  }
  async fn write_file_async<'a>(
    &'a self,
    path: CheckedPathBuf,
    options: OpenOptions,
    data: Vec<u8>,
  ) -> FsResult<()> {
    let file = self.open_async(path, options).await?;
    if let Some(mode) = options.mode {
      file.clone().chmod_async(mode).await?;
    }
    file.write_all(data.into()).await?;
    Ok(())
  }
  fn read_file_sync(
    &self,
    path: &CheckedPath,
    options: OpenOptions,
  ) -> FsResult<Cow<'static, [u8]>> {
    let file = self.open_sync(path, options)?;
    let buf = file.read_all_sync()?;
    Ok(buf)
  }
  async fn read_file_async<'a>(
    &'a self,
    path: CheckedPathBuf,
    options: OpenOptions,
  ) -> FsResult<Cow<'static, [u8]>> {
    let file = self.open_async(path, options).await?;
    let buf = file.read_all_async().await?;
    Ok(buf)
  }
  // Convenience predicates; stat failures are reported as `false`.
  fn is_file_sync(&self, path: &CheckedPath) -> bool {
    self.stat_sync(path).map(|m| m.is_file).unwrap_or(false)
  }
  fn is_dir_sync(&self, path: &CheckedPath) -> bool {
    self
      .stat_sync(path)
      .map(|m| m.is_directory)
      .unwrap_or(false)
  }
  fn exists_sync(&self, path: &CheckedPath) -> bool;
  async fn exists_async(&self, path: CheckedPathBuf) -> FsResult<bool>;
  // Lossy text reads: invalid UTF-8 sequences become U+FFFD.
  fn read_text_file_lossy_sync(
    &self,
    path: &CheckedPath,
  ) -> FsResult<Cow<'static, str>> {
    let buf = self.read_file_sync(path, OpenOptions::read())?;
    Ok(string_from_cow_utf8_lossy(buf))
  }
  async fn read_text_file_lossy_async<'a>(
    &'a self,
    path: CheckedPathBuf,
  ) -> FsResult<Cow<'static, str>> {
    let buf = self.read_file_async(path, OpenOptions::read()).await?;
    Ok(string_from_cow_utf8_lossy(buf))
  }
}
// Lossily decodes a byte Cow into a string Cow. A borrowed input may
// stay borrowed; an owned input reuses its allocation where possible.
#[inline(always)]
fn string_from_cow_utf8_lossy(buf: Cow<'static, [u8]>) -> Cow<'static, str> {
  match buf {
    Cow::Borrowed(bytes) => String::from_utf8_lossy(bytes),
    Cow::Owned(bytes) => Cow::Owned(string_from_utf8_lossy(bytes)),
  }
}
// Like String::from_utf8_lossy but operates on owned values
//
// Uses the safe `String::from_utf8` round-trip instead of the previous
// `unsafe { String::from_utf8_unchecked(..) }`: on valid UTF-8 the input
// allocation is reused without copying, and only invalid input pays for
// a lossy re-decode that patches bad sequences with U+FFFD.
#[inline(always)]
fn string_from_utf8_lossy(buf: Vec<u8>) -> String {
  match String::from_utf8(buf) {
    // Entirely valid UTF-8: take ownership of the buffer as-is.
    Ok(s) => s,
    // Invalid sequences present: fall back to a lossy copy.
    Err(err) => String::from_utf8_lossy(err.as_bytes()).into_owned(),
  }
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/ext/fs/std_fs.rs | ext/fs/std_fs.rs | // Copyright 2018-2025 the Deno authors. MIT license.
#![allow(clippy::disallowed_methods)]
use std::borrow::Cow;
use std::fs;
use std::io;
use std::io::ErrorKind;
use std::io::Read;
use std::io::Write;
use std::path::Path;
use std::path::PathBuf;
use std::rc::Rc;
use deno_core::unsync::spawn_blocking;
use deno_io::StdFileResourceInner;
use deno_io::fs::File;
use deno_io::fs::FsError;
use deno_io::fs::FsResult;
use deno_io::fs::FsStat;
use deno_permissions::CheckedPath;
use deno_permissions::CheckedPathBuf;
use crate::FileSystem;
use crate::OpenOptions;
use crate::interface::FsDirEntry;
use crate::interface::FsFileType;
/// `FileSystem` implementation backed by the real OS filesystem.
#[derive(Debug, Default, Clone)]
pub struct RealFs;
// Real-filesystem implementation: sync methods call `std::fs` (or
// platform helpers below) directly; async variants offload the blocking
// call with `spawn_blocking`.
#[async_trait::async_trait(?Send)]
impl FileSystem for RealFs {
  fn cwd(&self) -> FsResult<PathBuf> {
    std::env::current_dir().map_err(Into::into)
  }
  fn tmp_dir(&self) -> FsResult<PathBuf> {
    Ok(std::env::temp_dir())
  }
  fn chdir(&self, path: &CheckedPath) -> FsResult<()> {
    std::env::set_current_dir(path).map_err(Into::into)
  }
  #[cfg(not(unix))]
  fn umask(&self, _mask: Option<u32>) -> FsResult<u32> {
    // TODO implement umask for Windows
    // see https://github.com/nodejs/node/blob/master/src/node_process_methods.cc
    // and https://docs.microsoft.com/fr-fr/cpp/c-runtime-library/reference/umask?view=vs-2019
    Err(FsError::NotSupported)
  }
  #[cfg(unix)]
  fn umask(&self, mask: Option<u32>) -> FsResult<u32> {
    use nix::sys::stat::Mode;
    use nix::sys::stat::mode_t;
    use nix::sys::stat::umask;
    let r = if let Some(mask) = mask {
      // If mask provided, return previous.
      umask(Mode::from_bits_truncate(mask as mode_t))
    } else {
      // If no mask provided, we query the current. Requires two syscalls.
      let prev = umask(Mode::from_bits_truncate(0));
      let _ = umask(prev);
      prev
    };
    // `Mode::bits()` is `mode_t`, whose width differs by platform.
    #[cfg(any(target_os = "android", target_os = "linux"))]
    {
      Ok(r.bits())
    }
    #[cfg(any(
      target_os = "macos",
      target_os = "openbsd",
      target_os = "freebsd"
    ))]
    {
      Ok(r.bits() as u32)
    }
  }
  fn open_sync(
    &self,
    path: &CheckedPath,
    options: OpenOptions,
  ) -> FsResult<Rc<dyn File>> {
    let std_file = open_with_checked_path(options, path)?;
    Ok(Rc::new(StdFileResourceInner::file(
      std_file,
      Some(path.to_path_buf()),
    )))
  }
  async fn open_async<'a>(
    &'a self,
    path: CheckedPathBuf,
    options: OpenOptions,
  ) -> FsResult<Rc<dyn File>> {
    // NOTE(review): the open itself runs on the current thread here, not
    // on the blocking pool.
    let std_file = open_with_checked_path(options, &path.as_checked_path())?;
    Ok(Rc::new(StdFileResourceInner::file(
      std_file,
      Some(path.to_path_buf()),
    )))
  }
  fn mkdir_sync(
    &self,
    path: &CheckedPath,
    recursive: bool,
    mode: Option<u32>,
  ) -> FsResult<()> {
    mkdir(path, recursive, mode)
  }
  async fn mkdir_async(
    &self,
    path: CheckedPathBuf,
    recursive: bool,
    mode: Option<u32>,
  ) -> FsResult<()> {
    spawn_blocking(move || mkdir(&path, recursive, mode)).await?
  }
  #[cfg(unix)]
  fn chmod_sync(&self, path: &CheckedPath, mode: u32) -> FsResult<()> {
    chmod(path, mode)
  }
  #[cfg(not(unix))]
  fn chmod_sync(&self, path: &CheckedPath, mode: i32) -> FsResult<()> {
    chmod(path, mode)
  }
  #[cfg(unix)]
  async fn chmod_async(&self, path: CheckedPathBuf, mode: u32) -> FsResult<()> {
    spawn_blocking(move || chmod(&path, mode)).await?
  }
  #[cfg(not(unix))]
  async fn chmod_async(&self, path: CheckedPathBuf, mode: i32) -> FsResult<()> {
    spawn_blocking(move || chmod(&path, mode)).await?
  }
  fn chown_sync(
    &self,
    path: &CheckedPath,
    uid: Option<u32>,
    gid: Option<u32>,
  ) -> FsResult<()> {
    chown(path, uid, gid)
  }
  async fn chown_async(
    &self,
    path: CheckedPathBuf,
    uid: Option<u32>,
    gid: Option<u32>,
  ) -> FsResult<()> {
    spawn_blocking(move || chown(&path, uid, gid)).await?
  }
  fn remove_sync(&self, path: &CheckedPath, recursive: bool) -> FsResult<()> {
    remove(path, recursive)
  }
  async fn remove_async(
    &self,
    path: CheckedPathBuf,
    recursive: bool,
  ) -> FsResult<()> {
    spawn_blocking(move || remove(&path, recursive)).await?
  }
  fn copy_file_sync(
    &self,
    from: &CheckedPath,
    to: &CheckedPath,
  ) -> FsResult<()> {
    copy_file(from, to)
  }
  async fn copy_file_async(
    &self,
    from: CheckedPathBuf,
    to: CheckedPathBuf,
  ) -> FsResult<()> {
    spawn_blocking(move || copy_file(&from, &to)).await?
  }
  fn cp_sync(&self, fro: &CheckedPath, to: &CheckedPath) -> FsResult<()> {
    cp(fro, to)
  }
  async fn cp_async(
    &self,
    fro: CheckedPathBuf,
    to: CheckedPathBuf,
  ) -> FsResult<()> {
    spawn_blocking(move || cp(&fro, &to)).await?
  }
  fn stat_sync(&self, path: &CheckedPath) -> FsResult<FsStat> {
    stat(path)
  }
  async fn stat_async(&self, path: CheckedPathBuf) -> FsResult<FsStat> {
    spawn_blocking(move || stat(&path)).await?
  }
  fn lstat_sync(&self, path: &CheckedPath) -> FsResult<FsStat> {
    lstat(path)
  }
  async fn lstat_async(&self, path: CheckedPathBuf) -> FsResult<FsStat> {
    spawn_blocking(move || lstat(&path)).await?
  }
  fn exists_sync(&self, path: &CheckedPath) -> bool {
    exists(path)
  }
  async fn exists_async(&self, path: CheckedPathBuf) -> FsResult<bool> {
    spawn_blocking(move || exists(&path))
      .await
      .map_err(Into::into)
  }
  fn realpath_sync(&self, path: &CheckedPath) -> FsResult<PathBuf> {
    realpath(path)
  }
  async fn realpath_async(&self, path: CheckedPathBuf) -> FsResult<PathBuf> {
    spawn_blocking(move || realpath(&path)).await?
  }
  fn read_dir_sync(&self, path: &CheckedPath) -> FsResult<Vec<FsDirEntry>> {
    read_dir(path)
  }
  async fn read_dir_async(
    &self,
    path: CheckedPathBuf,
  ) -> FsResult<Vec<FsDirEntry>> {
    spawn_blocking(move || read_dir(&path)).await?
  }
  fn rename_sync(
    &self,
    oldpath: &CheckedPath,
    newpath: &CheckedPath,
  ) -> FsResult<()> {
    fs::rename(oldpath, newpath).map_err(Into::into)
  }
  async fn rename_async(
    &self,
    oldpath: CheckedPathBuf,
    newpath: CheckedPathBuf,
  ) -> FsResult<()> {
    spawn_blocking(move || fs::rename(oldpath, newpath))
      .await?
      .map_err(Into::into)
  }
  fn lchmod_sync(&self, path: &CheckedPath, mode: u32) -> FsResult<()> {
    lchmod(path, mode)
  }
  async fn lchmod_async(
    &self,
    path: CheckedPathBuf,
    mode: u32,
  ) -> FsResult<()> {
    spawn_blocking(move || lchmod(&path, mode)).await?
  }
  fn link_sync(
    &self,
    oldpath: &CheckedPath,
    newpath: &CheckedPath,
  ) -> FsResult<()> {
    fs::hard_link(oldpath, newpath).map_err(Into::into)
  }
  async fn link_async(
    &self,
    oldpath: CheckedPathBuf,
    newpath: CheckedPathBuf,
  ) -> FsResult<()> {
    spawn_blocking(move || fs::hard_link(oldpath, newpath))
      .await?
      .map_err(Into::into)
  }
  fn symlink_sync(
    &self,
    oldpath: &CheckedPath,
    newpath: &CheckedPath,
    file_type: Option<FsFileType>,
  ) -> FsResult<()> {
    symlink(oldpath, newpath, file_type)
  }
  async fn symlink_async(
    &self,
    oldpath: CheckedPathBuf,
    newpath: CheckedPathBuf,
    file_type: Option<FsFileType>,
  ) -> FsResult<()> {
    spawn_blocking(move || symlink(&oldpath, &newpath, file_type)).await?
  }
  fn read_link_sync(&self, path: &CheckedPath) -> FsResult<PathBuf> {
    fs::read_link(path).map_err(Into::into)
  }
  async fn read_link_async(&self, path: CheckedPathBuf) -> FsResult<PathBuf> {
    spawn_blocking(move || fs::read_link(path))
      .await?
      .map_err(Into::into)
  }
  fn truncate_sync(&self, path: &CheckedPath, len: u64) -> FsResult<()> {
    truncate(path, len)
  }
  async fn truncate_async(
    &self,
    path: CheckedPathBuf,
    len: u64,
  ) -> FsResult<()> {
    spawn_blocking(move || truncate(&path, len)).await?
  }
  fn utime_sync(
    &self,
    path: &CheckedPath,
    atime_secs: i64,
    atime_nanos: u32,
    mtime_secs: i64,
    mtime_nanos: u32,
  ) -> FsResult<()> {
    let atime = filetime::FileTime::from_unix_time(atime_secs, atime_nanos);
    let mtime = filetime::FileTime::from_unix_time(mtime_secs, mtime_nanos);
    filetime::set_file_times(path, atime, mtime).map_err(Into::into)
  }
  async fn utime_async(
    &self,
    path: CheckedPathBuf,
    atime_secs: i64,
    atime_nanos: u32,
    mtime_secs: i64,
    mtime_nanos: u32,
  ) -> FsResult<()> {
    let atime = filetime::FileTime::from_unix_time(atime_secs, atime_nanos);
    let mtime = filetime::FileTime::from_unix_time(mtime_secs, mtime_nanos);
    spawn_blocking(move || {
      filetime::set_file_times(path, atime, mtime).map_err(Into::into)
    })
    .await?
  }
  // `lutime` updates the symlink's own timestamps rather than its target's.
  fn lutime_sync(
    &self,
    path: &CheckedPath,
    atime_secs: i64,
    atime_nanos: u32,
    mtime_secs: i64,
    mtime_nanos: u32,
  ) -> FsResult<()> {
    let atime = filetime::FileTime::from_unix_time(atime_secs, atime_nanos);
    let mtime = filetime::FileTime::from_unix_time(mtime_secs, mtime_nanos);
    filetime::set_symlink_file_times(path, atime, mtime).map_err(Into::into)
  }
  async fn lutime_async(
    &self,
    path: CheckedPathBuf,
    atime_secs: i64,
    atime_nanos: u32,
    mtime_secs: i64,
    mtime_nanos: u32,
  ) -> FsResult<()> {
    let atime = filetime::FileTime::from_unix_time(atime_secs, atime_nanos);
    let mtime = filetime::FileTime::from_unix_time(mtime_secs, mtime_nanos);
    spawn_blocking(move || {
      filetime::set_symlink_file_times(path, atime, mtime).map_err(Into::into)
    })
    .await?
  }
  fn lchown_sync(
    &self,
    path: &CheckedPath,
    uid: Option<u32>,
    gid: Option<u32>,
  ) -> FsResult<()> {
    lchown(path, uid, gid)
  }
  async fn lchown_async(
    &self,
    path: CheckedPathBuf,
    uid: Option<u32>,
    gid: Option<u32>,
  ) -> FsResult<()> {
    spawn_blocking(move || lchown(&path, uid, gid)).await?
  }
  fn write_file_sync(
    &self,
    path: &CheckedPath,
    options: OpenOptions,
    data: &[u8],
  ) -> FsResult<()> {
    let mut file = open_with_checked_path(options, path)?;
    // Apply explicit permissions (if requested) before writing.
    #[cfg(unix)]
    if let Some(mode) = options.mode {
      use std::os::unix::fs::PermissionsExt;
      file.set_permissions(fs::Permissions::from_mode(mode))?;
    }
    file.write_all(data)?;
    Ok(())
  }
  async fn write_file_async<'a>(
    &'a self,
    path: CheckedPathBuf,
    options: OpenOptions,
    data: Vec<u8>,
  ) -> FsResult<()> {
    // NOTE(review): the open happens on the current thread; only chmod and
    // the write itself are offloaded to the blocking pool.
    let mut file = open_with_checked_path(options, &path.as_checked_path())?;
    spawn_blocking(move || {
      #[cfg(unix)]
      if let Some(mode) = options.mode {
        use std::os::unix::fs::PermissionsExt;
        file.set_permissions(fs::Permissions::from_mode(mode))?;
      }
      file.write_all(&data)?;
      Ok(())
    })
    .await?
  }
  fn read_file_sync(
    &self,
    path: &CheckedPath,
    options: OpenOptions,
  ) -> FsResult<Cow<'static, [u8]>> {
    let mut file = open_with_checked_path(options, path)?;
    let mut buf = Vec::new();
    file.read_to_end(&mut buf)?;
    Ok(Cow::Owned(buf))
  }
  async fn read_file_async<'a>(
    &'a self,
    path: CheckedPathBuf,
    options: OpenOptions,
  ) -> FsResult<Cow<'static, [u8]>> {
    let mut file = open_with_checked_path(options, &path.as_checked_path())?;
    spawn_blocking(move || {
      let mut buf = Vec::new();
      file.read_to_end(&mut buf)?;
      Ok::<_, FsError>(Cow::Owned(buf))
    })
    .await?
  }
}
/// Creates a directory at `path`, optionally recursively, applying the
/// given unix permission bits when provided.
fn mkdir(path: &Path, recursive: bool, mode: Option<u32>) -> FsResult<()> {
  let mut builder = fs::DirBuilder::new();
  builder.recursive(recursive);
  #[cfg(unix)]
  {
    use std::os::unix::fs::DirBuilderExt;
    if let Some(mode) = mode {
      builder.mode(mode);
    }
  }
  // `mode` has no effect off unix; consume it to avoid a warning.
  #[cfg(not(unix))]
  {
    _ = mode;
  }
  builder.create(path).map_err(Into::into)
}
/// Sets the permission bits of `path` on Unix (follows symlinks, unlike
/// the Windows variant below which resolves them manually to match).
#[cfg(unix)]
fn chmod(path: &Path, mode: u32) -> FsResult<()> {
  use std::os::unix::fs::PermissionsExt;
  fs::set_permissions(path, fs::Permissions::from_mode(mode))?;
  Ok(())
}
/// Sets the permission bits of `path` on Windows via `libc::wchmod`.
#[cfg(not(unix))]
fn chmod(path: &Path, mode: i32) -> FsResult<()> {
  use std::os::windows::ffi::OsStrExt;
  // Windows chmod doesn't follow symlinks unlike the UNIX counterpart,
  // so we have to resolve the symlink manually
  let resolved_path = realpath(path)?;
  // Encode as a NUL-terminated UTF-16 string for the wide-char C API.
  let wchar_path = resolved_path
    .as_os_str()
    .encode_wide()
    .chain(std::iter::once(0))
    .collect::<Vec<_>>();
  // SAFETY: `wchar_path` is a NUL-terminated wide string that outlives the call.
  let result = unsafe { libc::wchmod(wchar_path.as_ptr(), mode) };
  if result != 0 {
    return Err(io::Error::last_os_error().into());
  }
  Ok(())
}
/// Changes the owner and/or group of `path` on Unix.
#[cfg(unix)]
fn chown(path: &Path, uid: Option<u32>, gid: Option<u32>) -> FsResult<()> {
  use nix::unistd::Gid;
  use nix::unistd::Uid;
  // Fully qualify the nix call to avoid shadowing this function's own name.
  nix::unistd::chown(path, uid.map(Uid::from_raw), gid.map(Gid::from_raw))
    .map_err(|errno| io::Error::from_raw_os_error(errno as i32).into())
}
// TODO: implement chown for Windows
/// `chown` is unimplemented on non-Unix targets; always fails with
/// `FsError::NotSupported`.
#[cfg(not(unix))]
fn chown(_path: &Path, _uid: Option<u32>, _gid: Option<u32>) -> FsResult<()> {
  Err(FsError::NotSupported)
}
/// macOS-only: changes the permission bits of a symlink itself.
///
/// Opening with `O_SYMLINK` yields a descriptor for the link rather than
/// its target; the mode is then applied through that descriptor.
#[cfg(target_os = "macos")]
fn lchmod(path: &Path, mode: u32) -> FsResult<()> {
  use std::os::unix::fs::OpenOptionsExt;
  use std::os::unix::fs::PermissionsExt;
  let link = fs::OpenOptions::new()
    .write(true)
    .custom_flags(libc::O_SYMLINK)
    .open(path)?;
  link.set_permissions(fs::Permissions::from_mode(mode))?;
  Ok(())
}
/// `lchmod` is only available on macOS (via `O_SYMLINK`); every other
/// target reports `FsError::NotSupported`.
#[cfg(not(target_os = "macos"))]
fn lchmod(_path: &Path, _mode: u32) -> FsResult<()> {
  Err(FsError::NotSupported)
}
/// Changes the owner/group of a symlink itself via `libc::lchown` (does not
/// follow the link). `None` leaves the corresponding id unchanged.
#[cfg(unix)]
fn lchown(path: &Path, uid: Option<u32>, gid: Option<u32>) -> FsResult<()> {
  use std::os::unix::ffi::OsStrExt;
  // A path containing an interior NUL byte is invalid input, not a bug:
  // report it as an error instead of panicking (matches how `copy_file`
  // handles its `CString` conversions).
  let c_path = std::ffi::CString::new(path.as_os_str().as_bytes())
    .map_err(|err| io::Error::new(io::ErrorKind::InvalidInput, err))?;
  // -1 = leave unchanged
  let uid = uid
    .map(|uid| uid as libc::uid_t)
    .unwrap_or(-1i32 as libc::uid_t);
  let gid = gid
    .map(|gid| gid as libc::gid_t)
    .unwrap_or(-1i32 as libc::gid_t);
  // SAFETY: `c_path` is a valid C string and lives throughout this function call.
  let result = unsafe { libc::lchown(c_path.as_ptr(), uid, gid) };
  if result != 0 {
    return Err(io::Error::last_os_error().into());
  }
  Ok(())
}
// TODO: implement lchown for Windows
/// `lchown` is unimplemented on non-Unix targets; always fails with
/// `FsError::NotSupported`.
#[cfg(not(unix))]
fn lchown(_path: &Path, _uid: Option<u32>, _gid: Option<u32>) -> FsResult<()> {
  Err(FsError::NotSupported)
}
/// Removes the file, directory, or symlink at `path`. Directories are only
/// removed with their contents when `recursive` is set.
fn remove(path: &Path, recursive: bool) -> FsResult<()> {
  // TODO: this is racy. This should open fds, and then `unlink` those.
  let metadata = fs::symlink_metadata(path)?;
  let file_type = metadata.file_type();
  let res = if file_type.is_dir() {
    if recursive {
      fs::remove_dir_all(path)
    } else {
      fs::remove_dir(path)
    }
  } else if file_type.is_symlink() {
    #[cfg(unix)]
    {
      fs::remove_file(path)
    }
    #[cfg(not(unix))]
    {
      // On Windows a symlink carrying the directory attribute has to be
      // removed with `remove_dir`, so inspect the attribute bit first.
      use std::os::windows::prelude::MetadataExt;
      use winapi::um::winnt::FILE_ATTRIBUTE_DIRECTORY;
      if metadata.file_attributes() & FILE_ATTRIBUTE_DIRECTORY != 0 {
        fs::remove_dir(path)
      } else {
        fs::remove_file(path)
      }
    }
  } else {
    fs::remove_file(path)
  };
  res.map_err(Into::into)
}
/// Copies a single file from `from` to `to`.
///
/// On macOS, files larger than 128 KiB are cloned with `clonefile()` and
/// smaller files are copied with a buffered read/write loop; any failure
/// falls through to `std::fs::copy`. Other targets use `std::fs::copy`
/// directly.
fn copy_file(from: &Path, to: &Path) -> FsResult<()> {
  #[cfg(target_os = "macos")]
  {
    use std::ffi::CString;
    use std::os::unix::fs::OpenOptionsExt;
    use std::os::unix::fs::PermissionsExt;

    use libc::clonefile;
    use libc::stat;
    use libc::unlink;

    let from_str = CString::new(from.as_os_str().as_encoded_bytes())
      .map_err(|err| io::Error::new(io::ErrorKind::InvalidInput, err))?;
    let to_str = CString::new(to.as_os_str().as_encoded_bytes())
      .map_err(|err| io::Error::new(io::ErrorKind::InvalidInput, err))?;

    // SAFETY: `from` and `to` are valid C strings.
    // std::fs::copy does open() + fcopyfile() on macOS. We try to use
    // clonefile() instead, which is more efficient.
    unsafe {
      let mut st = std::mem::zeroed();
      let ret = stat(from_str.as_ptr(), &mut st);
      if ret != 0 {
        return Err(io::Error::last_os_error().into());
      }

      if st.st_size > 128 * 1024 {
        // Try unlink. If it fails, we are going to try clonefile() anyway.
        let _ = unlink(to_str.as_ptr());
        // Matches rust stdlib behavior for io::copy.
        // https://github.com/rust-lang/rust/blob/3fdd578d72a24d4efc2fe2ad18eec3b6ba72271e/library/std/src/sys/unix/fs.rs#L1613-L1616
        if clonefile(from_str.as_ptr(), to_str.as_ptr(), 0) == 0 {
          return Ok(());
        }
      } else {
        // Do a regular copy. fcopyfile() is an overkill for < 128KB
        // files.
        let mut buf = [0u8; 128 * 1024];
        let mut from_file = fs::File::open(from)?;
        let perm = from_file.metadata()?.permissions();

        let mut to_file = fs::OpenOptions::new()
          // create the file with the correct mode right away
          .mode(perm.mode())
          .write(true)
          .create(true)
          .truncate(true)
          .open(to)?;
        let writer_metadata = to_file.metadata()?;
        if writer_metadata.is_file() {
          // Set the correct file permissions, in case the file already existed.
          // Don't set the permissions on already existing non-files like
          // pipes/FIFOs or device nodes.
          to_file.set_permissions(perm)?;
        }
        loop {
          let nread = from_file.read(&mut buf)?;
          if nread == 0 {
            break;
          }
          to_file.write_all(&buf[..nread])?;
        }
        return Ok(());
      }
    }

    // clonefile() failed, fall back to std::fs::copy().
  }

  fs::copy(from, to)?;
  Ok(())
}
/// Recursively copies `from` to `to`: directories (entries copied in
/// parallel via rayon), symlinks (re-created pointing at the same target),
/// and regular files (via `copy_file`). Sockets are rejected on Unix.
fn cp(from: &Path, to: &Path) -> FsResult<()> {
  /// Copies one entry whose `symlink_metadata` has already been fetched.
  fn cp_(source_meta: fs::Metadata, from: &Path, to: &Path) -> FsResult<()> {
    use rayon::prelude::IntoParallelIterator;
    use rayon::prelude::ParallelIterator;
    let ty = source_meta.file_type();
    if ty.is_dir() {
      #[allow(unused_mut)]
      let mut builder = fs::DirBuilder::new();
      #[cfg(unix)]
      {
        use std::os::unix::fs::DirBuilderExt;
        use std::os::unix::fs::PermissionsExt;
        // Recreate the source directory's permission bits on the copy.
        builder.mode(fs::symlink_metadata(from)?.permissions().mode());
      }
      // The target directory might already exists. If it does,
      // continue copying all entries instead of aborting.
      if let Err(err) = builder.create(to)
        && err.kind() != ErrorKind::AlreadyExists
      {
        return Err(FsError::Io(err));
      }
      let mut entries: Vec<_> = fs::read_dir(from)?
        .map(|res| res.map(|e| e.file_name()))
        .collect::<Result<_, _>>()?;
      entries.shrink_to_fit();
      // Copy the directory's entries in parallel; any entry failure is
      // wrapped with a message naming both concrete paths.
      entries
        .into_par_iter()
        .map(|file_name| {
          cp_(
            fs::symlink_metadata(from.join(&file_name)).unwrap(),
            &from.join(&file_name),
            &to.join(&file_name),
          )
          .map_err(|err| {
            io::Error::new(
              err.kind(),
              format!(
                "failed to copy '{}' to '{}': {:?}",
                from.join(&file_name).display(),
                to.join(&file_name).display(),
                err
              ),
            )
          })
        })
        .collect::<Result<Vec<_>, _>>()?;
      return Ok(());
    } else if ty.is_symlink() {
      // Re-create the link itself rather than copying what it points at.
      let from = std::fs::read_link(from)?;
      #[cfg(unix)]
      std::os::unix::fs::symlink(from, to)?;
      #[cfg(windows)]
      std::os::windows::fs::symlink_file(from, to)?;
      return Ok(());
    }
    #[cfg(unix)]
    {
      use std::os::unix::fs::FileTypeExt;
      if ty.is_socket() {
        return Err(
          io::Error::new(
            io::ErrorKind::InvalidInput,
            "sockets cannot be copied",
          )
          .into(),
        );
      }
    }
    // Ensure parent destination directory exists
    if let Some(parent) = to.parent() {
      fs::create_dir_all(parent)?;
    }
    copy_file(from, to)
  }
  #[cfg(target_os = "macos")]
  {
    // Just clonefile()
    use std::ffi::CString;
    use std::os::unix::ffi::OsStrExt;

    use libc::clonefile;
    use libc::unlink;
    let from_str = CString::new(from.as_os_str().as_bytes())
      .map_err(|err| io::Error::new(io::ErrorKind::InvalidInput, err))?;
    let to_str = CString::new(to.as_os_str().as_bytes())
      .map_err(|err| io::Error::new(io::ErrorKind::InvalidInput, err))?;

    // SAFETY: `from` and `to` are valid C strings.
    unsafe {
      // Try unlink. If it fails, we are going to try clonefile() anyway.
      let _ = unlink(to_str.as_ptr());
      if clonefile(from_str.as_ptr(), to_str.as_ptr(), 0) == 0 {
        return Ok(());
      }
    }
  }
  let source_meta = fs::symlink_metadata(from)?;
  /// Best-effort check whether two metadata records describe the same file.
  // NOTE(review): the Unix arm compares inode numbers only, without the
  // device id — two files on different volumes could compare equal; confirm
  // this is intended.
  #[inline]
  fn is_identical(
    source_meta: &fs::Metadata,
    dest_meta: &fs::Metadata,
  ) -> bool {
    #[cfg(unix)]
    {
      use std::os::unix::fs::MetadataExt;
      source_meta.ino() == dest_meta.ino()
    }
    #[cfg(windows)]
    {
      use std::os::windows::fs::MetadataExt;
      // https://learn.microsoft.com/en-us/windows/win32/api/fileapi/ns-fileapi-by_handle_file_information
      //
      // The identifier (low and high parts) and the volume serial number uniquely identify a file on a single computer.
      // To determine whether two open handles represent the same file, combine the identifier and the volume serial
      // number for each file and compare them.
      //
      // Use this code once file_index() and volume_serial_number() is stabalized
      // See: https://github.com/rust-lang/rust/issues/63010
      //
      // source_meta.file_index() == dest_meta.file_index()
      //   && source_meta.volume_serial_number()
      //     == dest_meta.volume_serial_number()
      source_meta.last_write_time() == dest_meta.last_write_time()
        && source_meta.creation_time() == dest_meta.creation_time()
    }
  }
  if let Ok(m) = fs::metadata(to)
    && m.is_dir()
  {
    // Only target sub dir when source is not a dir itself
    if let Ok(from_meta) = fs::metadata(from)
      && !from_meta.is_dir()
    {
      return cp_(
        source_meta,
        from,
        &to.join(from.file_name().ok_or_else(|| {
          io::Error::new(
            io::ErrorKind::InvalidInput,
            "the source path is not a valid file",
          )
        })?),
      );
    }
  }
  if let Ok(m) = fs::symlink_metadata(to)
    && is_identical(&source_meta, &m)
  {
    return Err(
      io::Error::new(
        io::ErrorKind::InvalidInput,
        "the source and destination are the same file",
      )
      .into(),
    );
  }
  cp_(source_meta, from, to)
}
/// Queries metadata for `path`, following symlinks.
#[cfg(not(windows))]
fn stat(path: &Path) -> FsResult<FsStat> {
  Ok(FsStat::from_std(fs::metadata(path)?))
}
/// Queries metadata for `path` on Windows, following symlinks (contrast
/// with `lstat`, which adds `FILE_FLAG_OPEN_REPARSE_POINT`).
#[cfg(windows)]
fn stat(path: &Path) -> FsResult<FsStat> {
  use std::os::windows::fs::OpenOptionsExt;
  use winapi::um::winbase::FILE_FLAG_BACKUP_SEMANTICS;
  let mut opts = fs::OpenOptions::new();
  opts.access_mode(0); // no read or write
  // FILE_FLAG_BACKUP_SEMANTICS allows opening directory handles as well.
  opts.custom_flags(FILE_FLAG_BACKUP_SEMANTICS);
  let file = opts.open(path)?;
  let metadata = file.metadata()?;
  let mut fsstat = FsStat::from_std(metadata);
  // Fill in extra fields std's metadata does not expose.
  deno_io::stat_extra(&file, &mut fsstat)?;
  Ok(fsstat)
}
/// Queries metadata for `path` without following a trailing symlink.
#[cfg(not(windows))]
fn lstat(path: &Path) -> FsResult<FsStat> {
  Ok(FsStat::from_std(fs::symlink_metadata(path)?))
}
/// Queries metadata for `path` on Windows without following a trailing
/// symlink: `FILE_FLAG_OPEN_REPARSE_POINT` opens the link itself.
#[cfg(windows)]
fn lstat(path: &Path) -> FsResult<FsStat> {
  use std::os::windows::fs::OpenOptionsExt;
  use winapi::um::winbase::FILE_FLAG_BACKUP_SEMANTICS;
  use winapi::um::winbase::FILE_FLAG_OPEN_REPARSE_POINT;
  let mut opts = fs::OpenOptions::new();
  opts.access_mode(0); // no read or write
  // FILE_FLAG_BACKUP_SEMANTICS allows opening directory handles as well.
  opts.custom_flags(FILE_FLAG_BACKUP_SEMANTICS | FILE_FLAG_OPEN_REPARSE_POINT);
  let file = opts.open(path)?;
  let metadata = file.metadata()?;
  let mut fsstat = FsStat::from_std(metadata);
  // Fill in extra fields std's metadata does not expose.
  deno_io::stat_extra(&file, &mut fsstat)?;
  Ok(fsstat)
}
/// Returns whether something exists at `path`; any error is treated as
/// "does not exist".
fn exists(path: &Path) -> bool {
  #[cfg(unix)]
  {
    // access(2) with F_OK is a pure existence probe.
    nix::unistd::access(path, nix::unistd::AccessFlags::F_OK).is_ok()
  }
  #[cfg(windows)]
  {
    fs::exists(path).unwrap_or(false)
  }
}
/// Canonicalizes `path`, stripping the Windows verbatim (`\\?\`) prefix
/// from the result.
fn realpath(path: &Path) -> FsResult<PathBuf> {
  let canonical = path.canonicalize()?;
  Ok(deno_path_util::strip_unc_prefix(canonical))
}
/// Lists the entries of the directory at `path`.
///
/// Entries whose names are not valid UTF-8, or that error while being read,
/// are silently skipped; a failed `file_type()` lookup reports all three
/// type flags as `false`.
fn read_dir(path: &Path) -> FsResult<Vec<FsDirEntry>> {
  let entries = fs::read_dir(path)?
    .filter_map(|entry| {
      let entry = entry.ok()?;
      let name = entry.file_name().into_string().ok()?;
      // Query the type once and probe it through a shared helper.
      let file_type = entry.file_type();
      let has = |check: fn(&fs::FileType) -> bool| -> bool {
        file_type.as_ref().map(check).unwrap_or(false)
      };
      Some(FsDirEntry {
        name,
        is_file: has(fs::FileType::is_file),
        is_directory: has(fs::FileType::is_dir),
        is_symlink: has(fs::FileType::is_symlink),
      })
    })
    .collect();
  Ok(entries)
}
/// Creates a symlink from `newpath` to `oldpath`. On Unix the link type is
/// irrelevant, so `_file_type` is ignored.
#[cfg(not(windows))]
fn symlink(
  oldpath: &Path,
  newpath: &Path,
  _file_type: Option<FsFileType>,
) -> FsResult<()> {
  Ok(std::os::unix::fs::symlink(oldpath, newpath)?)
}
/// Creates a symlink on Windows, where the link kind (file, directory, or
/// junction) must be chosen at creation time.
///
/// When `file_type` is not provided it is inferred from the existing
/// target; a missing target or a target that is neither file nor directory
/// is an `InvalidInput` error.
#[cfg(windows)]
fn symlink(
  oldpath: &Path,
  newpath: &Path,
  file_type: Option<FsFileType>,
) -> FsResult<()> {
  let file_type = match file_type {
    Some(file_type) => file_type,
    None => {
      // No explicit kind: derive it from the target's current metadata.
      let old_meta = fs::metadata(oldpath);
      match old_meta {
        Ok(metadata) => {
          if metadata.is_file() {
            FsFileType::File
          } else if metadata.is_dir() {
            FsFileType::Directory
          } else {
            return Err(FsError::Io(io::Error::new(
              io::ErrorKind::InvalidInput,
              "On Windows the target must be a file or directory",
            )));
          }
        }
        Err(err) if err.kind() == io::ErrorKind::NotFound => {
          return Err(FsError::Io(io::Error::new(
            io::ErrorKind::InvalidInput,
            "On Windows an `options` argument is required if the target does not exist",
          )));
        }
        Err(err) => return Err(err.into()),
      }
    }
  };
  match file_type {
    FsFileType::File => {
      std::os::windows::fs::symlink_file(oldpath, newpath)?;
    }
    FsFileType::Directory => {
      std::os::windows::fs::symlink_dir(oldpath, newpath)?;
    }
    FsFileType::Junction => {
      junction::create(oldpath, newpath)?;
    }
  };
  Ok(())
}
/// Truncates (or extends) the file at `path` to exactly `len` bytes.
fn truncate(path: &Path, len: u64) -> FsResult<()> {
  fs::OpenOptions::new().write(true).open(path)?.set_len(len)?;
  Ok(())
}
/// Translates our `OpenOptions` into `std::fs::OpenOptions`.
///
/// `mode` and `custom_flags` only take effect on Unix; on other targets
/// they are accepted and ignored.
fn open_options(options: OpenOptions) -> fs::OpenOptions {
  let mut open_options = fs::OpenOptions::new();
  if let Some(mode) = options.mode {
    // mode only used if creating the file on Unix
    // if not specified, defaults to 0o666
    #[cfg(unix)]
    {
      use std::os::unix::fs::OpenOptionsExt;
      // Keep only the permission bits; 0o777 masks off anything above them.
      open_options.mode(mode & 0o777);
    }
    #[cfg(not(unix))]
    let _ = mode; // avoid unused warning
  }
  if let Some(custom_flags) = options.custom_flags {
    #[cfg(unix)]
    {
      use std::os::unix::fs::OpenOptionsExt;
      open_options.custom_flags(custom_flags);
    }
    #[cfg(not(unix))]
    let _ = custom_flags;
  }
  open_options.read(options.read);
  open_options.create(options.create);
  open_options.write(options.write);
  open_options.truncate(options.truncate);
  open_options.append(options.append);
  open_options.create_new(options.create_new);
  open_options
}
/// Opens the permission-checked `path` with `options`, applying the extra
/// platform flags from `open_options_for_checked_path`.
#[inline(always)]
pub fn open_with_checked_path(
  options: OpenOptions,
  path: &CheckedPath,
) -> FsResult<std::fs::File> {
  Ok(open_options_for_checked_path(options, path).open(path)?)
}
/// Builds `std::fs::OpenOptions` for a permission-checked path.
///
/// On Windows, `FILE_FLAG_BACKUP_SEMANTICS` is always added so directories
/// can be opened. On Unix, if the checked path was canonicalized,
/// `O_NOFOLLOW` is added so the open fails when the final path component is
/// a symlink.
#[inline(always)]
pub fn open_options_for_checked_path(
  options: OpenOptions,
  path: &CheckedPath,
) -> fs::OpenOptions {
  let mut opts: fs::OpenOptions = open_options(options);
  #[cfg(windows)]
  {
    _ = path; // not used on windows
    // allow opening directories
    use std::os::windows::fs::OpenOptionsExt;
    opts.custom_flags(winapi::um::winbase::FILE_FLAG_BACKUP_SEMANTICS);
  }
  #[cfg(unix)]
  if path.canonicalized() {
    // Don't follow symlinks on open -- we must always pass fully-resolved files
    // with the exception of /proc/ which is too special, and /dev/std* which might point to
    // proc.
    use std::os::unix::fs::OpenOptionsExt;
    // O_NOFOLLOW must be OR-ed into any caller-supplied flags, not replace them.
    match options.custom_flags {
      Some(flags) => {
        opts.custom_flags(flags | libc::O_NOFOLLOW);
      }
      None => {
        opts.custom_flags(libc::O_NOFOLLOW);
      }
    }
  }
  opts
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/ext/fs/ops.rs | ext/fs/ops.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::borrow::Cow;
use std::cell::RefCell;
use std::error::Error;
use std::fmt::Formatter;
use std::io;
use std::io::SeekFrom;
use std::path::Path;
use std::path::PathBuf;
use std::path::StripPrefixError;
use std::rc::Rc;
use boxed_error::Boxed;
use deno_core::CancelFuture;
use deno_core::CancelHandle;
use deno_core::FastString;
use deno_core::JsBuffer;
use deno_core::OpState;
use deno_core::ResourceId;
use deno_core::ToJsBuffer;
use deno_core::error::ResourceError;
use deno_core::op2;
use deno_error::JsErrorBox;
use deno_io::fs::FileResource;
use deno_io::fs::FsError;
use deno_io::fs::FsStat;
use deno_permissions::CheckedPath;
use deno_permissions::CheckedPathBuf;
use deno_permissions::OpenAccessKind;
use deno_permissions::PathWithRequested;
use deno_permissions::PermissionCheckError;
use rand::Rng;
use rand::rngs::ThreadRng;
use rand::thread_rng;
use serde::Deserialize;
use serde::Serialize;
use crate::OpenOptions;
use crate::interface::FileSystemRc;
use crate::interface::FsDirEntry;
use crate::interface::FsFileType;
/// Boxed error type returned by the fs ops; see [`FsOpsErrorKind`] for the
/// individual cases. Boxing keeps `Result`s returned by the ops small.
#[derive(Debug, Boxed, deno_error::JsError)]
pub struct FsOpsError(pub Box<FsOpsErrorKind>);
/// Concrete error cases surfaced by the fs ops. The `#[class(...)]`
/// attributes select the JavaScript error class each variant maps to.
#[derive(Debug, thiserror::Error, deno_error::JsError)]
pub enum FsOpsErrorKind {
  #[class(inherit)]
  #[error("{0}")]
  Io(#[source] std::io::Error),
  #[class(inherit)]
  #[error("{0}")]
  OperationError(#[source] OperationError),
  #[class(inherit)]
  #[error(transparent)]
  Permission(#[from] PermissionCheckError),
  #[class(inherit)]
  #[error(transparent)]
  Resource(#[from] ResourceError),
  #[class("InvalidData")]
  #[error("File name or path {0:?} is not valid UTF-8")]
  InvalidUtf8(std::ffi::OsString),
  #[class(generic)]
  #[error("{0}")]
  StripPrefix(#[from] StripPrefixError),
  #[class(inherit)]
  #[error("{0}")]
  Canceled(#[from] deno_core::Canceled),
  #[class(type)]
  #[error("Invalid seek mode: {0}")]
  InvalidSeekMode(i32),
  #[class(generic)]
  #[error("Invalid control character in prefix or suffix: {0:?}")]
  InvalidControlCharacter(String),
  #[class(generic)]
  #[error("Invalid character in prefix or suffix: {0:?}")]
  InvalidCharacter(String),
  // Only constructed on Windows (suffix validation differs there).
  #[cfg(windows)]
  #[class(generic)]
  #[error("Invalid trailing character in suffix")]
  InvalidTrailingCharacter,
  #[class(inherit)]
  #[error(transparent)]
  Other(JsErrorBox),
}
impl From<FsError> for FsOpsError {
  /// Lifts a low-level [`FsError`] into the op-level error type,
  /// preserving the underlying cause where one exists.
  fn from(err: FsError) -> Self {
    let kind = match err {
      FsError::Io(io_err) => FsOpsErrorKind::Io(io_err),
      FsError::FileBusy => {
        FsOpsErrorKind::Resource(ResourceError::Unavailable)
      }
      FsError::NotSupported => {
        FsOpsErrorKind::Other(JsErrorBox::not_supported())
      }
      FsError::PermissionCheck(perm_err) => {
        FsOpsErrorKind::Permission(perm_err)
      }
    };
    kind.into_box()
  }
}
/// Derives the permission access kind implied by a set of open options:
/// append counts as write, and anything that is not a write is a read.
fn open_options_to_access_kind(open_options: &OpenOptions) -> OpenAccessKind {
  let wants_write = open_options.write || open_options.append;
  if open_options.read && wants_write {
    OpenAccessKind::ReadWrite
  } else if wants_write {
    OpenAccessKind::Write
  } else {
    OpenAccessKind::Read
  }
}
/// Returns the current working directory as a UTF-8 string.
#[op2]
#[string]
pub fn op_fs_cwd(state: &mut OpState) -> Result<String, FsOpsError> {
  let fs = state.borrow::<FileSystemRc>();
  let cwd = fs.cwd()?;
  Ok(path_into_string(cwd.into_os_string())?)
}
/// Changes the working directory after a read permission check on the
/// target directory.
#[op2(fast, stack_trace)]
pub fn op_fs_chdir(
  state: &mut OpState,
  #[string] directory: &str,
) -> Result<(), FsOpsError> {
  let d = state
    .borrow_mut::<deno_permissions::PermissionsContainer>()
    .check_open(
      Cow::Borrowed(Path::new(directory)),
      OpenAccessKind::ReadNoFollow,
      Some("Deno.chdir()"),
    )?;
  state
    .borrow::<FileSystemRc>()
    .chdir(&d)
    .context_path("chdir", &d)
}
/// Gets or sets the process umask via the filesystem backend, returning
/// the previous value.
#[op2]
pub fn op_fs_umask(
  state: &mut OpState,
  mask: Option<u32>,
) -> Result<u32, FsOpsError> {
  // (A stray empty `where` clause — leftover from a removed generic — was
  // dropped from the signature; behavior is unchanged.)
  state.borrow::<FileSystemRc>().umask(mask).context("umask")
}
/// Open options as deserialized from JS (camelCase keys); every field
/// defaults to `false`/`None` when omitted.
#[derive(Deserialize, Default, Debug, Clone, Copy)]
#[serde(rename_all = "camelCase")]
#[serde(default)]
struct FsOpenOptions {
  read: bool,
  write: bool,
  create: bool,
  truncate: bool,
  append: bool,
  create_new: bool,
  // Unix permission bits to use when the open creates the file.
  mode: Option<u32>,
}
impl From<FsOpenOptions> for OpenOptions {
fn from(options: FsOpenOptions) -> Self {
OpenOptions {
read: options.read,
write: options.write,
create: options.create,
truncate: options.truncate,
append: options.append,
create_new: options.create_new,
custom_flags: None,
mode: options.mode,
}
}
}
/// Opens a file synchronously and returns its resource id. With no options
/// the file is opened read-only; the permission check's access kind is
/// derived from the requested options.
#[op2(stack_trace)]
#[smi]
pub fn op_fs_open_sync(
  state: &mut OpState,
  #[string] path: &str,
  #[serde] options: Option<FsOpenOptions>,
) -> Result<ResourceId, FsOpsError> {
  let options = match options {
    Some(options) => OpenOptions::from(options),
    None => OpenOptions::read(),
  };
  let path = Path::new(path);
  let fs = state.borrow::<FileSystemRc>().clone();
  let path = state
    .borrow_mut::<deno_permissions::PermissionsContainer>()
    .check_open(
      Cow::Borrowed(path),
      open_options_to_access_kind(&options),
      Some("Deno.openSync()"),
    )?;
  let file = fs.open_sync(&path, options).context_path("open", &path)?;
  let rid = state
    .resource_table
    .add(FileResource::new(file, "fsFile".to_string()));
  Ok(rid)
}
/// Async variant of `op_fs_open_sync`. The `OpState` borrow is scoped so it
/// is released before awaiting the open.
#[op2(async, stack_trace)]
#[smi]
pub async fn op_fs_open_async(
  state: Rc<RefCell<OpState>>,
  #[string] path: String,
  #[serde] options: Option<FsOpenOptions>,
) -> Result<ResourceId, FsOpsError> {
  let options = match options {
    Some(options) => OpenOptions::from(options),
    None => OpenOptions::read(),
  };
  let path = PathBuf::from(path);
  let (fs, path) = {
    let mut state = state.borrow_mut();
    (
      state.borrow::<FileSystemRc>().clone(),
      state
        .borrow_mut::<deno_permissions::PermissionsContainer>()
        .check_open(
          Cow::Owned(path),
          open_options_to_access_kind(&options),
          Some("Deno.open()"),
        )?,
    )
  };
  let file = fs
    .open_async(path.as_owned(), options)
    .await
    .context_path("open", &path)?;
  let rid = state
    .borrow_mut()
    .resource_table
    .add(FileResource::new(file, "fsFile".to_string()));
  Ok(rid)
}
/// Creates a directory synchronously. `mode` defaults to 0o777 and is
/// masked to the permission bits.
#[op2(stack_trace)]
pub fn op_fs_mkdir_sync(
  state: &mut OpState,
  #[string] path: &str,
  recursive: bool,
  mode: Option<u32>,
) -> Result<(), FsOpsError> {
  let mode = mode.unwrap_or(0o777) & 0o777;
  let path = state
    .borrow_mut::<deno_permissions::PermissionsContainer>()
    .check_open(
      Cow::Borrowed(Path::new(path)),
      OpenAccessKind::WriteNoFollow,
      Some("Deno.mkdirSync()"),
    )?;
  let fs = state.borrow::<FileSystemRc>();
  fs.mkdir_sync(&path, recursive, Some(mode))
    .context_path("mkdir", &path)?;
  Ok(())
}
/// Async variant of `op_fs_mkdir_sync`; the `OpState` borrow is released
/// before awaiting.
#[op2(async, stack_trace)]
pub async fn op_fs_mkdir_async(
  state: Rc<RefCell<OpState>>,
  #[string] path: String,
  recursive: bool,
  mode: Option<u32>,
) -> Result<(), FsOpsError> {
  let mode = mode.unwrap_or(0o777) & 0o777;
  let (fs, path) = {
    let mut state = state.borrow_mut();
    let path = state
      .borrow_mut::<deno_permissions::PermissionsContainer>()
      .check_open(
        Cow::Owned(PathBuf::from(path)),
        OpenAccessKind::WriteNoFollow,
        Some("Deno.mkdir()"),
      )?;
    (state.borrow::<FileSystemRc>().clone(), path)
  };
  fs.mkdir_async(path.as_owned(), recursive, Some(mode))
    .await
    .context_path("mkdir", &path)?;
  Ok(())
}
/// Changes file permissions synchronously (Unix: `mode` is the `u32`
/// permission bits).
#[cfg(unix)]
#[op2(fast, stack_trace)]
pub fn op_fs_chmod_sync(
  state: &mut OpState,
  #[string] path: &str,
  mode: u32,
) -> Result<(), FsOpsError> {
  let path = state
    .borrow_mut::<deno_permissions::PermissionsContainer>()
    .check_open(
      Cow::Borrowed(Path::new(path)),
      OpenAccessKind::WriteNoFollow,
      Some("Deno.chmodSync()"),
    )?;
  let fs = state.borrow::<FileSystemRc>();
  fs.chmod_sync(&path, mode).context_path("chmod", &path)?;
  Ok(())
}
/// Changes file permissions synchronously (non-Unix: `mode` is `i32`, the
/// type the Windows `wchmod` backend takes).
#[cfg(not(unix))]
#[op2(fast, stack_trace)]
pub fn op_fs_chmod_sync(
  state: &mut OpState,
  #[string] path: &str,
  mode: i32,
) -> Result<(), FsOpsError> {
  let path = state
    .borrow_mut::<deno_permissions::PermissionsContainer>()
    .check_open(
      Cow::Borrowed(Path::new(path)),
      OpenAccessKind::WriteNoFollow,
      Some("Deno.chmodSync()"),
    )?;
  let fs = state.borrow::<FileSystemRc>();
  fs.chmod_sync(&path, mode).context_path("chmod", &path)?;
  Ok(())
}
/// Async variant of `op_fs_chmod_sync` (Unix).
#[cfg(unix)]
#[op2(async, stack_trace)]
pub async fn op_fs_chmod_async(
  state: Rc<RefCell<OpState>>,
  #[string] path: String,
  mode: u32,
) -> Result<(), FsOpsError> {
  let (fs, path) = {
    let mut state = state.borrow_mut();
    let path = state
      .borrow_mut::<deno_permissions::PermissionsContainer>()
      .check_open(
        Cow::Owned(PathBuf::from(path)),
        OpenAccessKind::WriteNoFollow,
        Some("Deno.chmod()"),
      )?;
    (state.borrow::<FileSystemRc>().clone(), path)
  };
  fs.chmod_async(path.as_owned(), mode)
    .await
    .context_path("chmod", &path)?;
  Ok(())
}
/// Async variant of `op_fs_chmod_sync` (non-Unix; `mode` is `i32`).
#[cfg(not(unix))]
#[op2(async, stack_trace)]
pub async fn op_fs_chmod_async(
  state: Rc<RefCell<OpState>>,
  #[string] path: String,
  mode: i32,
) -> Result<(), FsOpsError> {
  let (fs, path) = {
    let mut state = state.borrow_mut();
    let path = state
      .borrow_mut::<deno_permissions::PermissionsContainer>()
      .check_open(
        Cow::Owned(PathBuf::from(path)),
        OpenAccessKind::WriteNoFollow,
        Some("Deno.chmod()"),
      )?;
    (state.borrow::<FileSystemRc>().clone(), path)
  };
  fs.chmod_async(path.as_owned(), mode)
    .await
    .context_path("chmod", &path)?;
  Ok(())
}
/// Changes the owner/group of a path synchronously; `None` leaves the
/// corresponding id unchanged.
#[op2(stack_trace)]
pub fn op_fs_chown_sync(
  state: &mut OpState,
  #[string] path: &str,
  uid: Option<u32>,
  gid: Option<u32>,
) -> Result<(), FsOpsError> {
  let path = state
    .borrow_mut::<deno_permissions::PermissionsContainer>()
    .check_open(
      Cow::Borrowed(Path::new(path)),
      OpenAccessKind::WriteNoFollow,
      Some("Deno.chownSync()"),
    )?;
  let fs = state.borrow::<FileSystemRc>();
  fs.chown_sync(&path, uid, gid)
    .context_path("chown", &path)?;
  Ok(())
}
/// Async variant of `op_fs_chown_sync`.
#[op2(async, stack_trace)]
pub async fn op_fs_chown_async(
  state: Rc<RefCell<OpState>>,
  #[string] path: String,
  uid: Option<u32>,
  gid: Option<u32>,
) -> Result<(), FsOpsError> {
  let (fs, path) = {
    let mut state = state.borrow_mut();
    let path = state
      .borrow_mut::<deno_permissions::PermissionsContainer>()
      .check_open(
        Cow::Owned(PathBuf::from(path)),
        OpenAccessKind::WriteNoFollow,
        Some("Deno.chown()"),
      )?;
    (state.borrow::<FileSystemRc>().clone(), path)
  };
  fs.chown_async(path.as_owned(), uid, gid)
    .await
    .context_path("chown", &path)?;
  Ok(())
}
/// Changes the permissions of an already-open file resource synchronously.
#[op2(fast, stack_trace)]
pub fn op_fs_fchmod_sync(
  state: &mut OpState,
  #[smi] rid: ResourceId,
  mode: u32,
) -> Result<(), FsOpsError> {
  FileResource::get_file(state, rid)
    .map_err(FsOpsErrorKind::Resource)?
    .chmod_sync(mode)?;
  Ok(())
}
/// Changes the permissions of an already-open file resource asynchronously.
#[op2(async, stack_trace)]
pub async fn op_fs_fchmod_async(
  state: Rc<RefCell<OpState>>,
  #[smi] rid: ResourceId,
  mode: u32,
) -> Result<(), FsOpsError> {
  // Scope the RefCell borrow so it is dropped before the await point.
  let file = {
    let state_ref = state.borrow();
    FileResource::get_file(&state_ref, rid)
      .map_err(FsOpsErrorKind::Resource)?
  };
  file.chmod_async(mode).await?;
  Ok(())
}
/// Changes the owner/group of an already-open file resource synchronously;
/// `None` leaves the corresponding id unchanged.
#[op2(stack_trace)]
pub fn op_fs_fchown_sync(
  state: &mut OpState,
  #[smi] rid: ResourceId,
  uid: Option<u32>,
  gid: Option<u32>,
) -> Result<(), FsOpsError> {
  FileResource::get_file(state, rid)
    .map_err(FsOpsErrorKind::Resource)?
    .chown_sync(uid, gid)?;
  Ok(())
}
/// Changes the owner/group of an already-open file resource asynchronously.
#[op2(async, stack_trace)]
pub async fn op_fs_fchown_async(
  state: Rc<RefCell<OpState>>,
  #[smi] rid: ResourceId,
  uid: Option<u32>,
  gid: Option<u32>,
) -> Result<(), FsOpsError> {
  // Scope the RefCell borrow so it is dropped before the await point.
  let file = {
    let state_ref = state.borrow();
    FileResource::get_file(&state_ref, rid)
      .map_err(FsOpsErrorKind::Resource)?
  };
  file.chown_async(uid, gid).await?;
  Ok(())
}
/// Removes a path synchronously. A recursive remove needs full write access
/// to the path; a non-recursive one only needs partial write access.
#[op2(fast, stack_trace)]
pub fn op_fs_remove_sync(
  state: &mut OpState,
  #[string] path: &str,
  recursive: bool,
) -> Result<(), FsOpsError> {
  let path = Cow::Borrowed(Path::new(path));
  let path = if recursive {
    state
      .borrow_mut::<deno_permissions::PermissionsContainer>()
      .check_open(
        path,
        OpenAccessKind::WriteNoFollow,
        Some("Deno.removeSync()"),
      )?
  } else {
    state
      .borrow_mut::<deno_permissions::PermissionsContainer>()
      .check_write_partial(path, "Deno.removeSync()")?
  };
  let fs = state.borrow::<FileSystemRc>();
  fs.remove_sync(&path, recursive)
    .context_path("remove", &path)?;
  Ok(())
}
/// Async variant of `op_fs_remove_sync`; same split between full and
/// partial write permission checks.
#[op2(async, stack_trace)]
pub async fn op_fs_remove_async(
  state: Rc<RefCell<OpState>>,
  #[string] path: String,
  recursive: bool,
) -> Result<(), FsOpsError> {
  let (fs, path) = {
    let mut state = state.borrow_mut();
    let path = Cow::Owned(PathBuf::from(path));
    let path = if recursive {
      state
        .borrow_mut::<deno_permissions::PermissionsContainer>()
        .check_open(
          path,
          OpenAccessKind::WriteNoFollow,
          Some("Deno.remove()"),
        )?
    } else {
      state
        .borrow_mut::<deno_permissions::PermissionsContainer>()
        .check_write_partial(path, "Deno.remove()")?
    };
    (state.borrow::<FileSystemRc>().clone(), path)
  };
  fs.remove_async(path.as_owned(), recursive)
    .await
    .context_path("remove", &path)?;
  Ok(())
}
/// Copies a file synchronously after read permission on the source and
/// write permission on the destination.
#[op2(fast, stack_trace)]
pub fn op_fs_copy_file_sync(
  state: &mut OpState,
  #[string] from: &str,
  #[string] to: &str,
) -> Result<(), FsOpsError> {
  let permissions =
    state.borrow_mut::<deno_permissions::PermissionsContainer>();
  let from = permissions.check_open(
    Cow::Borrowed(Path::new(from)),
    OpenAccessKind::Read,
    Some("Deno.copyFileSync()"),
  )?;
  let to = permissions.check_open(
    Cow::Borrowed(Path::new(to)),
    OpenAccessKind::WriteNoFollow,
    Some("Deno.copyFileSync()"),
  )?;
  let fs = state.borrow::<FileSystemRc>();
  fs.copy_file_sync(&from, &to)
    .context_two_path("copy", &from, &to)?;
  Ok(())
}
/// Async variant of `op_fs_copy_file_sync`.
#[op2(async, stack_trace)]
pub async fn op_fs_copy_file_async(
  state: Rc<RefCell<OpState>>,
  #[string] from: String,
  #[string] to: String,
) -> Result<(), FsOpsError> {
  let (fs, from, to) = {
    let mut state = state.borrow_mut();
    let permissions =
      state.borrow_mut::<deno_permissions::PermissionsContainer>();
    let from = permissions.check_open(
      Cow::Owned(PathBuf::from(from)),
      OpenAccessKind::Read,
      Some("Deno.copyFile()"),
    )?;
    let to = permissions.check_open(
      Cow::Owned(PathBuf::from(to)),
      OpenAccessKind::WriteNoFollow,
      Some("Deno.copyFile()"),
    )?;
    (state.borrow::<FileSystemRc>().clone(), from, to)
  };
  fs.copy_file_async(from.as_owned(), to.as_owned())
    .await
    .context_two_path("copy", &from, &to)?;
  Ok(())
}
/// Stats a path synchronously, serializing the result into the
/// caller-provided `u32` output buffer.
#[op2(fast, stack_trace)]
pub fn op_fs_stat_sync(
  state: &mut OpState,
  #[string] path: &str,
  #[buffer] stat_out_buf: &mut [u32],
) -> Result<(), FsOpsError> {
  let path = state
    .borrow_mut::<deno_permissions::PermissionsContainer>()
    .check_open(
      Cow::Borrowed(Path::new(path)),
      OpenAccessKind::ReadNoFollow,
      Some("Deno.statSync()"),
    )?;
  let fs = state.borrow::<FileSystemRc>();
  let stat = fs.stat_sync(&path).context_path("stat", &path)?;
  let serializable_stat = SerializableStat::from(stat);
  serializable_stat.write(stat_out_buf);
  Ok(())
}
/// Stats a path asynchronously; unlike the sync variant, the result is
/// returned serde-serialized rather than written into a buffer.
#[op2(async, stack_trace)]
#[serde]
pub async fn op_fs_stat_async(
  state: Rc<RefCell<OpState>>,
  #[string] path: String,
) -> Result<SerializableStat, FsOpsError> {
  let (fs, path) = {
    let mut state = state.borrow_mut();
    let permissions =
      state.borrow_mut::<deno_permissions::PermissionsContainer>();
    let path = permissions.check_open(
      Cow::Owned(PathBuf::from(path)),
      OpenAccessKind::ReadNoFollow,
      Some("Deno.stat()"),
    )?;
    (state.borrow::<FileSystemRc>().clone(), path)
  };
  let stat = fs
    .stat_async(path.as_owned())
    .await
    .context_path("stat", &path)?;
  Ok(SerializableStat::from(stat))
}
/// Lstats a path synchronously (does not follow a trailing symlink),
/// serializing the result into the caller-provided `u32` output buffer.
#[op2(fast, stack_trace)]
pub fn op_fs_lstat_sync(
  state: &mut OpState,
  #[string] path: &str,
  #[buffer] stat_out_buf: &mut [u32],
) -> Result<(), FsOpsError> {
  let path = state
    .borrow_mut::<deno_permissions::PermissionsContainer>()
    .check_open(
      Cow::Borrowed(Path::new(path)),
      OpenAccessKind::ReadNoFollow,
      Some("Deno.lstatSync()"),
    )?;
  let fs = state.borrow::<FileSystemRc>();
  let stat = fs.lstat_sync(&path).context_path("lstat", &path)?;
  let serializable_stat = SerializableStat::from(stat);
  serializable_stat.write(stat_out_buf);
  Ok(())
}
/// Async variant of `op_fs_lstat_sync`; returns the stat serde-serialized.
#[op2(async, stack_trace)]
#[serde]
pub async fn op_fs_lstat_async(
  state: Rc<RefCell<OpState>>,
  #[string] path: String,
) -> Result<SerializableStat, FsOpsError> {
  let (fs, path) = {
    let mut state = state.borrow_mut();
    let permissions =
      state.borrow_mut::<deno_permissions::PermissionsContainer>();
    let path = permissions.check_open(
      Cow::Owned(PathBuf::from(path)),
      OpenAccessKind::ReadNoFollow,
      Some("Deno.lstat()"),
    )?;
    (state.borrow::<FileSystemRc>().clone(), path)
  };
  let stat = fs
    .lstat_async(path.as_owned())
    .await
    .context_path("lstat", &path)?;
  Ok(SerializableStat::from(stat))
}
/// Resolves a path to its canonical form synchronously; the result must be
/// valid UTF-8 or an `InvalidUtf8` error is raised by `path_into_string`.
#[op2(stack_trace)]
#[string]
pub fn op_fs_realpath_sync(
  state: &mut OpState,
  #[string] path: &str,
) -> Result<String, FsOpsError> {
  let fs = state.borrow::<FileSystemRc>().clone();
  let permissions =
    state.borrow_mut::<deno_permissions::PermissionsContainer>();
  let path = permissions.check_open(
    Cow::Borrowed(Path::new(path)),
    OpenAccessKind::ReadNoFollow,
    Some("Deno.realPathSync()"),
  )?;
  let resolved_path =
    fs.realpath_sync(&path).context_path("realpath", &path)?;
  let path_string = path_into_string(resolved_path.into_os_string())?;
  Ok(path_string)
}
/// Async variant of `op_fs_realpath_sync`.
#[op2(async, stack_trace)]
#[string]
pub async fn op_fs_realpath_async(
  state: Rc<RefCell<OpState>>,
  #[string] path: String,
) -> Result<String, FsOpsError> {
  let (fs, path) = {
    let mut state = state.borrow_mut();
    let fs = state.borrow::<FileSystemRc>().clone();
    let permissions =
      state.borrow_mut::<deno_permissions::PermissionsContainer>();
    let path = permissions.check_open(
      Cow::Owned(PathBuf::from(path)),
      OpenAccessKind::ReadNoFollow,
      Some("Deno.realPath()"),
    )?;
    (fs, path)
  };
  let resolved_path = fs
    .realpath_async(path.as_owned())
    .await
    .context_path("realpath", &path)?;
  let path_string = path_into_string(resolved_path.into_os_string())?;
  Ok(path_string)
}
/// Lists a directory's entries synchronously after a read permission check.
#[op2(stack_trace)]
#[serde]
pub fn op_fs_read_dir_sync(
  state: &mut OpState,
  #[string] path: &str,
) -> Result<Vec<FsDirEntry>, FsOpsError> {
  let path = state
    .borrow_mut::<deno_permissions::PermissionsContainer>()
    .check_open(
      Cow::Borrowed(Path::new(path)),
      OpenAccessKind::ReadNoFollow,
      Some("Deno.readDirSync()"),
    )?;
  let fs = state.borrow::<FileSystemRc>();
  let entries = fs.read_dir_sync(&path).context_path("readdir", &path)?;
  Ok(entries)
}
#[op2(async, stack_trace)]
#[serde]
pub async fn op_fs_read_dir_async(
state: Rc<RefCell<OpState>>,
#[string] path: String,
) -> Result<Vec<FsDirEntry>, FsOpsError> {
let (fs, path) = {
let mut state = state.borrow_mut();
let path = state
.borrow_mut::<deno_permissions::PermissionsContainer>()
.check_open(
Cow::Owned(PathBuf::from(path)),
OpenAccessKind::ReadNoFollow,
Some("Deno.readDir()"),
)?;
(state.borrow::<FileSystemRc>().clone(), path)
};
let entries = fs
.read_dir_async(path.as_owned())
.await
.context_path("readdir", &path)?;
Ok(entries)
}
/// `Deno.renameSync()`: renames/moves `oldpath` to `newpath`.
/// `oldpath` needs read+write permission (it is both read and removed);
/// `newpath` needs write permission only. Both checks are no-follow.
#[op2(fast, stack_trace)]
pub fn op_fs_rename_sync(
  state: &mut OpState,
  #[string] oldpath: &str,
  #[string] newpath: &str,
) -> Result<(), FsOpsError> {
  let permissions =
    state.borrow_mut::<deno_permissions::PermissionsContainer>();
  let oldpath = permissions.check_open(
    Cow::Borrowed(Path::new(oldpath)),
    OpenAccessKind::ReadWriteNoFollow,
    Some("Deno.renameSync()"),
  )?;
  let newpath = permissions.check_open(
    Cow::Borrowed(Path::new(newpath)),
    OpenAccessKind::WriteNoFollow,
    Some("Deno.renameSync()"),
  )?;
  let fs = state.borrow::<FileSystemRc>();
  fs.rename_sync(&oldpath, &newpath)
    .context_two_path("rename", &oldpath, &newpath)?;
  Ok(())
}
/// `Deno.rename()`: async variant of `op_fs_rename_sync`.
#[op2(async, stack_trace)]
pub async fn op_fs_rename_async(
  state: Rc<RefCell<OpState>>,
  #[string] oldpath: String,
  #[string] newpath: String,
) -> Result<(), FsOpsError> {
  // Both permission checks complete before any `.await`.
  let (fs, oldpath, newpath) = {
    let mut state = state.borrow_mut();
    let permissions =
      state.borrow_mut::<deno_permissions::PermissionsContainer>();
    let oldpath = permissions.check_open(
      Cow::Owned(PathBuf::from(oldpath)),
      OpenAccessKind::ReadWriteNoFollow,
      Some("Deno.rename()"),
    )?;
    let newpath = permissions.check_open(
      Cow::Owned(PathBuf::from(newpath)),
      OpenAccessKind::WriteNoFollow,
      Some("Deno.rename()"),
    )?;
    (state.borrow::<FileSystemRc>().clone(), oldpath, newpath)
  };
  fs.rename_async(oldpath.as_owned(), newpath.as_owned())
    .await
    .context_two_path("rename", &oldpath, &newpath)?;
  Ok(())
}
/// `Deno.linkSync()`: creates a hard link `newpath` pointing at `oldpath`.
/// `oldpath` needs read+write permission, `newpath` write permission;
/// both checks are no-follow.
#[op2(fast, stack_trace)]
pub fn op_fs_link_sync(
  state: &mut OpState,
  #[string] oldpath: &str,
  #[string] newpath: &str,
) -> Result<(), FsOpsError> {
  let permissions =
    state.borrow_mut::<deno_permissions::PermissionsContainer>();
  let oldpath = permissions.check_open(
    Cow::Borrowed(Path::new(oldpath)),
    OpenAccessKind::ReadWriteNoFollow,
    Some("Deno.linkSync()"),
  )?;
  let newpath = permissions.check_open(
    Cow::Borrowed(Path::new(newpath)),
    OpenAccessKind::WriteNoFollow,
    Some("Deno.linkSync()"),
  )?;
  let fs = state.borrow::<FileSystemRc>();
  fs.link_sync(&oldpath, &newpath)
    .context_two_path("link", &oldpath, &newpath)?;
  Ok(())
}
/// `Deno.link()`: async variant of `op_fs_link_sync`.
#[op2(async, stack_trace)]
pub async fn op_fs_link_async(
  state: Rc<RefCell<OpState>>,
  #[string] oldpath: String,
  #[string] newpath: String,
) -> Result<(), FsOpsError> {
  // Permission checks run inside a sync scope; the OpState borrow is
  // dropped before awaiting the filesystem call.
  let (fs, oldpath, newpath) = {
    let mut state = state.borrow_mut();
    let permissions =
      state.borrow_mut::<deno_permissions::PermissionsContainer>();
    let oldpath = permissions.check_open(
      Cow::Owned(PathBuf::from(oldpath)),
      OpenAccessKind::ReadWriteNoFollow,
      Some("Deno.link()"),
    )?;
    let newpath = permissions.check_open(
      Cow::Owned(PathBuf::from(newpath)),
      OpenAccessKind::WriteNoFollow,
      Some("Deno.link()"),
    )?;
    (state.borrow::<FileSystemRc>().clone(), oldpath, newpath)
  };
  fs.link_async(oldpath.as_owned(), newpath.as_owned())
    .await
    .context_two_path("link", &oldpath, &newpath)?;
  Ok(())
}
/// `Deno.symlinkSync()`: creates a symbolic link at `newpath` pointing to
/// `oldpath`. Unlike the other ops in this file, this requires *blanket*
/// read and write permission (`check_write_all` / `check_read_all`)
/// rather than per-path checks — presumably because a symlink target is
/// an arbitrary, possibly relative string that cannot be scoped to a
/// single path (NOTE(review): confirm rationale against the permission
/// model docs).
#[op2(stack_trace)]
pub fn op_fs_symlink_sync(
  state: &mut OpState,
  #[string] oldpath: &str,
  #[string] newpath: &str,
  #[serde] file_type: Option<FsFileType>,
) -> Result<(), FsOpsError> {
  let permissions =
    state.borrow_mut::<deno_permissions::PermissionsContainer>();
  permissions.check_write_all("Deno.symlinkSync()")?;
  permissions.check_read_all("Deno.symlinkSync()")?;
  // PERMISSIONS: ok because we verified --allow-write and --allow-read above
  let oldpath = CheckedPath::unsafe_new(Cow::Borrowed(Path::new(oldpath)));
  let newpath = CheckedPath::unsafe_new(Cow::Borrowed(Path::new(newpath)));
  let fs = state.borrow::<FileSystemRc>();
  fs.symlink_sync(&oldpath, &newpath, file_type)
    .context_two_path("symlink", &oldpath, &newpath)?;
  Ok(())
}
/// `Deno.symlink()`: async variant of `op_fs_symlink_sync`; same blanket
/// read+write permission requirement. `file_type` is only meaningful on
/// platforms that distinguish file/dir symlinks (passed through as-is).
#[op2(async, stack_trace)]
pub async fn op_fs_symlink_async(
  state: Rc<RefCell<OpState>>,
  #[string] oldpath: String,
  #[string] newpath: String,
  #[serde] file_type: Option<FsFileType>,
) -> Result<(), FsOpsError> {
  let fs = {
    let mut state = state.borrow_mut();
    let permissions =
      state.borrow_mut::<deno_permissions::PermissionsContainer>();
    permissions.check_write_all("Deno.symlink()")?;
    permissions.check_read_all("Deno.symlink()")?;
    state.borrow::<FileSystemRc>().clone()
  };
  // PERMISSIONS: ok because we verified --allow-write and --allow-read above
  let oldpath = CheckedPathBuf::unsafe_new(PathBuf::from(&oldpath));
  let newpath = CheckedPathBuf::unsafe_new(PathBuf::from(&newpath));
  fs.symlink_async(oldpath.clone(), newpath.clone(), file_type)
    .await
    .context_two_path(
      "symlink",
      oldpath.as_checked_path(),
      newpath.as_checked_path(),
    )?;
  Ok(())
}
/// `Deno.readLinkSync()`: reads the target of the symbolic link at `path`.
/// Requires read permission on `path` (checked no-follow — the link itself
/// is inspected, not its target).
#[op2(stack_trace)]
#[string]
pub fn op_fs_read_link_sync(
  state: &mut OpState,
  #[string] path: &str,
) -> Result<String, FsOpsError> {
  let path = state
    .borrow_mut::<deno_permissions::PermissionsContainer>()
    .check_open(
      Cow::Borrowed(Path::new(path)),
      OpenAccessKind::ReadNoFollow,
      // Fix: report the sync API name in the permission prompt; every
      // other *_sync op in this file names its Sync variant (e.g.
      // "Deno.realPathSync()", "Deno.readDirSync()").
      Some("Deno.readLinkSync()"),
    )?;
  let fs = state.borrow::<FileSystemRc>();
  let target = fs.read_link_sync(&path).context_path("readlink", &path)?;
  // Reject link targets that are not representable as a String.
  let target_string = path_into_string(target.into_os_string())?;
  Ok(target_string)
}
/// `Deno.readLink()`: async variant reading the target of a symlink.
/// Requires read permission on `path` (checked no-follow).
#[op2(async, stack_trace)]
#[string]
pub async fn op_fs_read_link_async(
  state: Rc<RefCell<OpState>>,
  #[string] path: String,
) -> Result<String, FsOpsError> {
  // Permission check completes inside a sync scope before the await.
  let (fs, path) = {
    let mut state = state.borrow_mut();
    let path = state
      .borrow_mut::<deno_permissions::PermissionsContainer>()
      .check_open(
        Cow::Owned(PathBuf::from(path)),
        OpenAccessKind::ReadNoFollow,
        Some("Deno.readLink()"),
      )?;
    (state.borrow::<FileSystemRc>().clone(), path)
  };
  let target = fs
    .read_link_async(path.as_owned())
    .await
    .context_path("readlink", &path)?;
  let target_string = path_into_string(target.into_os_string())?;
  Ok(target_string)
}
/// `Deno.truncateSync()`: truncates (or extends) the file at `path` to
/// exactly `len` bytes. Requires write permission (checked no-follow).
#[op2(fast, stack_trace)]
pub fn op_fs_truncate_sync(
  state: &mut OpState,
  #[string] path: &str,
  #[number] len: u64,
) -> Result<(), FsOpsError> {
  let path = state
    .borrow_mut::<deno_permissions::PermissionsContainer>()
    .check_open(
      Cow::Borrowed(Path::new(path)),
      OpenAccessKind::WriteNoFollow,
      Some("Deno.truncateSync()"),
    )?;
  let fs = state.borrow::<FileSystemRc>();
  fs.truncate_sync(&path, len)
    .context_path("truncate", &path)?;
  Ok(())
}
/// `Deno.truncate()`: async variant of `op_fs_truncate_sync`.
#[op2(async, stack_trace)]
pub async fn op_fs_truncate_async(
  state: Rc<RefCell<OpState>>,
  #[string] path: String,
  #[number] len: u64,
) -> Result<(), FsOpsError> {
  // OpState borrow is confined to this sync scope; dropped before await.
  let (fs, path) = {
    let mut state = state.borrow_mut();
    let path = state
      .borrow_mut::<deno_permissions::PermissionsContainer>()
      .check_open(
        Cow::Owned(PathBuf::from(path)),
        OpenAccessKind::WriteNoFollow,
        Some("Deno.truncate()"),
      )?;
    (state.borrow::<FileSystemRc>().clone(), path)
  };
  fs.truncate_async(path.as_owned(), len)
    .await
    .context_path("truncate", &path)?;
  Ok(())
}
/// `Deno.utimeSync()`: sets the access (`atime_*`) and modification
/// (`mtime_*`) timestamps of `path`, each given as whole seconds plus a
/// nanosecond part. Requires write permission (checked no-follow).
#[op2(fast, stack_trace)]
pub fn op_fs_utime_sync(
  state: &mut OpState,
  #[string] path: &str,
  #[number] atime_secs: i64,
  #[smi] atime_nanos: u32,
  #[number] mtime_secs: i64,
  #[smi] mtime_nanos: u32,
) -> Result<(), FsOpsError> {
  let path = state
    .borrow_mut::<deno_permissions::PermissionsContainer>()
    .check_open(
      Cow::Borrowed(Path::new(path)),
      OpenAccessKind::WriteNoFollow,
      // Fix: name the sync API in the permission prompt; previously this
      // said "Deno.utime()", inconsistent with the other *_sync ops in
      // this file.
      Some("Deno.utimeSync()"),
    )?;
  let fs = state.borrow::<FileSystemRc>();
  fs.utime_sync(&path, atime_secs, atime_nanos, mtime_secs, mtime_nanos)
    .context_path("utime", &path)?;
  Ok(())
}
/// `Deno.utime()`: async variant setting access/modification timestamps
/// (seconds + nanoseconds each). Requires write permission (no-follow).
#[op2(async, stack_trace)]
pub async fn op_fs_utime_async(
  state: Rc<RefCell<OpState>>,
  #[string] path: String,
  #[number] atime_secs: i64,
  #[smi] atime_nanos: u32,
  #[number] mtime_secs: i64,
  #[smi] mtime_nanos: u32,
) -> Result<(), FsOpsError> {
  // Permission check completes before the await; the OpState borrow is
  // dropped at the end of this block.
  let (fs, path) = {
    let mut state = state.borrow_mut();
    let path = state
      .borrow_mut::<deno_permissions::PermissionsContainer>()
      .check_open(
        Cow::Owned(PathBuf::from(path)),
        OpenAccessKind::WriteNoFollow,
        Some("Deno.utime()"),
      )?;
    (state.borrow::<FileSystemRc>().clone(), path)
  };
  fs.utime_async(
    path.as_owned(),
    atime_secs,
    atime_nanos,
    mtime_secs,
    mtime_nanos,
  )
  .await
  .context_path("utime", &path)?;
  Ok(())
}
/// `Deno.makeTempDirSync()`: creates a uniquely named directory (mode
/// 0o700) under `dir_arg` (or the default temp dir resolved by
/// `make_temp_check_sync`). Retries up to `MAX_TRIES` random names on
/// `AlreadyExists` collisions, then fails.
#[op2(stack_trace)]
#[string]
pub fn op_fs_make_temp_dir_sync(
  state: &mut OpState,
  #[string] dir_arg: Option<String>,
  #[string] prefix: Option<String>,
  #[string] suffix: Option<String>,
) -> Result<String, FsOpsError> {
  // Permission check on the containing directory happens here.
  let (dir, fs) =
    make_temp_check_sync(state, dir_arg.as_deref(), "Deno.makeTempDirSync()")?;
  let mut rng = thread_rng();
  const MAX_TRIES: u32 = 10;
  for _ in 0..MAX_TRIES {
    let path = tmp_name(&mut rng, &dir, prefix.as_deref(), suffix.as_deref())?;
    // PERMISSIONS: this is ok because we verified the directory above
    let path = CheckedPath::unsafe_new(Cow::Owned(path));
    // 0o700: owner-only access for the fresh temp directory.
    match fs.mkdir_sync(&path, false, Some(0o700)) {
      Ok(_) => {
        // PERMISSIONS: ensure the absolute path is not leaked
        let path =
          strip_dir_prefix(&dir, dir_arg.as_deref(), path.into_owned_path())?;
        return path_into_string(path.into_os_string());
      }
      // Name collision — pick another random name.
      Err(FsError::Io(ref e)) if e.kind() == io::ErrorKind::AlreadyExists => {
        continue;
      }
      Err(e) => return Err(e).context("tmpdir"),
    }
  }
  Err(FsError::Io(io::Error::new(
    io::ErrorKind::AlreadyExists,
    "too many temp dirs exist",
  )))
  .context("tmpdir")
}
/// `Deno.makeTempDir()`: async variant of `op_fs_make_temp_dir_sync`;
/// identical retry strategy and 0o700 mode.
#[op2(async, stack_trace)]
#[string]
pub async fn op_fs_make_temp_dir_async(
  state: Rc<RefCell<OpState>>,
  #[string] dir_arg: Option<String>,
  #[string] prefix: Option<String>,
  #[string] suffix: Option<String>,
) -> Result<String, FsOpsError> {
  let (dir, fs) =
    make_temp_check_async(state, dir_arg.as_deref(), "Deno.makeTempDir()")?;
  let mut rng = thread_rng();
  const MAX_TRIES: u32 = 10;
  for _ in 0..MAX_TRIES {
    let path = tmp_name(&mut rng, &dir, prefix.as_deref(), suffix.as_deref())?;
    // PERMISSIONS: ok because we verified the directory above
    let path = CheckedPathBuf::unsafe_new(path);
    match fs
      .clone()
      .mkdir_async(path.clone(), false, Some(0o700))
      .await
    {
      Ok(_) => {
        // PERMISSIONS: ensure the absolute path is not leaked
        let path =
          strip_dir_prefix(&dir, dir_arg.as_deref(), path.into_path_buf())?;
        return path_into_string(path.into_os_string());
      }
      // Name collision — retry with a new random name.
      Err(FsError::Io(ref e)) if e.kind() == io::ErrorKind::AlreadyExists => {
        continue;
      }
      Err(e) => return Err(e).context("tmpdir"),
    }
  }
  Err(FsError::Io(io::Error::new(
    io::ErrorKind::AlreadyExists,
    "too many temp dirs exist",
  )))
  .context("tmpdir")
}
/// `Deno.makeTempFileSync()`: creates a uniquely named file (mode 0o600,
/// `create_new` so an existing file is never clobbered) under `dir_arg`
/// or the default temp dir. Retries up to `MAX_TRIES` random names on
/// `AlreadyExists`, then fails.
#[op2(stack_trace)]
#[string]
pub fn op_fs_make_temp_file_sync(
  state: &mut OpState,
  #[string] dir_arg: Option<String>,
  #[string] prefix: Option<String>,
  #[string] suffix: Option<String>,
) -> Result<String, FsOpsError> {
  // Permission check on the containing directory happens here.
  let (dir, fs) =
    make_temp_check_sync(state, dir_arg.as_deref(), "Deno.makeTempFileSync()")?;
  // create_new guarantees atomic "create if absent" semantics; 0o600 keeps
  // the temp file owner-only.
  let open_opts = OpenOptions {
    write: true,
    create_new: true,
    mode: Some(0o600),
    ..Default::default()
  };
  let mut rng = thread_rng();
  const MAX_TRIES: u32 = 10;
  for _ in 0..MAX_TRIES {
    let path = tmp_name(&mut rng, &dir, prefix.as_deref(), suffix.as_deref())?;
    // PERMISSIONS: this is fine because the dir was checked
    let path = CheckedPath::unsafe_new(Cow::Owned(path));
    match fs.open_sync(&path, open_opts) {
      Ok(_) => {
        // PERMISSIONS: ensure the absolute path is not leaked
        let path =
          strip_dir_prefix(&dir, dir_arg.as_deref(), path.into_owned_path())?;
        return path_into_string(path.into_os_string());
      }
      // Name collision — retry with a new random name.
      Err(FsError::Io(ref e)) if e.kind() == io::ErrorKind::AlreadyExists => {
        continue;
      }
      Err(e) => return Err(e).context("tmpfile"),
    }
  }
  Err(FsError::Io(io::Error::new(
    io::ErrorKind::AlreadyExists,
    "too many temp files exist",
  )))
  .context("tmpfile")
}
#[op2(async, stack_trace)]
#[string]
pub async fn op_fs_make_temp_file_async(
state: Rc<RefCell<OpState>>,
#[string] dir_arg: Option<String>,
#[string] prefix: Option<String>,
#[string] suffix: Option<String>,
) -> Result<String, FsOpsError> {
let (dir, fs) =
make_temp_check_async(state, dir_arg.as_deref(), "Deno.makeTempFile()")?;
let open_opts = OpenOptions {
write: true,
create_new: true,
mode: Some(0o600),
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | true |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/ext/web/lib.rs | ext/web/lib.rs | // Copyright 2018-2025 the Deno authors. MIT license.
mod blob;
mod broadcast_channel;
mod compression;
mod console;
mod message_port;
mod stream_resource;
mod timers;
mod url;
mod urlpattern;
use std::borrow::Cow;
use std::cell::RefCell;
use std::sync::Arc;
pub use blob::BlobError;
pub use compression::CompressionError;
use deno_core::ByteString;
use deno_core::ToJsBuffer;
use deno_core::U16String;
use deno_core::op2;
use deno_core::url::Url;
use deno_core::v8;
use encoding_rs::CoderResult;
use encoding_rs::Decoder;
use encoding_rs::DecoderResult;
use encoding_rs::Encoding;
pub use message_port::MessagePortError;
pub use stream_resource::StreamResourceError;
pub use crate::blob::Blob;
pub use crate::blob::BlobPart;
pub use crate::blob::BlobStore;
pub use crate::blob::InMemoryBlobPart;
use crate::blob::op_blob_create_object_url;
use crate::blob::op_blob_create_part;
use crate::blob::op_blob_from_object_url;
use crate::blob::op_blob_read_part;
use crate::blob::op_blob_remove_part;
use crate::blob::op_blob_revoke_object_url;
use crate::blob::op_blob_slice_part;
pub use crate::broadcast_channel::InMemoryBroadcastChannel;
pub use crate::message_port::JsMessageData;
pub use crate::message_port::MessagePort;
pub use crate::message_port::Transferable;
pub use crate::message_port::create_entangled_message_port;
pub use crate::message_port::deserialize_js_transferables;
use crate::message_port::op_message_port_create_entangled;
use crate::message_port::op_message_port_post_message;
use crate::message_port::op_message_port_recv_message;
use crate::message_port::op_message_port_recv_message_sync;
pub use crate::message_port::serialize_transferables;
pub use crate::timers::StartTime;
use crate::timers::op_defer;
use crate::timers::op_now;
use crate::timers::op_time_origin;
// Registration of the `deno_web` extension: its ops (base64, encodings,
// blobs, message ports, compression, timers, streams, URL/URLPattern,
// console preview, broadcast channel), its ESM modules, and its
// per-runtime state (blob store, optional `location`, start time,
// in-memory broadcast channel). Comments are kept outside the macro
// invocation because its input is consumed as a token stream.
deno_core::extension!(deno_web,
  deps = [ deno_webidl ],
  ops = [
    op_base64_decode,
    op_base64_encode,
    op_base64_atob,
    op_base64_btoa,
    op_encoding_normalize_label,
    op_encoding_decode_single,
    op_encoding_decode_utf8,
    op_encoding_new_decoder,
    op_encoding_decode,
    op_encoding_encode_into,
    op_blob_create_part,
    op_blob_slice_part,
    op_blob_read_part,
    op_blob_remove_part,
    op_blob_create_object_url,
    op_blob_revoke_object_url,
    op_blob_from_object_url,
    op_message_port_create_entangled,
    op_message_port_post_message,
    op_message_port_recv_message,
    op_message_port_recv_message_sync,
    compression::op_compression_new,
    compression::op_compression_write,
    compression::op_compression_finish,
    op_now,
    op_time_origin,
    op_defer,
    stream_resource::op_readable_stream_resource_allocate,
    stream_resource::op_readable_stream_resource_allocate_sized,
    stream_resource::op_readable_stream_resource_get_sink,
    stream_resource::op_readable_stream_resource_write_error,
    stream_resource::op_readable_stream_resource_write_buf,
    stream_resource::op_readable_stream_resource_write_sync,
    stream_resource::op_readable_stream_resource_close,
    stream_resource::op_readable_stream_resource_await_close,
    url::op_url_reparse,
    url::op_url_parse,
    url::op_url_get_serialization,
    url::op_url_parse_with_base,
    url::op_url_parse_search_params,
    url::op_url_stringify_search_params,
    urlpattern::op_urlpattern_parse,
    urlpattern::op_urlpattern_process_match_input,
    console::op_preview_entries,
    broadcast_channel::op_broadcast_subscribe,
    broadcast_channel::op_broadcast_unsubscribe,
    broadcast_channel::op_broadcast_send,
    broadcast_channel::op_broadcast_recv,
  ],
  esm = [
    "00_infra.js",
    "01_dom_exception.js",
    "01_mimesniff.js",
    "02_event.js",
    "02_structured_clone.js",
    "02_timers.js",
    "03_abort_signal.js",
    "04_global_interfaces.js",
    "05_base64.js",
    "06_streams.js",
    "08_text_encoding.js",
    "09_file.js",
    "10_filereader.js",
    "12_location.js",
    "13_message_port.js",
    "14_compression.js",
    "15_performance.js",
    "16_image_data.js",
    "00_url.js",
    "01_urlpattern.js",
    "01_console.js",
    "01_broadcast_channel.js"
  ],
  lazy_loaded_esm = [ "webtransport.js" ],
  options = {
    blob_store: Arc<BlobStore>,
    maybe_location: Option<Url>,
    bc: InMemoryBroadcastChannel,
  },
  state = |state, options| {
    state.put(options.blob_store);
    if let Some(location) = options.maybe_location {
      state.put(Location(location));
    }
    state.put(StartTime::default());
    state.put(options.bc);
  }
);
/// Errors surfaced to JS by the web ops in this crate. The `#[class]`
/// attribute selects the JS error class each variant maps to.
#[derive(Debug, thiserror::Error, deno_error::JsError)]
pub enum WebError {
  // Maps to a DOMException per the WHATWG forgiving-base64 spec.
  #[class("DOMExceptionInvalidCharacterError")]
  #[error("Failed to decode base64")]
  Base64Decode,
  #[class(range)]
  #[error("The encoding label provided ('{0}') is invalid.")]
  InvalidEncodingLabel(String),
  // Decoded string would exceed what V8 can represent.
  #[class(type)]
  #[error("buffer exceeds maximum length")]
  BufferTooLong,
  #[class(range)]
  #[error("Value too large to decode")]
  ValueTooLarge,
  #[class(range)]
  #[error("Provided buffer too small")]
  BufferTooSmall,
  // Malformed input encountered in fatal decoding mode.
  #[class(type)]
  #[error("The encoded data is not valid")]
  DataInvalid,
  #[class(generic)]
  #[error(transparent)]
  DataError(#[from] v8::DataError),
}
/// Decodes a forgiving-base64 string to bytes for JS (`atob`-family op
/// returning a buffer). Decodes in place over the input's own allocation.
#[op2]
#[serde]
fn op_base64_decode(#[string] input: String) -> Result<ToJsBuffer, WebError> {
  let mut s = input.into_bytes();
  let decoded_len = forgiving_base64_decode_inplace(&mut s)?;
  // Shrink to the decoded length; the decode wrote a prefix of `s`.
  s.truncate(decoded_len);
  Ok(s.into())
}
/// `atob`: same in-place forgiving decode, but over a Latin-1 ByteString.
#[op2]
#[serde]
fn op_base64_atob(#[serde] mut s: ByteString) -> Result<ByteString, WebError> {
  let decoded_len = forgiving_base64_decode_inplace(&mut s)?;
  s.truncate(decoded_len);
  Ok(s)
}
/// See <https://infra.spec.whatwg.org/#forgiving-base64>
///
/// Decodes `input` in place and returns the decoded length (the decoded
/// bytes occupy a prefix of `input`).
#[inline]
fn forgiving_base64_decode_inplace(
  input: &mut [u8],
) -> Result<usize, WebError> {
  let decoded = base64_simd::forgiving_decode_inplace(input)
    .map_err(|_| WebError::Base64Decode)?;
  Ok(decoded.len())
}
/// Encodes arbitrary bytes to standard base64 (used by `btoa` and blobs).
#[op2]
#[string]
fn op_base64_encode(#[buffer] s: &[u8]) -> String {
  forgiving_base64_encode(s)
}
/// `btoa`: base64-encode a Latin-1 ByteString.
#[op2]
#[string]
fn op_base64_btoa(#[serde] s: ByteString) -> String {
  forgiving_base64_encode(s.as_ref())
}
/// See <https://infra.spec.whatwg.org/#forgiving-base64>
#[inline]
pub fn forgiving_base64_encode(s: &[u8]) -> String {
  base64_simd::STANDARD.encode_to_string(s)
}
/// Maps a user-supplied encoding label (e.g. "UTF8", "latin1") to its
/// canonical lowercase name, rejecting unknown labels and the
/// "replacement" encoding with a RangeError.
#[op2]
#[string]
fn op_encoding_normalize_label(
  #[string] label: String,
) -> Result<String, WebError> {
  let encoding = Encoding::for_label_no_replacement(label.as_bytes())
    .ok_or(WebError::InvalidEncodingLabel(label))?;
  Ok(encoding.name().to_lowercase())
}
/// Fast path for `TextDecoder.decode()` with UTF-8: hands the bytes
/// straight to V8's UTF-8 string constructor (which applies replacement
/// for invalid sequences). Optionally strips a leading UTF-8 BOM.
#[op2]
fn op_encoding_decode_utf8<'a>(
  scope: &mut v8::PinScope<'a, '_>,
  #[anybuffer] zero_copy: &[u8],
  ignore_bom: bool,
) -> Result<v8::Local<'a, v8::String>, WebError> {
  let buf = &zero_copy;
  // Note the inverted flag: when ignore_bom is false we *remove* the BOM
  // (it must not appear in the output); when true, it is kept as data.
  let buf = if !ignore_bom
    && buf.len() >= 3
    && buf[0] == 0xef
    && buf[1] == 0xbb
    && buf[2] == 0xbf
  {
    &buf[3..]
  } else {
    buf
  };
  // If `String::new_from_utf8()` returns `None`, this means that the
  // length of the decoded string would be longer than what V8 can
  // handle. In this case we return `RangeError`.
  //
  // For more details see:
  // - https://encoding.spec.whatwg.org/#dom-textdecoder-decode
  // - https://github.com/denoland/deno/issues/6649
  // - https://github.com/v8/v8/blob/d68fb4733e39525f9ff0a9222107c02c28096e2a/include/v8.h#L3277-L3278
  match v8::String::new_from_utf8(scope, buf, v8::NewStringType::Normal) {
    Some(text) => Ok(text),
    None => Err(WebError::BufferTooLong),
  }
}
/// One-shot `TextDecoder.decode()` for any labeled encoding: builds a
/// fresh decoder, decodes `data` to UTF-16 in a single pass (`last=true`),
/// and returns the result. `fatal` selects error-throwing vs replacement
/// decoding, `ignore_bom` keeps a BOM as data instead of stripping it.
#[op2]
#[serde]
fn op_encoding_decode_single(
  #[anybuffer] data: &[u8],
  #[string] label: String,
  fatal: bool,
  ignore_bom: bool,
) -> Result<U16String, WebError> {
  let encoding = Encoding::for_label(label.as_bytes())
    .ok_or(WebError::InvalidEncodingLabel(label))?;
  let mut decoder = if ignore_bom {
    encoding.new_decoder_without_bom_handling()
  } else {
    encoding.new_decoder_with_bom_removal()
  };
  // Sized so a single decode call can never overflow the output; an
  // overflow from the input length itself is a RangeError.
  let max_buffer_length = decoder
    .max_utf16_buffer_length(data.len())
    .ok_or(WebError::ValueTooLarge)?;
  let mut output = vec![0; max_buffer_length];
  if fatal {
    // Fatal mode: malformed input is an error, no replacement characters.
    let (result, _, written) =
      decoder.decode_to_utf16_without_replacement(data, &mut output, true);
    match result {
      DecoderResult::InputEmpty => {
        output.truncate(written);
        Ok(output.into())
      }
      DecoderResult::OutputFull => Err(WebError::BufferTooSmall),
      DecoderResult::Malformed(_, _) => Err(WebError::DataInvalid),
    }
  } else {
    // Replacement mode: malformed sequences become U+FFFD.
    let (result, _, written, _) =
      decoder.decode_to_utf16(data, &mut output, true);
    match result {
      CoderResult::InputEmpty => {
        output.truncate(written);
        Ok(output.into())
      }
      CoderResult::OutputFull => Err(WebError::BufferTooSmall),
    }
  }
}
/// Creates the streaming decoder backing a JS `TextDecoder` instance.
/// The returned cppgc-managed resource carries the decoder state between
/// successive `op_encoding_decode` calls.
#[op2]
#[cppgc]
fn op_encoding_new_decoder(
  #[string] label: &str,
  fatal: bool,
  ignore_bom: bool,
) -> Result<TextDecoderResource, WebError> {
  let encoding = Encoding::for_label(label.as_bytes())
    .ok_or_else(|| WebError::InvalidEncodingLabel(label.to_string()))?;
  let decoder = if ignore_bom {
    encoding.new_decoder_without_bom_handling()
  } else {
    encoding.new_decoder_with_bom_removal()
  };
  Ok(TextDecoderResource {
    decoder: RefCell::new(decoder),
    fatal,
  })
}
/// Streaming `TextDecoder.decode()`: decodes `data` with the persistent
/// decoder held in `resource`. When `stream` is true the decoder keeps
/// trailing incomplete sequences for the next chunk (`last=false`);
/// otherwise the stream is finalized.
#[op2]
#[serde]
fn op_encoding_decode(
  #[anybuffer] data: &[u8],
  #[cppgc] resource: &TextDecoderResource,
  stream: bool,
) -> Result<U16String, WebError> {
  let mut decoder = resource.decoder.borrow_mut();
  let fatal = resource.fatal;
  let max_buffer_length = decoder
    .max_utf16_buffer_length(data.len())
    .ok_or(WebError::ValueTooLarge)?;
  let mut output = vec![0; max_buffer_length];
  if fatal {
    // Fatal mode: malformed input is an error, no replacement characters.
    let (result, _, written) =
      decoder.decode_to_utf16_without_replacement(data, &mut output, !stream);
    match result {
      DecoderResult::InputEmpty => {
        output.truncate(written);
        Ok(output.into())
      }
      DecoderResult::OutputFull => Err(WebError::BufferTooSmall),
      DecoderResult::Malformed(_, _) => Err(WebError::DataInvalid),
    }
  } else {
    // Replacement mode: malformed sequences become U+FFFD.
    let (result, _, written, _) =
      decoder.decode_to_utf16(data, &mut output, !stream);
    match result {
      CoderResult::InputEmpty => {
        output.truncate(written);
        Ok(output.into())
      }
      CoderResult::OutputFull => Err(WebError::BufferTooSmall),
    }
  }
}
/// cppgc-managed state for a JS `TextDecoder`: the streaming decoder plus
/// the immutable `fatal` flag chosen at construction.
struct TextDecoderResource {
  decoder: RefCell<Decoder>,
  fatal: bool,
}
// SAFETY: we're sure `TextDecoderResource` can be GCed
unsafe impl deno_core::GarbageCollected for TextDecoderResource {
  // Holds no GC references, so there is nothing to trace.
  fn trace(&self, _visitor: &mut deno_core::v8::cppgc::Visitor) {}
  fn get_name(&self) -> &'static std::ffi::CStr {
    c"TextDecoderResource"
  }
}
/// Slow path of `TextEncoder.encodeInto()`: encodes an arbitrary V8
/// string into `buffer` via `write_utf8`. The fast path
/// (`op_encoding_encode_into_fast`) is used by the op system when the
/// argument is available as a plain string.
/// Writes results into `out_buf`: [0] = characters read (UTF-16 units),
/// [1] = bytes written.
#[op2(fast(op_encoding_encode_into_fast))]
#[allow(deprecated)]
fn op_encoding_encode_into(
  scope: &mut v8::PinScope<'_, '_>,
  input: v8::Local<v8::Value>,
  #[buffer] buffer: &mut [u8],
  #[buffer] out_buf: &mut [u32],
) -> Result<(), WebError> {
  let s = v8::Local::<v8::String>::try_from(input)?;
  let mut nchars = 0;
  out_buf[1] = s.write_utf8(
    scope,
    buffer,
    Some(&mut nchars),
    v8::WriteOptions::NO_NULL_TERMINATION
      | v8::WriteOptions::REPLACE_INVALID_UTF8,
  ) as u32;
  out_buf[0] = nchars as u32;
  Ok(())
}
/// Fast path of `TextEncoder.encodeInto()`: `input` already arrives as
/// UTF-8, so encoding reduces to copying the largest prefix of `input`
/// that fits in `buffer` without splitting a UTF-8 code point.
/// Writes results into `out_buf`: [0] = characters read (UTF-16 code
/// units), [1] = bytes written.
#[op2(fast)]
fn op_encoding_encode_into_fast(
  #[string] input: Cow<'_, str>,
  #[buffer] buffer: &mut [u8],
  #[buffer] out_buf: &mut [u32],
) {
  // Start at the smaller of the two lengths, then walk back (at most 3
  // steps, since a UTF-8 code point is at most 4 bytes) to the previous
  // code point boundary. `is_char_boundary(0)` is always true, so this
  // terminates.
  let mut split = input.len().min(buffer.len());
  while !input.is_char_boundary(split) {
    split -= 1;
  }
  buffer[..split].copy_from_slice(&input.as_bytes()[..split]);
  // The `read` output parameter is measured in UTF-16 code units.
  out_buf[0] = match input {
    // A borrowed Cow here is a zero-copy view of a V8 one-byte string,
    // so each byte is exactly one UTF-16 code unit.
    Cow::Borrowed(_) => split as u32,
    Cow::Owned(owned) => owned[..split].encode_utf16().count() as u32,
  };
  out_buf[1] = split as u32;
}
pub struct Location(pub Url);
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/ext/web/url.rs | ext/web/url.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use deno_core::JsBuffer;
use deno_core::OpState;
use deno_core::op2;
use deno_core::url::Url;
use deno_core::url::form_urlencoded;
use deno_core::url::quirks;
use deno_error::JsErrorBox;
/// Parse `href` with a `base_href`. Fills the out `buf` with URL components.
#[op2(fast)]
#[smi]
pub fn op_url_parse_with_base(
state: &mut OpState,
#[string] href: &str,
#[string] base_href: &str,
#[buffer] buf: &mut [u32],
) -> u32 {
let base_url = match Url::parse(base_href) {
Ok(url) => url,
Err(_) => return ParseStatus::Err as u32,
};
parse_url(state, href, Some(&base_url), buf)
}
/// Status code returned to JS by the URL parsing/reparsing ops.
#[repr(u32)]
pub enum ParseStatus {
  /// Parsed successfully; the serialization equals the input string.
  Ok = 0,
  /// Parsed successfully but the serialization differs from the input;
  /// fetch it with `op_url_get_serialization`.
  OkSerialization = 1,
  /// The input could not be parsed as a URL.
  Err = 2,
}
// Holds the most recent serialized URL between a parse op returning
// `OkSerialization` and the follow-up `op_url_get_serialization` call.
struct UrlSerialization(String);
/// Returns (and removes from OpState) the serialization stored by the
/// last parse/reparse op that returned `ParseStatus::OkSerialization`.
/// Calling it without such a preceding op will panic on `take`.
#[op2]
#[string]
pub fn op_url_get_serialization(state: &mut OpState) -> String {
  state.take::<UrlSerialization>().0
}
/// Parse `href` without a `base_url`. Fills the out `buf` with URL components.
///
/// See `parse_url` for the status/serialization protocol.
#[op2(fast)]
#[smi]
pub fn op_url_parse(
  state: &mut OpState,
  #[string] href: &str,
  #[buffer] buf: &mut [u32],
) -> u32 {
  parse_url(state, href, None, buf)
}
/// `op_url_parse` and `op_url_parse_with_base` share the same implementation.
///
/// This function is used to parse the URL and fill the `buf` with internal
/// offset values of the URL components.
///
/// If the serialized URL is the same as the input URL, then `UrlSerialization` is
/// not set and returns `ParseStatus::Ok`.
///
/// If the serialized URL is different from the input URL, then `UrlSerialization` is
/// set and returns `ParseStatus::OkSerialization`. JS side should check status and
/// use `op_url_get_serialization` to get the serialized URL.
///
/// If the URL is invalid, then `UrlSerialization` is not set and returns `ParseStatus::Err`.
///
/// ```js
/// const buf = new Uint32Array(8);
/// const status = op_url_parse("http://example.com", buf.buffer);
/// let serializedUrl = "";
/// if (status === ParseStatus.Ok) {
///   serializedUrl = "http://example.com";
/// } else if (status === ParseStatus.OkSerialization) {
///   serializedUrl = op_url_get_serialization();
/// }
/// ```
#[inline]
fn parse_url(
  state: &mut OpState,
  href: &str,
  base_href: Option<&Url>,
  buf: &mut [u32],
) -> u32 {
  match Url::options().base_url(base_href).parse(href) {
    Ok(url) => {
      let inner_url = quirks::internal_components(&url);
      // Component byte offsets into the serialized URL; absent optional
      // components are reported as 0.
      buf[0] = inner_url.scheme_end;
      buf[1] = inner_url.username_end;
      buf[2] = inner_url.host_start;
      buf[3] = inner_url.host_end;
      // NOTE(review): a missing port is reported as 0 here, while
      // `op_url_reparse` uses the NO_PORT sentinel (65536). The JS callers
      // presumably rely on this asymmetry — confirm before unifying.
      buf[4] = inner_url.port.unwrap_or(0) as u32;
      buf[5] = inner_url.path_start;
      buf[6] = inner_url.query_start.unwrap_or(0);
      buf[7] = inner_url.fragment_start.unwrap_or(0);
      let serialization: String = url.into();
      if serialization != href {
        state.put(UrlSerialization(serialization));
        ParseStatus::OkSerialization as u32
      } else {
        ParseStatus::Ok as u32
      }
    }
    Err(_) => ParseStatus::Err as u32,
  }
}
/// Discriminates which URL component `op_url_reparse` should mutate. The
/// numeric values are part of the protocol with the JS side (the op
/// receives the raw `u8` and transmutes it after a range check), so they
/// must not be reordered.
#[allow(dead_code)]
#[derive(Eq, PartialEq, Debug)]
#[repr(u8)]
pub enum UrlSetter {
  Hash = 0,
  Host = 1,
  Hostname = 2,
  Password = 3,
  Pathname = 4,
  Port = 5,
  Protocol = 6,
  Search = 7,
  Username = 8,
}
// Sentinel written to buf[4] by `op_url_reparse` when the URL has no port
// (one past the largest valid port, 65535).
const NO_PORT: u32 = 65536;
/// Applies a single component setter (see `UrlSetter`) to `href` and, on
/// success, fills `buf` with the updated component offsets. Follows the
/// same status/serialization protocol as `parse_url`, except a missing
/// port is reported as `NO_PORT` rather than 0.
#[op2(fast)]
#[smi]
pub fn op_url_reparse(
  state: &mut OpState,
  #[string] href: String,
  #[smi] setter: u8,
  #[string] setter_value: String,
  #[buffer] buf: &mut [u32],
) -> u32 {
  let mut url = match Url::options().parse(&href) {
    Ok(url) => url,
    Err(_) => return ParseStatus::Err as u32,
  };
  // Reject out-of-range discriminants before the transmute below.
  if setter > 8 {
    return ParseStatus::Err as u32;
  }
  // SAFETY: checked to be less than 9.
  let setter = unsafe { std::mem::transmute::<u8, UrlSetter>(setter) };
  let value = setter_value.as_ref();
  // Infallible setters are wrapped in Ok(()) so all arms unify on Result.
  let e = match setter {
    UrlSetter::Hash => {
      quirks::set_hash(&mut url, value);
      Ok(())
    }
    UrlSetter::Host => quirks::set_host(&mut url, value),
    UrlSetter::Hostname => quirks::set_hostname(&mut url, value),
    UrlSetter::Password => quirks::set_password(&mut url, value),
    UrlSetter::Pathname => {
      quirks::set_pathname(&mut url, value);
      Ok(())
    }
    UrlSetter::Port => quirks::set_port(&mut url, value),
    UrlSetter::Protocol => quirks::set_protocol(&mut url, value),
    UrlSetter::Search => {
      quirks::set_search(&mut url, value);
      Ok(())
    }
    UrlSetter::Username => quirks::set_username(&mut url, value),
  };
  match e {
    Ok(_) => {
      let inner_url = quirks::internal_components(&url);
      buf[0] = inner_url.scheme_end;
      buf[1] = inner_url.username_end;
      buf[2] = inner_url.host_start;
      buf[3] = inner_url.host_end;
      buf[4] = inner_url.port.map(|p| p as u32).unwrap_or(NO_PORT);
      buf[5] = inner_url.path_start;
      buf[6] = inner_url.query_start.unwrap_or(0);
      buf[7] = inner_url.fragment_start.unwrap_or(0);
      let serialization: String = url.into();
      if serialization != href {
        state.put(UrlSerialization(serialization));
        ParseStatus::OkSerialization as u32
      } else {
        ParseStatus::Ok as u32
      }
    }
    Err(_) => ParseStatus::Err as u32,
  }
}
/// Parses `application/x-www-form-urlencoded` data into key/value pairs.
///
/// Exactly one of `args` (a query string) or `zero_copy` (raw bytes) must
/// be provided; any other combination is a TypeError.
#[op2]
#[serde]
pub fn op_url_parse_search_params(
  #[string] args: Option<String>,
  #[buffer] zero_copy: Option<JsBuffer>,
) -> Result<Vec<(String, String)>, JsErrorBox> {
  // Normalize both input forms to a byte slice so the parse logic is not
  // duplicated per arm.
  let bytes: &[u8] = match (&args, &zero_copy) {
    (None, Some(buf)) => buf,
    (Some(args), None) => args.as_bytes(),
    _ => return Err(JsErrorBox::type_error("invalid parameters")),
  };
  // `form_urlencoded::parse` already returns an Iterator, so no
  // `.into_iter()` is needed; `Cow::into_owned` avoids a copy when the
  // parser already produced an owned (percent-decoded) string.
  Ok(
    form_urlencoded::parse(bytes)
      .map(|(k, v)| (k.into_owned(), v.into_owned()))
      .collect(),
  )
}
/// Serializes key/value pairs to an `application/x-www-form-urlencoded`
/// string (the inverse of `op_url_parse_search_params`).
#[op2]
#[string]
pub fn op_url_stringify_search_params(
  #[serde] args: Vec<(String, String)>,
) -> String {
  let mut serializer = form_urlencoded::Serializer::new(String::new());
  // Appending pair by pair is equivalent to `extend_pairs`.
  for (name, value) in &args {
    serializer.append_pair(name, value);
  }
  serializer.finish()
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/ext/web/message_port.rs | ext/web/message_port.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::borrow::Cow;
use std::cell::RefCell;
use std::future::poll_fn;
use std::rc::Rc;
use deno_core::CancelFuture;
use deno_core::CancelHandle;
use deno_core::DetachedBuffer;
use deno_core::OpState;
use deno_core::RcRef;
use deno_core::Resource;
use deno_core::ResourceId;
use deno_core::TransferredResource;
use deno_core::op2;
use deno_error::JsErrorBox;
use serde::Deserialize;
use serde::Serialize;
use tokio::sync::mpsc::UnboundedReceiver;
use tokio::sync::mpsc::UnboundedSender;
use tokio::sync::mpsc::error::TryRecvError;
use tokio::sync::mpsc::unbounded_channel;
/// Errors produced by the MessagePort ops; `#[class]` selects the JS
/// error class each variant surfaces as.
#[derive(Debug, thiserror::Error, deno_error::JsError)]
pub enum MessagePortError {
  #[class(type)]
  #[error("Invalid message port transfer")]
  InvalidTransfer,
  // The port resource still has outstanding references, so it cannot be
  // moved out for transfer.
  #[class(type)]
  #[error("Message port is not ready for transfer")]
  NotReady,
  #[class(type)]
  #[error("Can not transfer self message port")]
  TransferSelf,
  #[class(inherit)]
  #[error(transparent)]
  Canceled(#[from] deno_core::Canceled),
  #[class(inherit)]
  #[error(transparent)]
  Resource(deno_core::error::ResourceError),
  #[class(inherit)]
  #[error(transparent)]
  Generic(JsErrorBox),
}
/// A value carried alongside a structured-clone message: a transferred
/// resource (single or grouped, tagged with a name string) or an
/// ArrayBuffer identified by id.
pub enum Transferable {
  Resource(String, Box<dyn TransferredResource>),
  MultiResource(String, Vec<Box<dyn TransferredResource>>),
  ArrayBuffer(u32),
}
// Payload flowing between entangled ports: serialized data plus its
// transferred values.
type MessagePortMessage = (DetachedBuffer, Vec<Transferable>);
/// One half of an entangled port pair. `tx` is `None` after
/// `disentangle()`, which is how the peer's `recv` learns the pair was
/// broken (its channel closes and `recv` yields `Ok(None)`).
pub struct MessagePort {
  rx: RefCell<UnboundedReceiver<MessagePortMessage>>,
  tx: RefCell<Option<UnboundedSender<MessagePortMessage>>>,
}
impl MessagePort {
  /// Deserializes the message's transferables and forwards the payload to
  /// the paired port. A send failure (peer disentangled but not yet
  /// cleaned up) is deliberately ignored.
  pub fn send(
    &self,
    state: &mut OpState,
    data: JsMessageData,
  ) -> Result<(), MessagePortError> {
    let transferables =
      deserialize_js_transferables(state, data.transferables)?;
    // Swallow the failed to send error. It means the channel was disentangled,
    // but not cleaned up.
    if let Some(tx) = &*self.tx.borrow() {
      tx.send((data.data, transferables)).ok();
    }
    Ok(())
  }
  /// Awaits the next message from the paired port. Returns `Ok(None)` once
  /// the pair has been disentangled (channel closed).
  pub async fn recv(
    &self,
    state: Rc<RefCell<OpState>>,
  ) -> Result<Option<JsMessageData>, MessagePortError> {
    let rx = &self.rx;
    // The receiver borrow is taken per poll, not across the whole await,
    // so concurrent sync peeks at the port remain possible.
    let maybe_data = poll_fn(|cx| {
      let mut rx = rx.borrow_mut();
      rx.poll_recv(cx)
    })
    .await;
    if let Some((data, transferables)) = maybe_data {
      let js_transferables =
        serialize_transferables(&mut state.borrow_mut(), transferables);
      return Ok(Some(JsMessageData {
        data,
        transferables: js_transferables,
      }));
    }
    Ok(None)
  }
  /// This forcefully disconnects the message port from its paired port. This
  /// will wake up the `.recv` on the paired port, which will return `Ok(None)`.
  pub fn disentangle(&self) {
    // Dropping the sender closes the channel, which is what wakes the peer.
    let mut tx = self.tx.borrow_mut();
    tx.take();
  }
}
/// Builds two message ports wired to each other: whatever one port sends,
/// the other receives.
pub fn create_entangled_message_port() -> (MessagePort, MessagePort) {
  // Two unbounded channels, crossed over: the first port's sender feeds the
  // second port's receiver and vice versa.
  let (tx_a, rx_b) = unbounded_channel::<MessagePortMessage>();
  let (tx_b, rx_a) = unbounded_channel::<MessagePortMessage>();
  let make = |tx, rx| MessagePort {
    rx: RefCell::new(rx),
    tx: RefCell::new(Some(tx)),
  };
  (make(tx_a, rx_a), make(tx_b, rx_b))
}
/// Resource-table wrapper around a `MessagePort`, giving it a resource id
/// that JS can post to and receive from.
pub struct MessagePortResource {
  port: MessagePort,
  cancel: CancelHandle,
}
impl Resource for MessagePortResource {
  fn name(&self) -> Cow<'_, str> {
    "messagePort".into()
  }
  fn close(self: Rc<Self>) {
    // Aborts any in-flight `recv` op on this port.
    self.cancel.cancel();
  }
  fn transfer(
    self: Rc<Self>,
  ) -> Result<Box<dyn TransferredResource>, JsErrorBox> {
    self.cancel.cancel();
    // Transfer requires exclusive ownership of the resource; with other
    // outstanding references (e.g. a pending op) the port is "not ready".
    let resource = Rc::try_unwrap(self)
      .map_err(|_| JsErrorBox::from_err(MessagePortError::NotReady))?;
    Ok(Box::new(resource.port))
  }
}
impl TransferredResource for MessagePort {
  /// Re-materializes a transferred port on the receiving side as a fresh
  /// resource with a new cancel handle.
  fn receive(self: Box<Self>) -> Rc<dyn Resource> {
    Rc::new(MessagePortResource {
      port: *self,
      cancel: CancelHandle::new(),
    })
  }
}
/// Creates an entangled port pair, registers both ends in the resource
/// table, and returns their resource ids.
#[op2]
#[serde]
pub fn op_message_port_create_entangled(
  state: &mut OpState,
) -> (ResourceId, ResourceId) {
  let (first, second) = create_entangled_message_port();
  let mut register = |port| {
    state.resource_table.add(MessagePortResource {
      port,
      cancel: CancelHandle::new(),
    })
  };
  let first_id = register(first);
  let second_id = register(second);
  (first_id, second_id)
}
/// JS-facing description of a transferable, as produced and consumed by the
/// structured-clone code on the JS side.
#[derive(Deserialize, Serialize)]
#[serde(tag = "kind", content = "data", rename_all = "camelCase")]
pub enum JsTransferable {
  /// Index of a transferred ArrayBuffer in the serialized payload.
  ArrayBuffer(u32),
  /// A named resource identified by its resource id.
  Resource(String, ResourceId),
  /// A named group of resources identified by their resource ids.
  MultiResource(String, Vec<ResourceId>),
}
pub fn deserialize_js_transferables(
state: &mut OpState,
js_transferables: Vec<JsTransferable>,
) -> Result<Vec<Transferable>, MessagePortError> {
let mut transferables = Vec::with_capacity(js_transferables.len());
for js_transferable in js_transferables {
match js_transferable {
JsTransferable::Resource(name, rid) => {
let resource = state
.resource_table
.take_any(rid)
.map_err(|_| MessagePortError::InvalidTransfer)?;
let tx = resource.transfer().map_err(MessagePortError::Generic)?;
transferables.push(Transferable::Resource(name, tx));
}
JsTransferable::MultiResource(name, rids) => {
let mut txs = Vec::with_capacity(rids.len());
for rid in rids {
let resource = state
.resource_table
.take_any(rid)
.map_err(|_| MessagePortError::InvalidTransfer)?;
let tx = resource.transfer().map_err(MessagePortError::Generic)?;
txs.push(tx);
}
transferables.push(Transferable::MultiResource(name, txs));
}
JsTransferable::ArrayBuffer(id) => {
transferables.push(Transferable::ArrayBuffer(id));
}
}
}
Ok(transferables)
}
/// Re-registers transferred resources on the receiving side and converts
/// each `Transferable` back into its JS-facing description.
pub fn serialize_transferables(
  state: &mut OpState,
  transferables: Vec<Transferable>,
) -> Vec<JsTransferable> {
  transferables
    .into_iter()
    .map(|transferable| match transferable {
      Transferable::Resource(name, tx) => {
        let rid = state.resource_table.add_rc_dyn(tx.receive());
        JsTransferable::Resource(name, rid)
      }
      Transferable::MultiResource(name, txs) => {
        let rids = txs
          .into_iter()
          .map(|tx| state.resource_table.add_rc_dyn(tx.receive()))
          .collect();
        JsTransferable::MultiResource(name, rids)
      }
      Transferable::ArrayBuffer(id) => JsTransferable::ArrayBuffer(id),
    })
    .collect()
}
/// A message as exchanged with JS: the structured-clone payload plus the
/// transferables that accompany it.
#[derive(Deserialize, Serialize)]
pub struct JsMessageData {
  // Serialized (structured-clone) message bytes.
  pub data: DetachedBuffer,
  // Transferables referenced by the payload.
  pub transferables: Vec<JsTransferable>,
}
/// Posts a message (with transferables) through the port identified by
/// `rid`. Transferring the destination port through itself is rejected.
#[op2]
pub fn op_message_port_post_message(
  state: &mut OpState,
  #[smi] rid: ResourceId,
  #[serde] data: JsMessageData,
) -> Result<(), MessagePortError> {
  // A port cannot be sent over itself.
  let transfers_self = data.transferables.iter().any(|transferable| {
    matches!(transferable, JsTransferable::Resource(_, id) if *id == rid)
  });
  if transfers_self {
    return Err(MessagePortError::TransferSelf);
  }
  let resource = state
    .resource_table
    .get::<MessagePortResource>(rid)
    .map_err(MessagePortError::Resource)?;
  resource.port.send(state, data)
}
/// Awaits the next message on the port identified by `rid`. Resolves to
/// `None` when the resource is gone or the paired port closed.
#[op2(async)]
#[serde]
pub async fn op_message_port_recv_message(
  state: Rc<RefCell<OpState>>,
  #[smi] rid: ResourceId,
) -> Result<Option<JsMessageData>, MessagePortError> {
  let resource = {
    let state = state.borrow();
    // A missing resource is treated as "port closed", not an error.
    match state.resource_table.get::<MessagePortResource>(rid) {
      Ok(resource) => resource,
      Err(_) => return Ok(None),
    }
  };
  // Closing the resource cancels this pending receive.
  let cancel = RcRef::map(resource.clone(), |r| &r.cancel);
  resource.port.recv(state).or_cancel(cancel).await?
}
/// Non-blocking receive: returns the next pending message if one is already
/// queued, otherwise `None` (also `None` when the paired port disconnected).
#[op2]
#[serde]
pub fn op_message_port_recv_message_sync(
  state: &mut OpState,
  #[smi] rid: ResourceId,
) -> Result<Option<JsMessageData>, MessagePortError> {
  let resource = state
    .resource_table
    .get::<MessagePortResource>(rid)
    .map_err(MessagePortError::Resource)?;
  let mut rx = resource.port.rx.borrow_mut();
  match rx.try_recv() {
    Ok((d, t)) => Ok(Some(JsMessageData {
      data: d,
      transferables: serialize_transferables(state, t),
    })),
    // Nothing queued right now.
    Err(TryRecvError::Empty) => Ok(None),
    // Paired port dropped/disentangled; surface as "no message".
    Err(TryRecvError::Disconnected) => Ok(None),
  }
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/ext/web/broadcast_channel.rs | ext/web/broadcast_channel.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::cell::RefCell;
use std::rc::Rc;
use std::sync::Arc;
use deno_core::JsBuffer;
use deno_core::OpState;
use deno_core::ResourceId;
use deno_core::op2;
use deno_core::parking_lot::Mutex;
use tokio::sync::broadcast;
use tokio::sync::broadcast::error::SendError as BroadcastSendError;
use tokio::sync::mpsc;
use tokio::sync::mpsc::error::SendError as MpscSendError;
use uuid::Uuid;
/// Errors surfaced to JS from the BroadcastChannel ops.
#[derive(Debug, thiserror::Error, deno_error::JsError)]
pub enum BroadcastChannelError {
  /// The resource id did not resolve to a broadcast channel resource.
  #[class(inherit)]
  #[error(transparent)]
  Resource(
    #[from]
    #[inherit]
    deno_core::error::ResourceError,
  ),
  /// Failed to send on a subscriber's cancellation channel.
  #[class(generic)]
  #[error(transparent)]
  MPSCSendError(MpscSendError<Box<dyn std::fmt::Debug + Send + Sync>>),
  /// Failed to publish on the shared broadcast channel.
  #[class(generic)]
  #[error(transparent)]
  BroadcastSendError(
    BroadcastSendError<Box<dyn std::fmt::Debug + Send + Sync>>,
  ),
}
// The tokio send errors are generic over the message type; box the payload
// so this error enum can stay non-generic.
impl<T: std::fmt::Debug + Send + Sync + 'static> From<MpscSendError<T>>
  for BroadcastChannelError
{
  fn from(value: MpscSendError<T>) -> Self {
    BroadcastChannelError::MPSCSendError(MpscSendError(Box::new(value.0)))
  }
}
impl<T: std::fmt::Debug + Send + Sync + 'static> From<BroadcastSendError<T>>
  for BroadcastChannelError
{
  fn from(value: BroadcastSendError<T>) -> Self {
    BroadcastChannelError::BroadcastSendError(BroadcastSendError(Box::new(
      value.0,
    )))
  }
}
/// A broadcast message: channel name plus raw payload bytes.
pub type BroadcastChannelMessage = (String, Vec<u8>);
/// Subscribes to the in-memory broadcast bus; returns the new subscriber's
/// resource id.
#[op2(fast)]
#[smi]
pub fn op_broadcast_subscribe(
  state: &mut OpState,
) -> Result<ResourceId, BroadcastChannelError> {
  let bc = state.borrow::<InMemoryBroadcastChannel>();
  let resource = bc.subscribe()?;
  Ok(state.resource_table.add(resource))
}
/// Signals a subscriber to stop receiving; any pending `recv` resolves to
/// `None`. The resource itself is closed separately.
#[op2(fast)]
pub fn op_broadcast_unsubscribe(
  state: &mut OpState,
  #[smi] rid: ResourceId,
) -> Result<(), BroadcastChannelError> {
  let resource = state
    .resource_table
    .get::<InMemoryBroadcastChannelResource>(rid)?;
  let bc = state.borrow::<InMemoryBroadcastChannel>();
  bc.unsubscribe(&resource)
}
/// Publishes `buf` under channel `name` on behalf of subscriber `rid`.
#[op2]
pub fn op_broadcast_send(
  state: Rc<RefCell<OpState>>,
  #[smi] rid: ResourceId,
  #[string] name: String,
  #[buffer] buf: JsBuffer,
) -> Result<(), BroadcastChannelError> {
  let resource = state
    .borrow()
    .resource_table
    .get::<InMemoryBroadcastChannelResource>(rid)?;
  let bc = state.borrow().borrow::<InMemoryBroadcastChannel>().clone();
  bc.send(&resource, name, buf.to_vec())
}
/// Awaits the next message for subscriber `rid`; `None` means the
/// subscription was cancelled or the bus closed.
#[op2(async)]
#[serde]
pub async fn op_broadcast_recv(
  state: Rc<RefCell<OpState>>,
  #[smi] rid: ResourceId,
) -> Result<Option<BroadcastChannelMessage>, BroadcastChannelError> {
  let resource = state
    .borrow()
    .resource_table
    .get::<InMemoryBroadcastChannelResource>(rid)?;
  let bc = state.borrow().borrow::<InMemoryBroadcastChannel>().clone();
  bc.recv(&resource).await
}
/// In-process broadcast bus shared by all subscribers; holds the single
/// `broadcast::Sender` every subscriber's receiver is derived from.
#[derive(Clone)]
pub struct InMemoryBroadcastChannel(
  Arc<Mutex<broadcast::Sender<InMemoryChannelMessage>>>,
);
/// One subscriber: its broadcast receiver plus a private cancellation
/// channel used by `unsubscribe` to interrupt a pending `recv`.
pub struct InMemoryBroadcastChannelResource {
  // Both receivers live behind one async mutex so a single `recv` can
  // `select!` over them.
  rx: tokio::sync::Mutex<(
    broadcast::Receiver<InMemoryChannelMessage>,
    mpsc::UnboundedReceiver<()>,
  )>,
  cancel_tx: mpsc::UnboundedSender<()>,
  // Identifies this subscriber so it can skip messages it sent itself.
  uuid: Uuid,
}
impl deno_core::Resource for InMemoryBroadcastChannelResource {}
/// Message as carried on the bus; `Arc`s keep the per-subscriber clone cheap.
#[derive(Clone, Debug)]
struct InMemoryChannelMessage {
  name: Arc<String>,
  data: Arc<Vec<u8>>,
  // Sender's subscriber id, used for self-send filtering in `recv`.
  uuid: Uuid,
}
impl Default for InMemoryBroadcastChannel {
  fn default() -> Self {
    // Capacity 256: receivers that fall further behind lag and drop messages.
    let (tx, _) = broadcast::channel(256);
    Self(Arc::new(Mutex::new(tx)))
  }
}
impl InMemoryBroadcastChannel {
  /// Creates a new subscriber handle on the shared bus.
  fn subscribe(
    &self,
  ) -> Result<InMemoryBroadcastChannelResource, BroadcastChannelError> {
    let (cancel_tx, cancel_rx) = mpsc::unbounded_channel();
    let broadcast_rx = self.0.lock().subscribe();
    let rx = tokio::sync::Mutex::new((broadcast_rx, cancel_rx));
    let uuid = Uuid::new_v4();
    Ok(InMemoryBroadcastChannelResource {
      rx,
      cancel_tx,
      uuid,
    })
  }
  /// Interrupts the subscriber's pending `recv` (it will return `None`).
  fn unsubscribe(
    &self,
    resource: &InMemoryBroadcastChannelResource,
  ) -> Result<(), BroadcastChannelError> {
    Ok(resource.cancel_tx.send(())?)
  }
  /// Publishes a message tagged with the sender's uuid so the sender's own
  /// `recv` loop can filter it out.
  fn send(
    &self,
    resource: &InMemoryBroadcastChannelResource,
    name: String,
    data: Vec<u8>,
  ) -> Result<(), BroadcastChannelError> {
    let name = Arc::new(name);
    let data = Arc::new(data);
    let uuid = resource.uuid;
    self
      .0
      .lock()
      .send(InMemoryChannelMessage { name, data, uuid })?;
    Ok(())
  }
  /// Waits for the next message addressed to this subscriber. Returns
  /// `Ok(None)` on cancellation or when the bus closes; skips messages this
  /// subscriber sent itself and silently continues past messages lost to lag.
  async fn recv(
    &self,
    resource: &InMemoryBroadcastChannelResource,
  ) -> Result<Option<BroadcastChannelMessage>, BroadcastChannelError> {
    let mut g = resource.rx.lock().await;
    let (broadcast_rx, cancel_rx) = &mut *g;
    loop {
      let result = tokio::select! {
        r = broadcast_rx.recv() => r,
        _ = cancel_rx.recv() => return Ok(None),
      };
      use tokio::sync::broadcast::error::RecvError::*;
      match result {
        Err(Closed) => return Ok(None),
        Err(Lagged(_)) => (), // Backlogged, messages dropped.
        Ok(message) if message.uuid == resource.uuid => (), // Self-send.
        Ok(message) => {
          let name = String::clone(&message.name);
          let data = Vec::clone(&message.data);
          return Ok(Some((name, data)));
        }
      }
    }
  }
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/ext/web/stream_resource.rs | ext/web/stream_resource.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::borrow::Cow;
use std::cell::RefCell;
use std::cell::RefMut;
use std::ffi::c_void;
use std::future::Future;
use std::future::poll_fn;
use std::marker::PhantomData;
use std::mem::MaybeUninit;
use std::pin::Pin;
use std::rc::Rc;
use std::task::Context;
use std::task::Poll;
use std::task::Waker;
use bytes::BytesMut;
use deno_core::AsyncResult;
use deno_core::BufView;
use deno_core::CancelFuture;
use deno_core::CancelHandle;
use deno_core::ExternalPointer;
use deno_core::JsBuffer;
use deno_core::OpState;
use deno_core::RcLike;
use deno_core::RcRef;
use deno_core::Resource;
use deno_core::ResourceId;
use deno_core::external;
use deno_core::op2;
use deno_core::serde_v8::V8Slice;
use deno_core::unsync::TaskQueue;
use futures::TryFutureExt;
/// Errors produced while reading from a readable-stream resource.
#[derive(Debug, thiserror::Error, deno_error::JsError)]
pub enum StreamResourceError {
  /// The read was cancelled (the resource was closed).
  #[class(inherit)]
  #[error(transparent)]
  Canceled(#[from] deno_core::Canceled),
  /// An error string propagated from the JS side of the stream.
  #[class(type)]
  #[error("{0}")]
  Js(String),
}
// How many buffers we'll allow in the channel before we stop allowing writes.
const BUFFER_CHANNEL_SIZE: u16 = 1024;
// How much data is in the channel before we stop allowing writes.
const BUFFER_BACKPRESSURE_LIMIT: usize = 64 * 1024;
// Optimization: prevent multiple small writes from adding overhead.
//
// If the total size of the channel is less than this value and there is more than one buffer available
// to read, we will allocate a buffer to store the entire contents of the channel and copy each value from
// the channel rather than yielding them one at a time.
const BUFFER_AGGREGATION_LIMIT: usize = 1024;
/// Single-threaded ring buffer of `V8Slice`s with slot- and byte-based
/// backpressure. Slots from `ring_consumer` up to (but excluding)
/// `ring_producer` are initialized; all other slots are uninit.
struct BoundedBufferChannelInner {
  buffers: [MaybeUninit<V8Slice<u8>>; BUFFER_CHANNEL_SIZE as _],
  ring_producer: u16,
  ring_consumer: u16,
  // Pending error, yielded to the reader once buffered data drains.
  error: Option<StreamResourceError>,
  // Total bytes currently buffered across all slots.
  current_size: usize,
  // TODO(mmastrac): we can math this field instead of accounting for it
  len: usize,
  closed: bool,
  read_waker: Option<Waker>,
  write_waker: Option<Waker>,
  // Opts out of `Send`: the ring is only sound single-threaded.
  _unsend: PhantomData<std::sync::MutexGuard<'static, ()>>,
}
impl Default for BoundedBufferChannelInner {
  fn default() -> Self {
    Self::new()
  }
}
impl Drop for BoundedBufferChannelInner {
  fn drop(&mut self) {
    // If any buffers remain in the ring, drop them here
    self.drain(std::mem::drop);
  }
}
impl std::fmt::Debug for BoundedBufferChannelInner {
  fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
    f.write_fmt(format_args!(
      "[BoundedBufferChannel closed={} error={:?} ring={}->{} len={} size={}]",
      self.closed,
      self.error,
      self.ring_producer,
      self.ring_consumer,
      self.len,
      self.current_size
    ))
  }
}
impl BoundedBufferChannelInner {
  /// Creates an empty ring; every slot starts uninitialized.
  pub fn new() -> Self {
    const UNINIT: MaybeUninit<V8Slice<u8>> = MaybeUninit::uninit();
    Self {
      buffers: [UNINIT; BUFFER_CHANNEL_SIZE as _],
      ring_producer: 0,
      ring_consumer: 0,
      len: 0,
      closed: false,
      error: None,
      current_size: 0,
      read_waker: None,
      write_waker: None,
      _unsend: PhantomData,
    }
  }
  /// # Safety
  ///
  /// This doesn't check whether `ring_consumer` is valid, so you'd better make sure it is before
  /// calling this.
  #[inline(always)]
  unsafe fn next_unsafe(&mut self) -> &mut V8Slice<u8> {
    #[allow(clippy::undocumented_unsafe_blocks)]
    unsafe {
      self
        .buffers
        .get_unchecked_mut(self.ring_consumer as usize)
        .assume_init_mut()
    }
  }
  /// # Safety
  ///
  /// This doesn't check whether `ring_consumer` is valid, so you'd better make sure it is before
  /// calling this.
  #[inline(always)]
  unsafe fn take_next_unsafe(&mut self) -> V8Slice<u8> {
    #[allow(clippy::undocumented_unsafe_blocks)]
    unsafe {
      // Move the slice out by raw read; the slot is logically uninit after
      // the consumer index advances past it.
      let res = std::ptr::read(self.next_unsafe());
      self.ring_consumer = (self.ring_consumer + 1) % BUFFER_CHANNEL_SIZE;
      res
    }
  }
  /// Removes every buffered slice in FIFO order, passing each to `f`, then
  /// resets the ring to its empty state.
  fn drain(&mut self, mut f: impl FnMut(V8Slice<u8>)) {
    while self.ring_producer != self.ring_consumer {
      // SAFETY: We know the ring indexes are valid
      let res = unsafe { std::ptr::read(self.next_unsafe()) };
      self.ring_consumer = (self.ring_consumer + 1) % BUFFER_CHANNEL_SIZE;
      f(res);
    }
    self.current_size = 0;
    self.ring_producer = 0;
    self.ring_consumer = 0;
    self.len = 0;
  }
  /// Pops up to `limit` bytes. Returns `Ok(None)` when empty (or the
  /// pending error once the data has drained). May aggregate several small
  /// buffers into one allocation, or split a large front buffer at `limit`.
  pub fn read(
    &mut self,
    limit: usize,
  ) -> Result<Option<BufView>, StreamResourceError> {
    // Empty buffers will return the error, if one exists, or None
    if self.len == 0 {
      if let Some(error) = self.error.take() {
        return Err(error);
      } else {
        return Ok(None);
      }
    }
    // If we have less than the aggregation limit AND we have more than one buffer in the channel,
    // aggregate and return everything in a single buffer.
    if limit >= BUFFER_AGGREGATION_LIMIT
      && self.current_size <= BUFFER_AGGREGATION_LIMIT
      && self.len > 1
    {
      let mut bytes = BytesMut::with_capacity(BUFFER_AGGREGATION_LIMIT);
      self.drain(|slice| {
        bytes.extend_from_slice(slice.as_ref());
      });
      // We can always write again
      if let Some(waker) = self.write_waker.take() {
        waker.wake();
      }
      return Ok(Some(BufView::from(bytes.freeze())));
    }
    // SAFETY: We know this exists
    let buf = unsafe { self.next_unsafe() };
    let buf = if buf.len() <= limit {
      // Whole front buffer fits under the limit: take it out of the ring.
      self.current_size -= buf.len();
      self.len -= 1;
      // SAFETY: We know this exists
      unsafe { self.take_next_unsafe() }
    } else {
      // Front buffer is larger than the limit: split off the first `limit`
      // bytes and leave the remainder queued.
      let buf = buf.split_to(limit);
      self.current_size -= limit;
      buf
    };
    // If current_size is zero, len must be zero (and if not, len must not be)
    debug_assert!(
      !((self.current_size == 0) ^ (self.len == 0)),
      "Length accounting mismatch: {self:?}"
    );
    if self.write_waker.is_some() {
      // We may be able to write again if we have buffer and byte room in the channel
      if self.can_write()
        && let Some(waker) = self.write_waker.take()
      {
        waker.wake();
      }
    }
    Ok(Some(BufView::from(JsBuffer::from_parts(buf))))
  }
  /// Appends `buffer` to the ring. Returns the buffer back in `Err` when
  /// the ring is out of slots (caller should await write readiness).
  pub fn write(&mut self, buffer: V8Slice<u8>) -> Result<(), V8Slice<u8>> {
    let next_producer_index = (self.ring_producer + 1) % BUFFER_CHANNEL_SIZE;
    if next_producer_index == self.ring_consumer {
      // Note that we may have been allowed to write because of a close/error condition, but the
      // underlying channel is actually closed. If this is the case, we return `Ok(())`` and just
      // drop the bytes on the floor.
      return if self.closed || self.error.is_some() {
        Ok(())
      } else {
        Err(buffer)
      };
    }
    self.current_size += buffer.len();
    // SAFETY: we know the ringbuffer bounds are correct
    unsafe {
      *self.buffers.get_unchecked_mut(self.ring_producer as usize) =
        MaybeUninit::new(buffer)
    };
    self.ring_producer = next_producer_index;
    self.len += 1;
    debug_assert!(self.ring_producer != self.ring_consumer);
    if let Some(waker) = self.read_waker.take() {
      waker.wake();
    }
    Ok(())
  }
  /// Stores an error to be surfaced to the reader after the buffered data
  /// drains, and wakes any pending read.
  pub fn write_error(&mut self, error: StreamResourceError) {
    self.error = Some(error);
    if let Some(waker) = self.read_waker.take() {
      waker.wake();
    }
  }
  #[inline(always)]
  pub fn can_read(&self) -> bool {
    // Read will return if:
    // - the stream is closed
    // - there is an error
    // - the stream is not empty
    self.closed
      || self.error.is_some()
      || self.ring_consumer != self.ring_producer
  }
  #[inline(always)]
  pub fn can_write(&self) -> bool {
    // Write will return if:
    // - the stream is closed
    // - there is an error
    // - the stream is not full (either buffer or byte count)
    let next_producer_index = (self.ring_producer + 1) % BUFFER_CHANNEL_SIZE;
    self.closed
      || self.error.is_some()
      || (next_producer_index != self.ring_consumer
        && self.current_size < BUFFER_BACKPRESSURE_LIMIT)
  }
  /// Resolves once `can_read()` holds, parking the read waker otherwise.
  pub fn poll_read_ready(&mut self, cx: &mut Context) -> Poll<()> {
    if !self.can_read() {
      self.read_waker = Some(cx.waker().clone());
      Poll::Pending
    } else {
      self.read_waker.take();
      Poll::Ready(())
    }
  }
  /// Resolves once `can_write()` holds, parking the write waker otherwise.
  pub fn poll_write_ready(&mut self, cx: &mut Context) -> Poll<()> {
    if !self.can_write() {
      self.write_waker = Some(cx.waker().clone());
      Poll::Pending
    } else {
      self.write_waker.take();
      Poll::Ready(())
    }
  }
  /// Marks the channel closed and wakes both sides.
  pub fn close(&mut self) {
    self.closed = true;
    // Wake up reads and writes, since they'll both be able to proceed forever now
    if let Some(waker) = self.write_waker.take() {
      waker.wake();
    }
    if let Some(waker) = self.read_waker.take() {
      waker.wake();
    }
  }
}
/// Shared single-threaded handle to the ring buffer; clones alias the same
/// underlying channel. All methods delegate to `BoundedBufferChannelInner`.
#[repr(transparent)]
#[derive(Clone, Default)]
struct BoundedBufferChannel {
  inner: Rc<RefCell<BoundedBufferChannelInner>>,
}
impl BoundedBufferChannel {
  // TODO(mmastrac): in release mode we should be able to make this an UnsafeCell
  #[inline(always)]
  fn inner(&self) -> RefMut<'_, BoundedBufferChannelInner> {
    self.inner.borrow_mut()
  }
  pub fn read(
    &self,
    limit: usize,
  ) -> Result<Option<BufView>, StreamResourceError> {
    self.inner().read(limit)
  }
  pub fn write(&self, buffer: V8Slice<u8>) -> Result<(), V8Slice<u8>> {
    self.inner().write(buffer)
  }
  pub fn write_error(&self, error: StreamResourceError) {
    self.inner().write_error(error)
  }
  pub fn can_write(&self) -> bool {
    self.inner().can_write()
  }
  pub fn poll_read_ready(&self, cx: &mut Context) -> Poll<()> {
    self.inner().poll_read_ready(cx)
  }
  pub fn poll_write_ready(&self, cx: &mut Context) -> Poll<()> {
    self.inner().poll_write_ready(cx)
  }
  pub fn closed(&self) -> bool {
    self.inner().closed
  }
  // Test-only inspection of the buffered byte count.
  #[cfg(test)]
  pub fn byte_size(&self) -> usize {
    self.inner().current_size
  }
  pub fn close(&self) {
    self.inner().close()
  }
}
/// Resource backing a JS ReadableStream: reads are served from the bounded
/// channel that the JS-side sink writes into.
#[allow(clippy::type_complexity)]
struct ReadableStreamResource {
  // Serializes concurrent reads so chunks come out in order.
  read_queue: Rc<TaskQueue>,
  channel: BoundedBufferChannel,
  cancel_handle: CancelHandle,
  data: ReadableStreamResourceData,
  size_hint: (u64, Option<u64>),
}
impl ReadableStreamResource {
  pub fn cancel_handle(self: &Rc<Self>) -> impl RcLike<CancelHandle> + use<> {
    RcRef::map(self, |s| &s.cancel_handle).clone()
  }
  /// Reads up to `limit` bytes; an empty `BufView` signals end-of-stream.
  async fn read(
    self: Rc<Self>,
    limit: usize,
  ) -> Result<BufView, StreamResourceError> {
    let cancel_handle = self.cancel_handle();
    // Serialize all the reads using a task queue.
    let _read_permit = self.read_queue.acquire().await;
    poll_fn(|cx| self.channel.poll_read_ready(cx))
      .or_cancel(cancel_handle)
      .await?;
    self
      .channel
      .read(limit)
      .map(|buf| buf.unwrap_or_else(BufView::empty))
  }
  /// Tears down the stream: signals completion, cancels pending reads and
  /// closes the channel.
  fn close_channel(&self) {
    // Trigger the promise in JS to cancel the stream if necessarily
    self.data.completion.complete(true);
    // Cancel any outstanding read requests
    self.cancel_handle.cancel();
    // Close the channel to wake up anyone waiting
    self.channel.close();
  }
}
impl Resource for ReadableStreamResource {
  fn name(&self) -> Cow<'_, str> {
    Cow::Borrowed("readableStream")
  }
  fn read(self: Rc<Self>, limit: usize) -> AsyncResult<BufView> {
    Box::pin(
      ReadableStreamResource::read(self, limit)
        .map_err(deno_error::JsErrorBox::from_err),
    )
  }
  fn close(self: Rc<Self>) {
    self.close_channel();
  }
  fn size_hint(&self) -> (u64, Option<u64>) {
    self.size_hint
  }
}
impl Drop for ReadableStreamResource {
  fn drop(&mut self) {
    self.close_channel();
  }
}
// TODO(mmastrac): Move this to deno_core
/// One-shot completion flag that can be awaited as a future; completing it
/// wakes the registered waiter.
#[derive(Clone, Debug, Default)]
pub struct CompletionHandle {
  inner: Rc<RefCell<CompletionHandleInner>>,
}
#[derive(Debug, Default)]
struct CompletionHandleInner {
  complete: bool,
  success: bool,
  waker: Option<Waker>,
}
impl CompletionHandle {
  /// Marks the handle complete and wakes any waiter.
  pub fn complete(&self, success: bool) {
    let mut mut_self = self.inner.borrow_mut();
    mut_self.complete = true;
    mut_self.success = success;
    if let Some(waker) = mut_self.waker.take() {
      // Release the borrow before waking so a re-entrant poll can't panic.
      drop(mut_self);
      waker.wake();
    }
  }
}
impl Future for CompletionHandle {
  type Output = bool;
  fn poll(
    self: Pin<&mut Self>,
    cx: &mut std::task::Context<'_>,
  ) -> std::task::Poll<Self::Output> {
    let mut mut_self = self.inner.borrow_mut();
    if mut_self.complete {
      return std::task::Poll::Ready(mut_self.success);
    }
    // Only the most recent waker is retained; fine for a single waiter.
    mut_self.waker = Some(cx.waker().clone());
    std::task::Poll::Pending
  }
}
/// Allocate a resource that wraps a ReadableStream of unknown length
/// (size hint: at least 0 bytes, no upper bound).
#[op2(fast)]
#[smi]
pub fn op_readable_stream_resource_allocate(state: &mut OpState) -> ResourceId {
  state.resource_table.add(ReadableStreamResource {
    read_queue: Default::default(),
    cancel_handle: Default::default(),
    channel: BoundedBufferChannel::default(),
    data: ReadableStreamResourceData {
      completion: CompletionHandle::default(),
    },
    size_hint: (0, None),
  })
}
/// Allocate a resource that wraps a ReadableStream whose exact byte length
/// is known up front.
#[op2(fast)]
#[smi]
pub fn op_readable_stream_resource_allocate_sized(
  state: &mut OpState,
  #[number] length: u64,
) -> ResourceId {
  // Exact length known: report it as both lower and upper bound.
  let size_hint = (length, Some(length));
  state.resource_table.add(ReadableStreamResource {
    read_queue: Default::default(),
    cancel_handle: Default::default(),
    channel: BoundedBufferChannel::default(),
    data: ReadableStreamResourceData {
      completion: CompletionHandle::default(),
    },
    size_hint,
  })
}
/// Returns a raw pointer to the stream's write channel for the JS fast-path
/// sink, or null when the resource doesn't exist. The pointer must later be
/// released via `op_readable_stream_resource_close`.
#[op2(fast)]
pub fn op_readable_stream_resource_get_sink(
  state: &mut OpState,
  #[smi] rid: ResourceId,
) -> *const c_void {
  let Ok(resource) = state.resource_table.get::<ReadableStreamResource>(rid)
  else {
    return std::ptr::null();
  };
  ExternalPointer::new(resource.channel.clone()).into_raw()
}
external!(BoundedBufferChannel, "stream resource channel");
/// Clones the channel behind a sink pointer produced by
/// `op_readable_stream_resource_get_sink`.
fn get_sender(sender: *const c_void) -> BoundedBufferChannel {
  // SAFETY: We know this is a valid v8::External
  unsafe {
    ExternalPointer::<BoundedBufferChannel>::from_raw(sender)
      .unsafely_deref()
      .clone()
  }
}
/// Consumes and frees a sink pointer; it must not be used afterwards.
fn drop_sender(sender: *const c_void) {
  // SAFETY: We know this is a valid v8::External
  unsafe {
    ExternalPointer::<BoundedBufferChannel>::from_raw(sender).unsafely_take();
  }
}
/// Writes a buffer to the stream, first awaiting write readiness
/// (backpressure). Resolves to `false` once the stream is closed.
#[op2(async)]
pub fn op_readable_stream_resource_write_buf(
  sender: *const c_void,
  #[buffer] buffer: JsBuffer,
) -> impl Future<Output = bool> {
  let sender = get_sender(sender);
  async move {
    poll_fn(|cx| sender.poll_write_ready(cx)).await;
    // Cannot fail: poll_write_ready guaranteed room (or the channel is
    // closed/errored, in which case the write is silently dropped).
    sender.write(buffer.into_parts()).unwrap();
    !sender.closed()
  }
}
/// Write to the channel synchronously, returning 0 if the channel was closed, 1 if we wrote
/// successfully, 2 if the channel was full and we need to block.
#[op2]
pub fn op_readable_stream_resource_write_sync(
  sender: *const c_void,
  #[buffer] buffer: JsBuffer,
) -> u32 {
  let sender = get_sender(sender);
  // Channel full: caller must fall back to the async write path.
  if !sender.can_write() {
    return 2;
  }
  if sender.closed() {
    return 0;
  }
  sender.write(buffer.into_parts()).unwrap();
  1
}
/// Records a JS-side error on the stream; the reader receives it once the
/// buffered data drains. Returns `false` if the stream is closed.
#[op2(fast)]
pub fn op_readable_stream_resource_write_error(
  sender: *const c_void,
  #[string] error: String,
) -> bool {
  let sender = get_sender(sender);
  // We can always write an error, no polling required
  sender.write_error(StreamResourceError::Js(error));
  !sender.closed()
}
/// Closes the stream's write side and frees the sink pointer.
#[op2(fast)]
#[smi]
pub fn op_readable_stream_resource_close(sender: *const c_void) {
  get_sender(sender).close();
  drop_sender(sender);
}
/// Resolves once the stream resource signals completion (closed or
/// dropped); resolves immediately if the resource no longer exists.
#[op2(async)]
pub fn op_readable_stream_resource_await_close(
  state: &mut OpState,
  #[smi] rid: ResourceId,
) -> impl Future<Output = ()> + use<> {
  let completion = state
    .resource_table
    .get::<ReadableStreamResource>(rid)
    .ok()
    .map(|r| r.data.completion.clone());
  async move {
    if let Some(completion) = completion {
      completion.await;
    }
  }
}
/// Owns the completion handle; completing it on drop guarantees that
/// `await_close` waiters are always released.
struct ReadableStreamResourceData {
  completion: CompletionHandle,
}
impl Drop for ReadableStreamResourceData {
  fn drop(&mut self) {
    self.completion.complete(true);
  }
}
#[cfg(test)]
mod tests {
  use std::cell::OnceCell;
  use std::sync::OnceLock;
  use std::sync::atomic::AtomicUsize;
  use std::time::Duration;
  use deno_core::v8;
  use super::*;
  // Process-wide one-time V8 platform init; isolates are per-thread.
  static V8_GLOBAL: OnceLock<()> = OnceLock::new();
  thread_local! {
    static ISOLATE: OnceCell<std::sync::Mutex<v8::OwnedIsolate>> = const { OnceCell::new() };
  }
  /// Runs `f` with this thread's lazily-created isolate.
  fn with_isolate<T>(mut f: impl FnMut(&mut v8::Isolate) -> T) -> T {
    V8_GLOBAL.get_or_init(|| {
      let platform =
        v8::new_unprotected_default_platform(0, false).make_shared();
      v8::V8::initialize_platform(platform);
      v8::V8::initialize();
    });
    ISOLATE.with(|cell| {
      let mut isolate = cell
        .get_or_init(|| {
          std::sync::Mutex::new(v8::Isolate::new(Default::default()))
        })
        .try_lock()
        .unwrap();
      f(&mut isolate)
    })
  }
  /// Allocates a V8-backed slice of the given byte length.
  fn create_buffer(byte_length: usize) -> V8Slice<u8> {
    with_isolate(|isolate| {
      let ptr = v8::ArrayBuffer::new_backing_store(isolate, byte_length);
      // SAFETY: we just made this
      unsafe { V8Slice::from_parts(ptr.into(), 0..byte_length) }
    })
  }
  // The ring accepts at most BUFFER_CHANNEL_SIZE - 1 buffers: one slot is
  // sacrificed to distinguish "full" from "empty".
  #[test]
  fn test_bounded_buffer_channel() {
    let channel = BoundedBufferChannel::default();
    for _ in 0..BUFFER_CHANNEL_SIZE - 1 {
      channel.write(create_buffer(1024)).unwrap();
    }
  }
  #[tokio::test(flavor = "current_thread")]
  async fn test_multi_task() {
    let channel = BoundedBufferChannel::default();
    let channel_send = channel.clone();
    // Fast writer
    let a = deno_core::unsync::spawn(async move {
      for _ in 0..BUFFER_CHANNEL_SIZE * 2 {
        poll_fn(|cx| channel_send.poll_write_ready(cx)).await;
        channel_send
          .write(create_buffer(BUFFER_AGGREGATION_LIMIT))
          .unwrap();
      }
    });
    // Slightly slower reader
    let b = deno_core::unsync::spawn(async move {
      for _ in 0..BUFFER_CHANNEL_SIZE * 2 {
        if cfg!(windows) {
          // windows has ~15ms resolution on sleep, so just yield so
          // this test doesn't take 30 seconds to run
          tokio::task::yield_now().await;
        } else {
          tokio::time::sleep(Duration::from_millis(1)).await;
        }
        poll_fn(|cx| channel.poll_read_ready(cx)).await;
        channel.read(BUFFER_AGGREGATION_LIMIT).unwrap();
      }
    });
    a.await.unwrap();
    b.await.unwrap();
  }
  #[tokio::test(flavor = "current_thread")]
  async fn test_multi_task_small_reads() {
    let channel = BoundedBufferChannel::default();
    let channel_send = channel.clone();
    let total_send = Rc::new(AtomicUsize::new(0));
    let total_send_task = total_send.clone();
    let total_recv = Rc::new(AtomicUsize::new(0));
    let total_recv_task = total_recv.clone();
    // Fast writer
    let a = deno_core::unsync::spawn(async move {
      for _ in 0..BUFFER_CHANNEL_SIZE * 2 {
        poll_fn(|cx| channel_send.poll_write_ready(cx)).await;
        channel_send.write(create_buffer(16)).unwrap();
        total_send_task.fetch_add(16, std::sync::atomic::Ordering::SeqCst);
      }
      // We need to close because we may get aggregated packets and we want a signal
      channel_send.close();
    });
    // Slightly slower reader
    let b = deno_core::unsync::spawn(async move {
      for _ in 0..BUFFER_CHANNEL_SIZE * 2 {
        poll_fn(|cx| channel.poll_read_ready(cx)).await;
        // We want to make sure we're aggregating at least some packets
        while channel.byte_size() <= 16 && !channel.closed() {
          tokio::time::sleep(Duration::from_millis(1)).await;
        }
        let len = channel
          .read(1024)
          .unwrap()
          .map(|b| b.len())
          .unwrap_or_default();
        total_recv_task.fetch_add(len, std::sync::atomic::Ordering::SeqCst);
      }
    });
    a.await.unwrap();
    b.await.unwrap();
    // Every byte written must eventually be read back.
    assert_eq!(
      total_send.load(std::sync::atomic::Ordering::SeqCst),
      total_recv.load(std::sync::atomic::Ordering::SeqCst)
    );
  }
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/ext/web/compression.rs | ext/web/compression.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::cell::RefCell;
use std::io::Write;
use deno_core::op2;
use flate2::Compression;
use flate2::write::DeflateDecoder;
use flate2::write::DeflateEncoder;
use flate2::write::GzDecoder;
use flate2::write::GzEncoder;
use flate2::write::ZlibDecoder;
use flate2::write::ZlibEncoder;
/// Errors surfaced to JS from the Compression/DecompressionStream ops.
#[derive(Debug, thiserror::Error, deno_error::JsError)]
pub enum CompressionError {
  /// A format other than "deflate", "deflate-raw" or "gzip" was requested.
  #[class(type)]
  #[error("Unsupported format")]
  UnsupportedFormat,
  /// The codec was already finished (or never existed).
  #[class(type)]
  #[error("resource is closed")]
  ResourceClosed,
  /// I/O error surfaced to JS as a TypeError (e.g. malformed input data).
  #[class(type)]
  #[error(transparent)]
  IoTypeError(std::io::Error),
  /// I/O error surfaced with its normal inherited error class.
  #[class(inherit)]
  #[error(transparent)]
  Io(std::io::Error),
}
/// A single (de)compression codec; the inner `Option` becomes `None` once
/// the codec has been finished.
#[derive(Debug)]
struct CompressionResource(RefCell<Option<Inner>>);
// SAFETY: we're sure `CompressionResource` can be GCed
unsafe impl deno_core::GarbageCollected for CompressionResource {
  fn trace(&self, _visitor: &mut deno_core::v8::cppgc::Visitor) {}
  fn get_name(&self) -> &'static std::ffi::CStr {
    c"CompressionResource"
  }
}
/// https://wicg.github.io/compression/#supported-formats
#[derive(Debug)]
enum Inner {
  // "deflate" is the zlib-wrapped stream; "deflate-raw" is the bare stream.
  DeflateDecoder(ZlibDecoder<Vec<u8>>),
  DeflateEncoder(ZlibEncoder<Vec<u8>>),
  DeflateRawDecoder(DeflateDecoder<Vec<u8>>),
  DeflateRawEncoder(DeflateEncoder<Vec<u8>>),
  GzDecoder(GzDecoder<Vec<u8>>),
  GzEncoder(GzEncoder<Vec<u8>>),
}
/// Instantiates a codec for `format` ("deflate", "deflate-raw" or "gzip"):
/// a decoder when `is_decoder` is true, otherwise an encoder with the
/// default compression level.
#[op2]
#[cppgc]
pub fn op_compression_new(
  #[string] format: &str,
  is_decoder: bool,
) -> Result<CompressionResource, CompressionError> {
  let out = Vec::new();
  let inner = match format {
    "deflate" => {
      if is_decoder {
        Inner::DeflateDecoder(ZlibDecoder::new(out))
      } else {
        Inner::DeflateEncoder(ZlibEncoder::new(out, Compression::default()))
      }
    }
    "deflate-raw" => {
      if is_decoder {
        Inner::DeflateRawDecoder(DeflateDecoder::new(out))
      } else {
        Inner::DeflateRawEncoder(DeflateEncoder::new(
          out,
          Compression::default(),
        ))
      }
    }
    "gzip" => {
      if is_decoder {
        Inner::GzDecoder(GzDecoder::new(out))
      } else {
        Inner::GzEncoder(GzEncoder::new(out, Compression::default()))
      }
    }
    _ => return Err(CompressionError::UnsupportedFormat),
  };
  Ok(CompressionResource(RefCell::new(Some(inner))))
}
#[op2]
#[buffer]
pub fn op_compression_write(
#[cppgc] resource: &CompressionResource,
#[anybuffer] input: &[u8],
) -> Result<Vec<u8>, CompressionError> {
let mut inner = resource.0.borrow_mut();
let inner = inner.as_mut().ok_or(CompressionError::ResourceClosed)?;
let out: Vec<u8> = match &mut *inner {
Inner::DeflateDecoder(d) => {
d.write_all(input).map_err(CompressionError::IoTypeError)?;
d.flush().map_err(CompressionError::Io)?;
d.get_mut().drain(..)
}
Inner::DeflateEncoder(d) => {
d.write_all(input).map_err(CompressionError::IoTypeError)?;
d.flush().map_err(CompressionError::Io)?;
d.get_mut().drain(..)
}
Inner::DeflateRawDecoder(d) => {
d.write_all(input).map_err(CompressionError::IoTypeError)?;
d.flush().map_err(CompressionError::Io)?;
d.get_mut().drain(..)
}
Inner::DeflateRawEncoder(d) => {
d.write_all(input).map_err(CompressionError::IoTypeError)?;
d.flush().map_err(CompressionError::Io)?;
d.get_mut().drain(..)
}
Inner::GzDecoder(d) => {
d.write_all(input).map_err(CompressionError::IoTypeError)?;
d.flush().map_err(CompressionError::Io)?;
d.get_mut().drain(..)
}
Inner::GzEncoder(d) => {
d.write_all(input).map_err(CompressionError::IoTypeError)?;
d.flush().map_err(CompressionError::Io)?;
d.get_mut().drain(..)
}
}
.collect();
Ok(out)
}
#[op2]
#[buffer]
pub fn op_compression_finish(
#[cppgc] resource: &CompressionResource,
report_errors: bool,
) -> Result<Vec<u8>, CompressionError> {
let inner = resource
.0
.borrow_mut()
.take()
.ok_or(CompressionError::ResourceClosed)?;
let out = match inner {
Inner::DeflateDecoder(d) => {
d.finish().map_err(CompressionError::IoTypeError)
}
Inner::DeflateEncoder(d) => {
d.finish().map_err(CompressionError::IoTypeError)
}
Inner::DeflateRawDecoder(d) => {
d.finish().map_err(CompressionError::IoTypeError)
}
Inner::DeflateRawEncoder(d) => {
d.finish().map_err(CompressionError::IoTypeError)
}
Inner::GzDecoder(d) => d.finish().map_err(CompressionError::IoTypeError),
Inner::GzEncoder(d) => d.finish().map_err(CompressionError::IoTypeError),
};
match out {
Err(err) => {
if report_errors {
Err(err)
} else {
Ok(Vec::with_capacity(0))
}
}
Ok(out) => Ok(out),
}
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/ext/web/console.rs | ext/web/console.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use deno_core::op2;
use deno_core::v8;
#[op2]
pub fn op_preview_entries<'s>(
scope: &mut v8::PinScope<'s, '_>,
object: &v8::Object,
slow_path: bool,
) -> v8::Local<'s, v8::Value> {
let (entries, is_key_value) = object.preview_entries(scope);
match entries {
None => v8::undefined(scope).into(),
Some(entries) => {
if !slow_path {
return entries.into();
}
let ret: [v8::Local<v8::Value>; 2] =
[entries.into(), v8::Boolean::new(scope, is_key_value).into()];
v8::Array::new_with_elements(scope, &ret).into()
}
}
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/ext/web/blob.rs | ext/web/blob.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::cell::RefCell;
use std::collections::HashMap;
use std::fmt::Debug;
use std::rc::Rc;
use std::sync::Arc;
use async_trait::async_trait;
use deno_core::JsBuffer;
use deno_core::OpState;
use deno_core::ToJsBuffer;
use deno_core::op2;
use deno_core::parking_lot::Mutex;
use deno_core::url::Url;
use serde::Deserialize;
use serde::Serialize;
use uuid::Uuid;
/// Errors produced by the blob / object-URL ops.
#[derive(Debug, thiserror::Error, deno_error::JsError)]
pub enum BlobError {
  /// A part id was not present in the `BlobStore`.
  #[class(type)]
  #[error("Blob part not found")]
  BlobPartNotFound,
  /// A requested slice extends past the end of the source part.
  #[class(type)]
  #[error("start + len can not be larger than blob part size")]
  SizeLargerThanBlobPart,
  /// The current context has no `BlobStore` (blob URLs unavailable).
  #[class(type)]
  #[error("Blob URLs are not supported in this context")]
  BlobURLsNotSupported,
  /// URL parsing failed.
  #[class(generic)]
  #[error(transparent)]
  Url(#[from] deno_core::url::ParseError),
}
use crate::Location;
/// Registry of blob parts keyed by UUID.
pub type PartMap = HashMap<Uuid, Arc<dyn BlobPart + Send + Sync>>;
/// Shared store of blob parts and `blob:` object URLs.
/// Uses `parking_lot::Mutex` for interior mutability so one store can be
/// shared behind an `Arc`.
#[derive(Default, Debug)]
pub struct BlobStore {
  // part id -> part; parts are refcounted so slices can share backing data.
  parts: Mutex<PartMap>,
  // object URL (fragment-free) -> the blob it resolves to.
  object_urls: Mutex<HashMap<Url, Arc<Blob>>>,
}
impl BlobStore {
  /// Register a blob part under a freshly generated UUID and return the id.
  pub fn insert_part(&self, part: Arc<dyn BlobPart + Send + Sync>) -> Uuid {
    let id = Uuid::new_v4();
    self.parts.lock().insert(id, part);
    id
  }
  /// Look up a part by id, returning a clone of its `Arc` if registered.
  pub fn get_part(&self, id: &Uuid) -> Option<Arc<dyn BlobPart + Send + Sync>> {
    self.parts.lock().get(id).cloned()
  }
  /// Remove a part by id, returning it if it was registered.
  pub fn remove_part(
    &self,
    id: &Uuid,
  ) -> Option<Arc<dyn BlobPart + Send + Sync>> {
    self.parts.lock().remove(id)
  }
  /// Resolve an object URL to its blob. The URL's fragment is stripped
  /// before lookup, matching how blob URLs are resolved.
  pub fn get_object_url(&self, mut url: Url) -> Option<Arc<Blob>> {
    url.set_fragment(None);
    self.object_urls.lock().get(&url).cloned()
  }
  /// Mint a new `blob:` URL for `blob`, scoped to the origin of
  /// `maybe_location` (or the opaque origin "null" when absent).
  pub fn insert_object_url(
    &self,
    blob: Blob,
    maybe_location: Option<Url>,
  ) -> Url {
    let origin = maybe_location
      .map(|location| location.origin().ascii_serialization())
      .unwrap_or_else(|| "null".to_string());
    let id = Uuid::new_v4();
    // `blob:{origin}/{uuid}` is always a valid URL, hence the unwrap.
    let url = Url::parse(&format!("blob:{origin}/{id}")).unwrap();
    self.object_urls.lock().insert(url.clone(), Arc::new(blob));
    url
  }
  /// Revoke an object URL; a no-op when the URL was never registered.
  pub fn remove_object_url(&self, url: &Url) {
    self.object_urls.lock().remove(url);
  }
  /// Drop every registered part and object URL.
  pub fn clear(&self) {
    self.parts.lock().clear();
    self.object_urls.lock().clear();
  }
}
/// A blob: a media type plus an ordered list of (possibly shared) parts.
#[derive(Debug)]
pub struct Blob {
  // MIME type reported for the blob (may be empty).
  pub media_type: String,
  // Ordered parts; total blob content is their concatenation.
  pub parts: Vec<Arc<dyn BlobPart + Send + Sync>>,
}
impl Blob {
  // TODO(lucacsonato): this should be a stream!
  /// Read every part and concatenate them into one buffer.
  pub async fn read_all(&self) -> Vec<u8> {
    let size = self.size();
    // Preallocate the exact total to avoid repeated growth.
    let mut bytes = Vec::with_capacity(size);
    for part in &self.parts {
      let chunk = part.read().await;
      bytes.extend_from_slice(chunk);
    }
    // Parts must report sizes consistent with what they yield.
    assert_eq!(bytes.len(), size);
    bytes
  }
  /// Total size in bytes across all parts.
  fn size(&self) -> usize {
    self.parts.iter().map(|part| part.size()).sum()
  }
}
/// A single chunk of blob data that can be read asynchronously.
#[async_trait]
pub trait BlobPart: Debug {
  // TODO(lucacsonato): this should be a stream!
  /// Borrow the part's full contents.
  async fn read<'a>(&'a self) -> &'a [u8];
  /// Size of the part in bytes; must match what `read` yields.
  fn size(&self) -> usize;
}
/// A blob part fully backed by an owned in-memory buffer.
#[derive(Debug)]
pub struct InMemoryBlobPart(Vec<u8>);
impl From<Vec<u8>> for InMemoryBlobPart {
  fn from(vec: Vec<u8>) -> Self {
    Self(vec)
  }
}
#[async_trait]
impl BlobPart for InMemoryBlobPart {
  // Data is already resident, so reads complete immediately.
  async fn read<'a>(&'a self) -> &'a [u8] {
    &self.0
  }
  fn size(&self) -> usize {
    self.0.len()
  }
}
/// A view into a sub-range of another blob part.
///
/// Invariant: `start + len <= part.size()`, enforced at construction time
/// by `op_blob_slice_part`; `read` would otherwise panic on the slice.
#[derive(Debug)]
pub struct SlicedBlobPart {
  part: Arc<dyn BlobPart + Send + Sync>,
  start: usize,
  len: usize,
}
#[async_trait]
impl BlobPart for SlicedBlobPart {
  async fn read<'a>(&'a self) -> &'a [u8] {
    let original = self.part.read().await;
    &original[self.start..self.start + self.len]
  }
  fn size(&self) -> usize {
    self.len
  }
}
/// Copy `data` into a new in-memory blob part and return its id.
#[op2]
#[serde]
pub fn op_blob_create_part(
  state: &mut OpState,
  #[buffer] data: JsBuffer,
) -> Uuid {
  let part = InMemoryBlobPart(data.to_vec());
  state.borrow::<Arc<BlobStore>>().insert_part(Arc::new(part))
}
/// Options for `op_blob_slice_part`: the byte range of the slice.
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct SliceOptions {
  // Offset into the source part where the slice begins.
  start: usize,
  // Length of the slice in bytes.
  len: usize,
}
/// Register a new part that is a `[start, start + len)` view of an existing
/// part, returning the new part's id.
///
/// Errors with `BlobPartNotFound` for an unknown id and
/// `SizeLargerThanBlobPart` when the requested range exceeds the part.
#[op2]
#[serde]
pub fn op_blob_slice_part(
  state: &mut OpState,
  #[serde] id: Uuid,
  #[serde] options: SliceOptions,
) -> Result<Uuid, BlobError> {
  let blob_store = state.borrow::<Arc<BlobStore>>();
  let part = blob_store
    .get_part(&id)
    .ok_or(BlobError::BlobPartNotFound)?;
  let SliceOptions { start, len } = options;
  // Checked add: `start + len` could otherwise panic in debug builds or
  // wrap past the bounds check in release builds for huge inputs.
  let end = start
    .checked_add(len)
    .ok_or(BlobError::SizeLargerThanBlobPart)?;
  if end > part.size() {
    return Err(BlobError::SizeLargerThanBlobPart);
  }
  let sliced_part = SlicedBlobPart { part, start, len };
  Ok(blob_store.insert_part(Arc::new(sliced_part)))
}
/// Read the full contents of the blob part `id` into a JS buffer.
#[op2(async)]
#[serde]
pub async fn op_blob_read_part(
  state: Rc<RefCell<OpState>>,
  #[serde] id: Uuid,
) -> Result<ToJsBuffer, BlobError> {
  // Look the part up eagerly so the OpState borrow is released before
  // awaiting the read.
  let maybe_part = state.borrow().borrow::<Arc<BlobStore>>().get_part(&id);
  let part = maybe_part.ok_or(BlobError::BlobPartNotFound)?;
  let buf = part.read().await;
  Ok(ToJsBuffer::from(buf.to_vec()))
}
/// Unregister the blob part `id`; dropping the last reference frees its
/// backing data. A no-op when the id is unknown.
#[op2]
pub fn op_blob_remove_part(state: &mut OpState, #[serde] id: Uuid) {
  state.borrow::<Arc<BlobStore>>().remove_part(&id);
}
/// Assemble a blob from registered part ids and mint a `blob:` object URL
/// for it, scoped to the current `Location` origin when one is set.
///
/// Errors with `BlobPartNotFound` when any id is unknown.
#[op2]
#[string]
pub fn op_blob_create_object_url(
  state: &mut OpState,
  #[string] media_type: String,
  #[serde] part_ids: Vec<Uuid>,
) -> Result<String, BlobError> {
  // Clone the Arc once so we don't have to re-borrow the store from
  // OpState after reading the Location (the original borrowed it twice).
  let blob_store = state.borrow::<Arc<BlobStore>>().clone();
  let mut parts = Vec::with_capacity(part_ids.len());
  for part_id in part_ids {
    let part = blob_store
      .get_part(&part_id)
      .ok_or(BlobError::BlobPartNotFound)?;
    parts.push(part);
  }
  let blob = Blob { media_type, parts };
  let maybe_location = state
    .try_borrow::<Location>()
    .map(|location| location.0.clone());
  let url = blob_store.insert_object_url(blob, maybe_location);
  Ok(url.into())
}
/// Revoke a previously created object URL. Unknown URLs are ignored;
/// only URL parse failures produce an error.
#[op2(fast)]
pub fn op_blob_revoke_object_url(
  state: &mut OpState,
  #[string] url: &str,
) -> Result<(), BlobError> {
  let parsed = Url::parse(url)?;
  state.borrow::<Arc<BlobStore>>().remove_object_url(&parsed);
  Ok(())
}
/// Serialized form of a blob handed back to JS by `op_blob_from_object_url`.
#[derive(Serialize)]
pub struct ReturnBlob {
  pub media_type: String,
  pub parts: Vec<ReturnBlobPart>,
}
/// One part of a `ReturnBlob`: the id it was (re-)registered under in the
/// store plus its size.
#[derive(Serialize)]
pub struct ReturnBlobPart {
  pub uuid: Uuid,
  pub size: usize,
}
/// Resolve a `blob:` URL back into a blob description, re-registering each
/// part under a fresh id so JS can read them independently.
///
/// Returns `Ok(None)` for non-`blob:` schemes or unknown object URLs;
/// errors with `BlobURLsNotSupported` when no store exists in this context.
#[op2]
#[serde]
pub fn op_blob_from_object_url(
  state: &mut OpState,
  #[string] url: String,
) -> Result<Option<ReturnBlob>, BlobError> {
  let url = Url::parse(&url)?;
  if url.scheme() != "blob" {
    return Ok(None);
  }
  let blob_store = state
    .try_borrow::<Arc<BlobStore>>()
    .ok_or(BlobError::BlobURLsNotSupported)?;
  let Some(blob) = blob_store.get_object_url(url) else {
    return Ok(None);
  };
  let parts = blob
    .parts
    .iter()
    .map(|part| ReturnBlobPart {
      uuid: blob_store.insert_part(part.clone()),
      size: part.size(),
    })
    .collect();
  Ok(Some(ReturnBlob {
    media_type: blob.media_type.clone(),
    parts,
  }))
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/ext/web/timers.rs | ext/web/timers.rs | // Copyright 2018-2025 the Deno authors. MIT license.
//! This module helps deno implement timers and performance APIs.
use std::time::Duration;
use std::time::Instant;
use std::time::SystemTime;
use std::time::UNIX_EPOCH;
use deno_core::OpState;
use deno_core::op2;
/// Monotonic timestamp captured at startup; the origin for `op_now`.
pub struct StartTime(Instant);
impl Default for StartTime {
  // Capture "now" at construction time.
  fn default() -> Self {
    Self(Instant::now())
  }
}
impl std::ops::Deref for StartTime {
  type Target = Instant;
  // Deref so callers can use `Instant` methods (e.g. `elapsed`) directly.
  fn deref(&self) -> &Self::Target {
    &self.0
  }
}
/// Serialize `duration` into `out` as two native-endian u32 values:
/// whole seconds (truncated to 32 bits) followed by sub-second nanoseconds.
/// Writes nothing when `out` is shorter than 8 bytes.
fn expose_time(duration: Duration, out: &mut [u8]) {
  let Some(dst) = out.get_mut(0..8) else { return };
  let (secs_dst, nanos_dst) = dst.split_at_mut(4);
  secs_dst.copy_from_slice(&(duration.as_secs() as u32).to_ne_bytes());
  nanos_dst.copy_from_slice(&duration.subsec_nanos().to_ne_bytes());
}
/// Write the monotonic time elapsed since `StartTime` into `buf`
/// (seconds + nanoseconds, see `expose_time`).
#[op2(fast)]
pub fn op_now(state: &mut OpState, #[buffer] buf: &mut [u8]) {
  let elapsed = state.borrow::<StartTime>().elapsed();
  expose_time(elapsed, buf);
}
/// Write the estimated monotonic time of the Unix epoch into `buf`.
#[op2(fast)]
pub fn op_time_origin(state: &mut OpState, #[buffer] buf: &mut [u8]) {
  // https://w3c.github.io/hr-time/#dfn-estimated-monotonic-time-of-the-unix-epoch
  let wall_time = SystemTime::now();
  let monotonic_time = state.borrow::<StartTime>().elapsed();
  // Saturate instead of panicking: the wall clock can be set backwards at
  // runtime (or sit before the epoch), which would otherwise make the
  // `duration_since` unwrap or the subtraction panic.
  let epoch = wall_time
    .duration_since(UNIX_EPOCH)
    .unwrap_or_default()
    .saturating_sub(monotonic_time);
  expose_time(epoch, buf);
}
/// No-op async op. Its lazy scheduling is what produces the deferral:
/// awaiting it yields back to the event loop before resuming.
#[allow(clippy::unused_async)]
#[op2(async(lazy), fast)]
pub async fn op_defer() {}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/ext/web/urlpattern.rs | ext/web/urlpattern.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use deno_core::op2;
use urlpattern::quirks;
use urlpattern::quirks::MatchInput;
use urlpattern::quirks::StringOrInit;
use urlpattern::quirks::UrlPattern;
deno_error::js_error_wrapper!(urlpattern::Error, UrlPatternError, "TypeError");
/// Construct a `URLPattern` from its constructor input and optional base
/// URL, returning the parsed pattern. Invalid input surfaces as a
/// JS `TypeError` via `UrlPatternError`.
#[op2]
#[serde]
pub fn op_urlpattern_parse(
  #[serde] input: StringOrInit,
  #[string] base_url: Option<String>,
  #[serde] options: urlpattern::UrlPatternOptions,
) -> Result<UrlPattern, UrlPatternError> {
  let base = base_url.as_deref();
  let init = quirks::process_construct_pattern_input(input, base)?;
  Ok(quirks::parse_pattern(init, options)?)
}
/// Normalize `URLPattern.exec`/`test` input. Returns `Ok(None)` when the
/// input cannot be processed into match input at all; otherwise the parsed
/// match input paired with the original inputs.
#[op2]
#[serde]
pub fn op_urlpattern_process_match_input(
  #[serde] input: StringOrInit,
  #[string] base_url: Option<String>,
) -> Result<Option<(MatchInput, quirks::Inputs)>, UrlPatternError> {
  let Some((input, inputs)) =
    quirks::process_match_input(input, base_url.as_deref())?
  else {
    return Ok(None);
  };
  Ok(quirks::parse_match_input(input).map(|input| (input, inputs)))
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/ext/web/benches/url_ops.rs | ext/web/benches/url_ops.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use deno_bench_util::bench_js_sync;
use deno_bench_util::bench_or_profile;
use deno_bench_util::bencher::Bencher;
use deno_bench_util::bencher::benchmark_group;
use deno_core::Extension;
/// Build the extension set for the URL benchmarks: webidl + deno_web plus a
/// tiny setup extension that exposes `URL` on `globalThis` for the JS
/// snippet under test.
fn setup() -> Vec<Extension> {
  deno_core::extension!(
    bench_setup,
    esm_entry_point = "ext:bench_setup/setup",
    esm = ["ext:bench_setup/setup" = {
      source = r#"
      import { URL } from "ext:deno_web/00_url.js";
      globalThis.URL = URL;
      "#
    }]
  );
  vec![
    deno_webidl::deno_webidl::init(),
    deno_web::deno_web::init(Default::default(), None, Default::default()),
    bench_setup::init(),
  ]
}
/// Benchmark: construct a `URL` from a constant string via the JS binding.
fn bench_url_parse(b: &mut Bencher) {
  bench_js_sync(b, r#"new URL(`http://www.google.com/`);"#, setup);
}
benchmark_group!(benches, bench_url_parse,);
bench_or_profile!(benches);
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/ext/web/benches/timers_ops.rs | ext/web/benches/timers_ops.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use deno_bench_util::bench_js_async;
use deno_bench_util::bench_or_profile;
use deno_bench_util::bencher::Bencher;
use deno_bench_util::bencher::benchmark_group;
use deno_core::Extension;
/// Empty permissions marker placed into op state for the benchmark runtime.
#[derive(Clone)]
struct Permissions;
/// Build the extension set for the timer benchmarks: webidl + deno_web plus
/// a setup extension exposing `setTimeout` on `globalThis`.
fn setup() -> Vec<Extension> {
  deno_core::extension!(
    bench_setup,
    esm_entry_point = "ext:bench_setup/setup",
    esm = ["ext:bench_setup/setup" = {
      source = r#"
      import { setTimeout } from "ext:deno_web/02_timers.js";
      globalThis.setTimeout = setTimeout;
      "#
    }],
    state = |state| {
      state.put(Permissions {});
    },
  );
  vec![
    deno_webidl::deno_webidl::init(),
    deno_web::deno_web::init(Default::default(), None, Default::default()),
    bench_setup::init(),
  ]
}
/// Benchmark: schedule (and resolve) a zero-delay `setTimeout`.
fn bench_set_timeout(b: &mut Bencher) {
  bench_js_async(b, r#"setTimeout(() => {}, 0);"#, setup);
}
benchmark_group!(benches, bench_set_timeout,);
bench_or_profile!(benches);
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/ext/web/benches/encoding.rs | ext/web/benches/encoding.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use deno_bench_util::bench_js_sync;
use deno_bench_util::bench_or_profile;
use deno_bench_util::bencher::Bencher;
use deno_bench_util::bencher::benchmark_group;
use deno_core::Extension;
/// Empty permissions marker placed into op state for the benchmark runtime.
#[derive(Clone)]
struct Permissions;
/// Build the extension set for the text-encoding benchmarks: exposes
/// `TextDecoder` and pre-encodes a ~12 KB UTF-8 fixture as `hello12k`.
fn setup() -> Vec<Extension> {
  deno_core::extension!(
    bench_setup,
    esm_entry_point = "ext:bench_setup/setup",
    esm = ["ext:bench_setup/setup" = {
      source = r#"
      import { TextDecoder } from "ext:deno_web/08_text_encoding.js";
      globalThis.TextDecoder = TextDecoder;
      globalThis.hello12k = Deno.core.encode("hello world\n".repeat(1e3));
      "#
    }],
    state = |state| {
      state.put(Permissions {});
    },
  );
  vec![
    deno_webidl::deno_webidl::init(),
    deno_web::deno_web::init(Default::default(), None, Default::default()),
    bench_setup::init(),
  ]
}
/// Benchmark: decode the 12 KB fixture with a fresh `TextDecoder`.
fn bench_encode_12kb(b: &mut Bencher) {
  bench_js_sync(b, r#"new TextDecoder().decode(hello12k);"#, setup);
}
benchmark_group!(benches, bench_encode_12kb);
bench_or_profile!(benches);
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/ext/http/response_body.rs | ext/http/response_body.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::io::Write;
use std::pin::Pin;
use std::rc::Rc;
use std::task::ready;
use brotli::enc::encode::BrotliEncoderOperation;
use brotli::enc::encode::BrotliEncoderParameter;
use brotli::enc::encode::BrotliEncoderStateStruct;
use brotli::writer::StandardAlloc;
use bytes::Bytes;
use bytes::BytesMut;
use deno_core::AsyncResult;
use deno_core::BufView;
use deno_core::Resource;
use deno_core::futures::FutureExt;
use deno_error::JsErrorBox;
use flate2::write::GzEncoder;
use hyper::body::Frame;
use hyper::body::SizeHint;
use pin_project::pin_project;
/// Simplification for nested types we use for our streams. We provide a way to convert from
/// this type into Hyper's body [`Frame`].
pub enum ResponseStreamResult {
  /// Stream is over.
  EndOfStream,
  /// Stream provided non-empty data.
  NonEmptyBuf(BufView),
  /// Stream is ready, but provided no data. Retry. This is a result that is like Pending, but does
  /// not register a waker and should be called again at the lowest level of this code. Generally this
  /// will only be returned from compression streams that require additional buffering.
  NoData,
  /// Stream failed; the error is forwarded to hyper as the body error.
  Error(JsErrorBox),
}
/// Convert a stream result into hyper's frame representation:
/// `EndOfStream` -> `None`, data -> a data frame, error -> `Some(Err)`.
/// `NoData` must never reach this conversion — callers retry on it.
impl From<ResponseStreamResult> for Option<Result<Frame<BufView>, JsErrorBox>> {
  fn from(value: ResponseStreamResult) -> Self {
    match value {
      ResponseStreamResult::EndOfStream => None,
      ResponseStreamResult::NonEmptyBuf(buf) => Some(Ok(Frame::data(buf))),
      ResponseStreamResult::Error(err) => Some(Err(err)),
      // This result should be handled by retrying
      ResponseStreamResult::NoData => unimplemented!(),
    }
  }
}
/// Poll-based source of response body frames (our internal analogue of
/// hyper's `Body` polling).
pub trait PollFrame: Unpin {
  /// Poll for the next frame; see `ResponseStreamResult` for the protocol.
  fn poll_frame(
    self: Pin<&mut Self>,
    cx: &mut std::task::Context<'_>,
  ) -> std::task::Poll<ResponseStreamResult>;
  /// Best-effort body size estimate for the `Content-Length` decision.
  fn size_hint(&self) -> SizeHint;
}
/// Response body compression negotiated for a request.
#[derive(PartialEq, Eq)]
pub enum Compression {
  None,
  GZip,
  Brotli,
}
/// Source of streamed response bytes.
pub enum ResponseStream {
  /// A resource stream, piped in fast mode.
  Resource(ResourceBodyAdapter),
  #[cfg(test)]
  TestChannel(tokio::sync::mpsc::Receiver<BufView>),
}
impl ResponseStream {
  /// Abort the stream, closing the underlying resource if any.
  pub fn abort(self) {
    match self {
      ResponseStream::Resource(resource) => resource.stm.close(),
      #[cfg(test)]
      ResponseStream::TestChannel(..) => {}
    }
  }
}
/// The various states a response body can be in: not-yet-set, finished,
/// a static buffer, or a (possibly compressed) stream.
#[derive(Default)]
pub enum ResponseBytesInner {
  /// An empty stream.
  #[default]
  Empty,
  /// A completed stream.
  Done,
  /// A static buffer of bytes, sent in one fell swoop.
  Bytes(BufView),
  /// An uncompressed stream.
  UncompressedStream(ResponseStream),
  /// A GZip stream.
  GZipStream(Box<GZipResponseStream>),
  /// A Brotli stream.
  BrotliStream(Box<BrotliResponseStream>),
}
impl std::fmt::Debug for ResponseBytesInner {
  /// Print only the variant name; payloads are elided.
  fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
    let label = match self {
      Self::Done => "Done",
      Self::Empty => "Empty",
      Self::Bytes(..) => "Bytes",
      Self::UncompressedStream(..) => "Uncompressed",
      Self::GZipStream(..) => "GZip",
      Self::BrotliStream(..) => "Brotli",
    };
    f.write_str(label)
  }
}
impl ResponseBytesInner {
  /// Tear down the body, aborting any underlying stream.
  pub fn abort(self) {
    match self {
      Self::Done | Self::Empty | Self::Bytes(..) => {}
      Self::BrotliStream(stm) => stm.abort(),
      Self::GZipStream(stm) => stm.abort(),
      Self::UncompressedStream(stm) => stm.abort(),
    }
  }
  /// Exact size for static/finished bodies, the stream's own hint for
  /// uncompressed streams, and unknown for compressed streams.
  pub fn size_hint(&self) -> SizeHint {
    match self {
      Self::Done => SizeHint::with_exact(0),
      Self::Empty => SizeHint::with_exact(0),
      Self::Bytes(bytes) => SizeHint::with_exact(bytes.len() as u64),
      Self::UncompressedStream(res) => res.size_hint(),
      Self::GZipStream(..) => SizeHint::default(),
      Self::BrotliStream(..) => SizeHint::default(),
    }
  }
  /// Wrap `stream` in the streaming compressor for `compression`.
  fn from_stream(compression: Compression, stream: ResponseStream) -> Self {
    match compression {
      Compression::GZip => {
        Self::GZipStream(Box::new(GZipResponseStream::new(stream)))
      }
      Compression::Brotli => {
        Self::BrotliStream(Box::new(BrotliResponseStream::new(stream)))
      }
      _ => Self::UncompressedStream(stream),
    }
  }
  /// Stream a resource as the body, optionally closing it on completion.
  pub fn from_resource(
    compression: Compression,
    stm: Rc<dyn Resource>,
    auto_close: bool,
  ) -> Self {
    Self::from_stream(
      compression,
      ResponseStream::Resource(ResourceBodyAdapter::new(stm, auto_close)),
    )
  }
  /// One-shot gzip compression of `data` at level "fast".
  /// Writing to an in-memory `Vec` sink cannot fail, hence the unwraps.
  fn gzip_in_memory(data: &[u8]) -> Vec<u8> {
    let mut writer = GzEncoder::new(Vec::new(), flate2::Compression::fast());
    writer.write_all(data).unwrap();
    writer.finish().unwrap()
  }
  /// One-shot brotli compression of `data`.
  ///
  /// Quality level 6 is based on google's nginx default value for
  /// on-the-fly compression:
  /// https://github.com/google/ngx_brotli#brotli_comp_level
  /// lgwin 22 is equivalent to brotli window size of (2**22)-16 bytes (~4MB).
  fn brotli_in_memory(data: &[u8]) -> Vec<u8> {
    let mut writer = brotli::CompressorWriter::new(Vec::new(), 65 * 1024, 6, 22);
    writer.write_all(data).unwrap();
    writer.flush().unwrap();
    writer.into_inner()
  }
  /// Build a static body from a `BufView`, compressing it eagerly if needed.
  pub fn from_bufview(compression: Compression, buf: BufView) -> Self {
    match compression {
      Compression::GZip => {
        Self::Bytes(BufView::from(Self::gzip_in_memory(&buf)))
      }
      Compression::Brotli => {
        Self::Bytes(BufView::from(Self::brotli_in_memory(&buf)))
      }
      _ => Self::Bytes(buf),
    }
  }
  /// Build a static body from a `Vec<u8>`, compressing it eagerly if needed.
  pub fn from_vec(compression: Compression, vec: Vec<u8>) -> Self {
    match compression {
      Compression::GZip => {
        Self::Bytes(BufView::from(Self::gzip_in_memory(&vec)))
      }
      Compression::Brotli => {
        Self::Bytes(BufView::from(Self::brotli_in_memory(&vec)))
      }
      _ => Self::Bytes(BufView::from(vec)),
    }
  }
  /// Did we complete this response successfully?
  pub fn is_complete(&self) -> bool {
    matches!(self, ResponseBytesInner::Done | ResponseBytesInner::Empty)
  }
}
/// Adapts a `deno_core::Resource` into a frame stream by issuing repeated
/// 64 KiB reads and keeping the in-flight read future between polls.
pub struct ResourceBodyAdapter {
  // Close the resource when the stream reaches EOF.
  auto_close: bool,
  stm: Rc<dyn Resource>,
  // The currently pending read.
  future: AsyncResult<BufView>,
}
impl ResourceBodyAdapter {
  /// Start the first read immediately so `poll_frame` always has a future.
  pub fn new(stm: Rc<dyn Resource>, auto_close: bool) -> Self {
    let future = stm.clone().read(64 * 1024);
    ResourceBodyAdapter {
      auto_close,
      stm,
      future,
    }
  }
}
/// Dispatch polling/size-hint to whichever stream variant is active.
impl PollFrame for ResponseStream {
  fn poll_frame(
    mut self: Pin<&mut Self>,
    cx: &mut std::task::Context<'_>,
  ) -> std::task::Poll<ResponseStreamResult> {
    match &mut *self {
      ResponseStream::Resource(res) => Pin::new(res).poll_frame(cx),
      #[cfg(test)]
      ResponseStream::TestChannel(rx) => Pin::new(rx).poll_frame(cx),
    }
  }
  fn size_hint(&self) -> SizeHint {
    match self {
      ResponseStream::Resource(res) => res.size_hint(),
      #[cfg(test)]
      ResponseStream::TestChannel(_) => SizeHint::default(),
    }
  }
}
impl PollFrame for ResourceBodyAdapter {
  /// Poll the in-flight read. An empty read means EOF (closing the resource
  /// when `auto_close` is set); otherwise re-arm a new read and yield bytes.
  fn poll_frame(
    mut self: Pin<&mut Self>,
    cx: &mut std::task::Context<'_>,
  ) -> std::task::Poll<ResponseStreamResult> {
    let res = match ready!(self.future.poll_unpin(cx)) {
      Err(err) => ResponseStreamResult::Error(err),
      Ok(buf) => {
        if buf.is_empty() {
          if self.auto_close {
            self.stm.clone().close();
          }
          ResponseStreamResult::EndOfStream
        } else {
          // Re-arm the future
          self.future = self.stm.clone().read(64 * 1024);
          ResponseStreamResult::NonEmptyBuf(buf)
        }
      }
    };
    std::task::Poll::Ready(res)
  }
  /// Translate the resource's `(lower, Option<upper>)` hint into hyper's
  /// `SizeHint`.
  fn size_hint(&self) -> SizeHint {
    let hint = self.stm.size_hint();
    let mut size_hint = SizeHint::new();
    size_hint.set_lower(hint.0);
    if let Some(upper) = hint.1 {
      size_hint.set_upper(upper)
    }
    size_hint
  }
}
/// Test-only frame source fed from an mpsc channel; channel closure maps
/// to end-of-stream.
#[cfg(test)]
impl PollFrame for tokio::sync::mpsc::Receiver<BufView> {
  fn poll_frame(
    mut self: Pin<&mut Self>,
    cx: &mut std::task::Context<'_>,
  ) -> std::task::Poll<ResponseStreamResult> {
    let res = match ready!(self.poll_recv(cx)) {
      Some(buf) => ResponseStreamResult::NonEmptyBuf(buf),
      None => ResponseStreamResult::EndOfStream,
    };
    std::task::Poll::Ready(res)
  }
  fn size_hint(&self) -> SizeHint {
    SizeHint::default()
  }
}
/// State machine for the gzip body: emit header, stream compressed data,
/// flush the compressor, emit the CRC/length trailer, then end.
#[derive(Copy, Clone, Debug)]
enum GZipState {
  Header,
  Streaming,
  Flushing,
  Trailer,
  EndOfStream,
}
/// Streaming gzip wrapper around an inner `ResponseStream`.
#[pin_project]
pub struct GZipResponseStream {
  // Raw deflate compressor; the gzip header/trailer are written manually,
  // hence `zlib_header = false` in `new`.
  stm: flate2::Compress,
  // Running CRC32 + byte count of the *uncompressed* data for the trailer.
  crc: flate2::Crc,
  // Spare output buffer kept between polls to avoid reallocating.
  next_buf: Option<BytesMut>,
  // Input the compressor didn't consume on the previous poll.
  partial: Option<BufView>,
  #[pin]
  underlying: ResponseStream,
  state: GZipState,
}
impl GZipResponseStream {
  pub fn new(underlying: ResponseStream) -> Self {
    Self {
      stm: flate2::Compress::new(flate2::Compression::fast(), false),
      crc: flate2::Crc::new(),
      next_buf: None,
      partial: None,
      state: GZipState::Header,
      underlying,
    }
  }
  /// Abort the underlying stream.
  pub fn abort(self) {
    self.underlying.abort()
  }
}
/// This is a minimal GZip header suitable for serving data from a webserver. We don't need to provide
/// most of the information. We're skipping header name, CRC, etc, and providing a null timestamp.
///
/// We're using compression level 1, as higher levels don't produce significant size differences. This
/// is probably the reason why nginx's default gzip compression level is also 1:
///
/// https://nginx.org/en/docs/http/ngx_http_gzip_module.html#gzip_comp_level
///
/// Byte layout (per RFC 1952): magic `1f 8b`, method 08 (deflate), flags 0,
/// 4-byte mtime 0, then XFL and OS bytes — NOTE(review): XFL=0x01 is
/// non-standard (RFC 1952 defines 2/4) but widely tolerated; OS=0xff means
/// "unknown".
static GZIP_HEADER: Bytes =
  Bytes::from_static(&[0x1f, 0x8b, 0x08, 0, 0, 0, 0, 0, 0x01, 0xff]);
impl PollFrame for GZipResponseStream {
  /// Drive the state machine: emit the static header first, then compressed
  /// data chunks, then the CRC32+length trailer, then end-of-stream.
  fn poll_frame(
    self: Pin<&mut Self>,
    cx: &mut std::task::Context<'_>,
  ) -> std::task::Poll<ResponseStreamResult> {
    let this = self.get_mut();
    let state = &mut this.state;
    let orig_state = *state;
    let frame = match *state {
      GZipState::EndOfStream => {
        return std::task::Poll::Ready(ResponseStreamResult::EndOfStream);
      }
      GZipState::Header => {
        *state = GZipState::Streaming;
        return std::task::Poll::Ready(ResponseStreamResult::NonEmptyBuf(
          BufView::from(GZIP_HEADER.clone()),
        ));
      }
      GZipState::Trailer => {
        *state = GZipState::EndOfStream;
        // gzip trailer: CRC32 of the uncompressed data followed by its
        // length (mod 2^32), both little-endian.
        let mut v = Vec::with_capacity(8);
        v.extend(&this.crc.sum().to_le_bytes());
        v.extend(&this.crc.amount().to_le_bytes());
        return std::task::Poll::Ready(ResponseStreamResult::NonEmptyBuf(
          BufView::from(v),
        ));
      }
      GZipState::Streaming => match this.partial.take() {
        // Resume with input the compressor didn't consume last time.
        Some(partial) => ResponseStreamResult::NonEmptyBuf(partial),
        _ => {
          ready!(Pin::new(&mut this.underlying).poll_frame(cx))
        }
      },
      GZipState::Flushing => ResponseStreamResult::EndOfStream,
    };
    let stm = &mut this.stm;
    // Ideally we could use MaybeUninit here, but flate2 requires &[u8]. We should also try
    // to dynamically adjust this buffer.
    let mut buf = this
      .next_buf
      .take()
      .unwrap_or_else(|| BytesMut::zeroed(64 * 1024));
    let start_in = stm.total_in();
    let start_out = stm.total_out();
    let res = match frame {
      // Short-circuit these and just return
      x @ (ResponseStreamResult::NoData | ResponseStreamResult::Error(..)) => {
        return std::task::Poll::Ready(x);
      }
      ResponseStreamResult::EndOfStream => {
        *state = GZipState::Flushing;
        stm.compress(&[], &mut buf, flate2::FlushCompress::Finish)
      }
      ResponseStreamResult::NonEmptyBuf(mut input) => {
        let res = stm.compress(&input, &mut buf, flate2::FlushCompress::Sync);
        // Only the consumed prefix counts toward the CRC/length trailer.
        let len_in = (stm.total_in() - start_in) as usize;
        debug_assert!(len_in <= input.len());
        this.crc.update(&input[..len_in]);
        if len_in < input.len() {
          // Compressor didn't take everything; stash the remainder for the
          // next poll.
          input.advance_cursor(len_in);
          this.partial = Some(input);
        }
        res
      }
    };
    let len = stm.total_out() - start_out;
    let res = match res {
      Err(err) => {
        ResponseStreamResult::Error(JsErrorBox::generic(err.to_string()))
      }
      Ok(flate2::Status::BufError) => {
        // This should not happen
        unreachable!("old={orig_state:?} new={state:?} buf_len={}", buf.len());
      }
      Ok(flate2::Status::Ok) => {
        if len == 0 {
          // No output yet; keep the buffer for reuse and have the caller
          // poll again (NoData, not Pending — no waker is registered).
          this.next_buf = Some(buf);
          ResponseStreamResult::NoData
        } else {
          buf.truncate(len as usize);
          ResponseStreamResult::NonEmptyBuf(BufView::from(buf.freeze()))
        }
      }
      Ok(flate2::Status::StreamEnd) => {
        // Compressor fully flushed; the trailer goes out on the next poll.
        *state = GZipState::Trailer;
        if len == 0 {
          this.next_buf = Some(buf);
          ResponseStreamResult::NoData
        } else {
          buf.truncate(len as usize);
          ResponseStreamResult::NonEmptyBuf(BufView::from(buf.freeze()))
        }
      }
    };
    std::task::Poll::Ready(res)
  }
  fn size_hint(&self) -> SizeHint {
    SizeHint::default()
  }
}
/// State machine for the brotli body: stream compressed data, emit the
/// final block(s), then end.
#[derive(Copy, Clone, Debug)]
enum BrotliState {
  Streaming,
  Flushing,
  EndOfStream,
}
/// Streaming brotli wrapper around an inner `ResponseStream`.
#[pin_project]
pub struct BrotliResponseStream {
  state: BrotliState,
  stm: BrotliEncoderStateStruct<StandardAlloc>,
  #[pin]
  underlying: ResponseStream,
}
impl BrotliResponseStream {
  pub fn new(underlying: ResponseStream) -> Self {
    let mut stm = BrotliEncoderStateStruct::new(StandardAlloc::default());
    // Quality level 6 is based on google's nginx default value for on-the-fly compression
    // https://github.com/google/ngx_brotli#brotli_comp_level
    // lgwin 22 is equivalent to brotli window size of (2**22)-16 bytes (~4MB)
    stm.set_parameter(BrotliEncoderParameter::BROTLI_PARAM_QUALITY, 6);
    stm.set_parameter(BrotliEncoderParameter::BROTLI_PARAM_LGWIN, 22);
    Self {
      stm,
      state: BrotliState::Streaming,
      underlying,
    }
  }
  /// Abort the underlying stream.
  pub fn abort(self) {
    self.underlying.abort()
  }
}
/// Upper bound on the brotli-compressed size of `input_size` bytes,
/// assuming worst-case (uncompressed) block encoding. Returns 0 when the
/// bound would overflow `usize`.
fn max_compressed_size(input_size: usize) -> usize {
  if input_size == 0 {
    return 2;
  }
  // [window bits / empty metadata] + N * [uncompressed] + [last empty]
  let num_large_blocks = input_size >> 14;
  let overhead = 2 + (4 * num_large_blocks) + 3 + 1;
  // Checked add: the original `input_size + overhead` panics on overflow in
  // debug builds; its `result < input_size` guard only worked with release
  // wrapping semantics.
  input_size.checked_add(overhead).unwrap_or(0)
}
impl PollFrame for BrotliResponseStream {
  /// Compress frames from the underlying stream; on end of input, emit the
  /// encoder's final output before signalling end-of-stream.
  fn poll_frame(
    self: Pin<&mut Self>,
    cx: &mut std::task::Context<'_>,
  ) -> std::task::Poll<ResponseStreamResult> {
    let this = self.get_mut();
    let state = &mut this.state;
    let frame = match *state {
      BrotliState::Streaming => {
        ready!(Pin::new(&mut this.underlying).poll_frame(cx))
      }
      BrotliState::Flushing => ResponseStreamResult::EndOfStream,
      BrotliState::EndOfStream => {
        return std::task::Poll::Ready(ResponseStreamResult::EndOfStream);
      }
    };
    let res = match frame {
      ResponseStreamResult::NonEmptyBuf(buf) => {
        // Output sized so a full FLUSH can never run out of space.
        let mut output_buffer = vec![0; max_compressed_size(buf.len())];
        let mut output_offset = 0;
        // NOTE(review): the `&mut buf.len()` / `&mut output_buffer.len()` /
        // `&mut 0` arguments are in/out counters passed as temporaries, so
        // their updated values are discarded; only `output_offset` is read
        // back afterwards. Presumably intentional — confirm against the
        // brotli `compress_stream` API.
        this.stm.compress_stream(
          BrotliEncoderOperation::BROTLI_OPERATION_FLUSH,
          &mut buf.len(),
          &buf,
          &mut 0,
          &mut output_buffer.len(),
          &mut output_buffer,
          &mut output_offset,
          &mut None,
          &mut |_, _, _, _| (),
        );
        output_buffer.truncate(output_offset);
        ResponseStreamResult::NonEmptyBuf(BufView::from(output_buffer))
      }
      ResponseStreamResult::EndOfStream => {
        let mut output_buffer = vec![0; 1024];
        let mut output_offset = 0;
        this.stm.compress_stream(
          BrotliEncoderOperation::BROTLI_OPERATION_FINISH,
          &mut 0,
          &[],
          &mut 0,
          &mut output_buffer.len(),
          &mut output_buffer,
          &mut output_offset,
          &mut None,
          &mut |_, _, _, _| (),
        );
        if output_offset == 0 {
          // Nothing left to emit; the stream is done.
          this.state = BrotliState::EndOfStream;
          ResponseStreamResult::EndOfStream
        } else {
          // Emit the final block now; the next poll ends the stream.
          this.state = BrotliState::Flushing;
          output_buffer.truncate(output_offset);
          ResponseStreamResult::NonEmptyBuf(BufView::from(output_buffer))
        }
      }
      _ => frame,
    };
    std::task::Poll::Ready(res)
  }
  fn size_hint(&self) -> SizeHint {
    SizeHint::default()
  }
}
#[allow(clippy::print_stderr)]
#[cfg(test)]
mod tests {
use std::future::poll_fn;
use std::hash::Hasher;
use std::io::Read;
use std::io::Write;
use super::*;
/// 1 MiB of zero bytes — trivially compressible test input.
fn zeros() -> Vec<u8> {
  std::iter::repeat(0u8).take(1024 * 1024).collect()
}
/// 1 MiB of pseudo-random (hash-derived, deterministic) bytes that gzip
/// cannot meaningfully compress.
fn hard_to_gzip_data() -> Vec<u8> {
  const SIZE: usize = 1024 * 1024;
  let mut hasher = std::collections::hash_map::DefaultHasher::new();
  (0..SIZE)
    .map(move |i| {
      hasher.write_usize(i);
      hasher.finish() as u8
    })
    .collect()
}
/// Incompressible input: the hard-to-gzip fixture, pre-compressed at the
/// best gzip level.
fn already_gzipped_data() -> Vec<u8> {
  let mut v = Vec::with_capacity(1024 * 1024);
  let mut gz =
    flate2::GzBuilder::new().write(&mut v, flate2::Compression::best());
  gz.write_all(&hard_to_gzip_data()).unwrap();
  // Finish returns the inner writer; we only need the bytes in `v`.
  _ = gz.finish().unwrap();
  v
}
/// Split `v` into consecutive chunks of at most 10 KiB each.
fn chunk(v: Vec<u8>) -> impl Iterator<Item = Vec<u8>> {
  v.chunks(10 * 1024)
    .map(<[u8]>::to_vec)
    .collect::<Vec<_>>()
    .into_iter()
}
/// Split `v` into randomly sized, non-empty consecutive chunks
/// (nondeterministic by design — exercises arbitrary chunk boundaries).
fn random(mut v: Vec<u8>) -> impl Iterator<Item = Vec<u8>> {
  let mut out = vec![];
  loop {
    if v.is_empty() {
      break;
    }
    // 1..=len so every chunk is non-empty and the loop terminates.
    let rand = (rand::random::<usize>() % v.len()) + 1;
    let new = v.split_off(rand);
    out.push(v);
    v = new;
  }
  // Print the lengths of the vectors if we actually fail this test at some point
  let lengths = out.iter().map(|v| v.len()).collect::<Vec<_>>();
  eprintln!("Lengths = {:?}", lengths);
  out.into_iter()
}
/// Split `v` 90%/10% so most of the data arrives in the first chunk.
fn front_load(mut v: Vec<u8>) -> impl Iterator<Item = Vec<u8>> {
  let split = (v.len() * 90) / 100;
  let tail = v.split_off(split);
  [v, tail].into_iter()
}
/// Split `v` so the final chunk contains exactly one byte.
fn front_load_but_one(mut v: Vec<u8>) -> impl Iterator<Item = Vec<u8>> {
  let tail = v.split_off(v.len() - 1);
  [v, tail].into_iter()
}
/// Split `v` 10%/90% so most of the data arrives in the second chunk.
fn back_load(mut v: Vec<u8>) -> impl Iterator<Item = Vec<u8>> {
  let split = (v.len() * 10) / 100;
  let tail = v.split_off(split);
  [v, tail].into_iter()
}
async fn test_gzip(i: impl Iterator<Item = Vec<u8>> + Send + 'static) {
let v = i.collect::<Vec<_>>();
let mut expected: Vec<u8> = vec![];
for v in &v {
expected.extend(v);
}
let (tx, rx) = tokio::sync::mpsc::channel(1);
let underlying = ResponseStream::TestChannel(rx);
let mut resp = GZipResponseStream::new(underlying);
let handle = tokio::task::spawn(async move {
for chunk in v {
tx.send(chunk.into()).await.ok().unwrap();
}
});
// Limit how many times we'll loop
const LIMIT: usize = 1000;
let mut v: Vec<u8> = vec![];
for i in 0..=LIMIT {
assert_ne!(i, LIMIT);
let frame = poll_fn(|cx| Pin::new(&mut resp).poll_frame(cx)).await;
if matches!(frame, ResponseStreamResult::EndOfStream) {
break;
}
if matches!(frame, ResponseStreamResult::NoData) {
continue;
}
let ResponseStreamResult::NonEmptyBuf(buf) = frame else {
panic!("Unexpected stream type");
};
assert_ne!(buf.len(), 0);
v.extend(&*buf);
}
let mut gz = flate2::read::GzDecoder::new(&*v);
let mut v = vec![];
gz.read_to_end(&mut v).unwrap();
assert_eq!(v, expected);
handle.await.unwrap();
}
async fn test_brotli(i: impl Iterator<Item = Vec<u8>> + Send + 'static) {
let v = i.collect::<Vec<_>>();
let mut expected: Vec<u8> = vec![];
for v in &v {
expected.extend(v);
}
let (tx, rx) = tokio::sync::mpsc::channel(1);
let underlying = ResponseStream::TestChannel(rx);
let mut resp = BrotliResponseStream::new(underlying);
let handle = tokio::task::spawn(async move {
for chunk in v {
tx.send(chunk.into()).await.ok().unwrap();
}
});
// Limit how many times we'll loop
const LIMIT: usize = 1000;
let mut v: Vec<u8> = vec![];
for i in 0..=LIMIT {
assert_ne!(i, LIMIT);
let frame = poll_fn(|cx| Pin::new(&mut resp).poll_frame(cx)).await;
if matches!(frame, ResponseStreamResult::EndOfStream) {
break;
}
if matches!(frame, ResponseStreamResult::NoData) {
continue;
}
let ResponseStreamResult::NonEmptyBuf(buf) = frame else {
panic!("Unexpected stream type");
};
assert_ne!(buf.len(), 0);
v.extend(&*buf);
}
let mut gz = brotli::Decompressor::new(&*v, v.len());
let mut v = vec![];
if !expected.is_empty() {
gz.read_to_end(&mut v).unwrap();
}
assert_eq!(v, expected);
handle.await.unwrap();
}
#[tokio::test]
async fn test_simple() {
test_brotli(vec![b"hello world".to_vec()].into_iter()).await;
test_gzip(vec![b"hello world".to_vec()].into_iter()).await;
}
#[tokio::test]
async fn test_empty() {
test_brotli(vec![].into_iter()).await;
test_gzip(vec![].into_iter()).await;
}
#[tokio::test]
async fn test_simple_zeros() {
test_brotli(vec![vec![0; 0x10000]].into_iter()).await;
test_gzip(vec![vec![0; 0x10000]].into_iter()).await;
}
macro_rules! test {
($vec:ident) => {
mod $vec {
#[tokio::test]
async fn chunk() {
let iter = super::chunk(super::$vec());
super::test_gzip(iter).await;
let br_iter = super::chunk(super::$vec());
super::test_brotli(br_iter).await;
}
#[tokio::test]
async fn front_load() {
let iter = super::front_load(super::$vec());
super::test_gzip(iter).await;
let br_iter = super::front_load(super::$vec());
super::test_brotli(br_iter).await;
}
#[tokio::test]
async fn front_load_but_one() {
let iter = super::front_load_but_one(super::$vec());
super::test_gzip(iter).await;
let br_iter = super::front_load_but_one(super::$vec());
super::test_brotli(br_iter).await;
}
#[tokio::test]
async fn back_load() {
let iter = super::back_load(super::$vec());
super::test_gzip(iter).await;
let br_iter = super::back_load(super::$vec());
super::test_brotli(br_iter).await;
}
#[tokio::test]
async fn random() {
let iter = super::random(super::$vec());
super::test_gzip(iter).await;
let br_iter = super::random(super::$vec());
super::test_brotli(br_iter).await;
}
}
};
}
test!(zeros);
test!(hard_to_gzip_data);
test!(already_gzipped_data);
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/ext/http/lib.rs | ext/http/lib.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::borrow::Cow;
use std::cell::RefCell;
use std::cmp::min;
use std::error::Error;
use std::future::Future;
use std::future::Pending;
use std::future::pending;
use std::io;
use std::io::Write;
use std::mem::replace;
use std::mem::take;
use std::net::SocketAddr;
use std::pin::Pin;
use std::pin::pin;
use std::rc::Rc;
use std::sync::Arc;
use std::sync::atomic::AtomicBool;
use std::sync::atomic::Ordering;
use std::task::Context;
use std::task::Poll;
use std::task::ready;
use async_compression::Level;
use async_compression::tokio::write::BrotliEncoder;
use async_compression::tokio::write::GzipEncoder;
use base64::Engine;
use base64::prelude::BASE64_STANDARD;
use cache_control::CacheControl;
use deno_core::AsyncRefCell;
use deno_core::AsyncResult;
use deno_core::BufView;
use deno_core::ByteString;
use deno_core::CancelFuture;
use deno_core::CancelHandle;
use deno_core::CancelTryFuture;
use deno_core::JsBuffer;
use deno_core::OpState;
use deno_core::RcRef;
use deno_core::Resource;
use deno_core::ResourceId;
use deno_core::StringOrBuffer;
use deno_core::futures::FutureExt;
use deno_core::futures::StreamExt;
use deno_core::futures::TryFutureExt;
use deno_core::futures::channel::mpsc;
use deno_core::futures::channel::oneshot;
use deno_core::futures::future::Either;
use deno_core::futures::future::RemoteHandle;
use deno_core::futures::future::Shared;
use deno_core::futures::future::select;
use deno_core::futures::never::Never;
use deno_core::futures::stream::Peekable;
use deno_core::op2;
use deno_core::unsync::spawn;
use deno_error::JsErrorBox;
use deno_net::raw::NetworkStream;
use deno_telemetry::Histogram;
use deno_telemetry::MeterProvider;
use deno_telemetry::OTEL_GLOBALS;
use deno_telemetry::UpDownCounter;
use deno_websocket::ws_create_server_stream;
use flate2::Compression;
use flate2::write::GzEncoder;
use hyper::server::conn::http1;
use hyper::server::conn::http2;
use hyper_util::rt::TokioIo;
use hyper_v014::Body;
use hyper_v014::HeaderMap;
use hyper_v014::Request;
use hyper_v014::Response;
use hyper_v014::body::Bytes;
use hyper_v014::body::HttpBody;
use hyper_v014::body::SizeHint;
use hyper_v014::header::HeaderName;
use hyper_v014::header::HeaderValue;
use hyper_v014::server::conn::Http;
use hyper_v014::service::Service;
use once_cell::sync::OnceCell;
use serde::Serialize;
use tokio::io::AsyncRead;
use tokio::io::AsyncWrite;
use tokio::io::AsyncWriteExt;
use tokio::net::TcpStream;
use tokio::sync::Notify;
use crate::network_buffered_stream::NetworkBufferedStream;
use crate::reader_stream::ExternallyAbortableReaderStream;
use crate::reader_stream::ShutdownHandle;
pub mod compressible;
mod fly_accept_encoding;
mod http_next;
mod network_buffered_stream;
mod reader_stream;
mod request_body;
mod request_properties;
mod response_body;
mod service;
use fly_accept_encoding::Encoding;
pub use http_next::HttpNextError;
pub use request_properties::DefaultHttpPropertyExtractor;
pub use request_properties::HttpConnectionProperties;
pub use request_properties::HttpListenProperties;
pub use request_properties::HttpPropertyExtractor;
pub use request_properties::HttpRequestProperties;
pub use service::UpgradeUnavailableError;
/// Lazily-initialized OpenTelemetry instruments for HTTP server metrics.
struct OtelCollectors {
  // Histogram of request durations, in seconds.
  duration: Histogram<f64>,
  // Up/down counter of currently in-flight requests.
  active_requests: UpDownCounter<i64>,
  // Histogram of request body sizes, in bytes.
  request_size: Histogram<u64>,
  // Histogram of response body sizes, in bytes.
  response_size: Histogram<u64>,
}
// Process-wide instrument set, created on first request (see `OtelInfo::new`).
static OTEL_COLLECTORS: OnceCell<OtelCollectors> = OnceCell::new();
/// Embedder-supplied configuration for the `deno_http` extension.
#[derive(Debug, Default, Clone, Copy)]
pub struct Options {
  /// By passing a hook function, the caller can customize various configuration
  /// options for the HTTP/2 server.
  /// See [`http2::Builder`] for what parameters can be customized.
  ///
  /// If `None`, the default configuration provided by hyper will be used. Note
  /// that the default configuration is subject to change in future versions.
  pub http2_builder_hook:
    Option<fn(http2::Builder<LocalExecutor>) -> http2::Builder<LocalExecutor>>,
  /// By passing a hook function, the caller can customize various configuration
  /// options for the HTTP/1 server.
  /// See [`http1::Builder`] for what parameters can be customized.
  ///
  /// If `None`, the default configuration provided by hyper will be used. Note
  /// that the default configuration is subject to change in future versions.
  pub http1_builder_hook: Option<fn(http1::Builder) -> http1::Builder>,
  /// If `false`, the server will abort the request when the response is dropped.
  pub no_legacy_abort: bool,
}
// Extension registration used when the embedder supplies its own
// `HttpPropertyExtractor` implementation via the generic `HTTP` parameter.
#[cfg(not(feature = "default_property_extractor"))]
deno_core::extension!(
  deno_http,
  deps = [deno_web, deno_net, deno_fetch, deno_websocket],
  parameters = [ HTTP: HttpPropertyExtractor ],
  ops = [
    op_http_accept,
    op_http_headers,
    op_http_serve_address_override,
    op_http_shutdown,
    op_http_upgrade_websocket,
    op_http_websocket_accept_header,
    op_http_write_headers,
    op_http_write_resource,
    op_http_write,
    http_next::op_http_close_after_finish,
    http_next::op_http_get_request_header,
    http_next::op_http_get_request_headers,
    http_next::op_http_request_on_cancel,
    http_next::op_http_get_request_method_and_url<HTTP>,
    http_next::op_http_get_request_cancelled,
    http_next::op_http_read_request_body,
    http_next::op_http_serve_on<HTTP>,
    http_next::op_http_serve<HTTP>,
    http_next::op_http_set_promise_complete,
    http_next::op_http_set_response_body_bytes,
    http_next::op_http_set_response_body_resource,
    http_next::op_http_set_response_body_text,
    http_next::op_http_set_response_header,
    http_next::op_http_set_response_headers,
    http_next::op_http_set_response_trailers,
    http_next::op_http_upgrade_websocket_next,
    http_next::op_http_upgrade_raw,
    http_next::op_raw_write_vectored,
    http_next::op_can_write_vectored,
    http_next::op_http_try_wait,
    http_next::op_http_wait,
    http_next::op_http_close,
    http_next::op_http_cancel,
    http_next::op_http_metric_handle_otel_error,
  ],
  esm = ["00_serve.ts", "01_http.js", "02_websocket.ts"],
  options = {
    options: Options,
  },
  state = |state, options| {
    state.put::<Options>(options.options);
  }
);
// Extension registration used when the `default_property_extractor` feature is
// enabled: ops are monomorphized over `DefaultHttpPropertyExtractor`.
#[cfg(feature = "default_property_extractor")]
deno_core::extension!(
  deno_http,
  deps = [deno_web, deno_net, deno_fetch, deno_websocket],
  ops = [
    op_http_accept,
    op_http_headers,
    op_http_serve_address_override,
    op_http_shutdown,
    op_http_upgrade_websocket,
    op_http_websocket_accept_header,
    op_http_write_headers,
    op_http_write_resource,
    op_http_write,
    op_http_notify_serving,
    http_next::op_http_close_after_finish,
    http_next::op_http_get_request_header,
    http_next::op_http_get_request_headers,
    http_next::op_http_request_on_cancel,
    http_next::op_http_get_request_method_and_url<DefaultHttpPropertyExtractor>,
    http_next::op_http_get_request_cancelled,
    http_next::op_http_read_request_body,
    http_next::op_http_serve_on<DefaultHttpPropertyExtractor>,
    http_next::op_http_serve<DefaultHttpPropertyExtractor>,
    http_next::op_http_set_promise_complete,
    http_next::op_http_set_response_body_bytes,
    http_next::op_http_set_response_body_resource,
    http_next::op_http_set_response_body_text,
    http_next::op_http_set_response_header,
    http_next::op_http_set_response_headers,
    http_next::op_http_set_response_trailers,
    http_next::op_http_upgrade_websocket_next,
    http_next::op_http_upgrade_raw,
    http_next::op_raw_write_vectored,
    http_next::op_can_write_vectored,
    http_next::op_http_try_wait,
    http_next::op_http_wait,
    http_next::op_http_close,
    http_next::op_http_cancel,
    http_next::op_http_metric_handle_otel_error,
  ],
  esm = ["00_serve.ts", "01_http.js", "02_websocket.ts"],
  options = {
    options: Options,
  },
  state = |state, options| {
    state.put::<Options>(options.options);
  }
);
/// Errors surfaced by the legacy (hyper 0.14) HTTP server ops. The
/// `#[class]` attributes determine the JS error class thrown to user code.
#[derive(Debug, thiserror::Error, deno_error::JsError)]
pub enum HttpError {
  #[class(inherit)]
  #[error(transparent)]
  Resource(#[from] deno_core::error::ResourceError),
  #[class(inherit)]
  #[error(transparent)]
  Canceled(#[from] deno_core::Canceled),
  // Arc-wrapped because hyper's error is not Clone but this variant is
  // produced from a shared (cloned) connection-closed future.
  #[class("Http")]
  #[error("{0}")]
  HyperV014(#[source] Arc<hyper_v014::Error>),
  #[class(generic)]
  #[error("{0}")]
  InvalidHeaderName(#[from] hyper_v014::header::InvalidHeaderName),
  #[class(generic)]
  #[error("{0}")]
  InvalidHeaderValue(#[from] hyper_v014::header::InvalidHeaderValue),
  #[class(generic)]
  #[error("{0}")]
  Http(#[from] hyper_v014::http::Error),
  #[class("Http")]
  #[error("response headers already sent")]
  ResponseHeadersAlreadySent,
  #[class("Http")]
  #[error("connection closed while sending response")]
  ConnectionClosedWhileSendingResponse,
  #[class("Http")]
  #[error("already in use")]
  AlreadyInUse,
  #[class(inherit)]
  #[error("{0}")]
  Io(#[from] std::io::Error),
  #[class("Http")]
  #[error("no response headers")]
  NoResponseHeaders,
  #[class("Http")]
  #[error("response already completed")]
  ResponseAlreadyCompleted,
  #[class("Http")]
  #[error("cannot upgrade because request body was used")]
  UpgradeBodyUsed,
  #[class("Http")]
  #[error(transparent)]
  Other(#[from] JsErrorBox),
}
/// Local address a connection was accepted on: either an IP socket address or
/// (on unix) a unix domain socket address.
pub enum HttpSocketAddr {
  IpSocket(std::net::SocketAddr),
  #[cfg(unix)]
  UnixSocket(tokio::net::unix::SocketAddr),
}
impl From<std::net::SocketAddr> for HttpSocketAddr {
  fn from(addr: std::net::SocketAddr) -> Self {
    Self::IpSocket(addr)
  }
}
#[cfg(unix)]
impl From<tokio::net::unix::SocketAddr> for HttpSocketAddr {
  fn from(addr: tokio::net::unix::SocketAddr) -> Self {
    Self::UnixSocket(addr)
  }
}
/// Per-request OpenTelemetry metrics state. The `Option` fields act as
/// "not yet recorded" markers: each is `take()`n when its metric is recorded
/// so that recording happens at most once (see `Drop for OtelInfo`).
struct OtelInfo {
  attributes: OtelInfoAttributes,
  // Request start time; taken when duration is recorded.
  duration: Option<std::time::Instant>,
  // Request body size in bytes; taken when recorded.
  request_size: Option<u64>,
  // Accumulated response body size in bytes; recorded on drop.
  response_size: Option<u64>,
}
/// Attribute values attached to the HTTP server metrics, following the
/// OpenTelemetry HTTP semantic-convention attribute names.
struct OtelInfoAttributes {
  http_request_method: Cow<'static, str>,
  network_protocol_version: &'static str,
  url_scheme: Cow<'static, str>,
  server_address: Option<String>,
  server_port: Option<i64>,
  // Set lazily when an error occurs (see `handle_error_otel`).
  error_type: Option<&'static str>,
  // Set once response headers are produced.
  http_response_status_code: Option<i64>,
}
impl OtelInfoAttributes {
  /// Canonical method name for a hyper 1.x request method; borrows for the
  /// well-known methods and allocates only for extension methods.
  fn method(method: &http::method::Method) -> Cow<'static, str> {
    use http::method::Method;
    match *method {
      Method::GET => Cow::Borrowed("GET"),
      Method::POST => Cow::Borrowed("POST"),
      Method::PUT => Cow::Borrowed("PUT"),
      Method::DELETE => Cow::Borrowed("DELETE"),
      Method::HEAD => Cow::Borrowed("HEAD"),
      Method::OPTIONS => Cow::Borrowed("OPTIONS"),
      Method::CONNECT => Cow::Borrowed("CONNECT"),
      Method::PATCH => Cow::Borrowed("PATCH"),
      Method::TRACE => Cow::Borrowed("TRACE"),
      _ => Cow::Owned(method.to_string()),
    }
  }
  /// Same as `method`, but for the http 0.2 types used by hyper 0.14.
  fn method_v02(method: &http_v02::method::Method) -> Cow<'static, str> {
    use http_v02::method::Method;
    match *method {
      Method::GET => Cow::Borrowed("GET"),
      Method::POST => Cow::Borrowed("POST"),
      Method::PUT => Cow::Borrowed("PUT"),
      Method::DELETE => Cow::Borrowed("DELETE"),
      Method::HEAD => Cow::Borrowed("HEAD"),
      Method::OPTIONS => Cow::Borrowed("OPTIONS"),
      Method::CONNECT => Cow::Borrowed("CONNECT"),
      Method::PATCH => Cow::Borrowed("PATCH"),
      Method::TRACE => Cow::Borrowed("TRACE"),
      _ => Cow::Owned(method.to_string()),
    }
  }
  /// `network.protocol.version` attribute value for a hyper 1.x version.
  fn version(version: http::Version) -> &'static str {
    use http::Version;
    match version {
      Version::HTTP_09 => "0.9",
      Version::HTTP_10 => "1.0",
      Version::HTTP_11 => "1.1",
      Version::HTTP_2 => "2",
      Version::HTTP_3 => "3",
      _ => unreachable!(),
    }
  }
  /// Same as `version`, but for the http 0.2 types used by hyper 0.14.
  fn version_v02(version: http_v02::Version) -> &'static str {
    use http_v02::Version;
    match version {
      Version::HTTP_09 => "0.9",
      Version::HTTP_10 => "1.0",
      Version::HTTP_11 => "1.1",
      Version::HTTP_2 => "2",
      Version::HTTP_3 => "3",
      _ => unreachable!(),
    }
  }
  /// Attribute set for the `active_requests` counter (method, scheme, and
  /// optional server address/port).
  fn for_counter(&self) -> Vec<deno_telemetry::KeyValue> {
    let mut attributes = vec![
      deno_telemetry::KeyValue::new(
        "http.request.method",
        self.http_request_method.clone(),
      ),
      deno_telemetry::KeyValue::new("url.scheme", self.url_scheme.clone()),
    ];
    if let Some(address) = self.server_address.clone() {
      attributes.push(deno_telemetry::KeyValue::new("server.address", address));
    }
    if let Some(port) = self.server_port {
      attributes.push(deno_telemetry::KeyValue::new("server.port", port));
    }
    attributes
  }
  /// Attribute set for the histograms: the counter attributes plus protocol
  /// version and, when known, status code and error type.
  fn for_histogram(&self) -> Vec<deno_telemetry::KeyValue> {
    let mut histogram_attributes = vec![
      deno_telemetry::KeyValue::new(
        "http.request.method",
        self.http_request_method.clone(),
      ),
      deno_telemetry::KeyValue::new("url.scheme", self.url_scheme.clone()),
      deno_telemetry::KeyValue::new(
        "network.protocol.version",
        self.network_protocol_version,
      ),
    ];
    if let Some(address) = self.server_address.clone() {
      histogram_attributes
        .push(deno_telemetry::KeyValue::new("server.address", address));
    }
    if let Some(port) = self.server_port {
      histogram_attributes
        .push(deno_telemetry::KeyValue::new("server.port", port));
    }
    if let Some(status_code) = self.http_response_status_code {
      histogram_attributes.push(deno_telemetry::KeyValue::new(
        "http.response.status_code",
        status_code,
      ));
    }
    if let Some(error) = self.error_type {
      histogram_attributes
        .push(deno_telemetry::KeyValue::new("error.type", error));
    }
    histogram_attributes
  }
}
impl OtelInfo {
  /// Creates the per-request metrics holder, lazily initializing the global
  /// instrument set on first use and incrementing the active-request counter.
  ///
  /// `instant` is the request start time; `request_size` is the request body
  /// size derived from the body's size hint.
  fn new(
    otel: &deno_telemetry::OtelGlobals,
    instant: std::time::Instant,
    request_size: u64,
    attributes: OtelInfoAttributes,
  ) -> Self {
    let collectors = OTEL_COLLECTORS.get_or_init(|| {
      let meter = otel
        .meter_provider
        .meter_with_scope(otel.builtin_instrumentation_scope.clone());
      let duration = meter
        .f64_histogram("http.server.request.duration")
        .with_unit("s")
        .with_description("Duration of HTTP server requests.")
        // Bucket boundaries recommended by the OTel HTTP semantic conventions.
        .with_boundaries(vec![
          0.005, 0.01, 0.025, 0.05, 0.075, 0.1, 0.25, 0.5, 0.75, 1.0, 2.5, 5.0,
          7.5, 10.0,
        ])
        .build();
      let active_requests = meter
        .i64_up_down_counter("http.server.active_requests")
        .with_unit("{request}")
        .with_description("Number of active HTTP server requests.")
        .build();
      let request_size = meter
        .u64_histogram("http.server.request.body.size")
        .with_unit("By")
        .with_description("Size of HTTP server request bodies.")
        .with_boundaries(vec![
          0.0,
          100.0,
          1000.0,
          10000.0,
          100000.0,
          1000000.0,
          10000000.0,
          100000000.0,
          1000000000.0,
        ])
        .build();
      let response_size = meter
        .u64_histogram("http.server.response.body.size")
        .with_unit("By")
        .with_description("Size of HTTP server response bodies.")
        .with_boundaries(vec![
          0.0,
          100.0,
          1000.0,
          10000.0,
          100000.0,
          1000000.0,
          10000000.0,
          100000000.0,
          1000000000.0,
        ])
        .build();
      OtelCollectors {
        duration,
        active_requests,
        request_size,
        response_size,
      }
    });
    collectors.active_requests.add(1, &attributes.for_counter());
    Self {
      attributes,
      duration: Some(instant),
      request_size: Some(request_size),
      response_size: Some(0),
    }
  }
  /// Records the request-duration and request-body-size histograms. Each is
  /// recorded at most once: `take()` empties the `Option`s so a later call
  /// (e.g. from `Drop`) does not double-record.
  fn handle_duration_and_request_size(&mut self) {
    let collectors = OTEL_COLLECTORS.get().unwrap();
    let attributes = self.attributes.for_histogram();
    if let Some(duration) = self.duration.take() {
      collectors
        .duration
        .record(duration.elapsed().as_secs_f64(), &attributes);
    }
    if let Some(request_size) = self.request_size.take() {
      // Reuse `collectors` from above; re-fetching the OnceCell here (as the
      // previous version did) was redundant.
      collectors.request_size.record(request_size, &attributes);
    }
  }
}
// On drop: flush any not-yet-recorded duration/request-size metrics,
// decrement the active-request counter, and record the response body size.
impl Drop for OtelInfo {
  fn drop(&mut self) {
    let collectors = OTEL_COLLECTORS.get().unwrap();
    self.handle_duration_and_request_size();
    collectors
      .active_requests
      .add(-1, &self.attributes.for_counter());
    if let Some(response_size) = self.response_size {
      collectors
        .response_size
        .record(response_size, &self.attributes.for_histogram());
    }
  }
}
/// Tags the request's OTel metrics (when telemetry is enabled) with an
/// `error.type` attribute derived from the `HttpError` variant.
fn handle_error_otel(
  otel: &Option<Rc<RefCell<Option<OtelInfo>>>>,
  error: &HttpError,
) {
  if let Some(otel) = otel.as_ref() {
    let mut maybe_otel_info = otel.borrow_mut();
    if let Some(otel_info) = maybe_otel_info.as_mut() {
      otel_info.attributes.error_type = Some(match error {
        HttpError::Resource(_) => "resource",
        HttpError::Canceled(_) => "canceled",
        HttpError::HyperV014(_) => "hyper",
        HttpError::InvalidHeaderName(_) => "invalid header name",
        HttpError::InvalidHeaderValue(_) => "invalid header value",
        HttpError::Http(_) => "http",
        HttpError::ResponseHeadersAlreadySent => {
          "response headers already sent"
        }
        HttpError::ConnectionClosedWhileSendingResponse => {
          "connection closed while sending response"
        }
        HttpError::AlreadyInUse => "already in use",
        HttpError::Io(_) => "io",
        HttpError::NoResponseHeaders => "no response headers",
        HttpError::ResponseAlreadyCompleted => "response already completed",
        HttpError::UpgradeBodyUsed => "upgrade body used",
        HttpError::Other(_) => "unknown",
      });
    }
  }
}
/// Resource wrapping one legacy (hyper 0.14) HTTP connection. Incoming
/// requests are handed to JS through `acceptors_tx`; `closed_fut` resolves
/// when the underlying connection task finishes.
struct HttpConnResource {
  addr: HttpSocketAddr,
  // "http" or "https"; used to reconstruct request URLs.
  scheme: &'static str,
  acceptors_tx: mpsc::UnboundedSender<HttpAcceptor>,
  // Shared so multiple `accept` calls can all observe the connection result.
  closed_fut: Shared<RemoteHandle<Result<(), Arc<hyper_v014::Error>>>>,
  cancel_handle: Rc<CancelHandle>, // Closes gracefully and cancels accept ops.
}
impl HttpConnResource {
  /// Spawns a hyper connection task driving `io` and returns the resource
  /// that tracks it.
  fn new<S>(io: S, scheme: &'static str, addr: HttpSocketAddr) -> Self
  where
    S: AsyncRead + AsyncWrite + Unpin + Send + 'static,
  {
    let (acceptors_tx, acceptors_rx) = mpsc::unbounded::<HttpAcceptor>();
    let service = HttpService::new(acceptors_rx);
    let conn_fut = Http::new()
      .with_executor(LocalExecutor)
      .serve_connection(io, service)
      .with_upgrades();
    // When the cancel handle is used, the connection shuts down gracefully.
    // No new HTTP streams will be accepted, but existing streams will be able
    // to continue operating and eventually shut down cleanly.
    let cancel_handle = CancelHandle::new_rc();
    let shutdown_fut = never().or_cancel(&cancel_handle).fuse();
    // A local task that polls the hyper connection future to completion.
    let task_fut = async move {
      let conn_fut = pin!(conn_fut);
      let shutdown_fut = pin!(shutdown_fut);
      let result = match select(conn_fut, shutdown_fut).await {
        // Connection finished on its own.
        Either::Left((result, _)) => result,
        // Cancel handle fired first: ask hyper for a graceful shutdown, then
        // keep driving the connection until it completes.
        Either::Right((_, mut conn_fut)) => {
          conn_fut.as_mut().graceful_shutdown();
          conn_fut.await
        }
      };
      filter_enotconn(result).map_err(Arc::from)
    };
    // Split into the task to spawn and a shareable "closed" future.
    let (task_fut, closed_fut) = task_fut.remote_handle();
    let closed_fut = closed_fut.shared();
    spawn(task_fut);
    Self {
      addr,
      scheme,
      acceptors_tx,
      closed_fut,
      cancel_handle,
    }
  }
  // Accepts a new incoming HTTP request.
  // Resolves with the read/write halves plus the request method and URL, or
  // `None` when the connection is done; cancellation via the resource's
  // cancel handle aborts the wait.
  async fn accept(
    self: &Rc<Self>,
  ) -> Result<
    Option<(
      HttpStreamReadResource,
      HttpStreamWriteResource,
      String,
      String,
    )>,
    HttpError,
  > {
    let fut = async {
      let (request_tx, request_rx) = oneshot::channel();
      let (response_tx, response_rx) = oneshot::channel();
      // Capture the start time up front so request duration includes waiting.
      let otel_instant = OTEL_GLOBALS
        .get()
        .filter(|o| o.has_metrics())
        .map(|_| std::time::Instant::now());
      let acceptor = HttpAcceptor::new(request_tx, response_rx);
      self.acceptors_tx.unbounded_send(acceptor).ok()?;
      let request = request_rx.await.ok()?;
      // Pick the preferred response encoding among those we support.
      let accept_encoding = {
        let encodings =
          fly_accept_encoding::encodings_iter_http_02(request.headers())
            .filter(|r| {
              matches!(r, Ok((Some(Encoding::Brotli | Encoding::Gzip), _)))
            });
        fly_accept_encoding::preferred(encodings)
          .ok()
          .flatten()
          .unwrap_or(Encoding::Identity)
      };
      let otel_info =
        OTEL_GLOBALS.get().filter(|o| o.has_metrics()).map(|otel| {
          let size_hint = request.size_hint();
          Rc::new(RefCell::new(Some(OtelInfo::new(
            otel,
            otel_instant.unwrap(),
            size_hint.upper().unwrap_or(size_hint.lower()),
            OtelInfoAttributes {
              http_request_method: OtelInfoAttributes::method_v02(
                request.method(),
              ),
              url_scheme: Cow::Borrowed(self.scheme),
              network_protocol_version: OtelInfoAttributes::version_v02(
                request.version(),
              ),
              server_address: request.uri().host().map(|host| host.to_string()),
              server_port: request.uri().port_u16().map(|port| port as i64),
              error_type: Default::default(),
              http_response_status_code: Default::default(),
            },
          ))))
        });
      let method = request.method().to_string();
      let url = req_url(&request, self.scheme, &self.addr);
      let read_stream =
        HttpStreamReadResource::new(self, request, otel_info.clone());
      let write_stream = HttpStreamWriteResource::new(
        self,
        response_tx,
        accept_encoding,
        otel_info,
      );
      Some((read_stream, write_stream, method, url))
    };
    async {
      match fut.await {
        Some(stream) => Ok(Some(stream)),
        // Return the connection error, if any.
        None => self.closed().map_ok(|_| None).await,
      }
    }
    .try_or_cancel(&self.cancel_handle)
    .await
  }
  /// A future that completes when this HTTP connection is closed or errors.
  async fn closed(&self) -> Result<(), HttpError> {
    self.closed_fut.clone().map_err(HttpError::HyperV014).await
  }
}
impl Resource for HttpConnResource {
fn name(&self) -> Cow<'_, str> {
"httpConn".into()
}
fn close(self: Rc<Self>) {
self.cancel_handle.cancel();
}
}
/// Creates a new HttpConn resource which uses `io` as its transport.
/// Returns the id of the resource registered in `state`'s resource table.
pub fn http_create_conn_resource<S, A>(
  state: &mut OpState,
  io: S,
  addr: A,
  scheme: &'static str,
) -> ResourceId
where
  S: AsyncRead + AsyncWrite + Unpin + Send + 'static,
  A: Into<HttpSocketAddr>,
{
  let conn = HttpConnResource::new(io, scheme, addr.into());
  state.resource_table.add(conn)
}
/// An object that implements the `hyper::Service` trait, through which Hyper
/// delivers incoming HTTP requests.
struct HttpService {
  // Peekable so `poll_ready` can check acceptor availability without
  // consuming one; `call` then takes it.
  acceptors_rx: Peekable<mpsc::UnboundedReceiver<HttpAcceptor>>,
}
impl HttpService {
  fn new(acceptors_rx: mpsc::UnboundedReceiver<HttpAcceptor>) -> Self {
    let acceptors_rx = acceptors_rx.peekable();
    Self { acceptors_rx }
  }
}
impl Service<Request<Body>> for HttpService {
  type Response = Response<Body>;
  type Error = oneshot::Canceled;
  type Future = oneshot::Receiver<Response<Body>>;
  // Ready once an acceptor is queued; errors if the acceptor channel closed.
  fn poll_ready(
    &mut self,
    cx: &mut Context<'_>,
  ) -> Poll<Result<(), Self::Error>> {
    let acceptors_rx = Pin::new(&mut self.acceptors_rx);
    let result = ready!(acceptors_rx.poll_peek(cx))
      .map(|_| ())
      .ok_or(oneshot::Canceled);
    Poll::Ready(result)
  }
  fn call(&mut self, request: Request<Body>) -> Self::Future {
    // `poll_ready` peeked an acceptor, so `next()` resolves immediately.
    let acceptor = self.acceptors_rx.next().now_or_never().flatten().unwrap();
    acceptor.call(request)
  }
}
/// A pair of one-shot channels which first transfer a HTTP request from the
/// Hyper service to the HttpConn resource, and then take the Response back to
/// the service.
struct HttpAcceptor {
  request_tx: oneshot::Sender<Request<Body>>,
  response_rx: oneshot::Receiver<Response<Body>>,
}
impl HttpAcceptor {
  /// Bundles the request-sending and response-receiving channel halves.
  fn new(
    request_tx: oneshot::Sender<Request<Body>>,
    response_rx: oneshot::Receiver<Response<Body>>,
  ) -> Self {
    Self {
      request_tx,
      response_rx,
    }
  }

  /// Hands the request to the accepting side and returns the receiver on
  /// which the response will eventually arrive.
  fn call(self, request: Request<Body>) -> oneshot::Receiver<Response<Body>> {
    match self.request_tx.send(request) {
      // Request delivered; the response comes back on `response_rx`.
      Ok(()) => self.response_rx,
      // Accepting side is gone: hand back a receiver whose sender is dropped
      // immediately, which observers see as a canceled response.
      Err(_) => oneshot::channel().1,
    }
  }
}
/// Read half of an accepted HTTP stream: yields request body chunks.
pub struct HttpStreamReadResource {
  // Kept alive so the connection outlives this stream.
  _conn: Rc<HttpConnResource>,
  pub rd: AsyncRefCell<HttpRequestReader>,
  cancel_handle: CancelHandle,
  // Request body size hint, captured at construction.
  size: SizeHint,
  otel_info: Option<Rc<RefCell<Option<OtelInfo>>>>,
}
/// Write half of an accepted HTTP stream: carries the response back to hyper.
pub struct HttpStreamWriteResource {
  conn: Rc<HttpConnResource>,
  wr: AsyncRefCell<HttpResponseWriter>,
  // Response encoding negotiated from the request's Accept-Encoding header.
  accept_encoding: Encoding,
  otel_info: Option<Rc<RefCell<Option<OtelInfo>>>>,
}
impl HttpStreamReadResource {
  /// Builds the read half; the reader starts in the `Headers` state holding
  /// the full request until the body is first read.
  fn new(
    conn: &Rc<HttpConnResource>,
    request: Request<Body>,
    otel_info: Option<Rc<RefCell<Option<OtelInfo>>>>,
  ) -> Self {
    let size = request.body().size_hint();
    Self {
      _conn: conn.clone(),
      rd: HttpRequestReader::Headers(request).into(),
      size,
      cancel_handle: CancelHandle::new(),
      otel_info,
    }
  }
}
impl Resource for HttpStreamReadResource {
  fn name(&self) -> Cow<'_, str> {
    "httpReadStream".into()
  }
  /// Reads up to `limit` bytes of the request body. On first read, the stored
  /// request is converted into a peekable body stream.
  fn read(self: Rc<Self>, limit: usize) -> AsyncResult<BufView> {
    Box::pin(async move {
      let mut rd = RcRef::map(&self, |r| &r.rd).borrow_mut().await;
      // Transition Headers -> Body lazily; Closed short-circuits to empty.
      let body = loop {
        match &mut *rd {
          HttpRequestReader::Headers(_) => {}
          HttpRequestReader::Body(_, body) => break body,
          HttpRequestReader::Closed => return Ok(BufView::empty()),
        }
        match take(&mut *rd) {
          HttpRequestReader::Headers(request) => {
            let (parts, body) = request.into_parts();
            *rd = HttpRequestReader::Body(parts.headers, body.peekable());
          }
          _ => unreachable!(),
        };
      };
      let fut = async {
        let mut body = Pin::new(body);
        loop {
          match body.as_mut().peek_mut().await {
            // Non-empty chunk: hand out up to `limit` bytes, leaving any
            // remainder peeked for the next read.
            Some(Ok(chunk)) if !chunk.is_empty() => {
              let len = min(limit, chunk.len());
              let buf = chunk.split_to(len);
              let view = BufView::from(buf);
              break Ok(view);
            }
            // This unwrap is safe because `peek_mut()` returned `Some`, and thus
            // currently has a peeked value that can be synchronously returned
            // from `next()`.
            //
            // The future returned from `next()` is always ready, so we can
            // safely call `await` on it without creating a race condition.
            Some(_) => match body.as_mut().next().await.unwrap() {
              Ok(chunk) => assert!(chunk.is_empty()),
              Err(err) => {
                break Err(JsErrorBox::from_err(HttpError::HyperV014(
                  Arc::new(err),
                )));
              }
            },
            None => break Ok(BufView::empty()),
          }
        }
      };
      let cancel_handle = RcRef::map(&self, |r| &r.cancel_handle);
      fut.try_or_cancel(cancel_handle).await
    })
  }
  fn close(self: Rc<Self>) {
    self.cancel_handle.cancel();
  }
  fn size_hint(&self) -> (u64, Option<u64>) {
    (self.size.lower(), self.size.upper())
  }
}
impl HttpStreamWriteResource {
fn new(
conn: &Rc<HttpConnResource>,
response_tx: oneshot::Sender<Response<Body>>,
accept_encoding: Encoding,
otel_info: Option<Rc<RefCell<Option<OtelInfo>>>>,
) -> Self {
Self {
conn: conn.clone(),
wr: HttpResponseWriter::Headers(response_tx).into(),
accept_encoding,
otel_info,
}
}
}
impl Resource for HttpStreamWriteResource {
  fn name(&self) -> Cow<'_, str> {
    Cow::Borrowed("httpWriteStream")
  }
}
/// The read half of an HTTP stream.
#[derive(Default)]
pub enum HttpRequestReader {
  // Request received but body not yet read.
  Headers(Request<Body>),
  // Body being streamed; headers retained alongside the peekable body.
  Body(HeaderMap<HeaderValue>, Peekable<Body>),
  #[default]
  Closed,
}
/// The write half of an HTTP stream.
#[derive(Default)]
enum HttpResponseWriter {
  // Waiting for response headers to be written.
  Headers(oneshot::Sender<Response<Body>>),
  // Streaming a (possibly compressed) response body through an async writer.
  Body {
    writer: Pin<Box<dyn tokio::io::AsyncWrite>>,
    shutdown_handle: ShutdownHandle,
  },
  // Streaming an identity-encoded body directly via hyper's body sender.
  BodyUncompressed(BodyUncompressedSender),
  #[default]
  Closed,
}
// Wrapper around hyper's body sender that aborts the body on drop unless it
// was explicitly shut down (the Option tracks whether we still own it).
struct BodyUncompressedSender(Option<hyper_v014::body::Sender>);
impl BodyUncompressedSender {
  fn sender(&mut self) -> &mut hyper_v014::body::Sender {
    // This is safe because we only ever take the sender out of the option
    // inside of the shutdown method.
    self.0.as_mut().unwrap()
  }
  fn shutdown(mut self) {
    // take the sender out of self so that when self is dropped at the end of
    // this block, it doesn't get aborted
    self.0.take();
  }
}
impl From<hyper_v014::body::Sender> for BodyUncompressedSender {
  fn from(sender: hyper_v014::body::Sender) -> Self {
    BodyUncompressedSender(Some(sender))
  }
}
impl Drop for BodyUncompressedSender {
  fn drop(&mut self) {
    // Still owning the sender here means the body was never cleanly finished.
    if let Some(sender) = self.0.take() {
      sender.abort();
    }
  }
}
// We use a tuple instead of struct to avoid serialization overhead of the keys.
// Return value of `op_http_accept`; deserialized positionally on the JS side.
#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
struct NextRequestResponse(
  // read_stream_rid:
  ResourceId,
  // write_stream_rid:
  ResourceId,
  // method:
  // This is a String rather than a ByteString because reqwest will only return
  // the method as a str which is guaranteed to be ASCII-only.
  String,
  // url:
  String,
);
/// Waits for the next request on the connection identified by `rid`.
/// Resolves with the read/write stream resource ids plus the request method
/// and URL, or `None` once the connection has closed cleanly.
#[op2(async)]
#[serde]
async fn op_http_accept(
  state: Rc<RefCell<OpState>>,
  #[smi] rid: ResourceId,
) -> Result<Option<NextRequestResponse>, HttpError> {
  let conn = state.borrow().resource_table.get::<HttpConnResource>(rid)?;
  match conn.accept().await {
    Ok(Some((read_stream, write_stream, method, url))) => {
      let read_stream_rid = state
        .borrow_mut()
        .resource_table
        .add_rc(Rc::new(read_stream));
      let write_stream_rid = state
        .borrow_mut()
        .resource_table
        .add_rc(Rc::new(write_stream));
      let r =
        NextRequestResponse(read_stream_rid, write_stream_rid, method, url);
      Ok(Some(r))
    }
    Ok(None) => Ok(None),
    Err(err) => Err(err),
  }
}
/// Reconstructs the full request URL from the request, the listener scheme,
/// and the local address (used as the fallback host when the request carries
/// no authority or Host header).
fn req_url(
  req: &hyper_v014::Request<hyper_v014::Body>,
  scheme: &'static str,
  addr: &HttpSocketAddr,
) -> String {
  let host: Cow<'_, str> = match addr {
    HttpSocketAddr::IpSocket(addr) => {
      if let Some(auth) = req.uri().authority() {
        match addr.port() {
          // Omit the port when it is the default for the scheme.
          443 if scheme == "https" => Cow::Borrowed(auth.host()),
          80 if scheme == "http" => Cow::Borrowed(auth.host()),
          _ => Cow::Borrowed(auth.as_str()), // Includes port number.
        }
      } else if let Some(host) = req.uri().host() {
        Cow::Borrowed(host)
      } else if let Some(host) = req.headers().get("HOST") {
        match host.to_str() {
          Ok(host) => Cow::Borrowed(host),
          // Non-UTF-8 Host bytes: map each byte to a char individually.
          Err(_) => Cow::Owned(
            host
              .as_bytes()
              .iter()
              .cloned()
              .map(char::from)
              .collect::<String>(),
          ),
        }
      } else {
        Cow::Owned(addr.to_string())
      }
    }
    // There is no standard way for unix domain socket URLs
    // nginx and nodejs request use http://unix:[socket_path]:/ but it is not a valid URL
    // httpie uses http+unix://[percent_encoding_of_path]/ which we follow
    #[cfg(unix)]
    HttpSocketAddr::UnixSocket(addr) => Cow::Owned(
      percent_encoding::percent_encode(
        addr
          .as_pathname()
          .and_then(|x| x.to_str())
          .unwrap_or_default()
          .as_bytes(),
        percent_encoding::NON_ALPHANUMERIC,
      )
      .to_string(),
    ),
  };
  let path = req
    .uri()
    .path_and_query()
    .map(|p| p.as_str())
    .unwrap_or("/");
  [scheme, "://", &host, path].concat()
}
fn req_headers(
header_map: &HeaderMap<HeaderValue>,
) -> Vec<(ByteString, ByteString)> {
// We treat cookies specially, because we don't want them to get them
// mangled by the `Headers` object in JS. What we do is take all cookie
// headers and concat them into a single cookie header, separated by
// semicolons.
let cookie_sep = "; ".as_bytes();
let mut cookies = vec![];
let mut headers = Vec::with_capacity(header_map.len());
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | true |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/ext/http/request_properties.rs | ext/http/request_properties.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::borrow::Cow;
use std::net::Ipv4Addr;
use std::net::SocketAddr;
use std::net::SocketAddrV4;
use std::rc::Rc;
use deno_core::OpState;
use deno_core::ResourceId;
use deno_error::JsErrorBox;
use deno_net::raw::NetworkStream;
use deno_net::raw::NetworkStreamAddress;
use deno_net::raw::NetworkStreamListener;
use deno_net::raw::NetworkStreamType;
use deno_net::raw::take_network_stream_listener_resource;
use deno_net::raw::take_network_stream_resource;
use hyper::HeaderMap;
use hyper::Uri;
use hyper::header::HOST;
// TODO(mmastrac): I don't like that we have to clone this, but it's one-time setup
/// Properties derived from a listener: the URL scheme it implies, a host to
/// fall back to when a request carries none, and the local port/stream kind.
#[derive(Clone)]
pub struct HttpListenProperties {
  // Scheme string (includes the "://" suffix — see req_scheme_from_stream_type).
  pub scheme: &'static str,
  // Host used when the request has no authority and no Host header.
  pub fallback_host: String,
  // None for address families without ports (e.g. unix sockets).
  pub local_port: Option<u32>,
  pub stream_type: NetworkStreamType,
}
/// Properties of one accepted connection (peer identity plus the listener's
/// local port and stream kind).
#[derive(Clone)]
pub struct HttpConnectionProperties {
  pub peer_address: Rc<str>,
  // None for address families without ports (e.g. unix sockets).
  pub peer_port: Option<u32>,
  pub local_port: Option<u32>,
  pub stream_type: NetworkStreamType,
}
/// Per-request properties; currently only the resolved authority, which may
/// borrow from the request headers.
pub struct HttpRequestProperties<'a> {
  pub authority: Option<Cow<'a, str>>,
}
/// Pluggable trait to determine listen, connection and request properties
/// for embedders that wish to provide alternative routes for incoming HTTP.
#[async_trait::async_trait(?Send)]
pub trait HttpPropertyExtractor {
  /// The listener type taken out of the resource table.
  type Listener: 'static;
  /// The per-connection type produced by accepting on [`Self::Listener`].
  type Connection;
  /// Given a listener [`ResourceId`], returns the [`HttpPropertyExtractor::Listener`].
  fn get_listener_for_rid(
    state: &mut OpState,
    listener_rid: ResourceId,
  ) -> Result<Self::Listener, JsErrorBox>;
  /// Given a connection [`ResourceId`], returns the [`HttpPropertyExtractor::Connection`].
  fn get_connection_for_rid(
    state: &mut OpState,
    connection_rid: ResourceId,
  ) -> Result<Self::Connection, JsErrorBox>;
  /// Determines the listener properties.
  fn listen_properties_from_listener(
    listener: &Self::Listener,
  ) -> Result<HttpListenProperties, std::io::Error>;
  /// Determines the listener properties given a [`HttpPropertyExtractor::Connection`].
  fn listen_properties_from_connection(
    connection: &Self::Connection,
  ) -> Result<HttpListenProperties, std::io::Error>;
  /// Accept a new [`HttpPropertyExtractor::Connection`] from the given listener [`HttpPropertyExtractor::Listener`].
  async fn accept_connection_from_listener(
    listener: &Self::Listener,
  ) -> Result<Self::Connection, JsErrorBox>;
  /// Determines the connection properties.
  fn connection_properties(
    listen_properties: &HttpListenProperties,
    connection: &Self::Connection,
  ) -> HttpConnectionProperties;
  /// Turn a given [`HttpPropertyExtractor::Connection`] into a [`NetworkStream`].
  fn to_network_stream_from_connection(
    connection: Self::Connection,
  ) -> NetworkStream;
  /// Determines the request properties.
  fn request_properties<'a>(
    connection_properties: &'a HttpConnectionProperties,
    uri: &'a Uri,
    headers: &'a HeaderMap,
  ) -> HttpRequestProperties<'a>;
}
/// Stock [`HttpPropertyExtractor`] backed directly by `deno_net` network
/// streams and listeners.
pub struct DefaultHttpPropertyExtractor {}
#[async_trait::async_trait(?Send)]
impl HttpPropertyExtractor for DefaultHttpPropertyExtractor {
  type Listener = NetworkStreamListener;
  type Connection = NetworkStream;
  fn get_listener_for_rid(
    state: &mut OpState,
    listener_rid: ResourceId,
  ) -> Result<NetworkStreamListener, JsErrorBox> {
    // Takes (removes) the listener resource from the table.
    take_network_stream_listener_resource(
      &mut state.resource_table,
      listener_rid,
    )
  }
  fn get_connection_for_rid(
    state: &mut OpState,
    stream_rid: ResourceId,
  ) -> Result<NetworkStream, JsErrorBox> {
    take_network_stream_resource(&mut state.resource_table, stream_rid)
      .map_err(JsErrorBox::from_err)
  }
  async fn accept_connection_from_listener(
    listener: &NetworkStreamListener,
  ) -> Result<NetworkStream, JsErrorBox> {
    // The accepted peer address is discarded here; connection_properties
    // re-queries it from the stream when needed.
    listener
      .accept()
      .await
      .map_err(JsErrorBox::from_err)
      .map(|(stm, _)| stm)
  }
  fn listen_properties_from_listener(
    listener: &NetworkStreamListener,
  ) -> Result<HttpListenProperties, std::io::Error> {
    let stream_type = listener.stream();
    let local_address = listener.listen_address()?;
    listener_properties(stream_type, local_address)
  }
  fn listen_properties_from_connection(
    connection: &Self::Connection,
  ) -> Result<HttpListenProperties, std::io::Error> {
    let stream_type = connection.stream();
    let local_address = connection.local_address()?;
    listener_properties(stream_type, local_address)
  }
  fn to_network_stream_from_connection(
    connection: Self::Connection,
  ) -> NetworkStream {
    // Connections already are NetworkStreams; identity conversion.
    connection
  }
  fn connection_properties(
    listen_properties: &HttpListenProperties,
    connection: &NetworkStream,
  ) -> HttpConnectionProperties {
    // We always want some sort of peer address. If we can't get one, just make up one.
    let peer_address = connection.peer_address().unwrap_or_else(|_| {
      NetworkStreamAddress::Ip(SocketAddr::V4(SocketAddrV4::new(
        Ipv4Addr::new(0, 0, 0, 0),
        0,
      )))
    });
    let peer_port: Option<u32> = match peer_address {
      NetworkStreamAddress::Ip(ip) => Some(ip.port() as _),
      #[cfg(unix)]
      NetworkStreamAddress::Unix(_) => None,
      #[cfg(any(
        target_os = "android",
        target_os = "linux",
        target_os = "macos"
      ))]
      NetworkStreamAddress::Vsock(vsock) => Some(vsock.port()),
      NetworkStreamAddress::Tunnel(ref addr) => Some(addr.port() as _),
    };
    // Map the address to a displayable peer string; unix sockets get the
    // fixed placeholder "unix".
    let peer_address = match peer_address {
      NetworkStreamAddress::Ip(addr) => Rc::from(addr.ip().to_string()),
      #[cfg(unix)]
      NetworkStreamAddress::Unix(_) => Rc::from("unix"),
      #[cfg(any(
        target_os = "android",
        target_os = "linux",
        target_os = "macos"
      ))]
      NetworkStreamAddress::Vsock(addr) => {
        Rc::from(format!("vsock:{}", addr.cid()))
      }
      NetworkStreamAddress::Tunnel(ref addr) => Rc::from(addr.hostname()),
    };
    let local_port = listen_properties.local_port;
    let stream_type = listen_properties.stream_type;
    HttpConnectionProperties {
      peer_address,
      peer_port,
      local_port,
      stream_type,
    }
  }
  fn request_properties<'a>(
    connection_properties: &'a HttpConnectionProperties,
    uri: &'a Uri,
    headers: &'a HeaderMap,
  ) -> HttpRequestProperties<'a> {
    let authority = req_host(
      uri,
      headers,
      connection_properties.stream_type,
      connection_properties.local_port.unwrap_or_default(),
    );
    HttpRequestProperties { authority }
  }
}
/// Builds [`HttpListenProperties`] (scheme, fallback host, local port) from a
/// stream type and its local address.
fn listener_properties(
  stream_type: NetworkStreamType,
  local_address: NetworkStreamAddress,
) -> Result<HttpListenProperties, std::io::Error> {
  let scheme = req_scheme_from_stream_type(stream_type);
  let fallback_host = req_host_from_addr(stream_type, &local_address);
  // Unix sockets have no port; IP/vsock/tunnel addresses do.
  let local_port: Option<u32> = match local_address {
    NetworkStreamAddress::Ip(ip) => Some(ip.port() as _),
    #[cfg(unix)]
    NetworkStreamAddress::Unix(_) => None,
    #[cfg(any(
      target_os = "android",
      target_os = "linux",
      target_os = "macos"
    ))]
    NetworkStreamAddress::Vsock(vsock) => Some(vsock.port()),
    NetworkStreamAddress::Tunnel(addr) => Some(addr.port() as _),
  };
  Ok(HttpListenProperties {
    scheme,
    fallback_host,
    local_port,
    stream_type,
  })
}
/// Compute the fallback address from the [`NetworkStreamListenAddress`]. If the request has no authority/host in
/// its URI, and there is no [`HeaderName::HOST`] header, we fall back to this.
fn req_host_from_addr(
  stream_type: NetworkStreamType,
  addr: &NetworkStreamAddress,
) -> String {
  match addr {
    NetworkStreamAddress::Ip(addr) => {
      // Elide the port only when it is the default for the stream type
      // (443 for TLS, 80 for plain TCP).
      if (stream_type == NetworkStreamType::Tls && addr.port() == 443)
        || (stream_type == NetworkStreamType::Tcp && addr.port() == 80)
      {
        // Loopback/unspecified addresses present as "localhost".
        if addr.ip().is_loopback() || addr.ip().is_unspecified() {
          return "localhost".to_owned();
        }
        addr.ip().to_string()
      } else {
        if addr.ip().is_loopback() || addr.ip().is_unspecified() {
          return format!("localhost:{}", addr.port());
        }
        addr.to_string()
      }
    }
    // There is no standard way for unix domain socket URLs
    // nginx and nodejs request use http://unix:[socket_path]:/ but it is not a valid URL
    // httpie uses http+unix://[percent_encoding_of_path]/ which we follow
    #[cfg(unix)]
    NetworkStreamAddress::Unix(unix) => percent_encoding::percent_encode(
      unix
        .as_pathname()
        .and_then(|x| x.to_str())
        .unwrap_or_default()
        .as_bytes(),
      percent_encoding::NON_ALPHANUMERIC,
    )
    .to_string(),
    #[cfg(any(
      target_os = "android",
      target_os = "linux",
      target_os = "macos"
    ))]
    NetworkStreamAddress::Vsock(vsock) => {
      format!("{}:{}", vsock.cid(), vsock.port())
    }
    NetworkStreamAddress::Tunnel(addr) => {
      // Tunnels are always HTTPS-like, so 443 is treated as the default port.
      if addr.port() == 443 {
        addr.hostname()
      } else {
        format!("{}:{}", addr.hostname(), addr.port())
      }
    }
  }
}
/// Maps a stream type to its URL scheme. Note: the returned string includes
/// the "://" separator.
fn req_scheme_from_stream_type(stream_type: NetworkStreamType) -> &'static str {
  match stream_type {
    NetworkStreamType::Tcp => "http://",
    NetworkStreamType::Tls | NetworkStreamType::Tunnel => "https://",
    #[cfg(unix)]
    NetworkStreamType::Unix => "http+unix://",
    #[cfg(any(
      target_os = "android",
      target_os = "linux",
      target_os = "macos"
    ))]
    NetworkStreamType::Vsock => "http+vsock://",
  }
}
/// Resolves the request's authority: URI authority first (with default ports
/// stripped down to the bare host), then the `Host` header, else `None`.
fn req_host<'a>(
  uri: &'a Uri,
  headers: &'a HeaderMap,
  addr_type: NetworkStreamType,
  port: u32,
) -> Option<Cow<'a, str>> {
  // It is rare that an authority will be passed, but if it does, it takes priority
  if let Some(auth) = uri.authority() {
    match addr_type {
      NetworkStreamType::Tcp => {
        if port == 80 {
          return Some(Cow::Borrowed(auth.host()));
        }
      }
      NetworkStreamType::Tls | NetworkStreamType::Tunnel => {
        if port == 443 {
          return Some(Cow::Borrowed(auth.host()));
        }
      }
      #[cfg(unix)]
      NetworkStreamType::Unix => {}
      #[cfg(any(
        target_os = "android",
        target_os = "linux",
        target_os = "macos"
      ))]
      NetworkStreamType::Vsock => {}
    }
    return Some(Cow::Borrowed(auth.as_str()));
  }
  // TODO(mmastrac): Most requests will use this path and we probably will want to optimize it in the future
  if let Some(host) = headers.get(HOST) {
    return Some(match host.to_str() {
      Ok(host) => Cow::Borrowed(host),
      // Non-UTF-8 header bytes are decoded byte-per-byte as latin-1.
      Err(_) => Cow::Owned(
        host
          .as_bytes()
          .iter()
          .cloned()
          .map(char::from)
          .collect::<String>(),
      ),
    });
  }
  None
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/ext/http/http_next.rs | ext/http/http_next.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::borrow::Cow;
use std::cell::RefCell;
use std::ffi::c_void;
use std::future::Future;
use std::future::poll_fn;
use std::io;
use std::pin::Pin;
use std::ptr::null;
use std::rc::Rc;
use bytes::Bytes;
use bytes::BytesMut;
use cache_control::CacheControl;
use deno_core::AsyncMut;
use deno_core::AsyncRefCell;
use deno_core::AsyncResult;
use deno_core::BufView;
use deno_core::ByteString;
use deno_core::CancelFuture;
use deno_core::CancelHandle;
use deno_core::CancelTryFuture;
use deno_core::ExternalPointer;
use deno_core::JsBuffer;
use deno_core::OpState;
use deno_core::RcRef;
use deno_core::Resource;
use deno_core::ResourceId;
use deno_core::external;
use deno_core::futures::TryFutureExt;
use deno_core::op2;
use deno_core::serde_v8::from_v8;
use deno_core::unsync::JoinHandle;
use deno_core::unsync::spawn;
use deno_core::v8;
use deno_net::ops_tls::TlsStream;
use deno_net::raw::NetworkStream;
use deno_net::raw::NetworkStreamReadHalf;
use deno_net::raw::NetworkStreamWriteHalf;
use deno_websocket::ws_create_server_stream;
use fly_accept_encoding::Encoding;
use hyper::StatusCode;
use hyper::body::Incoming;
use hyper::header::ACCEPT_ENCODING;
use hyper::header::CACHE_CONTROL;
use hyper::header::CONTENT_ENCODING;
use hyper::header::CONTENT_LENGTH;
use hyper::header::CONTENT_RANGE;
use hyper::header::CONTENT_TYPE;
use hyper::header::COOKIE;
use hyper::header::HeaderMap;
use hyper::http::HeaderName;
use hyper::http::HeaderValue;
use hyper::server::conn::http1;
use hyper::server::conn::http2;
use hyper::service::HttpService;
use hyper::service::service_fn;
use hyper::upgrade::OnUpgrade;
use hyper_util::rt::TokioIo;
use once_cell::sync::Lazy;
use smallvec::SmallVec;
use tokio::io::AsyncReadExt;
use tokio::io::AsyncWriteExt;
use tokio::net::TcpStream;
use super::fly_accept_encoding;
use crate::LocalExecutor;
use crate::Options;
use crate::compressible::is_content_compressible;
use crate::extract_network_stream;
use crate::network_buffered_stream::NetworkStreamPrefixCheck;
use crate::request_body::HttpRequestBody;
use crate::request_properties::HttpConnectionProperties;
use crate::request_properties::HttpListenProperties;
use crate::request_properties::HttpPropertyExtractor;
use crate::response_body::Compression;
use crate::response_body::ResponseBytesInner;
use crate::service::HttpRecord;
use crate::service::HttpRecordResponse;
use crate::service::HttpRequestBodyAutocloser;
use crate::service::HttpServerState;
use crate::service::SignallingRc;
use crate::service::handle_request;
use crate::service::http_general_trace;
use crate::service::http_trace;
type Request = hyper::Request<Incoming>;
/// Opt-in flag for hyper's vectored-write path: any non-empty value of the
/// `DENO_USE_WRITEV` environment variable enables it.
static USE_WRITEV: Lazy<bool> = Lazy::new(|| {
  matches!(std::env::var("DENO_USE_WRITEV"), Ok(v) if !v.is_empty())
});
/// All HTTP/2 connections start with this byte string.
///
/// In HTTP/2, each endpoint is required to send a connection preface as a final confirmation
/// of the protocol in use and to establish the initial settings for the HTTP/2 connection. The
/// client and server each send a different connection preface.
///
/// The client connection preface starts with a sequence of 24 octets, which in hex notation is:
///
/// 0x505249202a20485454502f322e300d0a0d0a534d0d0a0d0a
///
/// That is, the connection preface starts with the string PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n). This sequence
/// MUST be followed by a SETTINGS frame (Section 6.5), which MAY be empty.
const HTTP2_PREFIX: &[u8] = b"PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n";
/// ALPN negotiation for "h2"
const TLS_ALPN_HTTP_2: &[u8] = b"h2";
/// ALPN negotiation for "http/1.1"
const TLS_ALPN_HTTP_11: &[u8] = b"http/1.1";
/// Name a trait for streams we can serve HTTP over.
trait HttpServeStream:
  tokio::io::AsyncRead + tokio::io::AsyncWrite + Unpin + Send + 'static
{
}
// Blanket impl: any stream meeting the bounds is an HttpServeStream.
impl<S: tokio::io::AsyncRead + tokio::io::AsyncWrite + Unpin + Send + 'static>
  HttpServeStream for S
{
}
// Newtype so the external! registration below gets a distinct named type.
#[repr(transparent)]
struct RcHttpRecord(Rc<HttpRecord>);
// Register the [`HttpRecord`] as an external.
external!(RcHttpRecord, "http record");
/// Construct Rc<HttpRecord> from raw external pointer, consuming
/// refcount. You must make sure the external is deleted on the JS side.
macro_rules! take_external {
  ($external:expr, $args:tt) => {{
    let ptr = ExternalPointer::<RcHttpRecord>::from_raw($external);
    let record = ptr.unsafely_take().0;
    http_trace!(record, $args);
    record
  }};
}
/// Clone Rc<HttpRecord> from raw external pointer.
macro_rules! clone_external {
  ($external:expr, $args:tt) => {{
    let ptr = ExternalPointer::<RcHttpRecord>::from_raw($external);
    ptr.unsafely_deref().0.clone()
  }};
}
/// Errors surfaced by the "next" (hyper 1.x) HTTP server ops. The `class`
/// attribute selects the JS error class each variant maps to.
#[derive(Debug, thiserror::Error, deno_error::JsError)]
pub enum HttpNextError {
  #[class(inherit)]
  #[error(transparent)]
  Resource(#[from] deno_core::error::ResourceError),
  #[class(inherit)]
  #[error("{0}")]
  Io(#[from] io::Error),
  #[class("Http")]
  #[error("{0}")]
  Hyper(#[from] hyper::Error),
  #[class(inherit)]
  #[error(transparent)]
  JoinError(
    #[from]
    #[inherit]
    tokio::task::JoinError,
  ),
  #[class(inherit)]
  #[error(transparent)]
  Canceled(
    #[from]
    #[inherit]
    deno_core::Canceled,
  ),
  #[class(generic)]
  #[error(transparent)]
  UpgradeUnavailable(#[from] crate::service::UpgradeUnavailableError),
  #[class(inherit)]
  #[error("{0}")]
  Other(
    #[from]
    #[inherit]
    deno_error::JsErrorBox,
  ),
  #[class("Http")]
  #[error("invalid HTTP status line")]
  InvalidHttpStatusLine,
  #[class("Http")]
  #[error("raw upgrade failed")]
  RawUpgradeFailed,
}
/// Starts a raw (non-websocket) upgrade of the request's underlying
/// connection and returns a duplex [`UpgradeStream`] resource id for it.
#[op2(fast)]
#[smi]
pub fn op_http_upgrade_raw(
  state: &mut OpState,
  external: *const c_void,
) -> Result<ResourceId, HttpNextError> {
  // SAFETY: external is deleted before calling this op.
  let http = unsafe { take_external!(external, "op_http_upgrade_raw") };
  let upgrade = http.upgrade()?;
  // Read half starts empty; it is populated once the upgrade completes.
  let read = Rc::new(AsyncRefCell::new(None));
  let read_cell = AsyncRefCell::borrow_sync(read.clone()).unwrap();
  // Write half begins in a parsing state; the buffer is pre-sized for a
  // typical "101 Switching Protocols" response head.
  let write = UpgradeStreamWriteState::Parsing(
    BytesMut::with_capacity(b"HTTP/1.1 101 Switching Protocols\r\nConnection: Upgrade\r\nUpgrade: websocket\r\n\r\n".len()),
    http,
    upgrade,
    read_cell,
  );
  Ok(state.resource_table.add(UpgradeStream::new(read, write)))
}
/// Completes a websocket upgrade: awaits hyper's upgrade future, extracts the
/// raw network stream plus any buffered bytes, and hands both to the
/// websocket extension, returning its server-stream resource id.
#[op2(async)]
#[smi]
pub async fn op_http_upgrade_websocket_next(
  state: Rc<RefCell<OpState>>,
  external: *const c_void,
) -> Result<ResourceId, HttpNextError> {
  let upgrade = {
    // SAFETY: op is called with external.
    let http =
      unsafe { clone_external!(external, "op_http_upgrade_websocket_next") };
    http.upgrade()?
  };
  let upgraded = upgrade.await?;
  let (stream, bytes) = extract_network_stream(upgraded);
  Ok(ws_create_server_stream(
    &mut state.borrow_mut(),
    stream,
    bytes,
  ))
}
/// Resolves the JS response promise with the given status and no body.
#[op2(fast)]
pub fn op_http_set_promise_complete(external: *const c_void, status: u16) {
  let http =
    // SAFETY: external is deleted before calling this op.
    unsafe { take_external!(external, "op_http_set_promise_complete") };
  set_promise_complete(http, status);
}
// Shared tail for the empty-body completion paths: set the status (ignoring
// invalid values) and mark the record complete.
fn set_promise_complete(http: Rc<HttpRecord>, status: u16) {
  // The Javascript code should never provide a status that is invalid here (see 23_response.js), so we
  // will quietly ignore invalid values.
  if let Ok(code) = StatusCode::from_u16(status) {
    http.response_parts().status = code;
    http.otel_info_set_status(status);
  }
  http.complete();
}
/// Returns `[method, authority, path, peer_ip, peer_port, scheme]` for the
/// request as a v8 array; absent parts are `undefined`.
#[op2]
pub fn op_http_get_request_method_and_url<'scope, HTTP>(
  scope: &mut v8::PinScope<'scope, '_>,
  external: *const c_void,
) -> v8::Local<'scope, v8::Array>
where
  HTTP: HttpPropertyExtractor,
{
  let http =
    // SAFETY: op is called with external.
    unsafe { clone_external!(external, "op_http_get_request_method_and_url") };
  let request_info = http.request_info();
  let request_parts = http.request_parts();
  let request_properties = HTTP::request_properties(
    &request_info,
    &request_parts.uri,
    &request_parts.headers,
  );
  let method: v8::Local<v8::Value> = v8::String::new_from_utf8(
    scope,
    request_parts.method.as_str().as_bytes(),
    v8::NewStringType::Normal,
  )
  .unwrap()
  .into();
  let scheme: v8::Local<v8::Value> = match request_parts.uri.scheme_str() {
    Some(scheme) => v8::String::new_from_utf8(
      scope,
      scheme.as_bytes(),
      v8::NewStringType::Normal,
    )
    .unwrap()
    .into(),
    None => v8::undefined(scope).into(),
  };
  // Authority preference: the URI's own authority, then the one resolved by
  // the property extractor, else undefined.
  let authority: v8::Local<v8::Value> =
    if let Some(authority) = request_parts.uri.authority() {
      v8::String::new_from_utf8(
        scope,
        authority.as_str().as_ref(),
        v8::NewStringType::Normal,
      )
      .unwrap()
      .into()
    } else if let Some(authority) = request_properties.authority {
      v8::String::new_from_utf8(
        scope,
        authority.as_bytes(),
        v8::NewStringType::Normal,
      )
      .unwrap()
      .into()
    } else {
      v8::undefined(scope).into()
    };
  // Only extract the path part - we handle authority elsewhere
  let path = match request_parts.uri.path_and_query() {
    Some(path_and_query) => {
      let path = path_and_query.as_str();
      // Keep origin-form ("/...") and asterisk-form ("*") as-is; anything
      // else gets a leading slash prepended.
      if matches!(path.as_bytes().first(), Some(b'/' | b'*')) {
        Cow::Borrowed(path)
      } else {
        Cow::Owned(format!("/{}", path))
      }
    }
    None => Cow::Borrowed(""),
  };
  let path: v8::Local<v8::Value> = v8::String::new_from_utf8(
    scope,
    path.as_bytes(),
    v8::NewStringType::Normal,
  )
  .unwrap()
  .into();
  // NOTE(review): when a client_addr override is present on the record it is
  // assumed to be a parseable "ip:port" string (unwraps otherwise) — confirm
  // with the code that sets client_addr.
  let (peer_ip, peer_port) = match &*http.client_addr() {
    Some(client_addr) => {
      let addr: std::net::SocketAddr =
        client_addr.to_str().unwrap().parse().unwrap();
      (Rc::from(format!("{}", addr.ip())), Some(addr.port() as u32))
    }
    _ => (request_info.peer_address.clone(), request_info.peer_port),
  };
  let peer_ip: v8::Local<v8::Value> = v8::String::new_from_utf8(
    scope,
    peer_ip.as_bytes(),
    v8::NewStringType::Normal,
  )
  .unwrap()
  .into();
  let peer_port: v8::Local<v8::Value> = match peer_port {
    Some(port) => v8::Number::new(scope, port.into()).into(),
    None => v8::undefined(scope).into(),
  };
  let vec = [method, authority, path, peer_ip, peer_port, scheme];
  v8::Array::new_with_elements(scope, vec.as_slice())
}
#[op2]
#[serde]
pub fn op_http_get_request_header(
external: *const c_void,
#[string] name: String,
) -> Option<ByteString> {
let http =
// SAFETY: op is called with external.
unsafe { clone_external!(external, "op_http_get_request_header") };
let request_parts = http.request_parts();
let value = request_parts.headers.get(name);
value.map(|value| value.as_bytes().into())
}
/// Returns all request headers as a flat v8 array of alternating
/// name/value strings, with all Cookie headers folded into one entry.
#[op2]
pub fn op_http_get_request_headers<'scope>(
  scope: &mut v8::PinScope<'scope, '_>,
  external: *const c_void,
) -> v8::Local<'scope, v8::Array> {
  let http =
    // SAFETY: op is called with external.
    unsafe { clone_external!(external, "op_http_get_request_headers") };
  let headers = &http.request_parts().headers;
  // Two slots for each header key/value pair
  let mut vec: SmallVec<[v8::Local<v8::Value>; 32]> =
    SmallVec::with_capacity(headers.len() * 2);
  // Cookie values are collected separately and appended last.
  let mut cookies: Option<Vec<&[u8]>> = None;
  for (name, value) in headers {
    if name == COOKIE {
      if let Some(ref mut cookies) = cookies {
        cookies.push(value.as_bytes());
      } else {
        cookies = Some(vec![value.as_bytes()]);
      }
    } else {
      // Header names/values are latin-1 compatible, hence one-byte strings.
      vec.push(
        v8::String::new_from_one_byte(
          scope,
          name.as_ref(),
          v8::NewStringType::Normal,
        )
        .unwrap()
        .into(),
      );
      vec.push(
        v8::String::new_from_one_byte(
          scope,
          value.as_bytes(),
          v8::NewStringType::Normal,
        )
        .unwrap()
        .into(),
      );
    }
  }
  // We treat cookies specially, because we don't want them to get them
  // mangled by the `Headers` object in JS. What we do is take all cookie
  // headers and concat them into a single cookie header, separated by
  // semicolons.
  // TODO(mmastrac): This should probably happen on the JS side on-demand
  if let Some(cookies) = cookies {
    let cookie_sep = "; ".as_bytes();
    vec.push(
      v8::String::new_external_onebyte_static(scope, COOKIE.as_ref())
        .unwrap()
        .into(),
    );
    vec.push(
      v8::String::new_from_one_byte(
        scope,
        cookies.join(cookie_sep).as_ref(),
        v8::NewStringType::Normal,
      )
      .unwrap()
      .into(),
    );
  }
  v8::Array::new_with_elements(scope, vec.as_slice())
}
/// Wraps the request's body in an [`HttpRequestBody`] resource and returns
/// its rid; the rid is also registered on the record for auto-close.
#[op2(fast)]
#[smi]
pub fn op_http_read_request_body(
  state: Rc<RefCell<OpState>>,
  external: *const c_void,
) -> ResourceId {
  let http =
    // SAFETY: op is called with external.
    unsafe { clone_external!(external, "op_http_read_request_body") };
  let rid = match http.take_request_body() {
    Some(incoming) => {
      let body_resource = Rc::new(HttpRequestBody::new(incoming));
      state.borrow_mut().resource_table.add_rc(body_resource)
    }
    _ => {
      // This should not be possible, but rather than panicking we'll return an invalid
      // resource value to JavaScript.
      ResourceId::MAX
    }
  };
  http.put_resource(HttpRequestBodyAutocloser::new(rid, state.clone()));
  rid
}
/// Appends one response header. Name and value arrive as one-byte (latin-1)
/// strings from JS.
#[op2(fast)]
pub fn op_http_set_response_header(
  external: *const c_void,
  #[string(onebyte)] name: Cow<'_, [u8]>,
  #[string(onebyte)] value: Cow<'_, [u8]>,
) {
  let http =
    // SAFETY: op is called with external.
    unsafe { clone_external!(external, "op_http_set_response_header") };
  let mut response_parts = http.response_parts();
  // These are valid latin-1 strings
  let name = HeaderName::from_bytes(&name).unwrap();
  let value = match value {
    Cow::Borrowed(bytes) => HeaderValue::from_bytes(bytes).unwrap(),
    // SAFETY: These are valid latin-1 strings
    Cow::Owned(bytes_vec) => unsafe {
      HeaderValue::from_maybe_shared_unchecked(bytes::Bytes::from(bytes_vec))
    },
  };
  response_parts.headers.append(name, value);
}
/// Appends a batch of response headers given as a v8 array of `[name, value]`
/// pairs.
#[op2(fast)]
pub fn op_http_set_response_headers(
  scope: &mut v8::PinScope<'_, '_>,
  external: *const c_void,
  headers: v8::Local<v8::Array>,
) {
  let http =
    // SAFETY: op is called with external.
    unsafe { clone_external!(external, "op_http_set_response_headers") };
  // TODO(mmastrac): Invalid headers should be handled?
  let mut response_parts = http.response_parts();
  let len = headers.length();
  let header_len = len * 2;
  response_parts
    .headers
    .reserve(header_len.try_into().unwrap());
  for i in 0..len {
    let item = headers.get_index(scope, i).unwrap();
    let pair = v8::Local::<v8::Array>::try_from(item).unwrap();
    let name = pair.get_index(scope, 0).unwrap();
    let value = pair.get_index(scope, 1).unwrap();
    let v8_name: ByteString = from_v8(scope, name).unwrap();
    let v8_value: ByteString = from_v8(scope, value).unwrap();
    let header_name = HeaderName::from_bytes(&v8_name).unwrap();
    let header_value =
      // SAFETY: These are valid latin-1 strings
      unsafe { HeaderValue::from_maybe_shared_unchecked(v8_value) };
    response_parts.headers.append(header_name, header_value);
  }
}
/// Stores HTTP trailers on the record; they are emitted after the body.
#[op2]
pub fn op_http_set_response_trailers(
  external: *const c_void,
  #[serde] trailers: Vec<(ByteString, ByteString)>,
) {
  let http =
    // SAFETY: op is called with external.
    unsafe { clone_external!(external, "op_http_set_response_trailers") };
  let mut trailer_map: HeaderMap = HeaderMap::with_capacity(trailers.len());
  for (name, value) in trailers {
    // These are valid latin-1 strings
    let name = HeaderName::from_bytes(&name).unwrap();
    // SAFETY: These are valid latin-1 strings
    let value = unsafe { HeaderValue::from_maybe_shared_unchecked(value) };
    trailer_map.append(name, value);
  }
  *http.trailers() = Some(trailer_map);
}
/// Picks a response compression based on the request's Accept-Encoding header
/// and (when known) the body length.
fn is_request_compressible(
  length: Option<usize>,
  headers: &HeaderMap,
) -> Compression {
  // By the time we add compression headers and Accept-Encoding, it probably
  // doesn't make sense to compress stuff that's smaller than this.
  if matches!(length, Some(len) if len < 64) {
    return Compression::None;
  }
  let accept_encoding = match headers.get(ACCEPT_ENCODING) {
    Some(value) => value,
    None => return Compression::None,
  };
  // Fast path: exact matches for the values common browsers send.
  if let Ok(value) = accept_encoding.to_str() {
    match value {
      "gzip, deflate, br" | "gzip, deflate, br, zstd" | "br" => {
        return Compression::Brotli;
      }
      "gzip" => return Compression::GZip,
      _ => {}
    }
  }
  // Fall back to the expensive parser
  let accepted =
    fly_accept_encoding::encodings_iter_http_1(headers).filter(|r| {
      matches!(
        r,
        Ok((
          Some(Encoding::Identity | Encoding::Gzip | Encoding::Brotli),
          _
        ))
      )
    });
  match fly_accept_encoding::preferred(accepted) {
    Ok(Some(fly_accept_encoding::Encoding::Gzip)) => Compression::GZip,
    Ok(Some(fly_accept_encoding::Encoding::Brotli)) => Compression::Brotli,
    _ => Compression::None,
  }
}
/// Decides whether the response headers permit transparent compression.
fn is_response_compressible(headers: &HeaderMap) -> bool {
  // Only compress when the content type is present and known-compressible.
  match headers.get(CONTENT_TYPE) {
    Some(content_type) if is_content_compressible(content_type) => {}
    _ => return false,
  }
  // Already-encoded or byte-range responses must pass through untouched.
  if headers.contains_key(CONTENT_ENCODING)
    || headers.contains_key(CONTENT_RANGE)
  {
    return false;
  }
  // Honor `Cache-Control: no-transform`.
  let no_transform = headers
    .get(CACHE_CONTROL)
    .and_then(|value| std::str::from_utf8(value.as_bytes()).ok())
    .and_then(|s| CacheControl::from_value(s))
    .is_some_and(|cc| cc.no_transform);
  !no_transform
}
/// Finalizes the compression decision against the response headers: always
/// sets `Vary`, and when compressing also weakens the ETag, drops
/// Content-Length and sets Content-Encoding.
fn modify_compressibility_from_response(
  compression: Compression,
  headers: &mut HeaderMap,
) -> Compression {
  // Vary is set even when we end up not compressing (see comment below).
  ensure_vary_accept_encoding(headers);
  if compression == Compression::None {
    return Compression::None;
  }
  if !is_response_compressible(headers) {
    return Compression::None;
  }
  let encoding = match compression {
    Compression::Brotli => "br",
    Compression::GZip => "gzip",
    _ => unreachable!(),
  };
  weaken_etag(headers);
  // Content-Length no longer matches the compressed body.
  headers.remove(CONTENT_LENGTH);
  headers.insert(CONTENT_ENCODING, HeaderValue::from_static(encoding));
  compression
}
/// If the user provided a ETag header for uncompressed data, we need to ensure it is a
/// weak Etag header ("W/").
fn weaken_etag(hmap: &mut HeaderMap) {
  if let Some(etag) = hmap.get_mut(hyper::header::ETAG)
    && !etag.as_bytes().starts_with(b"W/")
  {
    let mut v = Vec::with_capacity(etag.as_bytes().len() + 2);
    v.extend(b"W/");
    v.extend(etag.as_bytes());
    *etag = v.try_into().unwrap();
  }
}
// Set Vary: Accept-Encoding header for direct body response.
// Note: we set the header irrespective of whether or not we compress the data
// to make sure cache services do not serve uncompressed data to clients that
// support compression.
fn ensure_vary_accept_encoding(hmap: &mut HeaderMap) {
  if let Some(v) = hmap.get_mut(hyper::header::VARY)
    && let Ok(s) = v.to_str()
  {
    // Prepend to an existing Vary value unless it already mentions
    // Accept-Encoding (case-insensitive).
    if !s.to_lowercase().contains("accept-encoding") {
      *v = format!("Accept-Encoding, {s}").try_into().unwrap()
    }
    return;
  }
  hmap.insert(
    hyper::header::VARY,
    HeaderValue::from_static("Accept-Encoding"),
  );
}
/// Sets the appropriate response body. Use `force_instantiate_body` if you need
/// to ensure that the response is cleaned up correctly (eg: for resources).
fn set_response(
  http: Rc<HttpRecord>,
  length: Option<usize>,
  status: u16,
  force_instantiate_body: bool,
  response_fn: impl FnOnce(Compression) -> ResponseBytesInner,
) {
  // The request may have been cancelled by this point and if so, there's no need for us to
  // do all of this work to send the response.
  if !http.cancelled() {
    // Negotiate compression from the request, then adjust the response
    // headers (Vary/ETag/Content-Encoding) accordingly.
    let compression =
      is_request_compressible(length, &http.request_parts().headers);
    let mut response_headers =
      std::cell::RefMut::map(http.response_parts(), |this| &mut this.headers);
    let compression =
      modify_compressibility_from_response(compression, &mut response_headers);
    // The headers borrow must end before set_response_body re-borrows parts.
    drop(response_headers);
    http.set_response_body(response_fn(compression));
    // The Javascript code should never provide a status that is invalid here (see 23_response.js), so we
    // will quietly ignore invalid values.
    if let Ok(code) = StatusCode::from_u16(status) {
      http.response_parts().status = code;
      http.otel_info_set_status(status);
    }
  } else if force_instantiate_body {
    // Instantiate-and-abort so resource-backed bodies are still cleaned up.
    response_fn(Compression::None).abort();
  }
  http.complete();
}
/// Reports whether the request backing this record has been cancelled.
#[op2(fast)]
pub fn op_http_get_request_cancelled(external: *const c_void) -> bool {
  // SAFETY: op is called with external.
  let record =
    unsafe { clone_external!(external, "op_http_get_request_cancelled") };
  record.cancelled()
}
/// Resolves when (and if) the request is cancelled; returns `true` only if
/// the cancellation signal actually fired.
#[op2(async)]
pub async fn op_http_request_on_cancel(external: *const c_void) -> bool {
  let http =
    // SAFETY: op is called with external.
    unsafe { clone_external!(external, "op_http_request_on_cancel") };
  let (tx, rx) = tokio::sync::oneshot::channel();
  http.on_cancel(tx);
  // Drop our clone so the record's lifetime is governed by the request alone.
  drop(http);
  rx.await.is_ok()
}
/// Returned promise resolves when body streaming finishes.
/// Call [`op_http_close_after_finish`] when done with the external.
#[op2(async)]
pub async fn op_http_set_response_body_resource(
  state: Rc<RefCell<OpState>>,
  external: *const c_void,
  #[smi] stream_rid: ResourceId,
  auto_close: bool,
  status: u16,
) -> Result<bool, HttpNextError> {
  let http =
    // SAFETY: op is called with external.
    unsafe { clone_external!(external, "op_http_set_response_body_resource") };
  // IMPORTANT: We might end up requiring the OpState lock in set_response if we need to drop the request
  // body resource so we _cannot_ hold the OpState lock longer than necessary.
  // If the stream is auto_close, we will hold the last ref to it until the response is complete.
  // TODO(mmastrac): We should be using the same auto-close functionality rather than removing autoclose resources.
  // It's possible things could fail elsewhere if code expects the rid to continue existing after the response has been
  // returned.
  let resource = {
    let mut state = state.borrow_mut();
    if auto_close {
      state.resource_table.take_any(stream_rid)?
    } else {
      state.resource_table.get_any(stream_rid)?
    }
  };
  *http.needs_close_after_finish() = true;
  set_response(
    http.clone(),
    // Upper size bound from the resource, if it advertises one.
    resource.size_hint().1.map(|s| s as usize),
    status,
    true,
    move |compression| {
      ResponseBytesInner::from_resource(compression, resource, auto_close)
    },
  );
  Ok(http.response_body_finished().await)
}
/// Final cleanup step paired with `op_http_set_response_body_resource`;
/// consumes the external.
#[op2(fast)]
pub fn op_http_close_after_finish(external: *const c_void) {
  let http =
    // SAFETY: external is deleted before calling this op.
    unsafe { take_external!(external, "op_http_close_after_finish") };
  http.close_after_finish();
}
/// Sends a string body with the given status; an empty string resolves the
/// promise immediately with no body.
#[op2(fast)]
pub fn op_http_set_response_body_text(
  external: *const c_void,
  #[string] text: String,
  status: u16,
) {
  let http =
    // SAFETY: external is deleted before calling this op.
    unsafe { take_external!(external, "op_http_set_response_body_text") };
  if text.is_empty() {
    set_promise_complete(http, status);
  } else {
    set_response(http, Some(text.len()), status, false, |compression| {
      ResponseBytesInner::from_vec(compression, text.into_bytes())
    });
  }
}
/// Completes the response with a byte-buffer body. An empty buffer skips
/// body setup entirely and resolves the response promise immediately.
#[op2]
pub fn op_http_set_response_body_bytes(
  external: *const c_void,
  #[buffer] buffer: JsBuffer,
  status: u16,
) {
  let http =
    // SAFETY: external is deleted before calling this op.
    unsafe { take_external!(external, "op_http_set_response_body_bytes") };
  if buffer.is_empty() {
    set_promise_complete(http, status);
  } else {
    let content_length = buffer.len();
    set_response(http, Some(content_length), status, false, |compression| {
      ResponseBytesInner::from_bufview(compression, BufView::from(buffer))
    });
  }
}
/// Drives a single HTTP/1.1 connection (with upgrade support) to completion.
///
/// When `cancel` fires mid-connection we get the in-flight connection back,
/// ask hyper for a graceful shutdown, and await it until it drains.
fn serve_http11_unconditional(
  io: impl HttpServeStream,
  svc: impl HttpService<Incoming, ResBody = HttpRecordResponse> + 'static,
  cancel: Rc<CancelHandle>,
  http1_builder_hook: Option<fn(http1::Builder) -> http1::Builder>,
) -> impl Future<Output = Result<(), hyper::Error>> + 'static {
  let mut builder = http1::Builder::new();
  builder.keep_alive(true).writev(*USE_WRITEV);
  // Allow the embedder to customize the hyper HTTP/1 builder.
  if let Some(http1_builder_hook) = http1_builder_hook {
    builder = http1_builder_hook(builder);
  }
  let conn = builder
    .serve_connection(TokioIo::new(io), svc)
    .with_upgrades();
  async {
    match conn.or_abort(cancel).await {
      // Aborted: recover the connection, shut down gracefully, then drain.
      Err(mut conn) => {
        Pin::new(&mut conn).graceful_shutdown();
        conn.await
      }
      // Connection finished before any abort.
      Ok(res) => res,
    }
  }
}
/// Drives a single HTTP/2 connection to completion.
///
/// When `cancel` fires mid-connection we get the in-flight connection back,
/// ask hyper for a graceful shutdown, and await it until it drains.
fn serve_http2_unconditional(
  io: impl HttpServeStream,
  svc: impl HttpService<Incoming, ResBody = HttpRecordResponse> + 'static,
  cancel: Rc<CancelHandle>,
  http2_builder_hook: Option<
    fn(http2::Builder<LocalExecutor>) -> http2::Builder<LocalExecutor>,
  >,
) -> impl Future<Output = Result<(), hyper::Error>> + 'static {
  let mut builder = http2::Builder::new(LocalExecutor);
  // Allow the embedder to customize the hyper HTTP/2 builder.
  if let Some(http2_builder_hook) = http2_builder_hook {
    builder = http2_builder_hook(builder);
  }
  let conn = builder.serve_connection(TokioIo::new(io), svc);
  async {
    match conn.or_abort(cancel).await {
      // Aborted: recover the connection, shut down gracefully, then drain.
      Err(mut conn) => {
        Pin::new(&mut conn).graceful_shutdown();
        conn.await
      }
      Ok(res) => res,
    }
  }
}
/// Serves either HTTP/1.1 or HTTP/2 on `io`, deciding by sniffing whether
/// the stream begins with the HTTP/2 connection preface.
async fn serve_http2_autodetect(
  io: impl HttpServeStream,
  svc: impl HttpService<Incoming, ResBody = HttpRecordResponse> + 'static,
  cancel: Rc<CancelHandle>,
  options: Options,
) -> Result<(), HttpNextError> {
  let (is_http2, io) = NetworkStreamPrefixCheck::new(io, HTTP2_PREFIX)
    .match_prefix()
    .await?;
  let served = if is_http2 {
    serve_http2_unconditional(io, svc, cancel, options.http2_builder_hook)
      .await
  } else {
    serve_http11_unconditional(io, svc, cancel, options.http1_builder_hook)
      .await
  };
  served.map_err(HttpNextError::Hyper)
}
/// Spawns a task serving one TLS connection: finishes the handshake, then
/// dispatches on the ALPN result (falling back to preface sniffing when no
/// protocol was negotiated).
fn serve_https(
  mut io: TlsStream<TcpStream>,
  request_info: HttpConnectionProperties,
  lifetime: HttpLifetime,
  tx: tokio::sync::mpsc::Sender<Rc<HttpRecord>>,
  options: Options,
) -> JoinHandle<Result<(), HttpNextError>> {
  let HttpLifetime {
    server_state,
    connection_cancel_handle,
    listen_cancel_handle,
  } = lifetime;
  let legacy_abort = !options.no_legacy_abort;
  // One service per connection; every request is forwarded to JS via `tx`.
  let svc = service_fn(move |req: Request| {
    handle_request(
      req,
      request_info.clone(),
      server_state.clone(),
      tx.clone(),
      legacy_abort,
    )
  });
  spawn(
    async move {
      let handshake = io.handshake().await?;
      // If the client specifically negotiates a protocol, we will use it. If not, we'll auto-detect
      // based on the prefix bytes
      let handshake = handshake.alpn;
      if Some(TLS_ALPN_HTTP_2) == handshake.as_deref() {
        serve_http2_unconditional(
          io,
          svc,
          listen_cancel_handle,
          options.http2_builder_hook,
        )
        .await
        .map_err(HttpNextError::Hyper)
      } else if Some(TLS_ALPN_HTTP_11) == handshake.as_deref() {
        serve_http11_unconditional(
          io,
          svc,
          listen_cancel_handle,
          options.http1_builder_hook,
        )
        .await
        .map_err(HttpNextError::Hyper)
      } else {
        serve_http2_autodetect(io, svc, listen_cancel_handle, options).await
      }
    }
    // Cancelling the connection handle tears the entire task down.
    .try_or_cancel(connection_cancel_handle),
  )
}
/// Spawns a task serving one plaintext connection, auto-detecting HTTP/1.1
/// vs HTTP/2 from the connection preface.
fn serve_http(
  io: impl HttpServeStream,
  request_info: HttpConnectionProperties,
  lifetime: HttpLifetime,
  tx: tokio::sync::mpsc::Sender<Rc<HttpRecord>>,
  options: Options,
) -> JoinHandle<Result<(), HttpNextError>> {
  let HttpLifetime {
    server_state,
    connection_cancel_handle,
    listen_cancel_handle,
  } = lifetime;
  let legacy_abort = !options.no_legacy_abort;
  // One service per connection; every request is forwarded to JS via `tx`.
  let service = service_fn(move |request: Request| {
    handle_request(
      request,
      request_info.clone(),
      server_state.clone(),
      tx.clone(),
      legacy_abort,
    )
  });
  let conn_future =
    serve_http2_autodetect(io, service, listen_cancel_handle, options);
  // Cancelling the connection handle tears the entire task down.
  spawn(conn_future.try_or_cancel(connection_cancel_handle))
}
/// Dispatches a freshly accepted connection to the appropriate serve routine
/// based on the concrete network stream type (TLS gets the handshake/ALPN
/// path; everything else is served as plaintext with protocol autodetect).
fn serve_http_on<HTTP>(
  connection: HTTP::Connection,
  listen_properties: &HttpListenProperties,
  lifetime: HttpLifetime,
  tx: tokio::sync::mpsc::Sender<Rc<HttpRecord>>,
  options: Options,
) -> JoinHandle<Result<(), HttpNextError>>
where
  HTTP: HttpPropertyExtractor,
{
  let connection_properties: HttpConnectionProperties =
    HTTP::connection_properties(listen_properties, &connection);
  let network_stream = HTTP::to_network_stream_from_connection(connection);
  match network_stream {
    NetworkStream::Tcp(conn) => {
      serve_http(conn, connection_properties, lifetime, tx, options)
    }
    NetworkStream::Tls(conn) => {
      serve_https(conn, connection_properties, lifetime, tx, options)
    }
    #[cfg(unix)]
    NetworkStream::Unix(conn) => {
      serve_http(conn, connection_properties, lifetime, tx, options)
    }
    #[cfg(any(
      target_os = "android",
      target_os = "linux",
      target_os = "macos"
    ))]
    NetworkStream::Vsock(conn) => {
      serve_http(conn, connection_properties, lifetime, tx, options)
    }
    NetworkStream::Tunnel(conn) => {
      serve_http(conn, connection_properties, lifetime, tx, options)
    }
  }
}
/// Cancellation and lifetime handles shared with every connection spawned
/// from one serve call.
#[derive(Clone)]
struct HttpLifetime {
  // Cancels in-flight connections.
  connection_cancel_handle: Rc<CancelHandle>,
  // Cancels only the listener/accept side.
  listen_cancel_handle: Rc<CancelHandle>,
  // Keeps the shared server state (record pool) alive per-connection.
  server_state: SignallingRc<HttpServerState>,
}
/// Resource backing one serve call: the accept-loop join handle, both
/// cancellation scopes, the incoming-request channel, and the server state.
struct HttpJoinHandle {
  // Join handle of the accept-loop task; populated after spawning.
  join_handle: AsyncRefCell<Option<JoinHandle<Result<(), HttpNextError>>>>,
  connection_cancel_handle: Rc<CancelHandle>,
  listen_cancel_handle: Rc<CancelHandle>,
  // Receives request records from connection tasks for JS to process.
  rx: AsyncRefCell<tokio::sync::mpsc::Receiver<Rc<HttpRecord>>>,
  server_state: SignallingRc<HttpServerState>,
}
impl HttpJoinHandle {
  fn new(rx: tokio::sync::mpsc::Receiver<Rc<HttpRecord>>) -> Self {
    Self {
      join_handle: AsyncRefCell::new(None),
      connection_cancel_handle: CancelHandle::new_rc(),
      listen_cancel_handle: CancelHandle::new_rc(),
      rx: AsyncRefCell::new(rx),
      server_state: HttpServerState::new(),
    }
  }
  /// Bundle of handles handed to each spawned connection.
  fn lifetime(self: &Rc<Self>) -> HttpLifetime {
    HttpLifetime {
      connection_cancel_handle: self.connection_cancel_handle.clone(),
      listen_cancel_handle: self.listen_cancel_handle.clone(),
      server_state: self.server_state.clone(),
    }
  }
  fn connection_cancel_handle(self: &Rc<Self>) -> Rc<CancelHandle> {
    self.connection_cancel_handle.clone()
  }
  fn listen_cancel_handle(self: &Rc<Self>) -> Rc<CancelHandle> {
    self.listen_cancel_handle.clone()
  }
}
impl Resource for HttpJoinHandle {
  fn name(&self) -> Cow<'_, str> {
    "http".into()
  }
  fn close(self: Rc<Self>) {
    // During a close operation, we cancel everything
    self.connection_cancel_handle.cancel();
    self.listen_cancel_handle.cancel();
  }
}
impl Drop for HttpJoinHandle {
  fn drop(&mut self) {
    // In some cases we may be dropped without closing, so let's cancel everything on the way out
    self.connection_cancel_handle.cancel();
    self.listen_cancel_handle.cancel();
  }
}
#[op2]
#[serde]
pub fn op_http_serve<HTTP>(
state: Rc<RefCell<OpState>>,
#[smi] listener_rid: ResourceId,
) -> Result<(ResourceId, &'static str, String, bool), HttpNextError>
where
HTTP: HttpPropertyExtractor,
{
let listener =
HTTP::get_listener_for_rid(&mut state.borrow_mut(), listener_rid)?;
let listen_properties = HTTP::listen_properties_from_listener(&listener)?;
let (tx, rx) = tokio::sync::mpsc::channel(10);
let resource: Rc<HttpJoinHandle> = Rc::new(HttpJoinHandle::new(rx));
let listen_cancel_clone = resource.listen_cancel_handle();
let lifetime = resource.lifetime();
let options = {
let state = state.borrow();
*state.borrow::<Options>()
};
let listen_properties_clone: HttpListenProperties = listen_properties.clone();
let handle = spawn(async move {
loop {
let conn = HTTP::accept_connection_from_listener(&listener)
.try_or_cancel(listen_cancel_clone.clone())
.await?;
serve_http_on::<HTTP>(
conn,
&listen_properties_clone,
lifetime.clone(),
tx.clone(),
options,
);
}
#[allow(unreachable_code)]
Ok::<_, HttpNextError>(())
});
// Set the handle after we start the future
*RcRef::map(&resource, |this| &this.join_handle)
.try_borrow_mut()
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | true |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/ext/http/fly_accept_encoding.rs | ext/http/fly_accept_encoding.rs | // Copyright 2018 Yoshua Wuyts. All rights reserved. MIT license.
// Copyright 2018-2025 the Deno authors. MIT license.
// Forked from https://github.com/superfly/accept-encoding/blob/1cded757ec7ff3916e5bfe7441db76cdc48170dc/
// Forked to support both http 0.3 and http 1.0 crates.
use itertools::Itertools;
/// A list enumerating the categories of errors in this crate.
///
/// This list is intended to grow over time and it is not recommended to
/// exhaustively match against it.
#[derive(Debug, thiserror::Error)]
pub enum EncodingError {
  /// Invalid header encoding.
  #[error("Invalid header encoding.")]
  InvalidEncoding,
  /// The encoding scheme is unknown.
  #[error("Unknown encoding scheme.")]
  UnknownEncoding,
}
/// Encodings to use.
#[derive(Debug, Clone, Copy, Eq, PartialEq, Hash)]
pub enum Encoding {
  /// The Gzip encoding.
  Gzip,
  /// The Deflate encoding.
  Deflate,
  /// The Brotli encoding.
  Brotli,
  /// The Zstd encoding.
  Zstd,
  /// No encoding.
  Identity,
}
impl Encoding {
  /// Parses a given string into its corresponding encoding.
  ///
  /// `Ok(None)` represents the `*` wildcard; unrecognized schemes yield
  /// [`EncodingError::UnknownEncoding`].
  fn parse(s: &str) -> Result<Option<Encoding>, EncodingError> {
    Ok(match s {
      "gzip" => Some(Encoding::Gzip),
      "deflate" => Some(Encoding::Deflate),
      "br" => Some(Encoding::Brotli),
      "zstd" => Some(Encoding::Zstd),
      "identity" => Some(Encoding::Identity),
      "*" => None,
      _ => return Err(EncodingError::UnknownEncoding),
    })
  }
}
/// Select the encoding with the largest qval or the first with qval ~= 1
pub fn preferred(
  encodings: impl Iterator<Item = Result<(Option<Encoding>, f32), EncodingError>>,
) -> Result<Option<Encoding>, EncodingError> {
  let mut best: Option<Encoding> = None;
  let mut best_qval = 0.0f32;
  for entry in encodings {
    let (encoding, qval) = entry?;
    // A quality of (approximately) 1.0 wins immediately.
    if (qval - 1.0f32).abs() < 0.01 {
      return Ok(encoding);
    }
    if qval > best_qval {
      best = encoding;
      best_qval = qval;
    }
  }
  Ok(best)
}
/// Parse a set of HTTP headers into an iterator containing tuples of options containing encodings and their corresponding q-values.
///
/// Compatible with `http` crate for version 0.2.x.
pub fn encodings_iter_http_02(
  headers: &http_v02::HeaderMap,
) -> impl Iterator<Item = Result<(Option<Encoding>, f32), EncodingError>> + '_ {
  // Non-UTF-8 header values surface as InvalidEncoding at this stage.
  let iter = headers
    .get_all(http_v02::header::ACCEPT_ENCODING)
    .iter()
    .map(|hval| hval.to_str().map_err(|_| EncodingError::InvalidEncoding));
  encodings_iter_inner(iter)
}
/// Parse a set of HTTP headers into an iterator containing tuples of options containing encodings and their corresponding q-values.
///
/// Compatible with `http` crate for version 1.x.
pub fn encodings_iter_http_1(
  headers: &http::HeaderMap,
) -> impl Iterator<Item = Result<(Option<Encoding>, f32), EncodingError>> + '_ {
  // Non-UTF-8 header values surface as InvalidEncoding at this stage.
  let iter = headers
    .get_all(http::header::ACCEPT_ENCODING)
    .iter()
    .map(|hval| hval.to_str().map_err(|_| EncodingError::InvalidEncoding));
  encodings_iter_inner(iter)
}
/// Parse a set of HTTP headers into an iterator containing tuples of options containing encodings and their corresponding q-values.
fn encodings_iter_inner<'s>(
  headers: impl Iterator<Item = Result<&'s str, EncodingError>> + 's,
) -> impl Iterator<Item = Result<(Option<Encoding>, f32), EncodingError>> + 's {
  headers
    // Split each header value on commas: "gzip, br;q=0.9" -> ["gzip", "br;q=0.9"].
    .map_ok(|s| s.split(',').map(str::trim))
    .flatten_ok()
    .filter_map_ok(|v| {
      // No ";q=" parameter means the default quality of 1.0.
      let (e, q) = match v.split_once(";q=") {
        Some((e, q)) => (e, q),
        None => return Some(Ok((Encoding::parse(v).ok()?, 1.0f32))),
      };
      let encoding = Encoding::parse(e).ok()?; // ignore unknown encodings
      let qval = match q.parse() {
        Ok(f) if f > 1.0 => return Some(Err(EncodingError::InvalidEncoding)), // q-values over 1 are unacceptable,
        Ok(f) => f,
        Err(_) => return Some(Err(EncodingError::InvalidEncoding)),
      };
      Some(Ok((encoding, qval)))
    })
    // NOTE(review): items here are Result<Result<_, _>, _>; `.flatten()` goes
    // through Result's IntoIterator, which yields nothing for Err — so outer
    // (header-decode) errors are silently dropped rather than propagated.
    // Confirm this leniency is intended.
    .flatten()
}
#[cfg(test)]
mod tests {
  use http_v02::HeaderMap;
  use http_v02::HeaderValue;
  use http_v02::header::ACCEPT_ENCODING;
  use super::*;
  // Collect every parsed (encoding, q-value) pair from the headers.
  fn encodings(
    headers: &HeaderMap,
  ) -> Result<Vec<(Option<Encoding>, f32)>, EncodingError> {
    encodings_iter_http_02(headers).collect()
  }
  // Run the full negotiation, returning the preferred encoding (if any).
  fn parse(headers: &HeaderMap) -> Result<Option<Encoding>, EncodingError> {
    preferred(encodings_iter_http_02(headers))
  }
  #[test]
  fn single_encoding() {
    let mut headers = HeaderMap::new();
    headers.insert(ACCEPT_ENCODING, HeaderValue::from_str("gzip").unwrap());
    let encoding = parse(&headers).unwrap().unwrap();
    assert_eq!(encoding, Encoding::Gzip);
  }
  #[test]
  fn multiple_encodings() {
    // All default to q=1.0, so the first listed wins.
    let mut headers = HeaderMap::new();
    headers.insert(
      ACCEPT_ENCODING,
      HeaderValue::from_str("gzip, deflate, br").unwrap(),
    );
    let encoding = parse(&headers).unwrap().unwrap();
    assert_eq!(encoding, Encoding::Gzip);
  }
  #[test]
  fn single_encoding_with_qval() {
    let mut headers = HeaderMap::new();
    headers.insert(
      ACCEPT_ENCODING,
      HeaderValue::from_str("deflate;q=1.0").unwrap(),
    );
    let encoding = parse(&headers).unwrap().unwrap();
    assert_eq!(encoding, Encoding::Deflate);
  }
  #[test]
  fn multiple_encodings_with_qval_1() {
    let mut headers = HeaderMap::new();
    headers.insert(
      ACCEPT_ENCODING,
      HeaderValue::from_str("deflate, gzip;q=1.0, *;q=0.5").unwrap(),
    );
    let encoding = parse(&headers).unwrap().unwrap();
    assert_eq!(encoding, Encoding::Deflate);
  }
  #[test]
  fn multiple_encodings_with_qval_2() {
    let mut headers = HeaderMap::new();
    headers.insert(
      ACCEPT_ENCODING,
      HeaderValue::from_str("gzip;q=0.5, deflate;q=1.0, *;q=0.5").unwrap(),
    );
    let encoding = parse(&headers).unwrap().unwrap();
    assert_eq!(encoding, Encoding::Deflate);
  }
  #[test]
  fn multiple_encodings_with_qval_3() {
    // The wildcard wins with q=1.0 and maps to None (no specific encoding).
    let mut headers = HeaderMap::new();
    headers.insert(
      ACCEPT_ENCODING,
      HeaderValue::from_str("gzip;q=0.5, deflate;q=0.75, *;q=1.0").unwrap(),
    );
    let encoding = parse(&headers).unwrap();
    assert!(encoding.is_none());
  }
  #[test]
  fn list_encodings() {
    let mut headers = HeaderMap::new();
    headers.insert(
      ACCEPT_ENCODING,
      HeaderValue::from_str("zstd;q=1.0, deflate;q=0.8, br;q=0.9").unwrap(),
    );
    let encodings = encodings(&headers).unwrap();
    assert_eq!(encodings[0], (Some(Encoding::Zstd), 1.0));
    assert_eq!(encodings[1], (Some(Encoding::Deflate), 0.8));
    assert_eq!(encodings[2], (Some(Encoding::Brotli), 0.9));
  }
  #[test]
  fn list_encodings_ignore_unknown() {
    // Unknown schemes are skipped rather than reported as errors.
    let mut headers = HeaderMap::new();
    headers.insert(
      ACCEPT_ENCODING,
      HeaderValue::from_str("zstd;q=1.0, unknown;q=0.8, br;q=0.9").unwrap(),
    );
    let encodings = encodings(&headers).unwrap();
    assert_eq!(encodings[0], (Some(Encoding::Zstd), 1.0));
    assert_eq!(encodings[1], (Some(Encoding::Brotli), 0.9));
  }
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/ext/http/reader_stream.rs | ext/http/reader_stream.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::pin::Pin;
use std::sync::Arc;
use std::sync::atomic::AtomicBool;
use std::sync::atomic::Ordering;
use std::task::Context;
use std::task::Poll;
use bytes::Bytes;
use deno_core::futures::Stream;
use pin_project::pin_project;
use tokio::io::AsyncRead;
use tokio_util::io::ReaderStream;
/// [ExternallyAbortableByteStream] adapts a [tokio::AsyncRead] into a [Stream].
/// It is used to bridge between the HTTP response body resource, and
/// `hyper::Body`. The stream has the special property that it errors if the
/// underlying reader is closed before an explicit EOF is sent (in the form of
/// setting the `shutdown` flag to true).
#[pin_project]
pub struct ExternallyAbortableReaderStream<R: AsyncRead> {
  // The reader adapted to a byte stream; accessed via pin projection.
  #[pin]
  inner: ReaderStream<R>,
  // Set to true through ShutdownHandle to mark EOF as intentional.
  done: Arc<AtomicBool>,
}
/// Handle used to flag a deliberate end-of-stream, so that a subsequent EOF
/// from the underlying reader is reported as clean completion rather than an
/// error.
pub struct ShutdownHandle(Arc<AtomicBool>);

impl ShutdownHandle {
  /// Mark the paired stream as intentionally finished.
  pub fn shutdown(&self) {
    let Self(done) = self;
    done.store(true, Ordering::SeqCst);
  }
}
impl<R: AsyncRead> ExternallyAbortableReaderStream<R> {
  /// Wraps `reader`, returning the stream together with the handle used to
  /// flag a clean end-of-stream.
  pub fn new(reader: R) -> (Self, ShutdownHandle) {
    let flag = Arc::new(AtomicBool::new(false));
    let stream = Self {
      inner: ReaderStream::new(reader),
      done: Arc::clone(&flag),
    };
    (stream, ShutdownHandle(flag))
  }
}
impl<R: AsyncRead> Stream for ExternallyAbortableReaderStream<R> {
  type Item = std::io::Result<Bytes>;
  fn poll_next(
    self: Pin<&mut Self>,
    cx: &mut Context<'_>,
  ) -> Poll<Option<Self::Item>> {
    let this = self.project();
    let val = std::task::ready!(this.inner.poll_next(cx));
    match val {
      // EOF after shutdown() was called: clean end of stream.
      None if this.done.load(Ordering::SeqCst) => Poll::Ready(None),
      // EOF without shutdown(): reader went away early; surface an error.
      None => Poll::Ready(Some(Err(std::io::Error::new(
        std::io::ErrorKind::UnexpectedEof,
        "stream reader has shut down",
      )))),
      // Data chunks and read errors pass straight through.
      Some(val) => Poll::Ready(Some(val)),
    }
  }
}
#[cfg(test)]
mod tests {
  use bytes::Bytes;
  use deno_core::futures::StreamExt;
  use tokio::io::AsyncWriteExt;
  use super::*;
  // shutdown() before the writer closes => stream terminates with None.
  #[tokio::test]
  async fn success() {
    let (a, b) = tokio::io::duplex(64 * 1024);
    let (reader, _) = tokio::io::split(a);
    let (_, mut writer) = tokio::io::split(b);
    let (mut stream, shutdown_handle) =
      ExternallyAbortableReaderStream::new(reader);
    writer.write_all(b"hello").await.unwrap();
    assert_eq!(stream.next().await.unwrap().unwrap(), Bytes::from("hello"));
    writer.write_all(b"world").await.unwrap();
    assert_eq!(stream.next().await.unwrap().unwrap(), Bytes::from("world"));
    shutdown_handle.shutdown();
    writer.shutdown().await.unwrap();
    drop(writer);
    assert!(stream.next().await.is_none());
  }
  // Writer dropped with no shutdown() => UnexpectedEof surfaces.
  #[tokio::test]
  async fn error() {
    let (a, b) = tokio::io::duplex(64 * 1024);
    let (reader, _) = tokio::io::split(a);
    let (_, mut writer) = tokio::io::split(b);
    let (mut stream, _shutdown_handle) =
      ExternallyAbortableReaderStream::new(reader);
    writer.write_all(b"hello").await.unwrap();
    assert_eq!(stream.next().await.unwrap().unwrap(), Bytes::from("hello"));
    drop(writer);
    assert_eq!(
      stream.next().await.unwrap().unwrap_err().kind(),
      std::io::ErrorKind::UnexpectedEof
    );
  }
  // Writer-side shutdown without the handle's shutdown() is still an error.
  #[tokio::test]
  async fn error2() {
    let (a, b) = tokio::io::duplex(64 * 1024);
    let (reader, _) = tokio::io::split(a);
    let (_, mut writer) = tokio::io::split(b);
    let (mut stream, _shutdown_handle) =
      ExternallyAbortableReaderStream::new(reader);
    writer.write_all(b"hello").await.unwrap();
    assert_eq!(stream.next().await.unwrap().unwrap(), Bytes::from("hello"));
    writer.shutdown().await.unwrap();
    drop(writer);
    assert_eq!(
      stream.next().await.unwrap().unwrap_err().kind(),
      std::io::ErrorKind::UnexpectedEof
    );
  }
  // Writes after writer shutdown fail, and the stream still ends cleanly.
  #[tokio::test]
  async fn write_after_shutdown() {
    let (a, b) = tokio::io::duplex(64 * 1024);
    let (reader, _) = tokio::io::split(a);
    let (_, mut writer) = tokio::io::split(b);
    let (mut stream, shutdown_handle) =
      ExternallyAbortableReaderStream::new(reader);
    writer.write_all(b"hello").await.unwrap();
    assert_eq!(stream.next().await.unwrap().unwrap(), Bytes::from("hello"));
    writer.write_all(b"world").await.unwrap();
    assert_eq!(stream.next().await.unwrap().unwrap(), Bytes::from("world"));
    shutdown_handle.shutdown();
    writer.shutdown().await.unwrap();
    assert!(writer.write_all(b"!").await.is_err());
    drop(writer);
    assert!(stream.next().await.is_none());
  }
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/ext/http/service.rs | ext/http/service.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::borrow::Cow;
use std::cell::Cell;
use std::cell::Ref;
use std::cell::RefCell;
use std::cell::RefMut;
use std::future::Future;
use std::mem::ManuallyDrop;
use std::pin::Pin;
use std::rc::Rc;
use std::sync::OnceLock;
use std::task::Context;
use std::task::Poll;
use std::task::Waker;
use std::task::ready;
use deno_core::BufView;
use deno_core::OpState;
use deno_core::ResourceId;
use deno_error::JsErrorBox;
use http::request::Parts;
use hyper::body::Body;
use hyper::body::Frame;
use hyper::body::Incoming;
use hyper::body::SizeHint;
use hyper::header::HeaderMap;
use hyper::upgrade::OnUpgrade;
use scopeguard::ScopeGuard;
use scopeguard::guard;
use tokio::sync::oneshot;
use crate::OtelInfo;
use crate::OtelInfoAttributes;
use crate::request_properties::HttpConnectionProperties;
use crate::response_body::ResponseBytesInner;
use crate::response_body::ResponseStreamResult;
/// An incoming hyper request whose body may still be streaming.
pub type Request = hyper::Request<Incoming>;
/// An outgoing hyper response whose body is backed by an `HttpRecordResponse`.
pub type Response = hyper::Response<HttpRecordResponse>;
/// Live count of `HttpRecord` allocations; only exists under the tracing
/// feature.
#[cfg(feature = "__http_tracing")]
pub static RECORD_COUNT: std::sync::atomic::AtomicUsize =
  std::sync::atomic::AtomicUsize::new(0);
/// Emits a global trace line (record count only) when the `__http_tracing`
/// feature is enabled; expands to nothing otherwise.
macro_rules! http_general_trace {
  ($($args:expr),*) => {
    #[cfg(feature = "__http_tracing")]
    {
      let count = $crate::service::RECORD_COUNT
        .load(std::sync::atomic::Ordering::SeqCst);
      println!(
        "HTTP [+{count}]: {}",
        format!($($args),*),
      );
    }
  };
}
/// Emits a per-record trace line (record pointer and Rc strong count) when
/// the `__http_tracing` feature is enabled; expands to nothing otherwise.
macro_rules! http_trace {
  ($record:expr $(, $args:expr)*) => {
    #[cfg(feature = "__http_tracing")]
    {
      let count = $crate::service::RECORD_COUNT
        .load(std::sync::atomic::Ordering::SeqCst);
      println!(
        "HTTP [+{count}] id={:p} strong={}: {}",
        $record,
        std::rc::Rc::strong_count(&$record),
        format!($($args),*),
      );
    }
  };
}
pub(crate) use http_general_trace;
pub(crate) use http_trace;
pub(crate) struct HttpServerStateInner {
  // Recycled records paired with their (cleared) response header maps.
  pool: Vec<(Rc<HttpRecord>, HeaderMap)>,
}
/// A signalling version of `Rc` that allows one to poll for when all other references
/// to the `Rc` have been dropped.
#[repr(transparent)]
pub(crate) struct SignallingRc<T>(Rc<(T, Cell<Option<Waker>>)>);
impl<T> SignallingRc<T> {
  #[inline]
  pub fn new(t: T) -> Self {
    Self(Rc::new((t, Default::default())))
  }
  #[inline]
  pub fn strong_count(&self) -> usize {
    Rc::strong_count(&self.0)
  }
  /// Resolves when this is the only remaining reference.
  #[inline]
  pub fn poll_complete(&self, cx: &mut Context<'_>) -> Poll<()> {
    if Rc::strong_count(&self.0) == 1 {
      Poll::Ready(())
    } else {
      // Park the waker; the Drop impl wakes it when the second-to-last
      // handle goes away.
      self.0.1.set(Some(cx.waker().clone()));
      Poll::Pending
    }
  }
}
impl<T> Clone for SignallingRc<T> {
  #[inline]
  fn clone(&self) -> Self {
    Self(self.0.clone())
  }
}
impl<T> Drop for SignallingRc<T> {
  #[inline]
  fn drop(&mut self) {
    // Trigger the waker iff the refcount is about to become 1.
    // (The count is still 2 here because this handle has not yet released
    // its own reference.)
    if Rc::strong_count(&self.0) == 2
      && let Some(waker) = self.0.1.take()
    {
      waker.wake();
    }
  }
}
impl<T> std::ops::Deref for SignallingRc<T> {
  type Target = T;
  #[inline]
  fn deref(&self) -> &Self::Target {
    &self.0.0
  }
}
/// Shared per-server state (currently the record recycling pool), wrapped in
/// a `SignallingRc` so completion can be polled when all handles drop.
pub(crate) struct HttpServerState(RefCell<HttpServerStateInner>);
impl HttpServerState {
  pub fn new() -> SignallingRc<Self> {
    SignallingRc::new(Self(RefCell::new(HttpServerStateInner {
      pool: Vec::new(),
    })))
  }
}
impl std::ops::Deref for HttpServerState {
  type Target = RefCell<HttpServerStateInner>;
  fn deref(&self) -> &Self::Target {
    &self.0
  }
}
/// Where the request body currently lives: still the raw hyper body, or
/// replaced by a resource-table entry that JS reads from.
enum RequestBodyState {
  Incoming(Incoming),
  Resource(#[allow(dead_code)] HttpRequestBodyAutocloser),
}
impl From<Incoming> for RequestBodyState {
  fn from(value: Incoming) -> Self {
    RequestBodyState::Incoming(value)
  }
}
/// Ensures that the request body closes itself when no longer needed.
pub struct HttpRequestBodyAutocloser(ResourceId, Rc<RefCell<OpState>>);
impl HttpRequestBodyAutocloser {
  pub fn new(res: ResourceId, op_state: Rc<RefCell<OpState>>) -> Self {
    Self(res, op_state)
  }
}
impl Drop for HttpRequestBodyAutocloser {
  fn drop(&mut self) {
    // Best-effort: the resource may already have been removed elsewhere.
    if let Ok(res) = self.1.borrow_mut().resource_table.take_any(self.0) {
      res.close();
    }
  }
}
/// Rejects structurally invalid requests: a "*" request-target is only valid
/// for OPTIONS, an empty path is only valid for CONNECT, and CONNECT must
/// carry an authority.
fn validate_request(req: &Request) -> bool {
  let method = req.method();
  if req.uri() == "*" && method != http::Method::OPTIONS {
    return false;
  }
  if req.uri() != "*"
    && req.uri().path().is_empty()
    && method != http::Method::CONNECT
  {
    return false;
  }
  if method == http::Method::CONNECT && req.uri().authority().is_none() {
    return false;
  }
  true
}
/// Per-request entry point wired into hyper via `service_fn`: forwards the
/// request to JavaScript over `tx` and resolves once JS produced a response.
pub(crate) async fn handle_request(
  request: Request,
  request_info: HttpConnectionProperties,
  server_state: SignallingRc<HttpServerState>, // Keep server alive for duration of this future.
  tx: tokio::sync::mpsc::Sender<Rc<HttpRecord>>,
  legacy_abort: bool,
) -> Result<Response, hyper_v014::Error> {
  // Structurally invalid requests are answered 400 without involving JS.
  if !validate_request(&request) {
    let mut response = Response::new(HttpRecordResponse(None));
    *response.version_mut() = request.version();
    *response.status_mut() = http::StatusCode::BAD_REQUEST;
    return Ok(response);
  }
  // Collect OpenTelemetry request info only when metrics are enabled.
  let otel_info = if let Some(otel) = deno_telemetry::OTEL_GLOBALS
    .get()
    .filter(|o| o.has_metrics())
  {
    let instant = std::time::Instant::now();
    let size_hint = request.size_hint();
    Some(OtelInfo::new(
      otel,
      instant,
      size_hint.upper().unwrap_or(size_hint.lower()),
      OtelInfoAttributes {
        http_request_method: OtelInfoAttributes::method(request.method()),
        url_scheme: request
          .uri()
          .scheme_str()
          .map(|s| Cow::Owned(s.to_string()))
          .unwrap_or_else(|| Cow::Borrowed("http")),
        network_protocol_version: OtelInfoAttributes::version(
          request.version(),
        ),
        server_address: request.uri().host().map(|host| host.to_string()),
        server_port: request.uri().port_u16().map(|port| port as i64),
        error_type: Default::default(),
        http_response_status_code: Default::default(),
      },
    ))
  } else {
    None
  };
  // If the underlying TCP connection is closed, this future will be dropped
  // and execution could stop at any await point.
  // The HttpRecord must live until JavaScript is done processing so is wrapped
  // in an Rc. The guard ensures unneeded resources are freed at cancellation.
  let guarded_record = guard(
    HttpRecord::new(
      request,
      request_info,
      server_state,
      otel_info,
      legacy_abort,
    ),
    HttpRecord::cancel,
  );
  // Clone HttpRecord and send to JavaScript for processing.
  // Safe to unwrap as channel receiver is never closed.
  tx.send(guarded_record.clone()).await.unwrap();
  // Wait for JavaScript handler to return request.
  http_trace!(*guarded_record, "handle_request response_ready.await");
  guarded_record.response_ready().await;
  // Defuse the guard. Must not await after this point.
  let record = ScopeGuard::into_inner(guarded_record);
  http_trace!(record, "handle_request complete");
  let response = record.into_response();
  Ok(response)
}
/// Returned by `HttpRecord::upgrade` when the hyper `OnUpgrade` extension is
/// missing (already taken or never present for this request).
#[derive(Debug, thiserror::Error)]
#[error("upgrade unavailable")]
pub struct UpgradeUnavailableError;
/// Mutable state for a single in-flight request/response pair.
struct HttpRecordInner {
  server_state: SignallingRc<HttpServerState>,
  // Fires when the connection is cancelled (see `on_cancel` / `cancel`).
  closed_channel: Option<oneshot::Sender<()>>,
  request_info: HttpConnectionProperties,
  request_parts: http::request::Parts,
  request_body: Option<RequestBodyState>,
  response_parts: Option<http::response::Parts>,
  // True once JS produced a response (`complete`).
  response_ready: bool,
  response_waker: Option<Waker>,
  response_body: ResponseBytesInner,
  // True once the response body has fully streamed out (`finish`).
  response_body_finished: bool,
  response_body_waker: Option<Waker>,
  trailers: Option<HeaderMap>,
  // True once the hyper-side future was dropped (connection closed).
  been_dropped: bool,
  finished: bool,
  // True while JS still owes a call to `op_http_close_after_finish`.
  needs_close_after_finish: bool,
  legacy_abort: bool,
  otel_info: Option<OtelInfo>,
  // Stashed `x-deno-client-address` value, when proxy headers are trusted.
  client_addr: Option<http::HeaderValue>,
}
/// One request/response record. `None` inside means the record was recycled.
pub struct HttpRecord(RefCell<Option<HttpRecordInner>>);
#[cfg(feature = "__http_tracing")]
impl Drop for HttpRecord {
  fn drop(&mut self) {
    // fetch_sub returns the previous value, so checked_sub(1) panics if the
    // live-record counter would have gone negative.
    RECORD_COUNT
      .fetch_sub(1, std::sync::atomic::Ordering::SeqCst)
      .checked_sub(1)
      .expect("Count went below zero");
    http_general_trace!("HttpRecord::drop");
  }
}
/// Returns true when `DENO_TRUST_PROXY_HEADERS=1` was set in the environment.
/// The variable is read once, cached for the process lifetime, and removed
/// from the environment (presumably so user code cannot observe it — confirm).
fn trust_proxy_headers() -> bool {
  static TRUST_PROXY_HEADERS: OnceLock<bool> = OnceLock::new();
  static VAR_NAME: &str = "DENO_TRUST_PROXY_HEADERS";
  *TRUST_PROXY_HEADERS.get_or_init(|| {
    if let Some(v) = std::env::var_os(VAR_NAME) {
      #[allow(clippy::undocumented_unsafe_blocks)]
      unsafe {
        std::env::remove_var(VAR_NAME)
      };
      v == "1"
    } else {
      false
    }
  })
}
impl HttpRecord {
  /// Builds (or recycles from the server pool) a record for one incoming
  /// request, initializing all per-request state.
  fn new(
    request: Request,
    request_info: HttpConnectionProperties,
    server_state: SignallingRc<HttpServerState>,
    otel_info: Option<OtelInfo>,
    legacy_abort: bool,
  ) -> Rc<Self> {
    let (mut request_parts, request_body) = request.into_parts();
    // When proxy headers are trusted, strip x-deno-client-address from the
    // request and stash its value; otherwise leave the headers untouched.
    let client_addr = if trust_proxy_headers() {
      request_parts.headers.remove("x-deno-client-address")
    } else {
      None
    };
    let request_body = Some(request_body.into());
    let (mut response_parts, _) = http::Response::new(()).into_parts();
    // Prefer reusing a pooled record (and its cleared header map).
    let record = match server_state.borrow_mut().pool.pop() {
      Some((record, headers)) => {
        response_parts.headers = headers;
        http_trace!(record, "HttpRecord::reuse");
        record
      }
      _ => {
        #[cfg(feature = "__http_tracing")]
        {
          RECORD_COUNT.fetch_add(1, std::sync::atomic::Ordering::SeqCst);
        }
        #[allow(clippy::let_and_return)]
        let record = Rc::new(Self(RefCell::new(None)));
        http_trace!(record, "HttpRecord::new");
        record
      }
    };
    *record.0.borrow_mut() = Some(HttpRecordInner {
      server_state,
      request_info,
      request_parts,
      request_body,
      response_parts: Some(response_parts),
      response_ready: false,
      response_waker: None,
      response_body: ResponseBytesInner::Empty,
      response_body_finished: false,
      response_body_waker: None,
      trailers: None,
      closed_channel: None,
      been_dropped: false,
      finished: false,
      legacy_abort,
      needs_close_after_finish: false,
      otel_info,
      client_addr,
    });
    record
  }
  /// Marks the response body as fully streamed, wakes any waiter on the
  /// body, and recycles the record unless JS still owes a
  /// `op_http_close_after_finish` call.
  fn finish(self: Rc<Self>) {
    http_trace!(self, "HttpRecord::finish");
    let mut inner = self.self_mut();
    inner.response_body_finished = true;
    let response_body_waker = inner.response_body_waker.take();
    let needs_close_after_finish = inner.needs_close_after_finish;
    // Release the borrow before waking/recycling to avoid re-entrancy panics.
    drop(inner);
    if let Some(waker) = response_body_waker {
      waker.wake();
    }
    if !needs_close_after_finish {
      self.recycle();
    }
  }
  /// Second half of the close handshake for resource-backed bodies:
  /// recycles the record once JS releases the external pointer.
  pub fn close_after_finish(self: Rc<Self>) {
    debug_assert!(self.self_ref().needs_close_after_finish);
    let mut inner = self.self_mut();
    inner.needs_close_after_finish = false;
    // NOTE(review): gate is on `finished`, which is not written anywhere in
    // this file's visible code — confirm who sets it.
    if !inner.finished {
      drop(inner);
      self.recycle();
    }
  }
  /// Read/write access to the close-after-finish flag.
  pub fn needs_close_after_finish(&self) -> RefMut<'_, bool> {
    RefMut::map(self.self_mut(), |inner| &mut inner.needs_close_after_finish)
  }
  /// Registers a channel whose send signals connection cancellation to JS.
  pub fn on_cancel(&self, sender: oneshot::Sender<()>) {
    self.self_mut().closed_channel = Some(sender);
  }
  /// Returns this record's allocations to the server-wide pool, or drops
  /// them if the pool is already at its target size. Caller must hold the
  /// last strong reference.
  fn recycle(self: Rc<Self>) {
    assert!(
      Rc::strong_count(&self) == 1,
      "HTTP state error: Expected to be last strong reference"
    );
    let HttpRecordInner {
      server_state,
      request_parts: Parts { mut headers, .. },
      ..
    } = self.0.borrow_mut().take().unwrap();
    let inflight = server_state.strong_count();
    http_trace!(self, "HttpRecord::recycle inflight={}", inflight);
    // Keep a buffer of allocations on hand to be reused by incoming requests.
    // Estimated target size is 16 + 1/8 the number of inflight requests.
    let target = 16 + (inflight >> 3);
    let pool = &mut server_state.borrow_mut().pool;
    if target > pool.len() {
      headers.clear();
      pool.push((self, headers));
    } else if target < pool.len() - 8 {
      // Shrink only when 8+ over target (hysteresis) to avoid churn.
      pool.truncate(target);
    }
  }
  // Borrow the inner state; panics if the record was already recycled.
  fn self_ref(&self) -> Ref<'_, HttpRecordInner> {
    Ref::map(self.0.borrow(), |option| option.as_ref().unwrap())
  }
  // Mutably borrow the inner state; panics if the record was already recycled.
  fn self_mut(&self) -> RefMut<'_, HttpRecordInner> {
    RefMut::map(self.0.borrow_mut(), |option| option.as_mut().unwrap())
  }
  /// Perform the Hyper upgrade on this record.
  ///
  /// Errors if the `OnUpgrade` extension is absent (already taken or never
  /// present on this request).
  pub fn upgrade(&self) -> Result<OnUpgrade, UpgradeUnavailableError> {
    // Manually perform the upgrade. We're peeking into hyper's underlying machinery here a bit
    self
      .self_mut()
      .request_parts
      .extensions
      .remove::<OnUpgrade>()
      .ok_or(UpgradeUnavailableError)
  }
/// Take the Hyper body from this record.
pub fn take_request_body(&self) -> Option<Incoming> {
let body_holder = &mut self.self_mut().request_body;
let body = body_holder.take();
match body {
Some(RequestBodyState::Incoming(body)) => Some(body),
x => {
*body_holder = x;
None
}
}
}
  /// Replace the request body with a resource ID and the OpState we'll need to shut it down.
  /// We cannot keep just the resource itself, as JS code might be reading from the resource ID
  /// to generate the response data (requiring us to keep it in the resource table).
  pub fn put_resource(&self, res: HttpRequestBodyAutocloser) {
    self.self_mut().request_body = Some(RequestBodyState::Resource(res));
  }
  /// Cleanup resources not needed after the future is dropped.
  fn cancel(self: Rc<Self>) {
    http_trace!(self, "HttpRecord::cancel");
    let mut inner = self.self_mut();
    if inner.response_ready {
      // Future dropped between wake() and async fn resuming.
      drop(inner);
      self.finish();
      return;
    }
    inner.been_dropped = true;
    // The request body might include actual resources.
    inner.request_body.take();
    // Signal JS over the on_cancel channel, unless the body already finished
    // and legacy abort semantics are disabled.
    if (inner.legacy_abort || !inner.response_body_finished)
      && let Some(closed_channel) = inner.closed_channel.take()
    {
      let _ = closed_channel.send(());
    }
  }
/// Complete this record, potentially expunging it if it is fully complete (ie: cancelled as well).
pub fn complete(self: Rc<Self>) {
  http_trace!(self, "HttpRecord::complete");
  let mut inner = self.self_mut();
  // Completing the same record twice is a state-machine bug.
  assert!(
    !inner.response_ready,
    "HTTP state error: Entry has already been completed"
  );
  if inner.been_dropped {
    // The future was already cancelled: nothing will poll this record
    // again, so tear it down now. Release the RefMut first — finish()
    // re-borrows the inner cell.
    drop(inner);
    self.finish();
    return;
  }
  inner.response_ready = true;
  // Wake the task parked in response_ready(); drop the borrow first since
  // the woken task will re-borrow the record.
  if let Some(waker) = inner.response_waker.take() {
    drop(inner);
    waker.wake();
  }
}
/// Swap the response body out of the record, leaving `Done` in its place.
fn take_response_body(&self) -> ResponseBytesInner {
  let mut inner = self.self_mut();
  let previous =
    std::mem::replace(&mut inner.response_body, ResponseBytesInner::Done);
  // Taking an already-completed body is a state-machine bug.
  debug_assert!(
    !matches!(previous, ResponseBytesInner::Done),
    "HTTP state error: response body already complete"
  );
  previous
}
/// Has the future for this record been dropped? ie, has the underlying TCP connection
/// been closed?
pub fn cancelled(&self) -> bool {
  let inner = self.self_ref();
  inner.been_dropped
}
/// Get a mutable reference to the response status and headers.
///
/// Panics if the response parts were already taken by `into_response`.
pub fn response_parts(&self) -> RefMut<'_, http::response::Parts> {
  RefMut::map(self.self_mut(), |record| {
    record.response_parts.as_mut().unwrap()
  })
}
/// Get a mutable reference to the trailers.
pub fn trailers(&self) -> RefMut<'_, Option<HeaderMap>> {
  RefMut::map(self.self_mut(), |record| &mut record.trailers)
}
/// Install the response body. May only be called once, while the body
/// slot is still in its initial `Empty` state.
pub fn set_response_body(&self, response_body: ResponseBytesInner) {
  let mut inner = self.self_mut();
  debug_assert!(matches!(inner.response_body, ResponseBytesInner::Empty));
  inner.response_body = response_body;
}
/// Take the response.
///
/// Moves the response head (status + headers) out of the record and wraps
/// `self` in a `HttpRecordResponse` body. The record is held in a
/// `ManuallyDrop` so the body's `Drop` impl controls exactly when
/// `finish()` runs.
fn into_response(self: Rc<Self>) -> Response {
  let parts = self.self_mut().response_parts.take().unwrap();
  let body = HttpRecordResponse(Some(ManuallyDrop::new(self)));
  Response::from_parts(parts, body)
}
/// Get a reference to the connection properties.
pub fn request_info(&self) -> Ref<'_, HttpConnectionProperties> {
  Ref::map(self.self_ref(), |record| &record.request_info)
}
/// Get a reference to the request parts.
pub fn request_parts(&self) -> Ref<'_, Parts> {
  Ref::map(self.self_ref(), |record| &record.request_parts)
}
/// Resolves when response head is ready.
///
/// Hand-rolled future that parks its waker inside the record itself; the
/// waker is fired by `complete()`.
fn response_ready(&self) -> impl Future<Output = ()> + '_ {
  struct HttpRecordReady<'a>(&'a HttpRecord);
  impl Future for HttpRecordReady<'_> {
    type Output = ();
    fn poll(
      self: Pin<&mut Self>,
      cx: &mut Context<'_>,
    ) -> Poll<Self::Output> {
      let mut mut_self = self.0.self_mut();
      if mut_self.response_ready {
        // NOTE(review): the otel info is dropped once the head is ready —
        // presumably its metrics were already flushed via
        // otel_info_set_status/otel_info_set_error; confirm.
        mut_self.otel_info.take();
        return Poll::Ready(());
      }
      // Not ready yet: park this task until complete() wakes it.
      mut_self.response_waker = Some(cx.waker().clone());
      Poll::Pending
    }
  }
  HttpRecordReady(self)
}
/// Resolves when response body has finished streaming. Returns true if the
/// response completed.
pub fn response_body_finished(&self) -> impl Future<Output = bool> + '_ {
  struct HttpRecordFinished<'a>(&'a HttpRecord);
  impl Future for HttpRecordFinished<'_> {
    type Output = bool;
    fn poll(
      self: Pin<&mut Self>,
      cx: &mut Context<'_>,
    ) -> Poll<Self::Output> {
      let mut mut_self = self.0.self_mut();
      if mut_self.response_body_finished {
        // If we sent the response body and the trailers, this body completed successfully
        return Poll::Ready(
          mut_self.response_body.is_complete() && mut_self.trailers.is_none(),
        );
      }
      // Park until the body-finished flag flips.
      mut_self.response_body_waker = Some(cx.waker().clone());
      Poll::Pending
    }
  }
  HttpRecordFinished(self)
}
/// Record the HTTP response status code on the OpenTelemetry info, if
/// telemetry is enabled for this request; otherwise a no-op.
pub fn otel_info_set_status(&self, status: u16) {
  let mut inner = self.self_mut();
  let Some(info) = inner.otel_info.as_mut() else {
    return;
  };
  info.attributes.http_response_status_code = Some(status as _);
  info.handle_duration_and_request_size();
}
/// Record an error type on the OpenTelemetry info, if telemetry is
/// enabled for this request; otherwise a no-op.
pub fn otel_info_set_error(&self, error: &'static str) {
  let mut inner = self.self_mut();
  let Some(info) = inner.otel_info.as_mut() else {
    return;
  };
  info.attributes.error_type = Some(error);
  info.handle_duration_and_request_size();
}
/// Get a reference to the client address header value, if any.
pub fn client_addr(&self) -> Ref<'_, Option<http::HeaderValue>> {
  Ref::map(self.self_ref(), |record| &record.client_addr)
}
}
// `None` variant used when no body is present, for example
// when we want to return a synthetic 400 for invalid requests.
//
// The record is wrapped in `ManuallyDrop`; the `Drop` impl below takes it
// out exactly once to run `finish()`.
#[repr(transparent)]
pub struct HttpRecordResponse(Option<ManuallyDrop<Rc<HttpRecord>>>);
impl Body for HttpRecordResponse {
  type Data = BufView;
  type Error = JsErrorBox;
  fn poll_frame(
    self: Pin<&mut Self>,
    cx: &mut Context<'_>,
  ) -> Poll<Option<Result<Frame<Self::Data>, Self::Error>>> {
    use crate::response_body::PollFrame;
    // Body-less response (e.g. synthetic 400): end the stream immediately.
    let Some(record) = &self.0 else {
      return Poll::Ready(None);
    };
    let res = loop {
      let mut inner = record.self_mut();
      let res = match &mut inner.response_body {
        ResponseBytesInner::Done | ResponseBytesInner::Empty => {
          // Data is exhausted; any pending trailers form the final frame.
          if let Some(trailers) = inner.trailers.take() {
            return Poll::Ready(Some(Ok(Frame::trailers(trailers))));
          }
          // Polling past end-of-stream violates the Body contract
          // (is_end_stream() reports true in this state).
          unreachable!()
        }
        ResponseBytesInner::Bytes(..) => {
          // Single-buffer body: emit it as one frame. Release the RefMut
          // first — take_response_body() re-borrows the cell.
          drop(inner);
          let ResponseBytesInner::Bytes(data) = record.take_response_body()
          else {
            unreachable!();
          };
          return Poll::Ready(Some(Ok(Frame::data(data))));
        }
        // Streaming bodies: delegate to the (possibly compressing) stream,
        // propagating Pending via ready!.
        ResponseBytesInner::UncompressedStream(stm) => {
          ready!(Pin::new(stm).poll_frame(cx))
        }
        ResponseBytesInner::GZipStream(stm) => {
          ready!(Pin::new(stm.as_mut()).poll_frame(cx))
        }
        ResponseBytesInner::BrotliStream(stm) => {
          ready!(Pin::new(stm.as_mut()).poll_frame(cx))
        }
      };
      // This is where we retry the NoData response
      if matches!(res, ResponseStreamResult::NoData) {
        continue;
      }
      break res;
    };
    if matches!(res, ResponseStreamResult::EndOfStream) {
      // Stream drained: emit trailers if present, then mark the body Done.
      if let Some(trailers) = record.self_mut().trailers.take() {
        return Poll::Ready(Some(Ok(Frame::trailers(trailers))));
      }
      record.take_response_body();
    }
    // Account emitted bytes toward the OpenTelemetry response size, when
    // telemetry is active.
    if let ResponseStreamResult::NonEmptyBuf(buf) = &res {
      let mut http = record.0.borrow_mut();
      if let Some(otel_info) = &mut http.as_mut().unwrap().otel_info
        && let Some(response_size) = &mut otel_info.response_size
      {
        *response_size += buf.len() as u64;
      }
    }
    Poll::Ready(res.into())
  }
  fn is_end_stream(&self) -> bool {
    // A `None` body is always at end-of-stream.
    let Some(record) = &self.0 else {
      return true;
    };
    let inner = record.self_ref();
    matches!(
      inner.response_body,
      ResponseBytesInner::Done | ResponseBytesInner::Empty
    ) && inner.trailers.is_none()
  }
  fn size_hint(&self) -> SizeHint {
    let Some(record) = &self.0 else {
      return SizeHint::with_exact(0);
    };
    // The size hint currently only used in the case where it is exact bounds in hyper, but we'll pass it through
    // anyways just in case hyper needs it.
    record.self_ref().response_body.size_hint()
  }
}
impl Drop for HttpRecordResponse {
  fn drop(&mut self) {
    // A `None` body never owned a record; nothing to finish.
    let Some(record) = &mut self.0 else {
      return;
    };
    // SAFETY: this ManuallyDrop is not used again.
    let record = unsafe { ManuallyDrop::take(record) };
    http_trace!(record, "HttpRecordResponse::drop");
    record.finish();
  }
}
#[cfg(test)]
mod tests {
  use std::error::Error as StdError;
  use bytes::Buf;
  use deno_net::raw::NetworkStreamType;
  use hyper::body::Body;
  use hyper::service::HttpService;
  use hyper::service::service_fn;
  use hyper_util::rt::TokioIo;
  use super::*;
  use crate::response_body::Compression;
  use crate::response_body::ResponseBytesInner;
  /// Execute client request on service and concurrently map the response.
  ///
  /// Wires an HTTP/1 client and server together over an in-memory duplex
  /// pipe and drives the request, both connection futures, and the
  /// response-mapping future to completion.
  async fn serve_request<B, S, T, F>(
    req: http::Request<B>,
    service: S,
    map_response: impl FnOnce(hyper::Response<Incoming>) -> F,
  ) -> hyper::Result<T>
  where
    B: Body + Send + 'static, // Send bound due to DuplexStream
    B::Data: Send,
    B::Error: Into<Box<dyn StdError + Send + Sync>>,
    S: HttpService<Incoming>,
    S::Error: Into<Box<dyn StdError + Send + Sync>>,
    S::ResBody: 'static,
    <S::ResBody as Body>::Error: Into<Box<dyn StdError + Send + Sync>>,
    F: std::future::Future<Output = hyper::Result<T>>,
  {
    use hyper::client::conn::http1::handshake;
    use hyper::server::conn::http1::Builder;
    let (stream_client, stream_server) = tokio::io::duplex(16 * 1024);
    let conn_server =
      Builder::new().serve_connection(TokioIo::new(stream_server), service);
    let (mut sender, conn_client) =
      handshake(TokioIo::new(stream_client)).await?;
    // All three futures must be polled concurrently: the request cannot
    // complete unless both ends of the connection make progress.
    let (res, _, _) = tokio::try_join!(
      async move {
        let res = sender.send_request(req).await?;
        map_response(res).await
      },
      conn_server,
      conn_client,
    )?;
    Ok(res)
  }
  #[tokio::test]
  async fn test_handle_request() -> Result<(), deno_core::error::AnyError> {
    let (tx, mut rx) = tokio::sync::mpsc::channel(10);
    let server_state = HttpServerState::new();
    let server_state_check = server_state.clone();
    let request_info = HttpConnectionProperties {
      peer_address: "".into(),
      peer_port: None,
      local_port: None,
      stream_type: NetworkStreamType::Tcp,
    };
    let svc = service_fn(move |req: hyper::Request<Incoming>| {
      handle_request(
        req,
        request_info.clone(),
        server_state.clone(),
        tx.clone(),
        true,
      )
    });
    let client_req = http::Request::builder().uri("/").body("".to_string())?;
    // Response produced by concurrent tasks
    tokio::try_join!(
      async move {
        // JavaScript handler produces response
        let record = rx.recv().await.unwrap();
        record.set_response_body(ResponseBytesInner::from_vec(
          Compression::None,
          b"hello world".to_vec(),
        ));
        record.complete();
        Ok(())
      },
      // Server connection executes service
      async move {
        serve_request(client_req, svc, |res| async {
          // Client reads the response
          use http_body_util::BodyExt;
          assert_eq!(res.status(), 200);
          let body = res.collect().await?.to_bytes();
          assert_eq!(body.chunk(), b"hello world");
          Ok(())
        })
        .await
      },
    )?;
    // Only our cloned check handle should still hold the server state:
    // no per-request record may be keeping it alive after completion.
    assert_eq!(server_state_check.strong_count(), 1);
    Ok(())
  }
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/ext/http/compressible.rs | ext/http/compressible.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::str::FromStr;
use phf::phf_set;
// Data obtained from https://github.com/jshttp/mime-db/blob/fa5e4ef3cc8907ec3c5ec5b85af0c63d7059a5cd/db.json
// Important! Keep this list sorted alphabetically.
static CONTENT_TYPES: phf::Set<&'static [u8]> = phf_set! {
b"application/3gpdash-qoe-report+xml",
b"application/3gpp-ims+xml",
b"application/3gpphal+json",
b"application/3gpphalforms+json",
b"application/activity+json",
b"application/alto-costmap+json",
b"application/alto-costmapfilter+json",
b"application/alto-directory+json",
b"application/alto-endpointcost+json",
b"application/alto-endpointcostparams+json",
b"application/alto-endpointprop+json",
b"application/alto-endpointpropparams+json",
b"application/alto-error+json",
b"application/alto-networkmap+json",
b"application/alto-networkmapfilter+json",
b"application/alto-updatestreamcontrol+json",
b"application/alto-updatestreamparams+json",
b"application/atom+xml",
b"application/atomcat+xml",
b"application/atomdeleted+xml",
b"application/atomsvc+xml",
b"application/atsc-dwd+xml",
b"application/atsc-held+xml",
b"application/atsc-rdt+json",
b"application/atsc-rsat+xml",
b"application/auth-policy+xml",
b"application/beep+xml",
b"application/calendar+json",
b"application/calendar+xml",
b"application/captive+json",
b"application/ccmp+xml",
b"application/ccxml+xml",
b"application/cdfx+xml",
b"application/cea-2018+xml",
b"application/cellml+xml",
b"application/clue+xml",
b"application/clue_info+xml",
b"application/cnrp+xml",
b"application/coap-group+json",
b"application/conference-info+xml",
b"application/cpl+xml",
b"application/csta+xml",
b"application/cstadata+xml",
b"application/csvm+json",
b"application/dart",
b"application/dash+xml",
b"application/davmount+xml",
b"application/dialog-info+xml",
b"application/dicom+json",
b"application/dicom+xml",
b"application/dns+json",
b"application/docbook+xml",
b"application/dskpp+xml",
b"application/dssc+xml",
b"application/ecmascript",
b"application/elm+json",
b"application/elm+xml",
b"application/emergencycalldata.cap+xml",
b"application/emergencycalldata.comment+xml",
b"application/emergencycalldata.control+xml",
b"application/emergencycalldata.deviceinfo+xml",
b"application/emergencycalldata.providerinfo+xml",
b"application/emergencycalldata.serviceinfo+xml",
b"application/emergencycalldata.subscriberinfo+xml",
b"application/emergencycalldata.veds+xml",
b"application/emma+xml",
b"application/emotionml+xml",
b"application/epp+xml",
b"application/expect-ct-report+json",
b"application/fdt+xml",
b"application/fhir+json",
b"application/fhir+xml",
b"application/fido.trusted-apps+json",
b"application/framework-attributes+xml",
b"application/geo+json",
b"application/geoxacml+xml",
b"application/gml+xml",
b"application/gpx+xml",
b"application/held+xml",
b"application/ibe-key-request+xml",
b"application/ibe-pkg-reply+xml",
b"application/im-iscomposing+xml",
b"application/inkml+xml",
b"application/its+xml",
b"application/javascript",
b"application/jf2feed+json",
b"application/jose+json",
b"application/jrd+json",
b"application/jscalendar+json",
b"application/json",
b"application/json-patch+json",
b"application/jsonml+json",
b"application/jwk+json",
b"application/jwk-set+json",
b"application/kpml-request+xml",
b"application/kpml-response+xml",
b"application/ld+json",
b"application/lgr+xml",
b"application/load-control+xml",
b"application/lost+xml",
b"application/lostsync+xml",
b"application/mads+xml",
b"application/manifest+json",
b"application/marcxml+xml",
b"application/mathml+xml",
b"application/mathml-content+xml",
b"application/mathml-presentation+xml",
b"application/mbms-associated-procedure-description+xml",
b"application/mbms-deregister+xml",
b"application/mbms-envelope+xml",
b"application/mbms-msk+xml",
b"application/mbms-msk-response+xml",
b"application/mbms-protection-description+xml",
b"application/mbms-reception-report+xml",
b"application/mbms-register+xml",
b"application/mbms-register-response+xml",
b"application/mbms-schedule+xml",
b"application/mbms-user-service-description+xml",
b"application/media-policy-dataset+xml",
b"application/media_control+xml",
b"application/mediaservercontrol+xml",
b"application/merge-patch+json",
b"application/metalink+xml",
b"application/metalink4+xml",
b"application/mets+xml",
b"application/mmt-aei+xml",
b"application/mmt-usd+xml",
b"application/mods+xml",
b"application/mrb-consumer+xml",
b"application/mrb-publish+xml",
b"application/msc-ivr+xml",
b"application/msc-mixer+xml",
b"application/mud+json",
b"application/nlsml+xml",
b"application/odm+xml",
b"application/oebps-package+xml",
b"application/omdoc+xml",
b"application/opc-nodeset+xml",
b"application/p2p-overlay+xml",
b"application/patch-ops-error+xml",
b"application/pidf+xml",
b"application/pidf-diff+xml",
b"application/pls+xml",
b"application/poc-settings+xml",
b"application/postscript",
b"application/ppsp-tracker+json",
b"application/problem+json",
b"application/problem+xml",
b"application/provenance+xml",
b"application/prs.xsf+xml",
b"application/pskc+xml",
b"application/pvd+json",
b"application/raml+yaml",
b"application/rdap+json",
b"application/rdf+xml",
b"application/reginfo+xml",
b"application/reputon+json",
b"application/resource-lists+xml",
b"application/resource-lists-diff+xml",
b"application/rfc+xml",
b"application/rlmi+xml",
b"application/rls-services+xml",
b"application/route-apd+xml",
b"application/route-s-tsid+xml",
b"application/route-usd+xml",
b"application/rsd+xml",
b"application/rss+xml",
b"application/rtf",
b"application/samlassertion+xml",
b"application/samlmetadata+xml",
b"application/sarif+json",
b"application/sarif-external-properties+json",
b"application/sbml+xml",
b"application/scaip+xml",
b"application/scim+json",
b"application/senml+json",
b"application/senml+xml",
b"application/senml-etch+json",
b"application/sensml+json",
b"application/sensml+xml",
b"application/sep+xml",
b"application/shf+xml",
b"application/simple-filter+xml",
b"application/smil+xml",
b"application/soap+xml",
b"application/sparql-results+xml",
b"application/spirits-event+xml",
b"application/srgs+xml",
b"application/sru+xml",
b"application/ssdl+xml",
b"application/ssml+xml",
b"application/stix+json",
b"application/swid+xml",
b"application/tar",
b"application/taxii+json",
b"application/td+json",
b"application/tei+xml",
b"application/thraud+xml",
b"application/tlsrpt+json",
b"application/toml",
b"application/ttml+xml",
b"application/urc-grpsheet+xml",
b"application/urc-ressheet+xml",
b"application/urc-targetdesc+xml",
b"application/urc-uisocketdesc+xml",
b"application/vcard+json",
b"application/vcard+xml",
b"application/vnd.1000minds.decision-model+xml",
b"application/vnd.3gpp-prose+xml",
b"application/vnd.3gpp-prose-pc3ch+xml",
b"application/vnd.3gpp.access-transfer-events+xml",
b"application/vnd.3gpp.bsf+xml",
b"application/vnd.3gpp.gmop+xml",
b"application/vnd.3gpp.mcdata-affiliation-command+xml",
b"application/vnd.3gpp.mcdata-info+xml",
b"application/vnd.3gpp.mcdata-service-config+xml",
b"application/vnd.3gpp.mcdata-ue-config+xml",
b"application/vnd.3gpp.mcdata-user-profile+xml",
b"application/vnd.3gpp.mcptt-affiliation-command+xml",
b"application/vnd.3gpp.mcptt-floor-request+xml",
b"application/vnd.3gpp.mcptt-info+xml",
b"application/vnd.3gpp.mcptt-location-info+xml",
b"application/vnd.3gpp.mcptt-mbms-usage-info+xml",
b"application/vnd.3gpp.mcptt-service-config+xml",
b"application/vnd.3gpp.mcptt-signed+xml",
b"application/vnd.3gpp.mcptt-ue-config+xml",
b"application/vnd.3gpp.mcptt-ue-init-config+xml",
b"application/vnd.3gpp.mcptt-user-profile+xml",
b"application/vnd.3gpp.mcvideo-affiliation-command+xml",
b"application/vnd.3gpp.mcvideo-affiliation-info+xml",
b"application/vnd.3gpp.mcvideo-info+xml",
b"application/vnd.3gpp.mcvideo-location-info+xml",
b"application/vnd.3gpp.mcvideo-mbms-usage-info+xml",
b"application/vnd.3gpp.mcvideo-service-config+xml",
b"application/vnd.3gpp.mcvideo-transmission-request+xml",
b"application/vnd.3gpp.mcvideo-ue-config+xml",
b"application/vnd.3gpp.mcvideo-user-profile+xml",
b"application/vnd.3gpp.mid-call+xml",
b"application/vnd.3gpp.sms+xml",
b"application/vnd.3gpp.srvcc-ext+xml",
b"application/vnd.3gpp.srvcc-info+xml",
b"application/vnd.3gpp.state-and-event-info+xml",
b"application/vnd.3gpp.ussd+xml",
b"application/vnd.3gpp2.bcmcsinfo+xml",
b"application/vnd.adobe.xdp+xml",
b"application/vnd.amadeus+json",
b"application/vnd.amundsen.maze+xml",
b"application/vnd.api+json",
b"application/vnd.aplextor.warrp+json",
b"application/vnd.apothekende.reservation+json",
b"application/vnd.apple.installer+xml",
b"application/vnd.artisan+json",
b"application/vnd.avalon+json",
b"application/vnd.avistar+xml",
b"application/vnd.balsamiq.bmml+xml",
b"application/vnd.bbf.usp.msg+json",
b"application/vnd.bekitzur-stech+json",
b"application/vnd.biopax.rdf+xml",
b"application/vnd.byu.uapi+json",
b"application/vnd.capasystems-pg+json",
b"application/vnd.chemdraw+xml",
b"application/vnd.citationstyles.style+xml",
b"application/vnd.collection+json",
b"application/vnd.collection.doc+json",
b"application/vnd.collection.next+json",
b"application/vnd.coreos.ignition+json",
b"application/vnd.criticaltools.wbs+xml",
b"application/vnd.cryptii.pipe+json",
b"application/vnd.ctct.ws+xml",
b"application/vnd.cyan.dean.root+xml",
b"application/vnd.cyclonedx+json",
b"application/vnd.cyclonedx+xml",
b"application/vnd.dart",
b"application/vnd.datapackage+json",
b"application/vnd.dataresource+json",
b"application/vnd.dece.ttml+xml",
b"application/vnd.dm.delegation+xml",
b"application/vnd.document+json",
b"application/vnd.drive+json",
b"application/vnd.dvb.dvbisl+xml",
b"application/vnd.dvb.notif-aggregate-root+xml",
b"application/vnd.dvb.notif-container+xml",
b"application/vnd.dvb.notif-generic+xml",
b"application/vnd.dvb.notif-ia-msglist+xml",
b"application/vnd.dvb.notif-ia-registration-request+xml",
b"application/vnd.dvb.notif-ia-registration-response+xml",
b"application/vnd.dvb.notif-init+xml",
b"application/vnd.emclient.accessrequest+xml",
b"application/vnd.eprints.data+xml",
b"application/vnd.eszigno3+xml",
b"application/vnd.etsi.aoc+xml",
b"application/vnd.etsi.cug+xml",
b"application/vnd.etsi.iptvcommand+xml",
b"application/vnd.etsi.iptvdiscovery+xml",
b"application/vnd.etsi.iptvprofile+xml",
b"application/vnd.etsi.iptvsad-bc+xml",
b"application/vnd.etsi.iptvsad-cod+xml",
b"application/vnd.etsi.iptvsad-npvr+xml",
b"application/vnd.etsi.iptvservice+xml",
b"application/vnd.etsi.iptvsync+xml",
b"application/vnd.etsi.iptvueprofile+xml",
b"application/vnd.etsi.mcid+xml",
b"application/vnd.etsi.overload-control-policy-dataset+xml",
b"application/vnd.etsi.pstn+xml",
b"application/vnd.etsi.sci+xml",
b"application/vnd.etsi.simservs+xml",
b"application/vnd.etsi.tsl+xml",
b"application/vnd.fujifilm.fb.jfi+xml",
b"application/vnd.futoin+json",
b"application/vnd.gentics.grd+json",
b"application/vnd.geo+json",
b"application/vnd.geocube+xml",
b"application/vnd.google-earth.kml+xml",
b"application/vnd.gov.sk.e-form+xml",
b"application/vnd.gov.sk.xmldatacontainer+xml",
b"application/vnd.hal+json",
b"application/vnd.hal+xml",
b"application/vnd.handheld-entertainment+xml",
b"application/vnd.hc+json",
b"application/vnd.heroku+json",
b"application/vnd.hyper+json",
b"application/vnd.hyper-item+json",
b"application/vnd.hyperdrive+json",
b"application/vnd.ims.lis.v2.result+json",
b"application/vnd.ims.lti.v2.toolconsumerprofile+json",
b"application/vnd.ims.lti.v2.toolproxy+json",
b"application/vnd.ims.lti.v2.toolproxy.id+json",
b"application/vnd.ims.lti.v2.toolsettings+json",
b"application/vnd.ims.lti.v2.toolsettings.simple+json",
b"application/vnd.informedcontrol.rms+xml",
b"application/vnd.infotech.project+xml",
b"application/vnd.iptc.g2.catalogitem+xml",
b"application/vnd.iptc.g2.conceptitem+xml",
b"application/vnd.iptc.g2.knowledgeitem+xml",
b"application/vnd.iptc.g2.newsitem+xml",
b"application/vnd.iptc.g2.newsmessage+xml",
b"application/vnd.iptc.g2.packageitem+xml",
b"application/vnd.iptc.g2.planningitem+xml",
b"application/vnd.irepository.package+xml",
b"application/vnd.las.las+json",
b"application/vnd.las.las+xml",
b"application/vnd.leap+json",
b"application/vnd.liberty-request+xml",
b"application/vnd.llamagraphics.life-balance.exchange+xml",
b"application/vnd.marlin.drm.actiontoken+xml",
b"application/vnd.marlin.drm.conftoken+xml",
b"application/vnd.marlin.drm.license+xml",
b"application/vnd.mason+json",
b"application/vnd.micro+json",
b"application/vnd.miele+json",
b"application/vnd.mozilla.xul+xml",
b"application/vnd.ms-fontobject",
b"application/vnd.ms-office.activex+xml",
b"application/vnd.ms-opentype",
b"application/vnd.ms-playready.initiator+xml",
b"application/vnd.ms-printdevicecapabilities+xml",
b"application/vnd.ms-printing.printticket+xml",
b"application/vnd.ms-printschematicket+xml",
b"application/vnd.nearst.inv+json",
b"application/vnd.nokia.conml+xml",
b"application/vnd.nokia.iptv.config+xml",
b"application/vnd.nokia.landmark+xml",
b"application/vnd.nokia.landmarkcollection+xml",
b"application/vnd.nokia.n-gage.ac+xml",
b"application/vnd.nokia.pcd+xml",
b"application/vnd.oci.image.manifest.v1+json",
b"application/vnd.oftn.l10n+json",
b"application/vnd.oipf.contentaccessdownload+xml",
b"application/vnd.oipf.contentaccessstreaming+xml",
b"application/vnd.oipf.dae.svg+xml",
b"application/vnd.oipf.dae.xhtml+xml",
b"application/vnd.oipf.mippvcontrolmessage+xml",
b"application/vnd.oipf.spdiscovery+xml",
b"application/vnd.oipf.spdlist+xml",
b"application/vnd.oipf.ueprofile+xml",
b"application/vnd.oipf.userprofile+xml",
b"application/vnd.oma.bcast.associated-procedure-parameter+xml",
b"application/vnd.oma.bcast.drm-trigger+xml",
b"application/vnd.oma.bcast.imd+xml",
b"application/vnd.oma.bcast.notification+xml",
b"application/vnd.oma.bcast.sgdd+xml",
b"application/vnd.oma.bcast.smartcard-trigger+xml",
b"application/vnd.oma.bcast.sprov+xml",
b"application/vnd.oma.cab-address-book+xml",
b"application/vnd.oma.cab-feature-handler+xml",
b"application/vnd.oma.cab-pcc+xml",
b"application/vnd.oma.cab-subs-invite+xml",
b"application/vnd.oma.cab-user-prefs+xml",
b"application/vnd.oma.dd2+xml",
b"application/vnd.oma.drm.risd+xml",
b"application/vnd.oma.group-usage-list+xml",
b"application/vnd.oma.lwm2m+json",
b"application/vnd.oma.pal+xml",
b"application/vnd.oma.poc.detailed-progress-report+xml",
b"application/vnd.oma.poc.final-report+xml",
b"application/vnd.oma.poc.groups+xml",
b"application/vnd.oma.poc.invocation-descriptor+xml",
b"application/vnd.oma.poc.optimized-progress-report+xml",
b"application/vnd.oma.scidm.messages+xml",
b"application/vnd.oma.xcap-directory+xml",
b"application/vnd.omads-email+xml",
b"application/vnd.omads-file+xml",
b"application/vnd.omads-folder+xml",
b"application/vnd.openblox.game+xml",
b"application/vnd.openstreetmap.data+xml",
b"application/vnd.openxmlformats-officedocument.custom-properties+xml",
b"application/vnd.openxmlformats-officedocument.customxmlproperties+xml",
b"application/vnd.openxmlformats-officedocument.drawing+xml",
b"application/vnd.openxmlformats-officedocument.drawingml.chart+xml",
b"application/vnd.openxmlformats-officedocument.drawingml.chartshapes+xml",
b"application/vnd.openxmlformats-officedocument.drawingml.diagramcolors+xml",
b"application/vnd.openxmlformats-officedocument.drawingml.diagramdata+xml",
b"application/vnd.openxmlformats-officedocument.drawingml.diagramlayout+xml",
b"application/vnd.openxmlformats-officedocument.drawingml.diagramstyle+xml",
b"application/vnd.openxmlformats-officedocument.extended-properties+xml",
b"application/vnd.openxmlformats-officedocument.presentationml.commentauthors+xml",
b"application/vnd.openxmlformats-officedocument.presentationml.comments+xml",
b"application/vnd.openxmlformats-officedocument.presentationml.handoutmaster+xml",
b"application/vnd.openxmlformats-officedocument.presentationml.notesmaster+xml",
b"application/vnd.openxmlformats-officedocument.presentationml.notesslide+xml",
b"application/vnd.openxmlformats-officedocument.presentationml.presentation.main+xml",
b"application/vnd.openxmlformats-officedocument.presentationml.presprops+xml",
b"application/vnd.openxmlformats-officedocument.presentationml.slide+xml",
b"application/vnd.openxmlformats-officedocument.presentationml.slidelayout+xml",
b"application/vnd.openxmlformats-officedocument.presentationml.slidemaster+xml",
b"application/vnd.openxmlformats-officedocument.presentationml.slideshow.main+xml",
b"application/vnd.openxmlformats-officedocument.presentationml.slideupdateinfo+xml",
b"application/vnd.openxmlformats-officedocument.presentationml.tablestyles+xml",
b"application/vnd.openxmlformats-officedocument.presentationml.tags+xml",
b"application/vnd.openxmlformats-officedocument.presentationml.template.main+xml",
b"application/vnd.openxmlformats-officedocument.presentationml.viewprops+xml",
b"application/vnd.openxmlformats-officedocument.spreadsheetml.calcchain+xml",
b"application/vnd.openxmlformats-officedocument.spreadsheetml.chartsheet+xml",
b"application/vnd.openxmlformats-officedocument.spreadsheetml.comments+xml",
b"application/vnd.openxmlformats-officedocument.spreadsheetml.connections+xml",
b"application/vnd.openxmlformats-officedocument.spreadsheetml.dialogsheet+xml",
b"application/vnd.openxmlformats-officedocument.spreadsheetml.externallink+xml",
b"application/vnd.openxmlformats-officedocument.spreadsheetml.pivotcachedefinition+xml",
b"application/vnd.openxmlformats-officedocument.spreadsheetml.pivotcacherecords+xml",
b"application/vnd.openxmlformats-officedocument.spreadsheetml.pivottable+xml",
b"application/vnd.openxmlformats-officedocument.spreadsheetml.querytable+xml",
b"application/vnd.openxmlformats-officedocument.spreadsheetml.revisionheaders+xml",
b"application/vnd.openxmlformats-officedocument.spreadsheetml.revisionlog+xml",
b"application/vnd.openxmlformats-officedocument.spreadsheetml.sharedstrings+xml",
b"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet.main+xml",
b"application/vnd.openxmlformats-officedocument.spreadsheetml.sheetmetadata+xml",
b"application/vnd.openxmlformats-officedocument.spreadsheetml.styles+xml",
b"application/vnd.openxmlformats-officedocument.spreadsheetml.table+xml",
b"application/vnd.openxmlformats-officedocument.spreadsheetml.tablesinglecells+xml",
b"application/vnd.openxmlformats-officedocument.spreadsheetml.template.main+xml",
b"application/vnd.openxmlformats-officedocument.spreadsheetml.usernames+xml",
b"application/vnd.openxmlformats-officedocument.spreadsheetml.volatiledependencies+xml",
b"application/vnd.openxmlformats-officedocument.spreadsheetml.worksheet+xml",
b"application/vnd.openxmlformats-officedocument.theme+xml",
b"application/vnd.openxmlformats-officedocument.themeoverride+xml",
b"application/vnd.openxmlformats-officedocument.wordprocessingml.comments+xml",
b"application/vnd.openxmlformats-officedocument.wordprocessingml.document.glossary+xml",
b"application/vnd.openxmlformats-officedocument.wordprocessingml.document.main+xml",
b"application/vnd.openxmlformats-officedocument.wordprocessingml.endnotes+xml",
b"application/vnd.openxmlformats-officedocument.wordprocessingml.fonttable+xml",
b"application/vnd.openxmlformats-officedocument.wordprocessingml.footer+xml",
b"application/vnd.openxmlformats-officedocument.wordprocessingml.footnotes+xml",
b"application/vnd.openxmlformats-officedocument.wordprocessingml.numbering+xml",
b"application/vnd.openxmlformats-officedocument.wordprocessingml.settings+xml",
b"application/vnd.openxmlformats-officedocument.wordprocessingml.styles+xml",
b"application/vnd.openxmlformats-officedocument.wordprocessingml.template.main+xml",
b"application/vnd.openxmlformats-officedocument.wordprocessingml.websettings+xml",
b"application/vnd.openxmlformats-package.core-properties+xml",
b"application/vnd.openxmlformats-package.digital-signature-xmlsignature+xml",
b"application/vnd.openxmlformats-package.relationships+xml",
b"application/vnd.oracle.resource+json",
b"application/vnd.otps.ct-kip+xml",
b"application/vnd.pagerduty+json",
b"application/vnd.poc.group-advertisement+xml",
b"application/vnd.pwg-xhtml-print+xml",
b"application/vnd.radisys.moml+xml",
b"application/vnd.radisys.msml+xml",
b"application/vnd.radisys.msml-audit+xml",
b"application/vnd.radisys.msml-audit-conf+xml",
b"application/vnd.radisys.msml-audit-conn+xml",
b"application/vnd.radisys.msml-audit-dialog+xml",
b"application/vnd.radisys.msml-audit-stream+xml",
b"application/vnd.radisys.msml-conf+xml",
b"application/vnd.radisys.msml-dialog+xml",
b"application/vnd.radisys.msml-dialog-base+xml",
b"application/vnd.radisys.msml-dialog-fax-detect+xml",
b"application/vnd.radisys.msml-dialog-fax-sendrecv+xml",
b"application/vnd.radisys.msml-dialog-group+xml",
b"application/vnd.radisys.msml-dialog-speech+xml",
b"application/vnd.radisys.msml-dialog-transform+xml",
b"application/vnd.recordare.musicxml+xml",
b"application/vnd.restful+json",
b"application/vnd.route66.link66+xml",
b"application/vnd.seis+json",
b"application/vnd.shootproof+json",
b"application/vnd.shopkick+json",
b"application/vnd.siren+json",
b"application/vnd.software602.filler.form+xml",
b"application/vnd.solent.sdkm+xml",
b"application/vnd.sun.wadl+xml",
b"application/vnd.sycle+xml",
b"application/vnd.syncml+xml",
b"application/vnd.syncml.dm+xml",
b"application/vnd.syncml.dmddf+xml",
b"application/vnd.syncml.dmtnds+xml",
b"application/vnd.tableschema+json",
b"application/vnd.think-cell.ppttc+json",
b"application/vnd.tmd.mediaflex.api+xml",
b"application/vnd.uoml+xml",
b"application/vnd.vel+json",
b"application/vnd.wv.csp+xml",
b"application/vnd.wv.ssp+xml",
b"application/vnd.xacml+json",
b"application/vnd.xmi+xml",
b"application/vnd.yamaha.openscoreformat.osfpvg+xml",
b"application/vnd.zzazz.deck+xml",
b"application/voicexml+xml",
b"application/voucher-cms+json",
b"application/wasm",
b"application/watcherinfo+xml",
b"application/webpush-options+json",
b"application/wsdl+xml",
b"application/wspolicy+xml",
b"application/x-dtbncx+xml",
b"application/x-dtbook+xml",
b"application/x-dtbresource+xml",
b"application/x-httpd-php",
b"application/x-javascript",
b"application/x-ns-proxy-autoconfig",
b"application/x-sh",
b"application/x-tar",
b"application/x-virtualbox-hdd",
b"application/x-virtualbox-ova",
b"application/x-virtualbox-ovf",
b"application/x-virtualbox-vbox",
b"application/x-virtualbox-vdi",
b"application/x-virtualbox-vhd",
b"application/x-virtualbox-vmdk",
b"application/x-web-app-manifest+json",
b"application/x-www-form-urlencoded",
b"application/x-xliff+xml",
b"application/xacml+xml",
b"application/xaml+xml",
b"application/xcap-att+xml",
b"application/xcap-caps+xml",
b"application/xcap-diff+xml",
b"application/xcap-el+xml",
b"application/xcap-error+xml",
b"application/xcap-ns+xml",
b"application/xcon-conference-info+xml",
b"application/xcon-conference-info-diff+xml",
b"application/xenc+xml",
b"application/xhtml+xml",
b"application/xhtml-voice+xml",
b"application/xliff+xml",
b"application/xml",
b"application/xml-dtd",
b"application/xml-patch+xml",
b"application/xmpp+xml",
b"application/xop+xml",
b"application/xproc+xml",
b"application/xslt+xml",
b"application/xspf+xml",
b"application/xv+xml",
b"application/yang-data+json",
b"application/yang-data+xml",
b"application/yang-patch+json",
b"application/yang-patch+xml",
b"application/yin+xml",
b"font/otf",
b"font/ttf",
b"image/bmp",
b"image/svg+xml",
b"image/vnd.adobe.photoshop",
b"image/x-icon",
b"image/x-ms-bmp",
b"message/imdn+xml",
b"message/rfc822",
b"model/gltf+json",
b"model/gltf-binary",
b"model/vnd.collada+xml",
b"model/vnd.moml+xml",
b"model/x3d+xml",
b"text/cache-manifest",
b"text/calender",
b"text/cmd",
b"text/css",
b"text/csv",
b"text/html",
b"text/javascript",
b"text/jsx",
b"text/less",
b"text/markdown",
b"text/mdx",
b"text/n3",
b"text/plain",
b"text/richtext",
b"text/rtf",
b"text/tab-separated-values",
b"text/uri-list",
b"text/vcard",
b"text/vtt",
b"text/x-gwt-rpc",
b"text/x-jquery-tmpl",
b"text/x-markdown",
b"text/x-org",
b"text/x-processing",
b"text/x-suse-ymp",
b"text/xml",
b"text/yaml",
b"x-shader/x-fragment",
b"x-shader/x-vertex",
};
/// Exact-match membership test against the static set of compressible
/// content types; no normalization is performed here.
fn known_compressible(ct: &[u8]) -> bool {
  CONTENT_TYPES.contains(ct)
}
/// Parse `ct` as a MIME type and check whether its essence (type/subtype
/// with parameters stripped, lowercased by the parser) is compressible.
///
/// Returns `None` when `ct` is not valid UTF-8 or not a parseable MIME type.
fn known_mime(ct: &[u8]) -> Option<bool> {
  let text = std::str::from_utf8(ct).ok()?;
  let parsed: mime::Mime = text.parse().ok()?;
  Some(known_compressible(parsed.essence_str().as_bytes()))
}
/// Determine if the supplied content type is considered compressible
pub fn is_content_compressible(ct: impl AsRef<[u8]>) -> bool {
  let bytes = ct.as_ref();
  // Only the media type before any `;parameters` matters for the lookup.
  let media_type = match bytes.iter().position(|&c| c == b';') {
    Some(idx) => &bytes[..idx],
    None => bytes,
  };
  // Fast path: exact byte match; otherwise fall back to full MIME parsing
  // (handles case-insensitivity), treating unparseable input as false.
  if known_compressible(media_type) {
    return true;
  }
  known_mime(media_type).unwrap_or_default()
}
#[cfg(test)]
mod tests {
  use super::*;
  #[test]
  fn non_compressible_content_type() {
    assert!(!is_content_compressible("application/vnd.deno+json"));
    assert!(!is_content_compressible("text/fake"));
  }
  #[test]
  fn compressible_content_type() {
    assert!(is_content_compressible("application/json"));
    assert!(is_content_compressible("text/plain;charset=UTF-8"));
    // Mixed case and parameters are handled via the MIME-parsing fallback.
    assert!(is_content_compressible("text/PlAIn; charset=utf-8"));
  }
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/ext/http/request_body.rs | ext/http/request_body.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::borrow::Cow;
use std::pin::Pin;
use std::rc::Rc;
use std::task::Poll;
use std::task::ready;
use bytes::Bytes;
use deno_core::AsyncRefCell;
use deno_core::AsyncResult;
use deno_core::BufView;
use deno_core::RcRef;
use deno_core::Resource;
use deno_core::futures::Stream;
use deno_core::futures::StreamExt;
use deno_core::futures::TryFutureExt;
use deno_core::futures::stream::Peekable;
use deno_error::JsErrorBox;
use hyper::body::Body;
use hyper::body::Incoming;
use hyper::body::SizeHint;
/// Converts a hyper incoming body stream into a stream of [`Bytes`] that we can use to read in V8.
struct ReadFuture(Incoming);
impl Stream for ReadFuture {
  type Item = Result<Bytes, hyper::Error>;
  fn poll_next(
    self: Pin<&mut Self>,
    cx: &mut std::task::Context<'_>,
  ) -> Poll<Option<Self::Item>> {
    // Loop until we receive a non-empty frame from Hyper
    let this = self.get_mut();
    loop {
      let res = ready!(Pin::new(&mut this.0).poll_frame(cx));
      break match res {
        Some(Ok(frame)) => {
          // Non-data frames (e.g. trailers) fall through and are skipped.
          if let Ok(data) = frame.into_data() {
            // Ensure that we never yield an empty frame
            if !data.is_empty() {
              break Poll::Ready(Some(Ok(data)));
            }
          }
          // Loop again so we don't lose the waker
          continue;
        }
        Some(Err(e)) => Poll::Ready(Some(Err(e))),
        None => Poll::Ready(None),
      };
    }
  }
}
/// Resource wrapper around a hyper [`Incoming`] request body, plus the
/// body's [`SizeHint`] captured at construction time.
pub struct HttpRequestBody(AsyncRefCell<Peekable<ReadFuture>>, SizeHint);
impl HttpRequestBody {
  pub fn new(body: Incoming) -> Self {
    // Capture the size hint up front, before the body is wrapped.
    let size_hint = body.size_hint();
    Self(AsyncRefCell::new(ReadFuture(body).peekable()), size_hint)
  }
  /// Read up to `limit` bytes from the body. A chunk that fits entirely
  /// within `limit` is passed through whole; a larger chunk is split, with
  /// the remainder left in the peeked slot for the next read.
  async fn read(self: Rc<Self>, limit: usize) -> Result<BufView, hyper::Error> {
    let peekable = RcRef::map(self, |this| &this.0);
    let mut peekable = peekable.borrow_mut().await;
    match Pin::new(&mut *peekable).peek_mut().await {
      None => Ok(BufView::empty()),
      // Peeked an error: consume it and return it.
      Some(Err(_)) => Err(peekable.next().await.unwrap().err().unwrap()),
      Some(Ok(bytes)) => {
        if bytes.len() <= limit {
          // We can safely take the next item since we peeked it
          return Ok(BufView::from(peekable.next().await.unwrap()?));
        }
        // Chunk is larger than `limit`: hand out the first `limit` bytes.
        let ret = bytes.split_to(limit);
        Ok(BufView::from(ret))
      }
    }
  }
}
impl Resource for HttpRequestBody {
  fn name(&self) -> Cow<'_, str> {
    Cow::Borrowed("requestBody")
  }
  fn read(self: Rc<Self>, limit: usize) -> AsyncResult<BufView> {
    // Surface hyper errors to JS as "Http" errors.
    let fut = HttpRequestBody::read(self, limit)
      .map_err(|err| JsErrorBox::new("Http", err.to_string()));
    Box::pin(fut)
  }
  fn size_hint(&self) -> (u64, Option<u64>) {
    let hint = &self.1;
    (hint.lower(), hint.upper())
  }
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/ext/http/network_buffered_stream.rs | ext/http/network_buffered_stream.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::future::poll_fn;
use std::io;
use std::mem::MaybeUninit;
use std::pin::Pin;
use std::task::Poll;
use std::task::ready;
use bytes::Bytes;
use tokio::io::AsyncRead;
use tokio::io::AsyncWrite;
use tokio::io::ReadBuf;
// Upper bound on prefix length; the scratch buffer is sized at twice this
// so bytes read past the prefix are retained for replay.
const MAX_PREFIX_SIZE: usize = 256;
/// [`NetworkStreamPrefixCheck`] is used to differentiate a stream between two different modes, depending
/// on whether the first bytes match a given prefix (or not).
///
/// IMPORTANT: This stream makes the assumption that the incoming bytes will never partially match the prefix
/// and then "hang" waiting for a write. For this code not to hang, the incoming stream must:
///
/// * match the prefix fully and then request writes at a later time
/// * not match the prefix, and then request writes after writing a byte that causes the prefix not to match
/// * not match the prefix and then close
pub struct NetworkStreamPrefixCheck<S: AsyncRead + Unpin> {
  // Scratch space for bytes read while deciding whether the prefix matches.
  buffer: [MaybeUninit<u8>; MAX_PREFIX_SIZE * 2],
  io: S,
  prefix: &'static [u8],
}
impl<S: AsyncRead + Unpin> NetworkStreamPrefixCheck<S> {
  /// Wrap `io`; `prefix` must be shorter than [`MAX_PREFIX_SIZE`].
  pub fn new(io: S, prefix: &'static [u8]) -> Self {
    debug_assert!(prefix.len() < MAX_PREFIX_SIZE);
    Self {
      io,
      prefix,
      buffer: [MaybeUninit::<u8>::uninit(); MAX_PREFIX_SIZE * 2],
    }
  }
  // Returns a [`NetworkBufferedStream`] and a flag determining if we matched a prefix, rewound with the bytes we read to determine what
  // type of stream this is.
  pub async fn match_prefix(
    self,
  ) -> io::Result<(bool, NetworkBufferedStream<S>)> {
    let mut buffer = self.buffer;
    let mut readbuf = ReadBuf::uninit(&mut buffer);
    let mut io = self.io;
    let prefix = self.prefix;
    loop {
      // Tri-state outcome of one read: need more data, matched, or not.
      enum State {
        Unknown,
        Matched,
        NotMatched,
      }
      let state = poll_fn(|cx| {
        let filled_len = readbuf.filled().len();
        let res = ready!(Pin::new(&mut io).poll_read(cx, &mut readbuf));
        if let Err(e) = res {
          return Poll::Ready(Err(e));
        }
        let filled = readbuf.filled();
        let new_len = filled.len();
        if new_len == filled_len {
          // Empty read, no match
          return Poll::Ready(Ok(State::NotMatched));
        } else if new_len < prefix.len() {
          // Read less than prefix, make sure we're still matching the prefix (early exit)
          if !prefix.starts_with(filled) {
            return Poll::Ready(Ok(State::NotMatched));
          }
        } else if new_len >= prefix.len() {
          // We have enough to determine
          if filled.starts_with(prefix) {
            return Poll::Ready(Ok(State::Matched));
          } else {
            return Poll::Ready(Ok(State::NotMatched));
          }
        }
        Poll::Ready(Ok(State::Unknown))
      })
      .await?;
      match state {
        State::Unknown => continue,
        State::Matched => {
          // Rewind: hand all bytes we consumed back via the buffered stream.
          let initialized_len = readbuf.filled().len();
          return Ok((
            true,
            NetworkBufferedStream::new(io, buffer, initialized_len),
          ));
        }
        State::NotMatched => {
          let initialized_len = readbuf.filled().len();
          return Ok((
            false,
            NetworkBufferedStream::new(io, buffer, initialized_len),
          ));
        }
      }
    }
  }
}
/// [`NetworkBufferedStream`] is a stream that allows us to efficiently search for an incoming prefix in another stream without
/// reading too much data. If the stream detects that the prefix has definitely been matched, or definitely not been matched,
/// it returns a flag and a rewound stream allowing later code to take another pass at that data.
///
/// [`NetworkBufferedStream`] is a custom wrapper around an asynchronous stream that implements AsyncRead
/// and AsyncWrite. It is designed to provide additional buffering functionality to the wrapped stream.
/// The primary use case for this struct is when you want to read a small amount of data from the beginning
/// of a stream, process it, and then continue reading the rest of the stream.
///
/// While the bounds for the class are limited to [`AsyncRead`] for easier testing, it is far more useful to use
/// with interactive duplex streams that have a prefix determining which mode to operate in. For example, this class
/// can determine whether an incoming stream is HTTP/2 or non-HTTP/2 and allow downstream code to make that determination.
pub struct NetworkBufferedStream<S: AsyncRead + Unpin> {
  // Bytes read during prefix detection; replayed before reading from `io`.
  prefix: [MaybeUninit<u8>; MAX_PREFIX_SIZE * 2],
  io: S,
  // Number of leading bytes of `prefix` that are initialized.
  initialized_len: usize,
  // How many of the initialized bytes have already been handed to readers.
  prefix_offset: usize,
  /// Have the prefix bytes been completely read out?
  prefix_read: bool,
}
impl<S: AsyncRead + Unpin> NetworkBufferedStream<S> {
  /// This constructor is private, because passing partially initialized data between the [`NetworkStreamPrefixCheck`] and
  /// this [`NetworkBufferedStream`] is challenging without the introduction of extra copies.
  fn new(
    io: S,
    prefix: [MaybeUninit<u8>; MAX_PREFIX_SIZE * 2],
    initialized_len: usize,
  ) -> Self {
    Self {
      io,
      initialized_len,
      prefix_offset: 0,
      prefix,
      prefix_read: false,
    }
  }
  /// The buffered prefix bytes not yet consumed by a reader.
  fn current_slice(&self) -> &[u8] {
    // We trust that these bytes are initialized properly
    let slice = &self.prefix[self.prefix_offset..self.initialized_len];
    // This guarantee comes from slice_assume_init_ref (we can't use that until it's stable)
    // SAFETY: casting `slice` to a `*const [T]` is safe since the caller guarantees that
    // `slice` is initialized, and `MaybeUninit` is guaranteed to have the same layout as `T`.
    // The pointer obtained is valid since it refers to memory owned by `slice` which is a
    // reference and thus guaranteed to be valid for reads.
    unsafe { &*(slice as *const [_] as *const [u8]) as _ }
  }
  /// Deconstruct into the inner stream plus any unconsumed prefix bytes.
  pub fn into_inner(self) -> (S, Bytes) {
    let bytes = Bytes::copy_from_slice(self.current_slice());
    (self.io, bytes)
  }
}
impl<S: AsyncRead + Unpin> AsyncRead for NetworkBufferedStream<S> {
  // From hyper's Rewind (https://github.com/hyperium/hyper), MIT License, Copyright (c) Sean McArthur
  fn poll_read(
    mut self: Pin<&mut Self>,
    cx: &mut std::task::Context<'_>,
    buf: &mut ReadBuf<'_>,
  ) -> Poll<std::io::Result<()>> {
    // Drain the rewound prefix before ever touching the inner stream.
    if !self.prefix_read {
      let prefix = self.current_slice();
      // If there are no remaining bytes, let the bytes get dropped.
      if !prefix.is_empty() {
        let copy_len = std::cmp::min(prefix.len(), buf.remaining());
        buf.put_slice(&prefix[..copy_len]);
        self.prefix_offset += copy_len;
        return Poll::Ready(Ok(()));
      } else {
        self.prefix_read = true;
      }
    }
    Pin::new(&mut self.io).poll_read(cx, buf)
  }
}
impl<S: AsyncRead + AsyncWrite + Unpin> AsyncWrite
  for NetworkBufferedStream<S>
{
  // Writes are unaffected by the buffered read prefix: every method is a
  // straight delegation to the wrapped stream.
  fn poll_write(
    self: Pin<&mut Self>,
    cx: &mut std::task::Context<'_>,
    buf: &[u8],
  ) -> std::task::Poll<Result<usize, std::io::Error>> {
    Pin::new(&mut self.get_mut().io).poll_write(cx, buf)
  }
  fn poll_flush(
    self: Pin<&mut Self>,
    cx: &mut std::task::Context<'_>,
  ) -> std::task::Poll<Result<(), std::io::Error>> {
    Pin::new(&mut self.get_mut().io).poll_flush(cx)
  }
  fn poll_shutdown(
    self: Pin<&mut Self>,
    cx: &mut std::task::Context<'_>,
  ) -> std::task::Poll<Result<(), std::io::Error>> {
    Pin::new(&mut self.get_mut().io).poll_shutdown(cx)
  }
  fn is_write_vectored(&self) -> bool {
    self.io.is_write_vectored()
  }
  fn poll_write_vectored(
    self: Pin<&mut Self>,
    cx: &mut std::task::Context<'_>,
    bufs: &[std::io::IoSlice<'_>],
  ) -> std::task::Poll<Result<usize, std::io::Error>> {
    Pin::new(&mut self.get_mut().io).poll_write_vectored(cx, bufs)
  }
}
#[cfg(test)]
mod tests {
use tokio::io::AsyncReadExt;
use super::*;
struct YieldsOneByteAtATime(&'static [u8]);
impl AsyncRead for YieldsOneByteAtATime {
fn poll_read(
mut self: Pin<&mut Self>,
_cx: &mut std::task::Context<'_>,
buf: &mut ReadBuf<'_>,
) -> Poll<io::Result<()>> {
if let Some((head, tail)) = self.as_mut().0.split_first() {
self.as_mut().0 = tail;
let dest = buf.initialize_unfilled_to(1);
dest[0] = *head;
buf.advance(1);
}
Poll::Ready(Ok(()))
}
}
async fn test(
io: impl AsyncRead + Unpin,
prefix: &'static [u8],
expect_match: bool,
expect_string: &'static str,
) -> io::Result<()> {
let (matches, mut io) = NetworkStreamPrefixCheck::new(io, prefix)
.match_prefix()
.await?;
assert_eq!(matches, expect_match);
let mut s = String::new();
Pin::new(&mut io).read_to_string(&mut s).await?;
assert_eq!(s, expect_string);
Ok(())
}
#[tokio::test]
async fn matches_prefix_simple() -> io::Result<()> {
let buf = b"prefix match".as_slice();
test(buf, b"prefix", true, "prefix match").await
}
#[tokio::test]
async fn matches_prefix_exact() -> io::Result<()> {
let buf = b"prefix".as_slice();
test(buf, b"prefix", true, "prefix").await
}
#[tokio::test]
async fn not_matches_prefix_simple() -> io::Result<()> {
let buf = b"prefill match".as_slice();
test(buf, b"prefix", false, "prefill match").await
}
#[tokio::test]
async fn not_matches_prefix_short() -> io::Result<()> {
let buf = b"nope".as_slice();
test(buf, b"prefix", false, "nope").await
}
#[tokio::test]
async fn not_matches_prefix_empty() -> io::Result<()> {
let buf = b"".as_slice();
test(buf, b"prefix", false, "").await
}
#[tokio::test]
async fn matches_one_byte_at_a_time() -> io::Result<()> {
let buf = YieldsOneByteAtATime(b"prefix");
test(buf, b"prefix", true, "prefix").await
}
#[tokio::test]
async fn not_matches_one_byte_at_a_time() -> io::Result<()> {
let buf = YieldsOneByteAtATime(b"prefill");
test(buf, b"prefix", false, "prefill").await
}
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/ext/http/benches/compressible.rs | ext/http/benches/compressible.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use bencher::Bencher;
use bencher::benchmark_group;
use bencher::benchmark_main;
use deno_http::compressible::is_content_compressible;
/// Shared driver: benchmark `is_content_compressible` for one content type.
fn bench_content_type(b: &mut Bencher, content_type: &'static str) {
  b.iter(|| {
    is_content_compressible(content_type);
  })
}
fn compressible_simple_hit(b: &mut Bencher) {
  bench_content_type(b, "text/plain")
}
fn compressible_complex_hit(b: &mut Bencher) {
  bench_content_type(b, "text/PlAIn; charset=utf-8")
}
fn compressible_simple_miss(b: &mut Bencher) {
  bench_content_type(b, "text/fake")
}
fn compressible_complex_miss(b: &mut Bencher) {
  bench_content_type(b, "text/fake;charset=utf-8")
}
benchmark_group!(
  benches,
  compressible_simple_hit,
  compressible_complex_hit,
  compressible_simple_miss,
  compressible_complex_miss,
);
benchmark_main!(benches);
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/ext/websocket/stream.rs | ext/websocket/stream.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::io::ErrorKind;
use std::pin::Pin;
use std::task::Poll;
use std::task::ready;
use bytes::Buf;
use bytes::Bytes;
use deno_net::raw::NetworkStream;
use h2::RecvStream;
use h2::SendStream;
use hyper::upgrade::Upgraded;
use hyper_util::rt::TokioIo;
use tokio::io::AsyncRead;
use tokio::io::AsyncWrite;
use tokio::io::ReadBuf;
// TODO(bartlomieju): remove this
#[allow(clippy::large_enum_variant)]
pub(crate) enum WsStreamKind {
  /// Connection upgraded from HTTP/1.1.
  Upgraded(TokioIo<Upgraded>),
  /// Raw network stream from `deno_net`.
  Network(NetworkStream),
  /// HTTP/2 stream pair (send half, receive half).
  H2(SendStream<Bytes>, RecvStream),
}
pub(crate) struct WebSocketStream {
  stream: WsStreamKind,
  // Bytes already read off the transport (e.g. during the HTTP upgrade);
  // replayed before any further reads from `stream`.
  pre: Option<Bytes>,
}
impl WebSocketStream {
pub fn new(stream: WsStreamKind, buffer: Option<Bytes>) -> Self {
Self {
stream,
pre: buffer,
}
}
}
impl AsyncRead for WebSocketStream {
  // From hyper's Rewind (https://github.com/hyperium/hyper), MIT License, Copyright (c) Sean McArthur
  fn poll_read(
    mut self: Pin<&mut Self>,
    cx: &mut std::task::Context<'_>,
    buf: &mut ReadBuf<'_>,
  ) -> Poll<std::io::Result<()>> {
    // Replay rewound bytes before reading from the underlying stream.
    if let Some(mut prefix) = self.pre.take() {
      // If there are no remaining bytes, let the bytes get dropped.
      if !prefix.is_empty() {
        let copy_len = std::cmp::min(prefix.len(), buf.remaining());
        // TODO: There should be a way to do following two lines cleaner...
        buf.put_slice(&prefix[..copy_len]);
        prefix.advance(copy_len);
        // Put back what's left
        if !prefix.is_empty() {
          self.pre = Some(prefix);
        }
        return Poll::Ready(Ok(()));
      }
    }
    match &mut self.stream {
      WsStreamKind::Network(stream) => Pin::new(stream).poll_read(cx, buf),
      WsStreamKind::Upgraded(stream) => Pin::new(stream).poll_read(cx, buf),
      WsStreamKind::H2(_, recv) => {
        let data = ready!(recv.poll_data(cx));
        let Some(data) = data else {
          // EOF
          return Poll::Ready(Ok(()));
        };
        let mut data = data.map_err(|e| {
          std::io::Error::new(std::io::ErrorKind::InvalidData, e)
        })?;
        // Tell h2 flow control we've consumed these bytes.
        recv.flow_control().release_capacity(data.len()).unwrap();
        // This looks like the prefix code above -- can we share this?
        let copy_len = std::cmp::min(data.len(), buf.remaining());
        // TODO: There should be a way to do following two lines cleaner...
        buf.put_slice(&data[..copy_len]);
        data.advance(copy_len);
        // Put back what's left
        if !data.is_empty() {
          self.pre = Some(data);
        }
        Poll::Ready(Ok(()))
      }
    }
  }
}
impl AsyncWrite for WebSocketStream {
  fn poll_write(
    mut self: Pin<&mut Self>,
    cx: &mut std::task::Context<'_>,
    buf: &[u8],
  ) -> std::task::Poll<Result<usize, std::io::Error>> {
    match &mut self.stream {
      WsStreamKind::Network(stream) => Pin::new(stream).poll_write(cx, buf),
      WsStreamKind::Upgraded(stream) => Pin::new(stream).poll_write(cx, buf),
      WsStreamKind::H2(send, _) => {
        // Zero-length write succeeds
        if buf.is_empty() {
          return Poll::Ready(Ok(0));
        }
        send.reserve_capacity(buf.len());
        let res = ready!(send.poll_capacity(cx));
        // TODO(mmastrac): the documentation is not entirely clear what to do here, so we'll continue
        _ = res;
        // We'll try to send whatever we have capacity for
        let size = std::cmp::min(buf.len(), send.capacity());
        assert!(size > 0);
        let buf: Bytes = Bytes::copy_from_slice(&buf[0..size]);
        let len = buf.len();
        // TODO(mmastrac): surface the h2 error?
        let res = send
          .send_data(buf, false)
          .map_err(|_| std::io::Error::from(ErrorKind::Other));
        // Partial writes are fine: the caller sees how many bytes went out.
        Poll::Ready(res.map(|_| len))
      }
    }
  }
  fn poll_flush(
    mut self: Pin<&mut Self>,
    cx: &mut std::task::Context<'_>,
  ) -> std::task::Poll<Result<(), std::io::Error>> {
    match &mut self.stream {
      WsStreamKind::Network(stream) => Pin::new(stream).poll_flush(cx),
      WsStreamKind::Upgraded(stream) => Pin::new(stream).poll_flush(cx),
      // No explicit flush operation for an h2 send stream.
      WsStreamKind::H2(..) => Poll::Ready(Ok(())),
    }
  }
  fn poll_shutdown(
    mut self: Pin<&mut Self>,
    cx: &mut std::task::Context<'_>,
  ) -> std::task::Poll<Result<(), std::io::Error>> {
    match &mut self.stream {
      WsStreamKind::Network(stream) => Pin::new(stream).poll_shutdown(cx),
      WsStreamKind::Upgraded(stream) => Pin::new(stream).poll_shutdown(cx),
      WsStreamKind::H2(send, _) => {
        // NOTE(review): this sends an empty DATA frame with
        // end_of_stream = false — confirm whether `true` was intended to
        // actually half-close the h2 stream here.
        // TODO(mmastrac): surface the h2 error?
        let res = send
          .send_data(Bytes::new(), false)
          .map_err(|_| std::io::Error::from(ErrorKind::Other));
        Poll::Ready(res)
      }
    }
  }
  fn is_write_vectored(&self) -> bool {
    match &self.stream {
      WsStreamKind::Network(stream) => stream.is_write_vectored(),
      WsStreamKind::Upgraded(stream) => stream.is_write_vectored(),
      WsStreamKind::H2(..) => false,
    }
  }
  fn poll_write_vectored(
    mut self: Pin<&mut Self>,
    cx: &mut std::task::Context<'_>,
    bufs: &[std::io::IoSlice<'_>],
  ) -> std::task::Poll<Result<usize, std::io::Error>> {
    match &mut self.stream {
      WsStreamKind::Network(stream) => {
        Pin::new(stream).poll_write_vectored(cx, bufs)
      }
      WsStreamKind::Upgraded(stream) => {
        Pin::new(stream).poll_write_vectored(cx, bufs)
      }
      WsStreamKind::H2(..) => {
        // TODO(mmastrac): this is possibly just too difficult, but we'll never call it
        unimplemented!()
      }
    }
  }
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/ext/websocket/lib.rs | ext/websocket/lib.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::borrow::Cow;
use std::cell::Cell;
use std::cell::RefCell;
use std::future::Future;
use std::rc::Rc;
use bytes::Bytes;
use deno_core::AsyncMutFuture;
use deno_core::AsyncRefCell;
use deno_core::ByteString;
use deno_core::CancelHandle;
use deno_core::CancelTryFuture;
use deno_core::JsBuffer;
use deno_core::OpState;
use deno_core::RcRef;
use deno_core::Resource;
use deno_core::ResourceId;
use deno_core::ToJsBuffer;
use deno_core::futures::TryFutureExt;
use deno_core::op2;
use deno_core::unsync::spawn;
use deno_core::url;
use deno_error::JsErrorBox;
use deno_fetch::ClientConnectError;
use deno_fetch::HttpClientCreateError;
use deno_fetch::HttpClientResource;
use deno_fetch::get_or_create_client_from_state;
use deno_net::raw::NetworkStream;
use deno_permissions::PermissionCheckError;
use deno_permissions::PermissionsContainer;
use deno_tls::SocketUse;
use fastwebsockets::CloseCode;
use fastwebsockets::FragmentCollectorRead;
use fastwebsockets::Frame;
use fastwebsockets::OpCode;
use fastwebsockets::Role;
use fastwebsockets::WebSocket;
use fastwebsockets::WebSocketWrite;
use http::HeaderName;
use http::HeaderValue;
use http::Method;
use http::Request;
use http::StatusCode;
use http::Uri;
use http::header::CONNECTION;
use http::header::HOST;
use http::header::SEC_WEBSOCKET_KEY;
use http::header::SEC_WEBSOCKET_PROTOCOL;
use http::header::SEC_WEBSOCKET_VERSION;
use http::header::UPGRADE;
use hyper_util::client::legacy::connect::Connection;
use once_cell::sync::Lazy;
use serde::Serialize;
use tokio::io::AsyncRead;
use tokio::io::AsyncWrite;
use tokio::io::ReadHalf;
use tokio::io::WriteHalf;
use crate::stream::WebSocketStream;
mod stream;
// Vectored writes are opt-in: any non-empty DENO_USE_WRITEV value enables
// them; unset or empty leaves them disabled.
static USE_WRITEV: Lazy<bool> = Lazy::new(|| {
  match std::env::var("DENO_USE_WRITEV") {
    Ok(value) => !value.is_empty(),
    Err(_) => false,
  }
});
/// Errors produced by the websocket ops; the `#[class(...)]` attributes
/// select the JavaScript error class each variant is surfaced as.
#[derive(Debug, thiserror::Error, deno_error::JsError)]
pub enum WebsocketError {
  #[class(inherit)]
  #[error(transparent)]
  Url(url::ParseError),
  #[class(inherit)]
  #[error(transparent)]
  Permission(#[from] PermissionCheckError),
  #[class(inherit)]
  #[error(transparent)]
  Resource(#[from] deno_core::error::ResourceError),
  #[class(generic)]
  #[error(transparent)]
  Uri(#[from] http::uri::InvalidUri),
  #[class(inherit)]
  #[error("{0}")]
  Io(#[from] std::io::Error),
  #[class(inherit)]
  #[error(transparent)]
  ClientCreate(#[from] HttpClientCreateError),
  #[class(type)]
  #[error(transparent)]
  WebSocket(#[from] fastwebsockets::WebSocketError),
  #[class("DOMExceptionNetworkError")]
  #[error("failed to connect to WebSocket: {0}")]
  ConnectionFailed(#[from] HandshakeError),
  #[class(inherit)]
  #[error(transparent)]
  Canceled(#[from] deno_core::Canceled),
}
/// Resource wrapping the cancel handle handed out by
/// `op_ws_check_permission_and_cancel_handle`; closing the resource
/// triggers the cancellation.
pub struct WsCancelResource(Rc<CancelHandle>);
impl Resource for WsCancelResource {
  fn name(&self) -> Cow<'_, str> {
    Cow::Borrowed("webSocketCancel")
  }
  fn close(self: Rc<Self>) {
    self.0.cancel();
  }
}
// This op is needed because creating a WS instance in JavaScript is a sync
// operation and should throw error when permissions are not fulfilled,
// but actual op that connects WS is async.
#[op2(stack_trace)]
#[smi]
pub fn op_ws_check_permission_and_cancel_handle(
  state: &mut OpState,
  #[string] api_name: String,
  #[string] url: String,
  cancel_handle: bool,
) -> Result<Option<ResourceId>, WebsocketError> {
  state.borrow_mut::<PermissionsContainer>().check_net_url(
    &url::Url::parse(&url).map_err(WebsocketError::Url)?,
    &api_name,
  )?;
  // Optionally hand back a cancel handle the JS side can use to abort the
  // subsequent async connect op.
  if cancel_handle {
    let rid = state
      .resource_table
      .add(WsCancelResource(CancelHandle::new_rc()));
    Ok(Some(rid))
  } else {
    Ok(None)
  }
}
/// Data returned to JS after a successful websocket handshake.
#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
pub struct CreateResponse {
  // Resource id of the newly-created websocket resource.
  rid: ResourceId,
  // Negotiated subprotocol, or "" when none was negotiated.
  protocol: String,
  // Concatenated Sec-WebSocket-Extensions header values.
  extensions: String,
}
/// Failure modes of the websocket handshake (http/1.1 upgrade or h2
/// extended CONNECT); wrapped as `WebsocketError::ConnectionFailed`.
#[derive(Debug, thiserror::Error, deno_error::JsError)]
pub enum HandshakeError {
  #[class(type)]
  #[error("Missing host in url")]
  MissingHost,
  #[class(type)]
  #[error("Missing path in url")]
  MissingPath,
  #[class(type)]
  #[error("Invalid scheme in url")]
  InvalidScheme,
  #[class(generic)]
  #[error("Invalid status code {0}")]
  InvalidStatusCode(StatusCode),
  #[class(generic)]
  #[error(transparent)]
  Http(#[from] http::Error),
  #[class(inherit)]
  #[error(transparent)]
  Connect(#[from] ClientConnectError),
  #[class(type)]
  #[error(transparent)]
  WebSocket(#[from] fastwebsockets::WebSocketError),
  #[class(generic)]
  #[error("Didn't receive h2 alpn, aborting connection")]
  NoH2Alpn,
  #[class(generic)]
  #[error(transparent)]
  Rustls(#[from] deno_tls::rustls::Error),
  #[class(inherit)]
  #[error(transparent)]
  Io(#[from] std::io::Error),
  #[class(generic)]
  #[error(transparent)]
  H2(#[from] h2::Error),
  #[class(type)]
  #[error("Invalid hostname: '{0}'")]
  InvalidHostname(String),
  #[class(inherit)]
  #[error(transparent)]
  RootStoreError(JsErrorBox),
  #[class(inherit)]
  #[error(transparent)]
  Tls(deno_tls::TlsError),
  #[class(type)]
  #[error(transparent)]
  HeaderName(#[from] http::header::InvalidHeaderName),
  #[class(type)]
  #[error(transparent)]
  HeaderValue(#[from] http::header::InvalidHeaderValue),
}
/// Perform the websocket handshake: try HTTP/1.1 first and, for `wss`
/// URLs only, fall back to the HTTP/2 handshake if the h1 attempt failed.
async fn handshake_websocket(
  client: deno_fetch::Client,
  allow_host: bool,
  uri: Uri,
  protocols: &str,
  headers: Option<Vec<(ByteString, ByteString)>>,
) -> Result<(WebSocket<WebSocketStream>, http::HeaderMap), HandshakeError> {
  let parts = uri.into_parts();
  let Some(authority) = parts.authority else {
    return Err(HandshakeError::MissingHost);
  };
  let Some(path_and_query) = parts.path_and_query else {
    return Err(HandshakeError::MissingPath);
  };
  // Map ws/wss to the corresponding HTTP scheme for the transport layer.
  let scheme = match parts.scheme {
    Some(s) if s.as_str() == "ws" => "http",
    Some(s) if s.as_str() == "wss" => "https",
    _ => return Err(HandshakeError::InvalidScheme),
  };
  let h1res = handshake_http1(
    client.clone(),
    allow_host,
    scheme,
    &authority,
    &path_and_query,
    protocols,
    &headers,
  )
  .await;
  match h1res {
    Ok(res) => Ok(res),
    Err(_) if scheme == "https" => {
      // Rebuild the full URI (it was consumed above) for the h2 attempt.
      let uri = Uri::builder()
        .scheme(scheme)
        .authority(authority)
        .path_and_query(path_and_query)
        .build()?;
      handshake_http2(client, allow_host, uri, protocols, &headers).await
    }
    Err(e) => Err(e),
  }
}
/// Establish a websocket over HTTP/1.1 via the classic `Upgrade: websocket`
/// handshake.
async fn handshake_http1(
  client: deno_fetch::Client,
  allow_host: bool,
  scheme: &str,
  authority: &http::uri::Authority,
  path_and_query: &http::uri::PathAndQuery,
  protocols: &str,
  headers: &Option<Vec<(ByteString, ByteString)>>,
) -> Result<(WebSocket<WebSocketStream>, http::HeaderMap), HandshakeError> {
  let connection_uri = Uri::builder()
    .scheme(scheme)
    .authority(authority.clone())
    .path_and_query(path_and_query.clone())
    .build()?;
  let connection = client.connect(connection_uri, SocketUse::Http1Only).await?;
  let is_proxied = connection.connected().is_proxied();
  let host = match authority.port() {
    Some(port) => format!("{}:{}", authority.host(), port),
    None => authority.host().to_string(),
  };
  // Proxied requests use absolute-form URIs; direct requests use
  // origin-form (path and query only).
  let req_uri = if is_proxied {
    Uri::builder()
      .scheme(scheme)
      .authority(authority.clone())
      .path_and_query(path_and_query.clone())
      .build()?
  } else {
    Uri::builder()
      .path_and_query(path_and_query.clone())
      .build()?
  };
  let mut request = Request::builder().method(Method::GET).uri(req_uri);
  client.inject_common_headers(&mut request);
  request =
    populate_common_request_headers(request, protocols, headers, allow_host)?;
  // Only fill in Host ourselves when the caller didn't supply one.
  if let Some(headers) = request.headers_ref()
    && !headers.contains_key(HOST)
  {
    request = request.header(HOST, host);
  }
  request = request
    .header(UPGRADE, "websocket")
    .header(CONNECTION, "Upgrade")
    .header(SEC_WEBSOCKET_KEY, fastwebsockets::handshake::generate_key());
  let request = request
    .body(http_body_util::Empty::new())
    .map_err(HandshakeError::Http)?;
  handshake_connection(request, connection).await
}
/// Establish a websocket over HTTP/2 using an extended CONNECT request
/// carrying the `websocket` protocol extension. Requires the connection to
/// have negotiated h2 via ALPN; fails with [`HandshakeError::NoH2Alpn`]
/// otherwise.
// NOTE: removed a stale `#[allow(clippy::too_many_arguments)]` — this
// function takes five parameters, below the lint's default threshold.
async fn handshake_http2(
  client: deno_fetch::Client,
  allow_host: bool,
  uri: Uri,
  protocols: &str,
  headers: &Option<Vec<(ByteString, ByteString)>>,
) -> Result<(WebSocket<WebSocketStream>, http::HeaderMap), HandshakeError> {
  let connection = client.connect(uri.clone(), SocketUse::Http2Only).await?;
  if !connection.connected().is_negotiated_h2() {
    return Err(HandshakeError::NoH2Alpn);
  }
  let h2 = h2::client::Builder::new();
  let (mut send, conn) = h2.handshake::<_, Bytes>(connection).await?;
  // Drive the HTTP/2 connection on a background task.
  spawn(conn);
  let mut request = Request::builder();
  request = request.method(Method::CONNECT);
  request = request.uri(uri);
  client.inject_common_headers(&mut request);
  request =
    populate_common_request_headers(request, protocols, headers, allow_host)?;
  request = request.extension(h2::ext::Protocol::from("websocket"));
  let (resp, send) = send.send_request(request.body(())?, false)?;
  let resp = resp.await?;
  if resp.status() != StatusCode::OK {
    return Err(HandshakeError::InvalidStatusCode(resp.status()));
  }
  let (http::response::Parts { headers, .. }, recv) = resp.into_parts();
  let mut stream = WebSocket::after_handshake(
    WebSocketStream::new(stream::WsStreamKind::H2(send, recv), None),
    Role::Client,
  );
  // We currently don't support vectored writes in the H2 streams
  stream.set_writev(false);
  // TODO(mmastrac): we should be able to use a zero masking key over HTTPS
  // stream.set_auto_apply_mask(false);
  Ok((stream, headers))
}
/// Drive the HTTP/1.1 upgrade handshake over `socket` and wrap the upgraded
/// connection as a client-role websocket, returning it with the response
/// headers.
async fn handshake_connection<
  S: AsyncRead + AsyncWrite + Send + Unpin + 'static,
>(
  request: Request<http_body_util::Empty<Bytes>>,
  socket: S,
) -> Result<(WebSocket<WebSocketStream>, http::HeaderMap), HandshakeError> {
  let (upgraded, response) =
    fastwebsockets::handshake::client(&LocalExecutor, request, socket).await?;
  let upgraded = upgraded.into_inner();
  let stream =
    WebSocketStream::new(stream::WsStreamKind::Upgraded(upgraded), None);
  let stream = WebSocket::after_handshake(stream, Role::Client);
  Ok((stream, response.into_parts().0.headers))
}
/// Headers common to both http/1.1 and h2 requests.
fn populate_common_request_headers(
  mut request: http::request::Builder,
  protocols: &str,
  headers: &Option<Vec<(ByteString, ByteString)>>,
  allow_host: bool,
) -> Result<http::request::Builder, HandshakeError> {
  request = request.header(SEC_WEBSOCKET_VERSION, "13");
  if !protocols.is_empty() {
    request = request.header(SEC_WEBSOCKET_PROTOCOL, protocols);
  }
  let Some(headers) = headers else {
    return Ok(request);
  };
  for (key, value) in headers {
    let name = HeaderName::from_bytes(key)?;
    let value = HeaderValue::from_bytes(value)?;
    // Skip handshake-controlled headers, plus Host unless the caller is
    // explicitly allowed to override it.
    let disallowed = matches!(
      name,
      http::header::SEC_WEBSOCKET_ACCEPT
        | http::header::SEC_WEBSOCKET_EXTENSIONS
        | http::header::SEC_WEBSOCKET_KEY
        | http::header::SEC_WEBSOCKET_PROTOCOL
        | http::header::SEC_WEBSOCKET_VERSION
        | http::header::UPGRADE
        | http::header::CONNECTION
    ) || (!allow_host && name == http::header::HOST);
    if !disallowed {
      request = request.header(name, value);
    }
  }
  Ok(request)
}
#[op2(async, stack_trace)]
#[serde]
pub async fn op_ws_create(
  state: Rc<RefCell<OpState>>,
  #[string] api_name: String,
  #[string] url: String,
  #[string] protocols: String,
  #[smi] cancel_handle: Option<ResourceId>,
  #[serde] headers: Option<Vec<(ByteString, ByteString)>>,
  #[smi] client_rid: Option<u32>,
) -> Result<CreateResponse, WebsocketError> {
  let (client, allow_host) = {
    let mut s = state.borrow_mut();
    // The sync op already validated this permission; a failure here is a
    // bug, hence the expect.
    s.borrow_mut::<PermissionsContainer>()
      .check_net_url(
        &url::Url::parse(&url).map_err(WebsocketError::Url)?,
        &api_name,
      )
      .expect(
        "Permission check should have been done in op_ws_check_permission",
      );
    // Use the caller-supplied HTTP client when given, else the default.
    if let Some(rid) = client_rid {
      let r = s.resource_table.get::<HttpClientResource>(rid)?;
      (r.client.clone(), r.allow_host)
    } else {
      (get_or_create_client_from_state(&mut s)?, false)
    }
  };
  let cancel_resource = if let Some(cancel_rid) = cancel_handle {
    let r = state
      .borrow_mut()
      .resource_table
      .get::<WsCancelResource>(cancel_rid)?;
    Some(r.0.clone())
  } else {
    None
  };
  let uri: Uri = url.parse()?;
  let handshake =
    handshake_websocket(client, allow_host, uri, &protocols, headers)
      .map_err(WebsocketError::ConnectionFailed);
  let (stream, response) = match cancel_resource {
    Some(rc) => handshake.try_or_cancel(rc).await?,
    None => handshake.await?,
  };
  // The cancel handle is single-use; drop it now that the handshake is done.
  if let Some(cancel_rid) = cancel_handle
    && let Ok(res) = state.borrow_mut().resource_table.take_any(cancel_rid)
  {
    res.close();
  }
  let mut state = state.borrow_mut();
  let rid = state.resource_table.add(ServerWebSocket::new(stream));
  let protocol = match response.get("Sec-WebSocket-Protocol") {
    Some(header) => header.to_str().unwrap(),
    None => "",
  };
  let extensions = response
    .get_all("Sec-WebSocket-Extensions")
    .iter()
    .map(|header| header.to_str().unwrap())
    .collect::<String>();
  Ok(CreateResponse {
    rid,
    protocol: protocol.to_string(),
    extensions,
  })
}
/// Message kind discriminants shared with the JS side. `ClosedDefault`
/// reuses the websocket close code 1005.
#[repr(u16)]
pub enum MessageKind {
  Text = 0,
  Binary = 1,
  Pong = 2,
  Error = 3,
  ClosedDefault = 1005,
}
/// To avoid locks, we keep as much as we can inside of [`Cell`]s.
pub struct ServerWebSocket {
  // Bytes queued by in-flight async writes (see `send_binary`).
  buffered: Cell<usize>,
  // Most recent write error message, if any; maintained by `set_error`.
  error: Cell<Option<String>>,
  // Mirrors whether `error` holds a value, checkable without taking it.
  errored: Cell<bool>,
  closed: Cell<bool>,
  buffer: Cell<Option<Vec<u8>>>,
  string: Cell<Option<String>>,
  // Read/write halves are independently lockable so a pending read never
  // blocks writes (and vice versa).
  ws_read: AsyncRefCell<FragmentCollectorRead<ReadHalf<WebSocketStream>>>,
  ws_write: AsyncRefCell<WebSocketWrite<WriteHalf<WebSocketStream>>>,
}
impl ServerWebSocket {
  fn new(ws: WebSocket<WebSocketStream>) -> Self {
    // Split so reads and writes can proceed independently.
    let (ws_read, ws_write) = ws.split(tokio::io::split);
    Self {
      buffered: Cell::new(0),
      error: Cell::new(None),
      errored: Cell::new(false),
      closed: Cell::new(false),
      buffer: Cell::new(None),
      string: Cell::new(None),
      ws_read: AsyncRefCell::new(FragmentCollectorRead::new(ws_read)),
      ws_write: AsyncRefCell::new(ws_write),
    }
  }
  /// Record (or clear) the most recent error; `errored` mirrors whether an
  /// error message is present.
  fn set_error(&self, error: Option<String>) {
    if let Some(error) = error {
      self.error.set(Some(error));
      self.errored.set(true);
    } else {
      self.error.set(None);
      self.errored.set(false);
    }
  }
  /// Reserve a lock, but don't wait on it. This gets us our place in line.
  fn reserve_lock(
    self: &Rc<Self>,
  ) -> AsyncMutFuture<WebSocketWrite<WriteHalf<WebSocketStream>>> {
    RcRef::map(self, |r| &r.ws_write).borrow_mut()
  }
  #[inline]
  async fn write_frame(
    self: &Rc<Self>,
    lock: AsyncMutFuture<WebSocketWrite<WriteHalf<WebSocketStream>>>,
    frame: Frame<'_>,
  ) -> Result<(), WebsocketError> {
    let mut ws = lock.await;
    // Writing to an already-closed socket is a silent no-op.
    if ws.is_closed() {
      return Ok(());
    }
    ws.write_frame(frame).await?;
    Ok(())
  }
}
impl Resource for ServerWebSocket {
  fn name(&self) -> Cow<'_, str> {
    // Name displayed for this entry in the resource table.
    "serverWebSocket".into()
  }
}
/// Wraps an already-upgraded network connection in a server-role WebSocket
/// (with auto close/pong handling enabled) and registers it in the resource
/// table, returning the new resource id.
pub fn ws_create_server_stream(
  state: &mut OpState,
  transport: NetworkStream,
  read_buf: Bytes,
) -> ResourceId {
  let stream = WebSocketStream::new(
    stream::WsStreamKind::Network(transport),
    Some(read_buf),
  );
  let mut socket = WebSocket::after_handshake(stream, Role::Server);
  socket.set_writev(*USE_WRITEV);
  socket.set_auto_close(true);
  socket.set_auto_pong(true);
  state.resource_table.add(ServerWebSocket::new(socket))
}
/// Shared implementation for the sync binary-send ops: copies `data`, adds
/// its length to the buffered-amount counter, and queues the frame write on
/// a background task. On failure the error is stashed for
/// `op_ws_get_error`; on success the counter is decremented again.
fn send_binary(state: &mut OpState, rid: ResourceId, data: &[u8]) {
  let resource = state.resource_table.get::<ServerWebSocket>(rid).unwrap();
  let payload = data.to_vec();
  let len = payload.len();
  resource.buffered.set(resource.buffered.get() + len);
  let lock = resource.reserve_lock();
  deno_core::unsync::spawn(async move {
    let frame = Frame::new(true, OpCode::Binary, None, payload.into());
    if let Err(err) = resource.write_frame(lock, frame).await {
      resource.set_error(Some(err.to_string()));
    } else {
      resource.buffered.set(resource.buffered.get() - len);
    }
  });
}
// Sync, fire-and-forget binary send (any buffer type); backpressure is
// surfaced via `op_ws_get_buffered_amount`.
#[op2]
pub fn op_ws_send_binary(
  state: &mut OpState,
  #[smi] rid: ResourceId,
  #[anybuffer] data: &[u8],
) {
  send_binary(state, rid, data)
}
// Fast-path variant of `op_ws_send_binary` restricted to ArrayBuffer input.
#[op2(fast)]
pub fn op_ws_send_binary_ab(
  state: &mut OpState,
  #[smi] rid: ResourceId,
  #[arraybuffer] data: &[u8],
) {
  send_binary(state, rid, data)
}
/// Sync, fire-and-forget text send. Mirrors `send_binary`: the byte length
/// is tracked in the buffered-amount counter until the frame is flushed,
/// and write errors are stashed for `op_ws_get_error`.
#[op2(fast)]
pub fn op_ws_send_text(
  state: &mut OpState,
  #[smi] rid: ResourceId,
  #[string] data: String,
) {
  let resource = state.resource_table.get::<ServerWebSocket>(rid).unwrap();
  let len = data.len();
  resource.buffered.set(resource.buffered.get() + len);
  let lock = resource.reserve_lock();
  deno_core::unsync::spawn(async move {
    let frame = Frame::new(true, OpCode::Text, None, data.into_bytes().into());
    if let Err(err) = resource.write_frame(lock, frame).await {
      resource.set_error(Some(err.to_string()));
    } else {
      resource.buffered.set(resource.buffered.get() - len);
    }
  });
}
/// Async version of send. Does not update buffered amount as we rely on the socket itself for backpressure.
#[op2(async)]
pub async fn op_ws_send_binary_async(
  state: Rc<RefCell<OpState>>,
  #[smi] rid: ResourceId,
  #[buffer] data: JsBuffer,
) -> Result<(), WebsocketError> {
  let resource = state
    .borrow_mut()
    .resource_table
    .get::<ServerWebSocket>(rid)?;
  let lock = resource.reserve_lock();
  let frame = Frame::new(true, OpCode::Binary, None, data.to_vec().into());
  resource.write_frame(lock, frame).await
}
/// Async version of send. Does not update buffered amount as we rely on the socket itself for backpressure.
#[op2(async)]
pub async fn op_ws_send_text_async(
  state: Rc<RefCell<OpState>>,
  #[smi] rid: ResourceId,
  #[string] data: String,
) -> Result<(), WebsocketError> {
  let resource = state
    .borrow_mut()
    .resource_table
    .get::<ServerWebSocket>(rid)?;
  let lock = resource.reserve_lock();
  let frame = Frame::new(true, OpCode::Text, None, data.into_bytes().into());
  resource.write_frame(lock, frame).await
}
// Shared zero-length payload for control frames (ping / bodyless close).
const EMPTY_PAYLOAD: &[u8] = &[];
/// Returns the number of bytes queued by the sync send ops that have not
/// yet been written to the socket (backs `WebSocket.bufferedAmount`).
#[op2(fast)]
#[smi]
pub fn op_ws_get_buffered_amount(
  state: &mut OpState,
  #[smi] rid: ResourceId,
) -> u32 {
  let resource = state.resource_table.get::<ServerWebSocket>(rid).unwrap();
  resource.buffered.get() as u32
}
/// Sends a ping frame with an empty payload and resolves once written.
#[op2(async)]
pub async fn op_ws_send_ping(
  state: Rc<RefCell<OpState>>,
  #[smi] rid: ResourceId,
) -> Result<(), WebsocketError> {
  let resource = state
    .borrow_mut()
    .resource_table
    .get::<ServerWebSocket>(rid)?;
  let lock = resource.reserve_lock();
  let ping = Frame::new(true, OpCode::Ping, None, EMPTY_PAYLOAD.into());
  resource.write_frame(lock, ping).await
}
#[op2(async(lazy))]
pub async fn op_ws_close(
state: Rc<RefCell<OpState>>,
#[smi] rid: ResourceId,
#[smi] code: Option<u16>,
#[string] reason: Option<String>,
) -> Result<(), WebsocketError> {
let Ok(resource) = state
.borrow_mut()
.resource_table
.get::<ServerWebSocket>(rid)
else {
return Ok(());
};
const EMPTY_PAYLOAD: &[u8] = &[];
let frame = reason
.map(|reason| Frame::close(code.unwrap_or(1005), reason.as_bytes()))
.unwrap_or_else(|| match code {
Some(code) => Frame::close(code, EMPTY_PAYLOAD),
_ => Frame::close_raw(EMPTY_PAYLOAD.into()),
});
resource.closed.set(true);
let lock = resource.reserve_lock();
resource.write_frame(lock, frame).await
}
/// Takes (and clears) the most recently received binary payload, or `None`
/// when the resource is gone or no binary message is pending.
#[op2]
#[serde]
pub fn op_ws_get_buffer(
  state: &mut OpState,
  #[smi] rid: ResourceId,
) -> Option<ToJsBuffer> {
  let resource = state.resource_table.get::<ServerWebSocket>(rid).ok()?;
  let bytes = resource.buffer.take()?;
  Some(ToJsBuffer::from(bytes))
}
/// Takes (and clears) the most recently received text payload, or `None`
/// when the resource is gone or no text message is pending.
#[op2]
#[string]
pub fn op_ws_get_buffer_as_string(
  state: &mut OpState,
  #[smi] rid: ResourceId,
) -> Option<String> {
  state
    .resource_table
    .get::<ServerWebSocket>(rid)
    .ok()
    .and_then(|resource| resource.string.take())
}
/// Takes the pending error message (clearing the errored flag), returning
/// an empty string when no error is stored and "Bad resource" when the
/// resource no longer exists.
#[op2]
#[string]
pub fn op_ws_get_error(state: &mut OpState, #[smi] rid: ResourceId) -> String {
  match state.resource_table.get::<ServerWebSocket>(rid) {
    Ok(resource) => {
      resource.errored.set(false);
      resource.error.take().unwrap_or_default()
    }
    Err(_) => "Bad resource".into(),
  }
}
/// Reads the next event from the socket and returns its `MessageKind`
/// discriminant (or a close code) to JavaScript. Payloads are stashed in
/// the resource's cells and fetched by the `op_ws_get_*` ops afterwards.
#[op2(async)]
pub async fn op_ws_next_event(
  state: Rc<RefCell<OpState>>,
  #[smi] rid: ResourceId,
) -> u16 {
  let Ok(resource) = state
    .borrow_mut()
    .resource_table
    .get::<ServerWebSocket>(rid)
  else {
    // op_ws_get_error will correctly handle a bad resource
    return MessageKind::Error as u16;
  };
  // If there's a pending error, this always returns error
  if resource.errored.get() {
    return MessageKind::Error as u16;
  }
  let mut ws = RcRef::map(&resource, |r| &r.ws_read).borrow_mut().await;
  // `sender` lets the frame reader write automatic replies (e.g. pongs)
  // through the shared write half.
  let writer = RcRef::map(&resource, |r| &r.ws_write);
  let mut sender = move |frame| {
    let writer = writer.clone();
    async move { writer.borrow_mut().await.write_frame(frame).await }
  };
  loop {
    let res = ws.read_frame(&mut sender).await;
    let val = match res {
      Ok(val) => val,
      Err(err) => {
        // No message was received, socket closed while we waited.
        // Report closed status to JavaScript.
        if resource.closed.get() {
          return MessageKind::ClosedDefault as u16;
        }
        resource.set_error(Some(err.to_string()));
        return MessageKind::Error as u16;
      }
    };
    // Continuation/Ping frames loop again; every other opcode produces the
    // discriminant this `break` yields.
    break match val.opcode {
      OpCode::Text => match String::from_utf8(val.payload.to_vec()) {
        Ok(s) => {
          resource.string.set(Some(s));
          MessageKind::Text as u16
        }
        Err(_) => {
          resource.set_error(Some("Invalid string data".into()));
          MessageKind::Error as u16
        }
      },
      OpCode::Binary => {
        resource.buffer.set(Some(val.payload.to_vec()));
        MessageKind::Binary as u16
      }
      OpCode::Close => {
        // Close reason is returned through error
        if val.payload.len() < 2 {
          resource.set_error(None);
          MessageKind::ClosedDefault as u16
        } else {
          // First two bytes are the big-endian close code; the rest is the
          // (possibly non-UTF-8) reason string.
          let close_code = CloseCode::from(u16::from_be_bytes([
            val.payload[0],
            val.payload[1],
          ]));
          let reason = String::from_utf8(val.payload[2..].to_vec()).ok();
          resource.set_error(reason);
          close_code.into()
        }
      }
      OpCode::Pong => MessageKind::Pong as u16,
      OpCode::Continuation | OpCode::Ping => {
        continue;
      }
    };
  }
}
// Registers the WebSocket ops and the ESM glue for the `deno_websocket`
// extension; requires the web and webidl extensions to be present.
deno_core::extension!(
  deno_websocket,
  deps = [deno_web, deno_webidl],
  ops = [
    op_ws_check_permission_and_cancel_handle,
    op_ws_create,
    op_ws_close,
    op_ws_next_event,
    op_ws_get_buffer,
    op_ws_get_buffer_as_string,
    op_ws_get_error,
    op_ws_send_binary,
    op_ws_send_binary_ab,
    op_ws_send_text,
    op_ws_send_binary_async,
    op_ws_send_text_async,
    op_ws_send_ping,
    op_ws_get_buffered_amount,
  ],
  esm = ["01_websocket.js", "02_websocketstream.js"],
);
// Needed so hyper can use non Send futures
#[derive(Clone)]
struct LocalExecutor;
impl<Fut> hyper::rt::Executor<Fut> for LocalExecutor
where
  Fut: Future + 'static,
  Fut::Output: 'static,
{
  fn execute(&self, fut: Fut) {
    // Spawns onto deno_core's current-thread task system, which is why the
    // `Send` bound can be dropped.
    deno_core::unsync::spawn(fut);
  }
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/node.rs | cli/node.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use deno_resolver::cjs::analyzer::DenoCjsCodeAnalyzer;
use deno_resolver::npm::DenoInNpmPackageChecker;
use node_resolver::DenoIsBuiltInNodeModuleChecker;
use node_resolver::analyze::CjsModuleExportAnalyzer;
use crate::npm::CliNpmResolver;
use crate::sys::CliSys;
/// CJS code analyzer specialized with the CLI's sys abstraction.
pub type CliCjsCodeAnalyzer = DenoCjsCodeAnalyzer<CliSys>;
/// Export analyzer for CommonJS modules, wired up with the CLI's code
/// analyzer, npm package checker, built-in-module checker, and resolver.
pub type CliCjsModuleExportAnalyzer = CjsModuleExportAnalyzer<
  CliCjsCodeAnalyzer,
  DenoInNpmPackageChecker,
  DenoIsBuiltInNodeModuleChecker,
  CliNpmResolver,
  CliSys,
>;
/// Node-compatible module resolver parameterized for the CLI.
pub type CliNodeResolver = deno_runtime::deno_node::NodeResolver<
  DenoInNpmPackageChecker,
  CliNpmResolver,
  CliSys,
>;
/// package.json resolver using the CLI's filesystem implementation.
pub type CliPackageJsonResolver = node_resolver::PackageJsonResolver<CliSys>;
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/module_loader.rs | cli/module_loader.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::borrow::Cow;
use std::cell::RefCell;
use std::collections::HashMap;
use std::collections::HashSet;
use std::future::Future;
use std::path::Path;
use std::path::PathBuf;
use std::pin::Pin;
use std::rc::Rc;
use std::str;
use std::sync::Arc;
use std::sync::atomic::AtomicU16;
use std::sync::atomic::Ordering;
use std::time::SystemTime;
use deno_ast::MediaType;
use deno_ast::ModuleKind;
use deno_cache_dir::file_fetcher::FetchLocalOptions;
use deno_cache_dir::file_fetcher::MemoryFiles as _;
use deno_core::FastString;
use deno_core::ModuleLoadOptions;
use deno_core::ModuleLoadReferrer;
use deno_core::ModuleLoader;
use deno_core::ModuleResolutionError;
use deno_core::ModuleSource;
use deno_core::ModuleSourceCode;
use deno_core::ModuleSpecifier;
use deno_core::ModuleType;
use deno_core::RequestedModuleType;
use deno_core::SourceCodeCacheInfo;
use deno_core::anyhow::Context as _;
use deno_core::anyhow::bail;
use deno_core::error::AnyError;
use deno_core::error::ModuleLoaderError;
use deno_core::futures::StreamExt;
use deno_core::futures::future::FutureExt;
use deno_core::futures::io::BufReader;
use deno_core::futures::stream::FuturesOrdered;
use deno_core::parking_lot::Mutex;
use deno_core::resolve_url;
use deno_core::serde_json;
use deno_error::JsErrorBox;
use deno_error::JsErrorClass;
use deno_graph::GraphKind;
use deno_graph::ModuleGraph;
use deno_graph::WalkOptions;
use deno_lib::loader::as_deno_resolver_requested_module_type;
use deno_lib::loader::loaded_module_source_to_module_source_code;
use deno_lib::loader::module_type_from_media_and_requested_type;
use deno_lib::npm::NpmRegistryReadPermissionChecker;
use deno_lib::util::hash::FastInsecureHasher;
use deno_lib::worker::CreateModuleLoaderResult;
use deno_lib::worker::ModuleLoaderFactory;
use deno_npm_installer::resolution::HasJsExecutionStartedFlagRc;
use deno_path_util::PathToUrlError;
use deno_path_util::resolve_url_or_path;
use deno_resolver::cache::ParsedSourceCache;
use deno_resolver::file_fetcher::FetchOptions;
use deno_resolver::file_fetcher::FetchPermissionsOptionRef;
use deno_resolver::graph::ResolveWithGraphErrorKind;
use deno_resolver::graph::ResolveWithGraphOptions;
use deno_resolver::graph::format_range_with_colors;
use deno_resolver::loader::LoadCodeSourceError;
use deno_resolver::loader::LoadPreparedModuleError;
use deno_resolver::loader::LoadedModule;
use deno_resolver::loader::LoadedModuleOrAsset;
use deno_resolver::loader::MemoryFiles;
use deno_resolver::loader::StrippingTypesNodeModulesError;
use deno_resolver::npm::DenoInNpmPackageChecker;
use deno_runtime::code_cache;
use deno_runtime::deno_node::NodeRequireLoader;
use deno_runtime::deno_node::create_host_defined_options;
use deno_runtime::deno_node::ops::require::UnableToGetCwdError;
use deno_runtime::deno_permissions::CheckSpecifierKind;
use deno_runtime::deno_permissions::PermissionsContainer;
use deno_semver::npm::NpmPackageReqReference;
use eszip::EszipV2;
use node_resolver::InNpmPackageChecker;
use node_resolver::NodeResolutionKind;
use node_resolver::ResolutionMode;
use node_resolver::errors::PackageJsonLoadError;
use sys_traits::FsMetadata;
use sys_traits::FsMetadataValue;
use sys_traits::FsRead;
use tokio_util::compat::TokioAsyncReadCompatExt;
use crate::args::CliLockfile;
use crate::args::CliOptions;
use crate::args::DenoSubcommand;
use crate::args::TsTypeLib;
use crate::args::jsr_url;
use crate::cache::CodeCache;
use crate::file_fetcher::CliFileFetcher;
use crate::graph_container::MainModuleGraphContainer;
use crate::graph_container::ModuleGraphContainer;
use crate::graph_container::ModuleGraphUpdatePermit;
use crate::graph_util::BuildGraphRequest;
use crate::graph_util::BuildGraphWithNpmOptions;
use crate::graph_util::ModuleGraphBuilder;
use crate::npm::CliNpmResolver;
use crate::resolver::CliCjsTracker;
use crate::resolver::CliResolver;
use crate::sys::CliSys;
use crate::type_checker::CheckError;
use crate::type_checker::CheckOptions;
use crate::type_checker::TypeChecker;
use crate::util::progress_bar::ProgressBar;
use crate::util::text_encoding::code_without_source_map;
use crate::util::text_encoding::source_map_from_code;
/// Emitter (transpile/emit pipeline) specialized for the CLI.
pub type CliEmitter =
  deno_resolver::emit::Emitter<DenoInNpmPackageChecker, CliSys>;
/// deno_resolver module loader parameterized with the CLI sys.
pub type CliDenoResolverModuleLoader =
  deno_resolver::loader::ModuleLoader<CliSys>;
/// Errors surfaced while preparing a module load: graph building with npm
/// resolution, type checking, or writing the lockfile.
#[derive(Debug, thiserror::Error, deno_error::JsError)]
pub enum PrepareModuleLoadError {
  #[class(inherit)]
  #[error(transparent)]
  BuildGraphWithNpmResolution(
    #[from] crate::graph_util::BuildGraphWithNpmResolutionError,
  ),
  #[class(inherit)]
  #[error(transparent)]
  Check(#[from] CheckError),
  #[class(inherit)]
  #[error(transparent)]
  LockfileWrite(#[from] deno_resolver::lockfile::LockfileWriteError),
  #[class(inherit)]
  #[error(transparent)]
  Other(#[from] JsErrorBox),
}
/// Builds the module graph (including npm resolution), optionally type
/// checks it, and persists the lockfile before modules are loaded into a
/// `JsRuntime`.
pub struct ModuleLoadPreparer {
  options: Arc<CliOptions>,
  lockfile: Option<Arc<CliLockfile>>,
  module_graph_builder: Arc<ModuleGraphBuilder>,
  progress_bar: ProgressBar,
  type_checker: Arc<TypeChecker>,
}
/// Per-call options for `ModuleLoadPreparer::prepare_module_load`.
pub struct PrepareModuleLoadOptions<'a> {
  /// Whether this load originates from a dynamic import.
  pub is_dynamic: bool,
  /// TypeScript lib to type check against (window vs worker).
  pub lib: TsTypeLib,
  /// Permissions used by the graph loader when fetching modules.
  pub permissions: PermissionsContainer,
  /// When set (e.g. `--ext`), forces a content-type override on the roots.
  pub ext_overwrite: Option<&'a String>,
  /// Whether unknown media types on the roots are tolerated.
  pub allow_unknown_media_types: bool,
  /// Whether to skip validating the graph roots. This is useful
  /// for when you want to defer doing this until later (ex. get the
  /// graph back, reload some specifiers in it, then do graph validation).
  pub skip_graph_roots_validation: bool,
}
impl ModuleLoadPreparer {
  /// Plain constructor; all collaborators are injected.
  #[allow(clippy::too_many_arguments)]
  pub fn new(
    options: Arc<CliOptions>,
    lockfile: Option<Arc<CliLockfile>>,
    module_graph_builder: Arc<ModuleGraphBuilder>,
    progress_bar: ProgressBar,
    type_checker: Arc<TypeChecker>,
  ) -> Self {
    Self {
      options,
      lockfile,
      module_graph_builder,
      progress_bar,
      type_checker,
    }
  }
  /// This method must be called for a module or a static importer of that
  /// module before attempting to `load()` it from a `JsRuntime`. It will
  /// populate the graph data in memory with the necessary source code, write
  /// emits where necessary or report any module graph / type checking errors.
  pub async fn prepare_module_load(
    &self,
    graph: &mut ModuleGraph,
    roots: &[ModuleSpecifier],
    options: PrepareModuleLoadOptions<'_>,
  ) -> Result<(), PrepareModuleLoadError> {
    log::debug!("Preparing module load.");
    let PrepareModuleLoadOptions {
      is_dynamic,
      lib,
      permissions,
      ext_overwrite,
      allow_unknown_media_types,
      skip_graph_roots_validation,
    } = options;
    let _pb_clear_guard = self.progress_bar.deferred_keep_initialize_alive();
    let mut loader = self
      .module_graph_builder
      .create_graph_loader_with_permissions(permissions);
    // `--ext` support: force the matching content-type header on each root
    // so the graph treats it as the requested media type.
    if let Some(ext) = ext_overwrite {
      let maybe_content_type = match ext.as_str() {
        "ts" => Some("text/typescript"),
        "tsx" => Some("text/tsx"),
        "js" => Some("text/javascript"),
        "jsx" => Some("text/jsx"),
        _ => None,
      };
      if let Some(content_type) = maybe_content_type {
        for root in roots {
          loader.insert_file_header_override(
            root.clone(),
            std::collections::HashMap::from([(
              "content-type".to_string(),
              content_type.to_string(),
            )]),
          );
        }
      }
    }
    log::debug!("Building module graph.");
    // A non-empty roots list means a previous prepare already ran (and type
    // checked) on this graph.
    let has_type_checked = !graph.roots.is_empty();
    self
      .module_graph_builder
      .build_graph_with_npm_resolution(
        graph,
        BuildGraphWithNpmOptions {
          is_dynamic,
          request: BuildGraphRequest::Roots(roots.to_vec()),
          loader: Some(&mut loader),
          npm_caching: self.options.default_npm_caching_strategy(),
        },
      )
      .await?;
    if !skip_graph_roots_validation {
      self.graph_roots_valid(graph, roots, allow_unknown_media_types, false)?;
    }
    drop(_pb_clear_guard);
    // type check if necessary
    if self.options.type_check_mode().is_true() && !has_type_checked {
      self.type_checker.check(
        // todo(perf): since this is only done the first time the graph is
        // created, we could avoid the clone of the graph here by providing
        // the actual graph on the first run and then getting the Arc<ModuleGraph>
        // back from the return value.
        graph.clone(),
        CheckOptions {
          build_fast_check_graph: true,
          lib,
          reload: self.options.reload_flag(),
          type_check_mode: self.options.type_check_mode(),
        },
      )?;
    }
    // write the lockfile if there is one and do so after type checking
    // as type checking might discover `@types/node`
    if let Some(lockfile) = &self.lockfile {
      lockfile.write_if_changed()?;
    }
    log::debug!("Prepared module load.");
    Ok(())
  }
  /// Rebuilds the given specifiers in the graph (used when files changed on
  /// disk between dynamic imports), then persists the lockfile.
  pub async fn reload_specifiers(
    &self,
    graph: &mut ModuleGraph,
    specifiers: Vec<ModuleSpecifier>,
    is_dynamic: bool,
    permissions: PermissionsContainer,
  ) -> Result<(), PrepareModuleLoadError> {
    log::debug!(
      "Reloading modified files: {}",
      specifiers
        .iter()
        .map(|s| s.as_str())
        .collect::<Vec<_>>()
        .join(", ")
    );
    let _pb_clear_guard = self.progress_bar.deferred_keep_initialize_alive();
    let mut loader = self
      .module_graph_builder
      .create_graph_loader_with_permissions(permissions);
    self
      .module_graph_builder
      .build_graph_with_npm_resolution(
        graph,
        BuildGraphWithNpmOptions {
          is_dynamic,
          request: BuildGraphRequest::Reload(specifiers),
          loader: Some(&mut loader),
          npm_caching: self.options.default_npm_caching_strategy(),
        },
      )
      .await?;
    if let Some(lockfile) = &self.lockfile {
      lockfile.write_if_changed()?;
    }
    Ok(())
  }
  /// Delegates root validation to the graph builder.
  pub fn graph_roots_valid(
    &self,
    graph: &ModuleGraph,
    roots: &[ModuleSpecifier],
    allow_unknown_media_types: bool,
    allow_unknown_jsr_exports: bool,
  ) -> Result<(), JsErrorBox> {
    self.module_graph_builder.graph_roots_valid(
      graph,
      roots,
      allow_unknown_media_types,
      allow_unknown_jsr_exports,
    )
  }
}
/// State shared (via `Arc`) between every module loader the factory
/// creates — for the main worker and all web workers alike.
struct SharedCliModuleLoaderState {
  graph_kind: GraphKind,
  lib_window: TsTypeLib,
  lib_worker: TsTypeLib,
  initial_cwd: PathBuf,
  is_inspecting: bool,
  // True for `deno repl` / `deno jupyter`; enables the fake-referrer hack
  // in `resolve_referrer`.
  is_repl: bool,
  cjs_tracker: Arc<CliCjsTracker>,
  code_cache: Option<Arc<CodeCache>>,
  emitter: Arc<CliEmitter>,
  file_fetcher: Arc<CliFileFetcher>,
  has_js_execution_started_flag: HasJsExecutionStartedFlagRc,
  in_npm_pkg_checker: DenoInNpmPackageChecker,
  main_module_graph_container: Arc<MainModuleGraphContainer>,
  memory_files: Arc<MemoryFiles>,
  module_load_preparer: Arc<ModuleLoadPreparer>,
  npm_registry_permission_checker:
    Arc<NpmRegistryReadPermissionChecker<CliSys>>,
  npm_resolver: CliNpmResolver,
  parsed_source_cache: Arc<ParsedSourceCache>,
  module_loader: Arc<CliDenoResolverModuleLoader>,
  resolver: Arc<CliResolver>,
  sys: CliSys,
  in_flight_loads_tracker: InFlightModuleLoadsTracker,
  maybe_eszip_loader: Option<Arc<EszipModuleLoader>>,
}
/// Counts in-flight module loads so the parsed-source cache can be freed
/// (after a delay) once the count drops back to zero.
struct InFlightModuleLoadsTracker {
  // Number of currently in-flight loads.
  loads_number: Arc<AtomicU16>,
  // Delay, in milliseconds, before the cleanup task frees the cache
  // (set to 10_000 by the factory).
  cleanup_task_timeout: u64,
  // Handle of the currently scheduled cleanup task, if any.
  cleanup_task_handle: Arc<Mutex<Option<tokio::task::JoinHandle<()>>>>,
}
impl InFlightModuleLoadsTracker {
  /// Marks a load as started and cancels any pending cache-cleanup task.
  pub fn increase(&self) {
    self.loads_number.fetch_add(1, Ordering::Relaxed);
    if let Some(pending_cleanup) = self.cleanup_task_handle.lock().take() {
      pending_cleanup.abort();
    }
  }

  /// Marks a load as finished; when it was the last one in flight,
  /// schedules a delayed task that frees the parsed-source cache.
  pub fn decrease(&self, parsed_source_cache: &Arc<ParsedSourceCache>) {
    let previous = self.loads_number.fetch_sub(1, Ordering::Relaxed);
    if previous != 1 {
      return;
    }
    let parsed_source_cache = parsed_source_cache.clone();
    let timeout = self.cleanup_task_timeout;
    let task_handle = tokio::spawn(async move {
      // We use a timeout here, which is defined to 10s,
      // so that in situations when dynamic imports are loaded after the startup,
      // we don't need to recompute and analyze multiple modules.
      tokio::time::sleep(std::time::Duration::from_millis(timeout)).await;
      parsed_source_cache.free_all();
    });
    if let Some(prev_task) =
      self.cleanup_task_handle.lock().replace(task_handle)
    {
      prev_task.abort();
    }
  }
}
/// Factory producing module loaders (main worker and web workers) that all
/// share the same `SharedCliModuleLoaderState`.
pub struct CliModuleLoaderFactory {
  shared: Arc<SharedCliModuleLoaderState>,
}
impl CliModuleLoaderFactory {
  /// Assembles the shared loader state from CLI options and all injected
  /// collaborators.
  #[allow(clippy::too_many_arguments)]
  pub fn new(
    options: &CliOptions,
    cjs_tracker: Arc<CliCjsTracker>,
    code_cache: Option<Arc<CodeCache>>,
    emitter: Arc<CliEmitter>,
    file_fetcher: Arc<CliFileFetcher>,
    has_js_execution_started_flag: HasJsExecutionStartedFlagRc,
    in_npm_pkg_checker: DenoInNpmPackageChecker,
    main_module_graph_container: Arc<MainModuleGraphContainer>,
    memory_files: Arc<MemoryFiles>,
    module_load_preparer: Arc<ModuleLoadPreparer>,
    npm_registry_permission_checker: Arc<
      NpmRegistryReadPermissionChecker<CliSys>,
    >,
    npm_resolver: CliNpmResolver,
    parsed_source_cache: Arc<ParsedSourceCache>,
    module_loader: Arc<CliDenoResolverModuleLoader>,
    resolver: Arc<CliResolver>,
    sys: CliSys,
    maybe_eszip_loader: Option<Arc<EszipModuleLoader>>,
  ) -> Self {
    Self {
      shared: Arc::new(SharedCliModuleLoaderState {
        graph_kind: options.graph_kind(),
        lib_window: options.ts_type_lib_window(),
        lib_worker: options.ts_type_lib_worker(),
        initial_cwd: options.initial_cwd().to_path_buf(),
        is_inspecting: options.is_inspecting(),
        is_repl: matches!(
          options.sub_command(),
          DenoSubcommand::Repl(_) | DenoSubcommand::Jupyter(_)
        ),
        cjs_tracker,
        code_cache,
        emitter,
        file_fetcher,
        has_js_execution_started_flag,
        in_npm_pkg_checker,
        main_module_graph_container,
        memory_files,
        module_load_preparer,
        npm_registry_permission_checker,
        npm_resolver,
        parsed_source_cache,
        module_loader,
        resolver,
        sys,
        in_flight_loads_tracker: InFlightModuleLoadsTracker {
          loads_number: Arc::new(AtomicU16::new(0)),
          cleanup_task_timeout: 10_000,
          cleanup_task_handle: Arc::new(Mutex::new(None)),
        },
        maybe_eszip_loader,
      }),
    }
  }
  /// Builds a module loader / node require loader pair over the given graph
  /// container, TS lib, and permission pair.
  fn create_with_lib<TGraphContainer: ModuleGraphContainer>(
    &self,
    graph_container: TGraphContainer,
    lib: TsTypeLib,
    is_worker: bool,
    parent_permissions: PermissionsContainer,
    permissions: PermissionsContainer,
  ) -> CreateModuleLoaderResult {
    let module_loader =
      Rc::new(CliModuleLoader(Rc::new(CliModuleLoaderInner {
        lib,
        is_worker,
        parent_permissions,
        permissions,
        graph_container: graph_container.clone(),
        shared: self.shared.clone(),
        loaded_files: Default::default(),
      })));
    let node_require_loader = Rc::new(CliNodeRequireLoader {
      cjs_tracker: self.shared.cjs_tracker.clone(),
      emitter: self.shared.emitter.clone(),
      npm_resolver: self.shared.npm_resolver.clone(),
      sys: self.shared.sys.clone(),
      graph_container,
      in_npm_pkg_checker: self.shared.in_npm_pkg_checker.clone(),
      memory_files: self.shared.memory_files.clone(),
      npm_registry_permission_checker: self
        .shared
        .npm_registry_permission_checker
        .clone(),
    });
    CreateModuleLoaderResult {
      module_loader,
      node_require_loader,
    }
  }
}
impl ModuleLoaderFactory for CliModuleLoaderFactory {
  /// Main-worker loader: shares the main module graph container and uses
  /// the window TS lib; root permissions serve as both parent and own.
  fn create_for_main(
    &self,
    root_permissions: PermissionsContainer,
  ) -> CreateModuleLoaderResult {
    self.create_with_lib(
      (*self.shared.main_module_graph_container).clone(),
      self.shared.lib_window,
      /* is worker */ false,
      root_permissions.clone(),
      root_permissions,
    )
  }
  /// Web-worker loader: gets its own graph and the worker TS lib.
  fn create_for_worker(
    &self,
    parent_permissions: PermissionsContainer,
    permissions: PermissionsContainer,
  ) -> CreateModuleLoaderResult {
    self.create_with_lib(
      // create a fresh module graph for the worker
      WorkerModuleGraphContainer::new(Arc::new(ModuleGraph::new(
        self.shared.graph_kind,
      ))),
      self.shared.lib_worker,
      /* is worker */ true,
      parent_permissions,
      permissions,
    )
  }
}
/// A resolved module body: the code, the URL it was ultimately found at
/// (after redirects), and the module type to report to the runtime.
struct ModuleCodeStringSource {
  pub code: ModuleSourceCode,
  pub found_url: ModuleSpecifier,
  pub module_type: ModuleType,
}
/// Per-worker module loader state; shared pieces live in
/// `SharedCliModuleLoaderState`.
struct CliModuleLoaderInner<TGraphContainer: ModuleGraphContainer> {
  // TS lib used when type checking loads prepared through this loader.
  lib: TsTypeLib,
  is_worker: bool,
  /// The initial set of permissions used to resolve the static imports in the
  /// worker. These are "allow all" for main worker, and parent thread
  /// permissions for Web Worker.
  parent_permissions: PermissionsContainer,
  permissions: PermissionsContainer,
  shared: Arc<SharedCliModuleLoaderState>,
  graph_container: TGraphContainer,
  // file: specifiers already loaded once; used to skip mtime re-checks when
  // deciding what to reload for a dynamic import.
  loaded_files: RefCell<HashSet<ModuleSpecifier>>,
}
/// Errors from turning a raw referrer string into a `ModuleSpecifier`.
#[derive(Debug, thiserror::Error, deno_error::JsError)]
pub enum ResolveReferrerError {
  #[class(inherit)]
  #[error(transparent)]
  UnableToGetCwd(#[from] UnableToGetCwdError),
  #[class(inherit)]
  #[error(transparent)]
  PathToUrl(#[from] PathToUrlError),
  #[class(inherit)]
  #[error(transparent)]
  ModuleResolution(#[from] ModuleResolutionError),
}
/// Errors that can occur while loading a module's code source.
#[derive(Debug, thiserror::Error, deno_error::JsError)]
pub enum CliModuleLoaderError {
  #[class(inherit)]
  #[error(transparent)]
  Fetch(#[from] deno_resolver::file_fetcher::FetchError),
  #[class(inherit)]
  #[error(transparent)]
  LoadCodeSource(#[from] LoadCodeSourceError),
  #[class(inherit)]
  #[error(transparent)]
  LoadPreparedModule(#[from] LoadPreparedModuleError),
  #[class(inherit)]
  #[error(transparent)]
  PathToUrl(#[from] PathToUrlError),
  #[class(inherit)]
  #[error(transparent)]
  ResolveNpmReqRef(#[from] deno_resolver::npm::ResolveNpmReqRefError),
  #[class(inherit)]
  #[error(transparent)]
  ResolveReferrer(#[from] ResolveReferrerError),
}
impl<TGraphContainer: ModuleGraphContainer>
CliModuleLoaderInner<TGraphContainer>
{
  /// Loads a module's source, optionally strips its source map, and attaches
  /// V8 code-cache info for plain JavaScript modules.
  async fn load_inner(
    &self,
    specifier: &ModuleSpecifier,
    maybe_referrer: Option<&ModuleSpecifier>,
    requested_module_type: &RequestedModuleType,
  ) -> Result<ModuleSource, ModuleLoaderError> {
    let code_source = self
      .load_code_source(specifier, maybe_referrer, requested_module_type)
      .await
      .map_err(JsErrorBox::from_err)?;
    let code = if self.shared.is_inspecting
      || code_source.module_type == ModuleType::Wasm
    {
      // we need the code with the source map in order for
      // it to work with --inspect or --inspect-brk
      code_source.code
    } else {
      // v8 is slower when source maps are present, so we strip them
      code_without_source_map(code_source.code)
    };
    let code_cache = if code_source.module_type == ModuleType::JavaScript {
      self.shared.code_cache.as_ref().map(|cache| {
        let code_hash = FastInsecureHasher::new_deno_versioned()
          .write_hashable(&code)
          .finish();
        let data = cache
          .get_sync(specifier, code_cache::CodeCacheType::EsModule, code_hash)
          .map(Cow::from)
          .inspect(|_| {
            // This log line is also used by tests.
            log::debug!(
              "V8 code cache hit for ES module: {specifier}, [{code_hash:?}]"
            );
          });
        SourceCodeCacheInfo {
          hash: code_hash,
          data,
        }
      })
    } else {
      None
    };
    Ok(ModuleSource::new_with_redirect(
      code_source.module_type,
      code,
      specifier,
      &code_source.found_url,
      code_cache,
    ))
  }
  /// Resolves stray `npm:` specifiers to `file:` URLs, then loads the module
  /// (or external asset) through the shared deno_resolver module loader.
  async fn load_code_source(
    &self,
    specifier: &ModuleSpecifier,
    maybe_referrer: Option<&ModuleSpecifier>,
    requested_module_type: &RequestedModuleType,
  ) -> Result<ModuleCodeStringSource, CliModuleLoaderError> {
    // this loader maintains npm specifiers in dynamic imports when resolving
    // so that they can be properly preloaded, but now we might receive them
    // here, so we need to actually resolve them to a file: specifier here
    let specifier = if let Ok(reference) =
      NpmPackageReqReference::from_specifier(specifier)
    {
      let referrer = match maybe_referrer {
        // if we're here, it means it was importing from a dynamic import
        // and so there will be a referrer
        Some(r) => Cow::Borrowed(r),
        // but the repl may also end up here and it won't have
        // a referrer so create a referrer for it here
        None => Cow::Owned(self.resolve_referrer("")?),
      };
      Cow::Owned(
        self
          .shared
          .resolver
          .resolve_non_workspace_npm_req_ref_to_file(
            &reference,
            &referrer,
            ResolutionMode::Import,
            NodeResolutionKind::Execution,
          )?
          .into_url()?,
      )
    } else {
      Cow::Borrowed(specifier)
    };
    let graph = self.graph_container.graph();
    let deno_resolver_requested_module_type =
      as_deno_resolver_requested_module_type(requested_module_type);
    match self
      .shared
      .module_loader
      .load(
        &graph,
        &specifier,
        maybe_referrer,
        &deno_resolver_requested_module_type,
      )
      .await?
    {
      LoadedModuleOrAsset::Module(prepared_module) => {
        Ok(self.loaded_module_to_module_code_string_source(
          prepared_module,
          requested_module_type,
        ))
      }
      // Assets (e.g. text/bytes imports outside the graph) go through the
      // file fetcher with a permission check.
      LoadedModuleOrAsset::ExternalAsset {
        specifier,
        statically_analyzable,
      } => {
        Ok(
          self
            .load_asset(
              &specifier,
              if statically_analyzable {
                CheckSpecifierKind::Static
              } else {
                // force permissions
                CheckSpecifierKind::Dynamic
              },
              requested_module_type,
            )
            .await?,
        )
      }
    }
  }
fn loaded_module_to_module_code_string_source(
&self,
loaded_module: LoadedModule,
requested_module_type: &RequestedModuleType,
) -> ModuleCodeStringSource {
ModuleCodeStringSource {
code: loaded_module_source_to_module_source_code(loaded_module.source),
found_url: loaded_module.specifier.into_owned(),
module_type: module_type_from_media_and_requested_type(
loaded_module.media_type,
requested_module_type,
),
}
}
  /// Fetches a non-module asset through the file fetcher, applying the
  /// permission set that matches how the import was made.
  // NOTE(review): static imports check `self.permissions` while dynamic ones
  // check `self.parent_permissions` — confirm this pairing is intentional.
  async fn load_asset(
    &self,
    specifier: &ModuleSpecifier,
    check_specifier_kind: CheckSpecifierKind,
    requested_module_type: &RequestedModuleType,
  ) -> Result<ModuleCodeStringSource, deno_resolver::file_fetcher::FetchError>
  {
    let file = self
      .shared
      .file_fetcher
      .fetch_with_options(
        specifier,
        FetchPermissionsOptionRef::Restricted(
          match check_specifier_kind {
            CheckSpecifierKind::Static => &self.permissions,
            CheckSpecifierKind::Dynamic => &self.parent_permissions,
          },
          check_specifier_kind,
        ),
        FetchOptions {
          local: FetchLocalOptions {
            include_mtime: false,
          },
          maybe_auth: None,
          maybe_accept: None,
          maybe_cache_setting: Some(
            &deno_cache_dir::file_fetcher::CacheSetting::Use,
          ),
        },
      )
      .await?;
    // Requested type wins; with no explicit request, only wasm is special-
    // cased and everything else is treated as JavaScript.
    let module_type = match requested_module_type {
      RequestedModuleType::Text => ModuleType::Text,
      RequestedModuleType::Bytes => ModuleType::Bytes,
      RequestedModuleType::None => {
        match file.resolve_media_type_and_charset().0 {
          MediaType::Wasm => ModuleType::Wasm,
          _ => ModuleType::JavaScript,
        }
      }
      t => unreachable!("{t}"),
    };
    Ok(ModuleCodeStringSource {
      code: ModuleSourceCode::Bytes(file.source.into()),
      found_url: file.url,
      module_type,
    })
  }
  /// For a dynamic import, reloads any reachable `file:` modules that
  /// changed on disk since they were loaded. Returns whether anything was
  /// actually reloaded.
  async fn maybe_reload_dynamic(
    &self,
    graph: &ModuleGraph,
    specifier: &ModuleSpecifier,
    permissions: &PermissionsContainer,
  ) -> Result<bool, PrepareModuleLoadError> {
    let specifiers_to_reload =
      self.check_specifiers_to_reload_for_dynamic_import(graph, specifier);
    if specifiers_to_reload.is_empty() {
      return Ok(false);
    }
    // Take an update permit so the shared graph is swapped atomically once
    // the reload completes.
    let mut graph_permit = self.graph_container.acquire_update_permit().await;
    let graph = graph_permit.graph_mut();
    self
      .shared
      .module_load_preparer
      .reload_specifiers(
        graph,
        specifiers_to_reload,
        /* is dynamic */ true,
        permissions.clone(),
      )
      .await?;
    graph_permit.commit();
    Ok(true)
  }
  /// Walks the (non-dynamic, code-only) subgraph rooted at `specifier` and
  /// collects `file:` modules whose on-disk mtime is newer than what the
  /// graph recorded. Already-loaded files are skipped entirely.
  fn check_specifiers_to_reload_for_dynamic_import(
    &self,
    graph: &ModuleGraph,
    specifier: &ModuleSpecifier,
  ) -> Vec<ModuleSpecifier> {
    let mut specifiers_to_reload = Vec::new();
    // `while let` (not `for`) because we call skip_previous_dependencies()
    // on the iterator inside the loop body.
    let mut module_iter = graph.walk(
      std::iter::once(specifier),
      WalkOptions {
        check_js: deno_graph::CheckJsOption::False,
        follow_dynamic: false,
        kind: GraphKind::CodeOnly,
        prefer_fast_check_graph: false,
      },
    );
    while let Some((specifier, module_entry)) = module_iter.next() {
      if specifier.scheme() != "file"
        || self.loaded_files.borrow().contains(specifier)
      {
        module_iter.skip_previous_dependencies(); // no need to analyze this module's dependencies
        continue;
      }
      let should_reload = match module_entry {
        deno_graph::ModuleEntryRef::Module(module) => {
          self.has_module_changed_on_file_system(specifier, module.mtime())
        }
        deno_graph::ModuleEntryRef::Err(err) => {
          if matches!(
            err.as_kind(),
            deno_graph::ModuleErrorKind::Missing { .. }
          ) {
            self.mtime_of_specifier(specifier).is_some() // it exists now
          } else {
            self.has_module_changed_on_file_system(specifier, err.mtime())
          }
        }
        deno_graph::ModuleEntryRef::Redirect(_) => false,
      };
      if should_reload {
        specifiers_to_reload.push(specifier.clone());
      }
    }
    // Remember the root so later dynamic imports of it skip the walk above.
    self.loaded_files.borrow_mut().insert(specifier.clone());
    specifiers_to_reload
  }
/// Returns `true` when the file backing `specifier` has a strictly newer
/// mtime than the `mtime` recorded at load time. Returns `false` when no
/// load-time mtime was recorded or the current mtime cannot be read.
fn has_module_changed_on_file_system(
  &self,
  specifier: &ModuleSpecifier,
  mtime: Option<SystemTime>,
) -> bool {
  match mtime {
    // Without a recorded mtime there is nothing to compare against.
    None => false,
    Some(loaded_mtime) => matches!(
      self.mtime_of_specifier(specifier),
      Some(current_mtime) if current_mtime > loaded_mtime
    ),
  }
}
/// Reads the current modification time of the file behind `specifier`.
/// Any failure (non-file URL, stat error, platform without mtime) maps to
/// `None`.
fn mtime_of_specifier(
  &self,
  specifier: &ModuleSpecifier,
) -> Option<SystemTime> {
  let path = deno_path_util::url_to_file_path(specifier).ok()?;
  // Stat without following symlinks so the link itself is what's tracked.
  let metadata = self.shared.sys.fs_symlink_metadata(&path).ok()?;
  metadata.modified().ok()
}
#[allow(clippy::result_large_err)]
/// Turns a raw referrer string into an absolute `ModuleSpecifier`.
fn resolve_referrer(
  &self,
  referrer: &str,
) -> Result<ModuleSpecifier, ResolveReferrerError> {
  // FIXME(bartlomieju): this is a hacky way to provide compatibility with REPL
  // and `Deno.core.evalContext` API. Ideally we should always have a referrer filled
  let referrer = match referrer {
    "" if self.shared.is_repl => "./$deno$repl.mts",
    other => other,
  };
  if deno_path_util::specifier_has_uri_scheme(referrer) {
    Ok(deno_core::resolve_url(referrer)?)
  } else if referrer == "." {
    // main module, use the initial cwd
    Ok(deno_path_util::resolve_path(referrer, &self.shared.initial_cwd)?)
  } else {
    // this cwd check is slow, so try to avoid it
    let cwd = std::env::current_dir().map_err(UnableToGetCwdError)?;
    Ok(deno_path_util::resolve_path(referrer, &cwd)?)
  }
}
#[allow(clippy::result_large_err)]
/// Shared resolution logic behind both `resolve` and `import_meta_resolve`.
///
/// `is_import_meta` distinguishes `import.meta.resolve(...)` calls, which
/// are more lenient: resolution errors that still carry a specifier are
/// returned as that specifier, and the JSR remote-import guard is skipped.
fn inner_resolve(
  &self,
  raw_specifier: &str,
  raw_referrer: &str,
  kind: deno_core::ResolutionKind,
  is_import_meta: bool,
) -> Result<ModuleSpecifier, ModuleLoaderError> {
  // Security guard: a module served from the JSR registry must not pull in
  // arbitrary remote (http/https) modules outside the registry.
  fn ensure_not_jsr_non_jsr_remote_import(
    specifier: &ModuleSpecifier,
    referrer: &ModuleSpecifier,
  ) -> Result<(), JsErrorBox> {
    if referrer.as_str().starts_with(jsr_url().as_str())
      && !specifier.as_str().starts_with(jsr_url().as_str())
      && matches!(specifier.scheme(), "http" | "https")
    {
      return Err(JsErrorBox::generic(format!(
        "Importing {} blocked. JSR packages cannot import non-JSR remote modules for security reasons.",
        specifier
      )));
    }
    Ok(())
  }
  let referrer = self
    .resolve_referrer(raw_referrer)
    .map_err(JsErrorBox::from_err)?;
  let graph = self.graph_container.graph();
  let result = self.shared.resolver.resolve_with_graph(
    graph.as_ref(),
    raw_specifier,
    &referrer,
    deno_graph::Position::zeroed(),
    ResolveWithGraphOptions {
      mode: ResolutionMode::Import,
      kind: NodeResolutionKind::Execution,
      // leave npm specifiers as-is for dynamic imports so that
      // the loader can properly install them if necessary
      maintain_npm_specifiers: matches!(
        kind,
        deno_core::ResolutionKind::DynamicImport
      ) && !is_import_meta,
    },
  );
  let specifier = match result {
    Ok(specifier) => specifier,
    Err(err) => {
      // For import.meta.resolve, fall back to whatever specifier the error
      // carries rather than failing the call.
      if let Some(specifier) = err
        .maybe_specifier()
        .filter(|_| is_import_meta)
        .and_then(|s| s.into_owned().into_url().ok())
      {
        specifier
      } else {
        match err.into_kind() {
          ResolveWithGraphErrorKind::Resolution(err) => {
            // todo(dsherret): why do we have a newline here? Document it.
            return Err(JsErrorBox::type_error(format!(
              "{}\n",
              err.to_string_with_range()
            )));
          }
          err => return Err(JsErrorBox::from_err(err)),
        }
      }
    }
  };
  // only verify this for an import and not import.meta.resolve
  if !is_import_meta {
    ensure_not_jsr_non_jsr_remote_import(&specifier, &referrer)?;
  }
  Ok(specifier)
}
}
#[derive(Clone)]
// todo(dsherret): this double Rc boxing is not ideal
/// Cheaply-cloneable handle around the module loader internals; `deno_core`
/// requires the loader handed to it to be `Clone`.
pub struct CliModuleLoader<TGraphContainer: ModuleGraphContainer>(
  Rc<CliModuleLoaderInner<TGraphContainer>>,
);
impl<TGraphContainer: ModuleGraphContainer> ModuleLoader
for CliModuleLoader<TGraphContainer>
{
/// Resolves `specifier` relative to `referrer` for a static or dynamic
/// import (not `import.meta.resolve` — see `import_meta_resolve`).
fn resolve(
  &self,
  specifier: &str,
  referrer: &str,
  kind: deno_core::ResolutionKind,
) -> Result<ModuleSpecifier, ModuleLoaderError> {
  self.0.inner_resolve(specifier, referrer, kind, false)
}
/// Resolves a specifier for `import.meta.resolve(...)`. Treated like a
/// dynamic import for resolution-kind purposes, with `is_import_meta`
/// enabling the more lenient error handling in `inner_resolve`.
fn import_meta_resolve(
  &self,
  specifier: &str,
  referrer: &str,
) -> Result<ModuleSpecifier, ModuleLoaderError> {
  self.0.inner_resolve(
    specifier,
    referrer,
    deno_core::ResolutionKind::DynamicImport,
    true,
  )
}
/// Supplies v8 host-defined options for modules that live inside an npm
/// package; all other modules (or unparseable names) get `None`.
fn get_host_defined_options<'s>(
  &self,
  scope: &mut deno_core::v8::PinScope<'s, '_>,
  name: &str,
) -> Option<deno_core::v8::Local<'s, deno_core::v8::Data>> {
  let specifier = deno_core::ModuleSpecifier::parse(name).ok()?;
  self
    .0
    .shared
    .in_npm_pkg_checker
    .in_npm_package(&specifier)
    .then(|| create_host_defined_options(scope))
}
fn load(
&self,
specifier: &ModuleSpecifier,
maybe_referrer: Option<&ModuleLoadReferrer>,
options: ModuleLoadOptions,
) -> deno_core::ModuleLoadResponse {
let inner = self.0.clone();
if let Some(eszip_loader) = &inner.shared.maybe_eszip_loader {
return eszip_loader.load(specifier);
}
self.0.loaded_files.borrow_mut().insert(specifier.clone());
let specifier = specifier.clone();
let maybe_referrer = maybe_referrer.cloned();
deno_core::ModuleLoadResponse::Async(
async move {
inner
.load_inner(
&specifier,
maybe_referrer.as_ref().map(|r| &r.specifier),
&options.requested_module_type,
)
.await
.map_err(|err| {
let Some(referrer) = maybe_referrer else {
return err;
};
let position = deno_graph::Position {
line: referrer.line_number as usize - 1,
character: referrer.column_number as usize - 1,
};
JsErrorBox::new(
err.get_class(),
format!(
"{err}\n at {}",
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | true |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/cdp.rs | cli/cdp.rs | // Copyright 2018-2025 the Deno authors. MIT license.
/// <https://chromedevtools.github.io/devtools-protocol/tot/>
use deno_core::serde_json::Value;
use serde::Deserialize;
use serde::Deserializer;
use serde::Serialize;
/// <https://chromedevtools.github.io/devtools-protocol/tot/Runtime/#method-awaitPromise>
#[derive(Debug, Clone, Serialize)]
#[serde(rename_all = "camelCase")]
#[allow(dead_code)]
pub struct AwaitPromiseArgs {
  pub promise_object_id: RemoteObjectId,
  #[serde(skip_serializing_if = "Option::is_none")]
  pub return_by_value: Option<bool>,
  #[serde(skip_serializing_if = "Option::is_none")]
  pub generate_preview: Option<bool>,
}
/// <https://chromedevtools.github.io/devtools-protocol/tot/Runtime/#method-callFunctionOn>
///
/// Arguments for invoking `function_declaration` with `this` bound to
/// `object_id` (or inside `execution_context_id` when no object is given).
#[derive(Debug, Clone, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct CallFunctionOnArgs {
  pub function_declaration: String,
  #[serde(skip_serializing_if = "Option::is_none")]
  pub object_id: Option<RemoteObjectId>,
  #[serde(skip_serializing_if = "Option::is_none")]
  pub arguments: Option<Vec<CallArgument>>,
  #[serde(skip_serializing_if = "Option::is_none")]
  pub silent: Option<bool>,
  #[serde(skip_serializing_if = "Option::is_none")]
  pub return_by_value: Option<bool>,
  #[serde(skip_serializing_if = "Option::is_none")]
  pub generate_preview: Option<bool>,
  #[serde(skip_serializing_if = "Option::is_none")]
  pub user_gesture: Option<bool>,
  #[serde(skip_serializing_if = "Option::is_none")]
  pub await_promise: Option<bool>,
  #[serde(skip_serializing_if = "Option::is_none")]
  pub execution_context_id: Option<ExecutionContextId>,
  #[serde(skip_serializing_if = "Option::is_none")]
  pub object_group: Option<String>,
  #[serde(skip_serializing_if = "Option::is_none")]
  pub throw_on_side_effect: Option<bool>,
}
/// <https://chromedevtools.github.io/devtools-protocol/tot/Runtime/#method-callFunctionOn>
#[derive(Debug, Clone, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct CallFunctionOnResponse {
  pub result: RemoteObject,
  // Present only when the call threw.
  pub exception_details: Option<ExceptionDetails>,
}
/// <https://chromedevtools.github.io/devtools-protocol/tot/Runtime/#method-compileScript>
#[derive(Debug, Clone, Serialize)]
#[serde(rename_all = "camelCase")]
#[allow(dead_code)]
pub struct CompileScriptArgs {
  pub expression: String,
  #[serde(rename = "sourceURL")]
  pub source_url: String,
  #[serde(skip_serializing_if = "Option::is_none")]
  pub execution_context_id: Option<ExecutionContextId>,
}
/// <https://chromedevtools.github.io/devtools-protocol/tot/Runtime/#method-evaluate>
///
/// Arguments for evaluating `expression`; `repl_mode` enables the V8 REPL
/// semantics (e.g. redeclaring `let` bindings) used by `deno repl`.
#[derive(Debug, Clone, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct EvaluateArgs {
  pub expression: String,
  #[serde(skip_serializing_if = "Option::is_none")]
  pub object_group: Option<String>,
  #[serde(
    rename = "includeCommandLineAPI",
    skip_serializing_if = "Option::is_none"
  )]
  pub include_command_line_api: Option<bool>,
  #[serde(skip_serializing_if = "Option::is_none")]
  pub silent: Option<bool>,
  #[serde(skip_serializing_if = "Option::is_none")]
  pub context_id: Option<ExecutionContextId>,
  #[serde(skip_serializing_if = "Option::is_none")]
  pub return_by_value: Option<bool>,
  #[serde(skip_serializing_if = "Option::is_none")]
  pub generate_preview: Option<bool>,
  #[serde(skip_serializing_if = "Option::is_none")]
  pub user_gesture: Option<bool>,
  #[serde(skip_serializing_if = "Option::is_none")]
  pub await_promise: Option<bool>,
  #[serde(skip_serializing_if = "Option::is_none")]
  pub throw_on_side_effect: Option<bool>,
  #[serde(skip_serializing_if = "Option::is_none")]
  pub timeout: Option<TimeDelta>,
  #[serde(skip_serializing_if = "Option::is_none")]
  pub disable_breaks: Option<bool>,
  #[serde(skip_serializing_if = "Option::is_none")]
  pub repl_mode: Option<bool>,
  #[serde(skip_serializing_if = "Option::is_none")]
  #[serde(rename = "allowUnsafeEvalBlockedByCSP")]
  pub allow_unsafe_eval_blocked_by_csp: Option<bool>,
  #[serde(skip_serializing_if = "Option::is_none")]
  pub unique_context_id: Option<String>,
}
/// <https://chromedevtools.github.io/devtools-protocol/tot/Runtime/#method-evaluate>
#[derive(Debug, Clone, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct EvaluateResponse {
  pub result: RemoteObject,
  pub exception_details: Option<ExceptionDetails>,
}
/// <https://chromedevtools.github.io/devtools-protocol/tot/Runtime/#method-getProperties>
#[derive(Debug, Clone, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct GetPropertiesArgs {
  pub object_id: RemoteObjectId,
  #[serde(skip_serializing_if = "Option::is_none")]
  pub own_properties: Option<bool>,
  #[serde(skip_serializing_if = "Option::is_none")]
  pub accessor_properties_only: Option<bool>,
  #[serde(skip_serializing_if = "Option::is_none")]
  pub generate_preview: Option<bool>,
  #[serde(skip_serializing_if = "Option::is_none")]
  pub non_indexed_properties_only: Option<bool>,
}
/// <https://chromedevtools.github.io/devtools-protocol/tot/Runtime/#method-getProperties>
#[derive(Debug, Clone, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct GetPropertiesResponse {
  pub result: Vec<PropertyDescriptor>,
}
/// <https://chromedevtools.github.io/devtools-protocol/tot/Runtime/#method-globalLexicalScopeNames>
#[derive(Debug, Clone, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct GlobalLexicalScopeNamesArgs {
  #[serde(skip_serializing_if = "Option::is_none")]
  pub execution_context_id: Option<ExecutionContextId>,
}
/// <https://chromedevtools.github.io/devtools-protocol/tot/Runtime/#method-globalLexicalScopeNames>
#[derive(Debug, Clone, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct GlobalLexicalScopeNamesResponse {
  pub names: Vec<String>,
}
/// <https://chromedevtools.github.io/devtools-protocol/tot/Runtime/#method-queryObjects>
#[derive(Debug, Clone, Serialize)]
#[serde(rename_all = "camelCase")]
#[allow(dead_code)]
pub struct QueryObjectsArgs {
  pub prototype_object_id: RemoteObjectId,
  #[serde(skip_serializing_if = "Option::is_none")]
  pub object_group: Option<String>,
}
/// <https://chromedevtools.github.io/devtools-protocol/tot/Runtime/#method-releaseObject>
#[derive(Debug, Clone, Serialize)]
#[serde(rename_all = "camelCase")]
#[allow(dead_code)]
pub struct ReleaseObjectArgs {
  pub object_id: RemoteObjectId,
}
/// <https://chromedevtools.github.io/devtools-protocol/tot/Runtime/#method-releaseObjectGroup>
#[derive(Debug, Clone, Serialize)]
#[serde(rename_all = "camelCase")]
#[allow(dead_code)]
pub struct ReleaseObjectGroupArgs {
  pub object_group: String,
}
/// <https://chromedevtools.github.io/devtools-protocol/tot/Runtime/#method-runScript>
#[derive(Debug, Clone, Serialize)]
#[serde(rename_all = "camelCase")]
#[allow(dead_code)]
pub struct RunScriptArgs {
  pub script_id: ScriptId,
  #[serde(skip_serializing_if = "Option::is_none")]
  pub execution_context_id: Option<ExecutionContextId>,
  #[serde(skip_serializing_if = "Option::is_none")]
  pub object_group: Option<String>,
  #[serde(skip_serializing_if = "Option::is_none")]
  pub silent: Option<bool>,
  #[serde(
    rename = "includeCommandLineAPI",
    skip_serializing_if = "Option::is_none"
  )]
  pub include_command_line_api: Option<bool>,
  #[serde(skip_serializing_if = "Option::is_none")]
  pub return_by_value: Option<bool>,
  #[serde(skip_serializing_if = "Option::is_none")]
  pub generate_preview: Option<bool>,
  #[serde(skip_serializing_if = "Option::is_none")]
  pub await_promise: Option<bool>,
}
/// <https://chromedevtools.github.io/devtools-protocol/tot/Runtime/#method-setAsyncCallStackDepth>
#[derive(Debug, Clone, Serialize)]
#[serde(rename_all = "camelCase")]
#[allow(dead_code)]
pub struct SetAsyncCallStackDepthArgs {
  pub max_depth: u64,
}
// types
/// <https://chromedevtools.github.io/devtools-protocol/tot/Runtime/#type-RemoteObject>
///
/// A mirror object referencing a value in the inspected context. Exactly one
/// of `value` / `unserializable_value` / `object_id` is typically present.
#[derive(Debug, Clone, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RemoteObject {
  #[serde(rename = "type")]
  pub kind: String,
  // `deserialize_some` keeps `Some(Value::Null)` distinct from an absent
  // field, so a JSON `null` value is not lost.
  #[serde(default, deserialize_with = "deserialize_some")]
  pub value: Option<Value>,
  pub unserializable_value: Option<UnserializableValue>,
  pub description: Option<String>,
  pub object_id: Option<RemoteObjectId>,
}
// Any value that is present is considered Some value, including null.
// ref: https://github.com/serde-rs/serde/issues/984#issuecomment-314143738
fn deserialize_some<'de, T, D>(deserializer: D) -> Result<Option<T>, D::Error>
where
  T: Deserialize<'de>,
  D: Deserializer<'de>,
{
  Deserialize::deserialize(deserializer).map(Some)
}
/// <https://chromedevtools.github.io/devtools-protocol/tot/Runtime/#type-ExceptionDetails>
#[derive(Debug, Clone, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ExceptionDetails {
  // Short, top-level exception message.
  pub text: String,
  // The thrown value as a mirror object, when available.
  pub exception: Option<RemoteObject>,
}
impl ExceptionDetails {
  /// Returns the short exception `text` plus the exception object's
  /// description, substituting `"undefined"` when there is no exception
  /// object or it has no description.
  pub fn get_message_and_description(&self) -> (String, String) {
    // Borrow the exception instead of cloning the whole `RemoteObject`
    // (which may carry a large `value` payload); only the `description`
    // string needs to be owned.
    let description = self
      .exception
      .as_ref()
      .and_then(|ex| ex.description.clone())
      .unwrap_or_else(|| "undefined".to_string());
    (self.text.clone(), description)
  }
}
/// <https://chromedevtools.github.io/devtools-protocol/tot/Runtime/#type-CallArgument>
///
/// An argument to pass to `Runtime.callFunctionOn`; mirrors the three
/// mutually-exclusive representations of `RemoteObject`.
#[derive(Debug, Clone, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct CallArgument {
  #[serde(skip_serializing_if = "Option::is_none")]
  pub value: Option<Value>,
  #[serde(skip_serializing_if = "Option::is_none")]
  pub unserializable_value: Option<UnserializableValue>,
  #[serde(skip_serializing_if = "Option::is_none")]
  pub object_id: Option<RemoteObjectId>,
}
impl From<&RemoteObject> for CallArgument {
  // Re-wraps a received mirror object so it can be passed back as an
  // argument to a subsequent call.
  fn from(obj: &RemoteObject) -> Self {
    Self {
      value: obj.value.clone(),
      unserializable_value: obj.unserializable_value.clone(),
      object_id: obj.object_id.clone(),
    }
  }
}
/// <https://chromedevtools.github.io/devtools-protocol/tot/Runtime/#type-PropertyDescriptor>
#[derive(Debug, Clone, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct PropertyDescriptor {
  pub name: String,
}
/// <https://chromedevtools.github.io/devtools-protocol/tot/Runtime/#type-RemoteObjectId>
pub type RemoteObjectId = String;
/// <https://chromedevtools.github.io/devtools-protocol/tot/Runtime/#type-ExecutionContextId>
pub type ExecutionContextId = u64;
/// <https://chromedevtools.github.io/devtools-protocol/tot/Runtime/#type-ScriptId>
pub type ScriptId = String;
/// <https://chromedevtools.github.io/devtools-protocol/tot/Runtime/#type-TimeDelta>
pub type TimeDelta = u64;
/// <https://chromedevtools.github.io/devtools-protocol/tot/Runtime/#type-UnserializableValue>
pub type UnserializableValue = String;
/// <https://chromedevtools.github.io/devtools-protocol/tot/Debugger/#method-setScriptSource>
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct SetScriptSourceResponse {
  pub status: Status,
  pub exception_details: Option<ExceptionDetails>,
}
// Outcome of a live-edit attempt; variant names match the protocol strings.
#[derive(Debug, Deserialize)]
pub enum Status {
  Ok,
  CompileError,
  BlockedByActiveGenerator,
  BlockedByActiveFunction,
  BlockedByTopLevelEsModuleChange,
}
/// <https://chromedevtools.github.io/devtools-protocol/tot/Debugger/#event-scriptParsed>
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ScriptParsed {
  pub script_id: String,
  pub url: String,
}
/// <https://chromedevtools.github.io/devtools-protocol/tot/Profiler/#type-CoverageRange>
#[derive(Debug, Eq, PartialEq, Serialize, Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct CoverageRange {
  /// Start character index.
  #[serde(rename = "startOffset")]
  pub start_char_offset: usize,
  /// End character index.
  #[serde(rename = "endOffset")]
  pub end_char_offset: usize,
  // Number of times this range executed.
  pub count: i64,
}
/// <https://chromedevtools.github.io/devtools-protocol/tot/Profiler/#type-FunctionCoverage>
#[derive(Debug, Eq, PartialEq, Serialize, Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct FunctionCoverage {
  pub function_name: String,
  pub ranges: Vec<CoverageRange>,
  // When false, only function-level (not block-level) coverage is reported.
  pub is_block_coverage: bool,
}
/// <https://chromedevtools.github.io/devtools-protocol/tot/Profiler/#type-ScriptCoverage>
#[derive(Debug, Eq, PartialEq, Serialize, Deserialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct ScriptCoverage {
  pub script_id: String,
  pub url: String,
  pub functions: Vec<FunctionCoverage>,
}
/// <https://chromedevtools.github.io/devtools-protocol/tot/Profiler/#method-startPreciseCoverage>
#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
#[allow(dead_code)]
pub struct StartPreciseCoverageArgs {
  pub call_count: bool,
  pub detailed: bool,
  pub allow_triggered_updates: bool,
}
// TODO(bartlomieju): in Rust 1.90 some structs started getting flagged as not used
#[allow(dead_code)]
/// <https://chromedevtools.github.io/devtools-protocol/tot/Profiler/#method-startPreciseCoverage>
#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct StartPreciseCoverageResponse {
  pub timestamp: f64,
}
/// <https://chromedevtools.github.io/devtools-protocol/tot/Profiler/#method-takePreciseCoverage>
#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
#[allow(dead_code)]
pub struct TakePreciseCoverageResponse {
  pub result: Vec<ScriptCoverage>,
  pub timestamp: f64,
}
// Generic inspector notification envelope; `params` is decoded per-`method`.
#[derive(Debug, Deserialize)]
pub struct Notification {
  pub method: String,
  pub params: Value,
}
/// <https://chromedevtools.github.io/devtools-protocol/tot/Runtime/#event-exceptionThrown>
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ExceptionThrown {
  pub exception_details: ExceptionDetails,
}
/// <https://chromedevtools.github.io/devtools-protocol/tot/Runtime/#event-executionContextCreated>
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ExecutionContextCreated {
  pub context: ExecutionContextDescription,
}
/// <https://chromedevtools.github.io/devtools-protocol/tot/Runtime/#type-ExecutionContextDescription>
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ExecutionContextDescription {
  pub id: ExecutionContextId,
  pub aux_data: Value,
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/build.rs | cli/build.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::env;
use std::io::Write;
use std::path::Path;
use deno_runtime::*;
/// Compresses every bundled TypeScript declaration file under `tsc/dts/`
/// into `out_dir` (see `compress_source` for the on-disk format).
///
/// Fix: the original list contained "lib.deno.unstable.d.ts" twice, causing
/// the same file to be compressed and written a second time for no benefit;
/// the duplicate entry has been removed.
fn compress_decls(out_dir: &Path) {
  let decls = [
    "lib.deno_webgpu.d.ts",
    "lib.deno.ns.d.ts",
    "lib.deno.unstable.d.ts",
    "lib.deno.window.d.ts",
    "lib.deno.worker.d.ts",
    "lib.deno.shared_globals.d.ts",
    "lib.deno_console.d.ts",
    "lib.deno_url.d.ts",
    "lib.deno_web.d.ts",
    "lib.deno_fetch.d.ts",
    "lib.deno_websocket.d.ts",
    "lib.deno_webstorage.d.ts",
    "lib.deno_canvas.d.ts",
    "lib.deno_crypto.d.ts",
    "lib.deno_cache.d.ts",
    "lib.deno_net.d.ts",
    "lib.deno_broadcast_channel.d.ts",
    "lib.decorators.d.ts",
    "lib.decorators.legacy.d.ts",
    "lib.dom.asynciterable.d.ts",
    "lib.dom.d.ts",
    "lib.dom.extras.d.ts",
    "lib.dom.iterable.d.ts",
    "lib.es2015.collection.d.ts",
    "lib.es2015.core.d.ts",
    "lib.es2015.d.ts",
    "lib.es2015.generator.d.ts",
    "lib.es2015.iterable.d.ts",
    "lib.es2015.promise.d.ts",
    "lib.es2015.proxy.d.ts",
    "lib.es2015.reflect.d.ts",
    "lib.es2015.symbol.d.ts",
    "lib.es2015.symbol.wellknown.d.ts",
    "lib.es2016.array.include.d.ts",
    "lib.es2016.d.ts",
    "lib.es2016.full.d.ts",
    "lib.es2016.intl.d.ts",
    "lib.es2017.arraybuffer.d.ts",
    "lib.es2017.d.ts",
    "lib.es2017.date.d.ts",
    "lib.es2017.full.d.ts",
    "lib.es2017.intl.d.ts",
    "lib.es2017.object.d.ts",
    "lib.es2017.sharedmemory.d.ts",
    "lib.es2017.string.d.ts",
    "lib.es2017.typedarrays.d.ts",
    "lib.es2018.asyncgenerator.d.ts",
    "lib.es2018.asynciterable.d.ts",
    "lib.es2018.d.ts",
    "lib.es2018.full.d.ts",
    "lib.es2018.intl.d.ts",
    "lib.es2018.promise.d.ts",
    "lib.es2018.regexp.d.ts",
    "lib.es2019.array.d.ts",
    "lib.es2019.d.ts",
    "lib.es2019.full.d.ts",
    "lib.es2019.intl.d.ts",
    "lib.es2019.object.d.ts",
    "lib.es2019.string.d.ts",
    "lib.es2019.symbol.d.ts",
    "lib.es2020.bigint.d.ts",
    "lib.es2020.d.ts",
    "lib.es2020.date.d.ts",
    "lib.es2020.full.d.ts",
    "lib.es2020.intl.d.ts",
    "lib.es2020.number.d.ts",
    "lib.es2020.promise.d.ts",
    "lib.es2020.sharedmemory.d.ts",
    "lib.es2020.string.d.ts",
    "lib.es2020.symbol.wellknown.d.ts",
    "lib.es2021.d.ts",
    "lib.es2021.full.d.ts",
    "lib.es2021.intl.d.ts",
    "lib.es2021.promise.d.ts",
    "lib.es2021.string.d.ts",
    "lib.es2021.weakref.d.ts",
    "lib.es2022.array.d.ts",
    "lib.es2022.d.ts",
    "lib.es2022.error.d.ts",
    "lib.es2022.full.d.ts",
    "lib.es2022.intl.d.ts",
    "lib.es2022.object.d.ts",
    "lib.es2022.regexp.d.ts",
    "lib.es2022.string.d.ts",
    "lib.es2023.array.d.ts",
    "lib.es2023.collection.d.ts",
    "lib.es2023.d.ts",
    "lib.es2023.full.d.ts",
    "lib.es2023.intl.d.ts",
    "lib.es2024.arraybuffer.d.ts",
    "lib.es2024.collection.d.ts",
    "lib.es2024.d.ts",
    "lib.es2024.full.d.ts",
    "lib.es2024.object.d.ts",
    "lib.es2024.promise.d.ts",
    "lib.es2024.regexp.d.ts",
    "lib.es2024.sharedmemory.d.ts",
    "lib.es2024.string.d.ts",
    "lib.es5.d.ts",
    "lib.es6.d.ts",
    "lib.esnext.array.d.ts",
    "lib.esnext.collection.d.ts",
    "lib.esnext.d.ts",
    "lib.esnext.decorators.d.ts",
    "lib.esnext.disposable.d.ts",
    "lib.esnext.error.d.ts",
    "lib.esnext.float16.d.ts",
    "lib.esnext.full.d.ts",
    "lib.esnext.intl.d.ts",
    "lib.esnext.iterator.d.ts",
    "lib.esnext.promise.d.ts",
    "lib.esnext.sharedmemory.d.ts",
    "lib.node.d.ts",
    "lib.scripthost.d.ts",
    "lib.webworker.asynciterable.d.ts",
    "lib.webworker.d.ts",
    "lib.webworker.importscripts.d.ts",
    "lib.webworker.iterable.d.ts",
  ];
  for decl in decls {
    let file = format!("./tsc/dts/{decl}");
    compress_source(out_dir, &file);
  }
}
/// Collects the bundled Node type declaration files under `tsc/dts/node`,
/// compresses them into `out_dir` on release builds, and always generates a
/// `node_types.rs` macro listing them for inclusion at compile time.
fn process_node_types(out_dir: &Path) {
  let root_dir = Path::new(".").canonicalize().unwrap();
  let dts_dir = root_dir.join("tsc").join("dts");
  let node_dir = dts_dir.join("node");
  // Recursively find all .d.ts files in the node directory
  // (matches on the final "ts"/"cts" extension component, so plain .ts/.cts
  // files would also be picked up).
  fn visit_dirs(dir: &Path, cb: &mut dyn FnMut(&Path)) -> std::io::Result<()> {
    for entry in std::fs::read_dir(dir)? {
      let entry = entry?;
      let path = entry.path();
      if path.is_dir() {
        visit_dirs(&path, cb)?;
      } else if path.extension().and_then(|s| s.to_str()) == Some("ts")
        || path.extension().and_then(|s| s.to_str()) == Some("cts")
      {
        cb(&path);
      }
    }
    Ok(())
  }
  println!("cargo:rerun-if-changed={}", node_dir.display());
  let mut paths = Vec::new();
  visit_dirs(&node_dir, &mut |path| {
    paths.push(path.to_path_buf());
  })
  .unwrap();
  // Sort for deterministic builds
  paths.sort();
  // Compress all the files if release
  if !cfg!(debug_assertions) && std::env::var("CARGO_FEATURE_HMR").is_err() {
    for path in &paths {
      let relative = path.strip_prefix(&root_dir).unwrap();
      compress_source(out_dir, &relative.to_string_lossy());
    }
  }
  // Generate a Rust file with the node type entries (always, for both debug and release)
  let mut generated = String::from("// Auto-generated by build.rs\n");
  generated.push_str("macro_rules! node_type_libs {\n");
  generated.push_str("  () => {\n");
  generated.push_str("    [\n");
  for path in paths {
    // Paths are emitted relative to the dts dir with forward slashes so the
    // generated macro is identical across platforms.
    let relative = path.strip_prefix(&dts_dir).unwrap();
    let relative_str = relative.to_string_lossy().replace('\\', "/");
    generated.push_str(&format!(
      "      maybe_compressed_static_asset!(\"{}\", false),\n",
      relative_str
    ));
  }
  generated.push_str("    ]\n");
  generated.push_str("  };\n");
  generated.push_str("}\n");
  std::fs::write(out_dir.join("node_types.rs"), generated).unwrap();
}
fn compress_source(out_dir: &Path, file: &str) {
let path = Path::new(file)
.canonicalize()
.unwrap_or_else(|_| panic!("expected file \"{file}\" to exist"));
let contents = std::fs::read(&path).unwrap();
println!("cargo:rerun-if-changed={}", path.display());
let compressed = zstd::bulk::compress(&contents, 19).unwrap();
let mut out = out_dir.join(file.trim_start_matches("../"));
let mut ext = out
.extension()
.map(|s| s.to_string_lossy())
.unwrap_or_default()
.into_owned();
ext.push_str(".zstd");
out.set_extension(ext);
std::fs::create_dir_all(out.parent().unwrap()).unwrap();
let mut file = std::fs::OpenOptions::new()
.create(true)
.truncate(true)
.write(true)
.open(out)
.unwrap();
file
.write_all(&(contents.len() as u32).to_le_bytes())
.unwrap();
file.write_all(&compressed).unwrap();
}
/// Compresses the TypeScript declaration files plus the compiler/LSP
/// JavaScript sources into `out_dir`.
fn compress_sources(out_dir: &Path) {
  compress_decls(out_dir);
  [
    "./tsc/99_main_compiler.js",
    "./tsc/97_ts_host.js",
    "./tsc/98_lsp.js",
    "./tsc/00_typescript.js",
  ]
  .iter()
  .for_each(|ext_source| compress_source(out_dir, ext_source));
}
/// Build script entry point: emits linker flags, validates the build
/// configuration, compresses bundled assets into OUT_DIR on release builds,
/// and forwards build-time environment variables to the compiler.
fn main() {
  // Skip building from docs.rs.
  if env::var_os("DOCS_RS").is_some() {
    return;
  }
  deno_napi::print_linker_flags("deno");
  deno_webgpu::print_linker_flags("deno");
  // Host snapshots won't work when cross compiling.
  let target = env::var("TARGET").unwrap();
  let host = env::var("HOST").unwrap();
  let skip_cross_check =
    env::var("DENO_SKIP_CROSS_BUILD_CHECK").is_ok_and(|v| v == "1");
  if !skip_cross_check && target != host {
    panic!("Cross compiling with snapshot is not supported.");
  }
  // To debug snapshot issues uncomment:
  // op_fetch_asset::trace_serializer();
  let out_dir = std::path::PathBuf::from(std::env::var_os("OUT_DIR").unwrap());
  process_node_types(&out_dir);
  // Compress assets only for release builds without the HMR feature; debug
  // builds read the files uncompressed for faster iteration.
  if !cfg!(debug_assertions) && std::env::var("CARGO_FEATURE_HMR").is_err() {
    compress_sources(&out_dir);
  }
  if let Ok(c) = env::var("DENO_CANARY") {
    println!("cargo:rustc-env=DENO_CANARY={c}");
  }
  println!("cargo:rerun-if-env-changed=DENO_CANARY");
  println!("cargo:rustc-env=TARGET={}", env::var("TARGET").unwrap());
  println!("cargo:rustc-env=PROFILE={}", env::var("PROFILE").unwrap());
  // Embed the icon and language metadata into the Windows executable.
  #[cfg(target_os = "windows")]
  {
    let mut res = winres::WindowsResource::new();
    res.set_icon("deno.ico");
    res.set_language(winapi::um::winnt::MAKELANGID(
      winapi::um::winnt::LANG_ENGLISH,
      winapi::um::winnt::SUBLANG_ENGLISH_US,
    ));
    res.compile().unwrap();
  }
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/file_fetcher.rs | cli/file_fetcher.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::sync::Arc;
use deno_ast::MediaType;
use deno_cache_dir::GlobalOrLocalHttpCache;
use deno_cache_dir::file_fetcher::BlobData;
use deno_cache_dir::file_fetcher::CacheSetting;
use deno_cache_dir::file_fetcher::File;
use deno_cache_dir::file_fetcher::SendError;
use deno_cache_dir::file_fetcher::SendResponse;
use deno_core::ModuleSpecifier;
use deno_core::anyhow::Context;
use deno_core::error::AnyError;
use deno_core::url::Url;
use deno_resolver::file_fetcher::PermissionedFileFetcherOptions;
use deno_resolver::loader::MemoryFiles;
use deno_runtime::deno_web::BlobStore;
use http::HeaderMap;
use http::StatusCode;
use crate::colors;
use crate::http_util::HttpClientProvider;
use crate::http_util::get_response_body_with_progress;
use crate::sys::CliSys;
use crate::util::progress_bar::ProgressBar;
#[derive(Debug, Clone, Eq, PartialEq)]
/// A fetched file whose bytes have been decoded to text (see
/// `TextDecodedFile::decode`).
pub struct TextDecodedFile {
  // Media type resolved from the final URL and any response headers.
  pub media_type: MediaType,
  /// The _final_ specifier for the file. The requested specifier and the final
  /// specifier maybe different for remote files that have been redirected.
  pub specifier: ModuleSpecifier,
  /// The source of the file.
  pub source: Arc<str>,
}
impl TextDecodedFile {
  /// Decodes the source bytes into a string handling any encoding rules
  /// for local vs remote files and dealing with the charset.
  ///
  /// Charset priority: an explicit charset from the response headers wins;
  /// otherwise the charset is sniffed from the URL and source bytes.
  pub fn decode(file: File) -> Result<Self, AnyError> {
    let (media_type, maybe_charset) =
      deno_graph::source::resolve_media_type_and_charset_from_headers(
        &file.url,
        file.maybe_headers.as_ref(),
      );
    let specifier = file.url;
    let charset = maybe_charset.unwrap_or_else(|| {
      deno_media_type::encoding::detect_charset(&specifier, &file.source)
    });
    match deno_media_type::encoding::decode_arc_source(charset, file.source) {
      Ok(source) => Ok(TextDecodedFile {
        media_type,
        specifier,
        source,
      }),
      Err(err) => {
        Err(err).with_context(|| format!("Failed decoding \"{}\".", specifier))
      }
    }
  }
}
/// The CLI's file fetcher, parameterized over the CLI's blob store, system
/// abstraction, and HTTP client adapter implementations.
pub type CliFileFetcher = deno_resolver::file_fetcher::PermissionedFileFetcher<
  BlobStoreAdapter,
  CliSys,
  HttpClientAdapter,
>;
/// `deno_graph` loader built on the same adapter types as [`CliFileFetcher`].
pub type CliDenoGraphLoader = deno_resolver::file_fetcher::DenoGraphLoader<
  BlobStoreAdapter,
  CliSys,
  HttpClientAdapter,
>;
/// Options for [`create_cli_file_fetcher`].
pub struct CreateCliFileFetcherOptions {
  // When false, only local/cached files may be loaded.
  pub allow_remote: bool,
  pub cache_setting: CacheSetting,
  // Log level used for "Download ..." messages when no progress bar is set.
  pub download_log_level: log::Level,
  pub progress_bar: Option<ProgressBar>,
}
#[allow(clippy::too_many_arguments)]
/// Assembles the CLI file fetcher from its component parts, wrapping the
/// blob store and HTTP client provider in the adapter types the fetcher
/// expects.
pub fn create_cli_file_fetcher(
  blob_store: Arc<BlobStore>,
  http_cache: GlobalOrLocalHttpCache<CliSys>,
  http_client_provider: Arc<HttpClientProvider>,
  memory_files: Arc<MemoryFiles>,
  sys: CliSys,
  options: CreateCliFileFetcherOptions,
) -> CliFileFetcher {
  CliFileFetcher::new(
    BlobStoreAdapter(blob_store),
    Arc::new(http_cache),
    HttpClientAdapter {
      // `http_client_provider` is owned and not used again below, so move
      // it instead of bumping the Arc refcount with a redundant `.clone()`.
      http_client_provider,
      download_log_level: options.download_log_level,
      progress_bar: options.progress_bar,
    },
    memory_files,
    sys,
    PermissionedFileFetcherOptions {
      allow_remote: options.allow_remote,
      cache_setting: options.cache_setting,
    },
  )
}
#[derive(Debug)]
/// Adapts the runtime's [`BlobStore`] to the `deno_cache_dir` blob-store
/// trait used by the file fetcher.
pub struct BlobStoreAdapter(Arc<BlobStore>);
#[async_trait::async_trait(?Send)]
impl deno_cache_dir::file_fetcher::BlobStore for BlobStoreAdapter {
  // Looks up a `blob:` URL; `Ok(None)` means the URL is not registered.
  async fn get(&self, specifier: &Url) -> std::io::Result<Option<BlobData>> {
    let Some(blob) = self.0.get_object_url(specifier.clone()) else {
      return Ok(None);
    };
    Ok(Some(BlobData {
      media_type: blob.media_type.clone(),
      bytes: blob.read_all().await,
    }))
  }
}
#[derive(Debug)]
/// Adapts the CLI HTTP client to the `deno_cache_dir` HTTP client trait,
/// adding download logging / progress-bar reporting and retry behavior.
pub struct HttpClientAdapter {
  http_client_provider: Arc<HttpClientProvider>,
  // Level for "Download ..." log lines when `progress_bar` is `None`.
  download_log_level: log::Level,
  progress_bar: Option<ProgressBar>,
}
#[async_trait::async_trait(?Send)]
impl deno_cache_dir::file_fetcher::HttpClient for HttpClientAdapter {
  /// Sends a single request without following redirects.
  ///
  /// Connect errors, 5xx responses, and body-download failures are retried
  /// once after a short delay; 304 maps to `NotModified`, 3xx to
  /// `Redirect`, 404 to `NotFound`, and other 4xx to `StatusCode`.
  async fn send_no_follow(
    &self,
    url: &Url,
    headers: HeaderMap,
  ) -> Result<SendResponse, SendError> {
    // Returns `Ok(())` to signal "retry" the first time it is called and
    // `Err(())` thereafter, so each request is retried at most once.
    async fn handle_request_or_server_error(
      retried: &mut bool,
      specifier: &Url,
      err_str: String,
    ) -> Result<(), ()> {
      // Retry once, and bail otherwise.
      if !*retried {
        *retried = true;
        log::debug!("Import '{}' failed: {}. Retrying...", specifier, err_str);
        tokio::time::sleep(std::time::Duration::from_millis(50)).await;
        Ok(())
      } else {
        Err(())
      }
    }
    // Prefer the progress bar when present; otherwise log a download line.
    let mut maybe_progress_guard = None;
    if let Some(pb) = self.progress_bar.as_ref() {
      maybe_progress_guard = Some(pb.update(url.as_str()));
    } else {
      log::log!(
        self.download_log_level,
        "{} {}",
        colors::green("Download"),
        url
      );
    }
    let mut retried = false; // retry intermittent failures
    loop {
      let response = match self
        .http_client_provider
        .get_or_create()
        .map_err(|err| SendError::Failed(err.into()))?
        .send(url, headers.clone())
        .await
      {
        Ok(response) => response,
        Err(crate::http_util::SendError::Send(err)) => {
          // Only connect errors are considered transient and retried.
          if err.is_connect_error() {
            handle_request_or_server_error(&mut retried, url, err.to_string())
              .await
              .map_err(|()| SendError::Failed(err.into()))?;
            continue;
          } else {
            return Err(SendError::Failed(err.into()));
          }
        }
        Err(crate::http_util::SendError::InvalidUri(err)) => {
          return Err(SendError::Failed(err.into()));
        }
      };
      if response.status() == StatusCode::NOT_MODIFIED {
        return Ok(SendResponse::NotModified);
      }
      // Surface registry-provided warnings (e.g. deprecation notices).
      if let Some(warning) = response.headers().get("X-Deno-Warning") {
        log::warn!(
          "{} {}",
          crate::colors::yellow("Warning"),
          warning.to_str().unwrap()
        );
      }
      if response.status().is_redirection() {
        return Ok(SendResponse::Redirect(response.into_parts().0.headers));
      }
      if response.status().is_server_error() {
        // 5xx: retry once, then report the status code.
        handle_request_or_server_error(
          &mut retried,
          url,
          response.status().to_string(),
        )
        .await
        .map_err(|()| SendError::StatusCode(response.status()))?;
      } else if response.status().is_client_error() {
        let err = if response.status() == StatusCode::NOT_FOUND {
          SendError::NotFound
        } else {
          SendError::StatusCode(response.status())
        };
        return Err(err);
      } else {
        // Success status: download the body (feeding the progress bar),
        // retrying once if the download itself fails midway.
        let body_result = get_response_body_with_progress(
          response,
          maybe_progress_guard.as_ref(),
        )
        .await;
        match body_result {
          Ok((headers, body)) => {
            return Ok(SendResponse::Success(headers, body));
          }
          Err(err) => {
            handle_request_or_server_error(&mut retried, url, err.to_string())
              .await
              .map_err(|()| SendError::Failed(err.into()))?;
            continue;
          }
        }
      }
    }
  }
}
#[cfg(test)]
mod tests {
use std::collections::HashMap;
use deno_cache_dir::HttpCache;
use deno_cache_dir::file_fetcher::HttpClient;
use deno_core::resolve_url;
use deno_resolver::file_fetcher::FetchErrorKind;
use deno_resolver::file_fetcher::FetchPermissionsOptionRef;
use deno_resolver::loader::MemoryFilesRc;
use deno_runtime::deno_web::Blob;
use deno_runtime::deno_web::InMemoryBlobPart;
use test_util::TempDir;
use super::*;
use crate::cache::GlobalHttpCache;
use crate::http_util::HttpClientProvider;
/// Convenience wrapper around `setup_with_blob_store` for tests that do
/// not need a handle to the blob store.
fn setup(
  cache_setting: CacheSetting,
  maybe_temp_dir: Option<TempDir>,
) -> (CliFileFetcher, TempDir) {
  let (fetcher, dir, _blob_store) =
    setup_with_blob_store(cache_setting, maybe_temp_dir);
  (fetcher, dir)
}
/// Convenience wrapper around `setup_with_blob_store_and_cache` for tests
/// that do not need a handle to the HTTP cache.
fn setup_with_blob_store(
  cache_setting: CacheSetting,
  maybe_temp_dir: Option<TempDir>,
) -> (CliFileFetcher, TempDir, Arc<BlobStore>) {
  let (fetcher, dir, blob_store, _cache) =
    setup_with_blob_store_and_cache(cache_setting, maybe_temp_dir);
  (fetcher, dir, blob_store)
}
/// Builds a `CliFileFetcher` backed by a fresh blob store and a global
/// HTTP cache rooted in a temp dir (a new one is created when
/// `maybe_temp_dir` is `None`), returning every piece so tests can
/// inspect or share them.
fn setup_with_blob_store_and_cache(
  cache_setting: CacheSetting,
  maybe_temp_dir: Option<TempDir>,
) -> (
  CliFileFetcher,
  TempDir,
  Arc<BlobStore>,
  Arc<GlobalHttpCache>,
) {
  // Install the TLS crypto provider; the result is ignored because only
  // the first installation per process can succeed.
  let _ = rustls::crypto::aws_lc_rs::default_provider().install_default();
  let temp_dir = maybe_temp_dir.unwrap_or_default();
  let location = temp_dir.path().join("remote").to_path_buf();
  let blob_store: Arc<BlobStore> = Default::default();
  let cache = Arc::new(GlobalHttpCache::new(CliSys::default(), location));
  let file_fetcher = create_cli_file_fetcher(
    blob_store.clone(),
    GlobalOrLocalHttpCache::Global(cache.clone()),
    Arc::new(HttpClientProvider::new(None, None)),
    MemoryFilesRc::default(),
    CliSys::default(),
    CreateCliFileFetcherOptions {
      allow_remote: true,
      cache_setting,
      download_log_level: log::Level::Info,
      progress_bar: None,
    },
  );
  (file_fetcher, temp_dir, blob_store, cache)
}
/// Fetches `specifier` with a fresh reload-all fetcher, asserting that the
/// fetch succeeds, and returns both the file and the fetcher used.
async fn test_fetch(specifier: &ModuleSpecifier) -> (File, CliFileFetcher) {
  let (fetcher, _temp_dir) = setup(CacheSetting::ReloadAll, None);
  let fetched = fetcher.fetch_bypass_permissions(specifier).await;
  assert!(fetched.is_ok());
  (fetched.unwrap(), fetcher)
}
/// Fetches a remote specifier with a reload-all fetcher (allowing at most
/// one redirect) and returns the fetched file along with the headers that
/// were written to the HTTP cache for it.
async fn test_fetch_options_remote(
  specifier: &ModuleSpecifier,
) -> (File, HashMap<String, String>) {
  let _http_server_guard = test_util::http_server();
  let (file_fetcher, _, _, http_cache) =
    setup_with_blob_store_and_cache(CacheSetting::ReloadAll, None);
  let result = file_fetcher
    .fetch_with_options_and_max_redirect(
      specifier,
      FetchPermissionsOptionRef::AllowAll,
      Default::default(),
      1,
    )
    .await;
  // Read back the headers the fetch stored in the cache for assertions.
  let cache_key = http_cache.cache_item_key(specifier).unwrap();
  (
    result.unwrap(),
    http_cache.read_headers(&cache_key).unwrap().unwrap(),
  )
}
// this test used to test how the file fetcher decoded strings, but
// now we're using it as a bit of an integration test with the functionality
// in deno_graph
/// Fetches `encoding/{fixture}` from the test server and asserts the
/// response decodes to `expected` using the charset advertised in (or,
/// failing that, detected from) the response.
async fn test_fetch_remote_encoded(
  fixture: &str,
  expected_charset: &str,
  expected: &str,
) {
  let url_str = format!("http://127.0.0.1:4545/encoding/{fixture}");
  let specifier = resolve_url(&url_str).unwrap();
  let (file, headers) = test_fetch_options_remote(&specifier).await;
  // Resolve media type and charset the same way the module graph does.
  let (media_type, maybe_charset) =
    deno_graph::source::resolve_media_type_and_charset_from_headers(
      &specifier,
      Some(&headers),
    );
  assert_eq!(
    deno_media_type::encoding::decode_arc_source(
      // Fall back to charset detection when the headers don't specify one.
      maybe_charset.unwrap_or_else(|| {
        deno_media_type::encoding::detect_charset(&specifier, &file.source)
      }),
      file.source
    )
    .unwrap()
    .as_ref(),
    expected
  );
  assert_eq!(media_type, MediaType::TypeScript);
  assert_eq!(
    headers.get("content-type").unwrap(),
    &format!("application/typescript;charset={expected_charset}")
  );
}
/// Fetches the local `encoding/{charset}.ts` fixture and asserts it
/// decodes to `expected` using the charset detected from its bytes.
async fn test_fetch_local_encoded(charset: &str, expected: String) {
  let p = test_util::testdata_path().join(format!("encoding/{charset}.ts"));
  let specifier = ModuleSpecifier::from_file_path(p).unwrap();
  let (file, _) = test_fetch(&specifier).await;
  assert_eq!(
    deno_media_type::encoding::decode_arc_source(
      deno_media_type::encoding::detect_charset(&specifier, &file.source),
      file.source
    )
    .unwrap()
    .as_ref(),
    expected
  );
}
/// A file inserted into the fetcher's in-memory store must be served back
/// verbatim by a subsequent fetch, without touching the file system.
#[tokio::test]
async fn test_insert_cached() {
  let (file_fetcher, temp_dir) = setup(CacheSetting::Use, None);
  let local_path = temp_dir.path().join("a.ts");
  let specifier = ModuleSpecifier::from_file_path(&local_path).unwrap();
  let headers = HashMap::from([(
    "content-type".to_string(),
    "application/javascript".to_string(),
  )]);
  let file = File {
    source: Arc::from("some source code".as_bytes()),
    url: specifier.clone(),
    mtime: None,
    maybe_headers: Some(headers),
    loaded_from: deno_cache_dir::file_fetcher::LoadedFrom::Local,
  };
  file_fetcher.insert_memory_files(file.clone());

  let fetched = file_fetcher.fetch_bypass_permissions(&specifier).await;
  assert!(fetched.is_ok());
  assert_eq!(fetched.unwrap(), file);
}
/// A `data:` URL should be decoded directly, yielding the embedded
/// TypeScript source with its declared media type.
#[tokio::test]
async fn test_fetch_data_url() {
  let (file_fetcher, _temp_dir) = setup(CacheSetting::Use, None);
  // The base64 payload decodes to the TypeScript module asserted below.
  let specifier = resolve_url("data:application/typescript;base64,ZXhwb3J0IGNvbnN0IGEgPSAiYSI7CgpleHBvcnQgZW51bSBBIHsKICBBLAogIEIsCiAgQywKfQo=").unwrap();
  let fetched = file_fetcher.fetch_bypass_permissions(&specifier).await;
  assert!(fetched.is_ok());
  let decoded = TextDecodedFile::decode(fetched.unwrap()).unwrap();
  assert_eq!(decoded.media_type, MediaType::TypeScript);
  assert_eq!(decoded.specifier, specifier);
  assert_eq!(
    &*decoded.source,
    "export const a = \"a\";\n\nexport enum A {\n  A,\n  B,\n  C,\n}\n"
  );
}
/// Inserting a blob into the blob store and fetching its `blob:` URL must
/// return the original bytes with the blob's declared media type.
#[tokio::test]
async fn test_fetch_blob_url() {
  let (file_fetcher, _, blob_store) =
    setup_with_blob_store(CacheSetting::Use, None);
  let bytes =
    "export const a = \"a\";\n\nexport enum A {\n  A,\n  B,\n  C,\n}\n"
      .as_bytes()
      .to_vec();
  // Registering the blob yields a freshly-minted `blob:` specifier.
  let specifier = blob_store.insert_object_url(
    Blob {
      media_type: "application/typescript".to_string(),
      parts: vec![Arc::new(InMemoryBlobPart::from(bytes))],
    },
    None,
  );
  let result = file_fetcher.fetch_bypass_permissions(&specifier).await;
  assert!(result.is_ok());
  let file = TextDecodedFile::decode(result.unwrap()).unwrap();
  assert_eq!(
    &*file.source,
    "export const a = \"a\";\n\nexport enum A {\n  A,\n  B,\n  C,\n}\n"
  );
  assert_eq!(file.media_type, MediaType::TypeScript);
  assert_eq!(file.specifier, specifier);
}
/// End-to-end test covering remote fetching, reuse of a shared disk cache
/// across fetcher instances, direct cache manipulation, and cache busting
/// via `ReloadAll`.
#[tokio::test]
async fn test_fetch_complex() {
  let _http_server_guard = test_util::http_server();
  let (file_fetcher, temp_dir, _, http_cache) =
    setup_with_blob_store_and_cache(CacheSetting::Use, None);
  // Two more fetchers sharing the same cache directory.
  let (file_fetcher_01, _) = setup(CacheSetting::Use, Some(temp_dir.clone()));
  let (file_fetcher_02, _, _, http_cache_02) =
    setup_with_blob_store_and_cache(
      CacheSetting::Use,
      Some(temp_dir.clone()),
    );
  let specifier =
    ModuleSpecifier::parse("http://localhost:4545/subdir/mod2.ts").unwrap();
  // Initial fetch downloads from the test server.
  let result = file_fetcher.fetch_bypass_permissions(&specifier).await;
  assert!(result.is_ok());
  let file = TextDecodedFile::decode(result.unwrap()).unwrap();
  assert_eq!(
    &*file.source,
    "export { printHello } from \"./print_hello.ts\";\n"
  );
  assert_eq!(file.media_type, MediaType::TypeScript);
  // Overwrite the cached content-type header directly on disk.
  let cache_item_key = http_cache.cache_item_key(&specifier).unwrap();
  let mut headers = HashMap::new();
  headers.insert("content-type".to_string(), "text/javascript".to_string());
  http_cache
    .set(&specifier, headers.clone(), file.source.as_bytes())
    .unwrap();
  let result = file_fetcher_01.fetch_bypass_permissions(&specifier).await;
  assert!(result.is_ok());
  let file = TextDecodedFile::decode(result.unwrap()).unwrap();
  assert_eq!(
    &*file.source,
    "export { printHello } from \"./print_hello.ts\";\n"
  );
  // This validates that the cached value is being used, because we
  // modified the cached content-type header above.
  assert_eq!(file.media_type, MediaType::JavaScript);
  let headers2 = http_cache_02
    .read_headers(&cache_item_key)
    .unwrap()
    .unwrap();
  assert_eq!(headers2.get("content-type").unwrap(), "text/javascript");
  // Rewrite the header again through the second cache handle and confirm
  // the second fetcher observes it.
  headers = HashMap::new();
  headers.insert("content-type".to_string(), "application/json".to_string());
  http_cache_02
    .set(&specifier, headers.clone(), file.source.as_bytes())
    .unwrap();
  let result = file_fetcher_02.fetch_bypass_permissions(&specifier).await;
  assert!(result.is_ok());
  let file = TextDecodedFile::decode(result.unwrap()).unwrap();
  assert_eq!(
    &*file.source,
    "export { printHello } from \"./print_hello.ts\";\n"
  );
  assert_eq!(file.media_type, MediaType::Json);
  // This creates a totally new instance, simulating another Deno process
  // invocation and indicates to "cache bust".
  let location = temp_dir.path().join("remote").to_path_buf();
  let file_fetcher = create_cli_file_fetcher(
    Default::default(),
    Arc::new(GlobalHttpCache::new(CliSys::default(), location)).into(),
    Arc::new(HttpClientProvider::new(None, None)),
    MemoryFilesRc::default(),
    CliSys::default(),
    CreateCliFileFetcherOptions {
      allow_remote: true,
      cache_setting: CacheSetting::ReloadAll,
      download_log_level: log::Level::Info,
      progress_bar: None,
    },
  );
  let result = file_fetcher.fetch_bypass_permissions(&specifier).await;
  assert!(result.is_ok());
  let file = TextDecodedFile::decode(result.unwrap()).unwrap();
  assert_eq!(
    &*file.source,
    "export { printHello } from \"./print_hello.ts\";\n"
  );
  // `ReloadAll` re-downloads, so the server's original media type wins
  // over the "application/json" we wrote into the cache above.
  assert_eq!(file.media_type, MediaType::TypeScript);
}
/// Fetching the same specifier with two separate fetcher instances that
/// share a cache directory must not re-download: the cached entry's
/// modified time, headers, and download time stay unchanged.
#[tokio::test]
async fn test_fetch_uses_cache() {
  let _http_server_guard = test_util::http_server();
  let temp_dir = TempDir::new();
  let location = temp_dir.path().join("remote").to_path_buf();
  let specifier =
    resolve_url("http://localhost:4545/subdir/mismatch_ext.ts").unwrap();
  let http_cache =
    Arc::new(GlobalHttpCache::new(CliSys::default(), location.clone()));
  // First fetch populates the cache.
  let file_modified_01 = {
    let file_fetcher = create_cli_file_fetcher(
      Default::default(),
      http_cache.clone().into(),
      Arc::new(HttpClientProvider::new(None, None)),
      MemoryFilesRc::default(),
      CliSys::default(),
      CreateCliFileFetcherOptions {
        allow_remote: true,
        cache_setting: CacheSetting::Use,
        download_log_level: log::Level::Info,
        progress_bar: None,
      },
    );
    let result = file_fetcher.fetch_bypass_permissions(&specifier).await;
    assert!(result.is_ok());
    let cache_key = http_cache.cache_item_key(&specifier).unwrap();
    (
      http_cache.read_modified_time(&cache_key).unwrap(),
      http_cache.read_headers(&cache_key).unwrap().unwrap(),
      http_cache.read_download_time(&cache_key).unwrap().unwrap(),
    )
  };
  // Second fetch with a brand-new fetcher over the same directory should
  // be served from the cache, leaving the metadata untouched.
  let file_modified_02 = {
    let file_fetcher = create_cli_file_fetcher(
      Default::default(),
      Arc::new(GlobalHttpCache::new(CliSys::default(), location)).into(),
      Arc::new(HttpClientProvider::new(None, None)),
      MemoryFilesRc::default(),
      CliSys::default(),
      CreateCliFileFetcherOptions {
        allow_remote: true,
        cache_setting: CacheSetting::Use,
        download_log_level: log::Level::Info,
        progress_bar: None,
      },
    );
    let result = file_fetcher.fetch_bypass_permissions(&specifier).await;
    assert!(result.is_ok());
    let cache_key = http_cache.cache_item_key(&specifier).unwrap();
    (
      http_cache.read_modified_time(&cache_key).unwrap(),
      http_cache.read_headers(&cache_key).unwrap().unwrap(),
      http_cache.read_download_time(&cache_key).unwrap().unwrap(),
    )
  };
  assert_eq!(file_modified_01, file_modified_02);
}
/// A single cross-port redirect: the redirect source is cached with an
/// empty body plus a `location` header, while the target is cached with
/// the real contents and no `location` header.
#[tokio::test]
async fn test_fetch_redirected() {
  let _http_server_guard = test_util::http_server();
  let (file_fetcher, _, _, http_cache) =
    setup_with_blob_store_and_cache(CacheSetting::Use, None);
  // The test server on port 4546 redirects to port 4545.
  let specifier =
    resolve_url("http://localhost:4546/subdir/redirects/redirect1.js")
      .unwrap();
  let redirected_specifier =
    resolve_url("http://localhost:4545/subdir/redirects/redirect1.js")
      .unwrap();
  let result = file_fetcher.fetch_bypass_permissions(&specifier).await;
  assert!(result.is_ok());
  let file = result.unwrap();
  // The returned file reports the final (post-redirect) URL.
  assert_eq!(file.url, redirected_specifier);
  assert_eq!(
    get_text_from_cache(http_cache.as_ref(), &specifier),
    "",
    "redirected files should have empty cached contents"
  );
  assert_eq!(
    get_location_header_from_cache(http_cache.as_ref(), &specifier),
    Some("http://localhost:4545/subdir/redirects/redirect1.js".to_string()),
  );
  assert_eq!(
    get_text_from_cache(http_cache.as_ref(), &redirected_specifier),
    "export const redirect = 1;\n"
  );
  assert_eq!(
    get_location_header_from_cache(
      http_cache.as_ref(),
      &redirected_specifier
    ),
    None,
  );
}
/// A two-hop redirect chain (4548 -> 4546 -> 4545): every intermediate hop
/// is cached as an empty body plus a `location` header, and only the final
/// target carries the real contents.
#[tokio::test]
async fn test_fetch_multiple_redirects() {
  let _http_server_guard = test_util::http_server();
  let (file_fetcher, _, _, http_cache) =
    setup_with_blob_store_and_cache(CacheSetting::Use, None);
  let specifier =
    resolve_url("http://localhost:4548/subdir/redirects/redirect1.js")
      .unwrap();
  let redirected_01_specifier =
    resolve_url("http://localhost:4546/subdir/redirects/redirect1.js")
      .unwrap();
  let redirected_02_specifier =
    resolve_url("http://localhost:4545/subdir/redirects/redirect1.js")
      .unwrap();
  let result = file_fetcher.fetch_bypass_permissions(&specifier).await;
  assert!(result.is_ok());
  let file = result.unwrap();
  // The returned file reports the final URL after both hops.
  assert_eq!(file.url, redirected_02_specifier);
  assert_eq!(
    get_text_from_cache(http_cache.as_ref(), &specifier),
    "",
    "redirected files should have empty cached contents"
  );
  assert_eq!(
    get_location_header_from_cache(http_cache.as_ref(), &specifier),
    Some("http://localhost:4546/subdir/redirects/redirect1.js".to_string()),
  );
  assert_eq!(
    get_text_from_cache(http_cache.as_ref(), &redirected_01_specifier),
    "",
    "redirected files should have empty cached contents"
  );
  assert_eq!(
    get_location_header_from_cache(
      http_cache.as_ref(),
      &redirected_01_specifier
    ),
    Some("http://localhost:4545/subdir/redirects/redirect1.js".to_string()),
  );
  assert_eq!(
    get_text_from_cache(http_cache.as_ref(), &redirected_02_specifier),
    "export const redirect = 1;\n"
  );
  assert_eq!(
    get_location_header_from_cache(
      http_cache.as_ref(),
      &redirected_02_specifier
    ),
    None,
  );
}
/// The cache must also be honored across redirects: after a redirected
/// fetch populates the cache, fetching the redirect target directly with
/// a new fetcher leaves the cached entry's metadata unchanged.
#[tokio::test]
async fn test_fetch_uses_cache_with_redirects() {
  let _http_server_guard = test_util::http_server();
  let temp_dir = TempDir::new();
  let location = temp_dir.path().join("remote").to_path_buf();
  // Port 4548 redirects to port 4546.
  let specifier =
    resolve_url("http://localhost:4548/subdir/mismatch_ext.ts").unwrap();
  let redirected_specifier =
    resolve_url("http://localhost:4546/subdir/mismatch_ext.ts").unwrap();
  let http_cache =
    Arc::new(GlobalHttpCache::new(CliSys::default(), location.clone()));
  // First fetch follows the redirect and caches the target.
  let metadata_file_modified_01 = {
    let file_fetcher = create_cli_file_fetcher(
      Default::default(),
      http_cache.clone().into(),
      Arc::new(HttpClientProvider::new(None, None)),
      MemoryFilesRc::default(),
      CliSys::default(),
      CreateCliFileFetcherOptions {
        allow_remote: true,
        cache_setting: CacheSetting::Use,
        download_log_level: log::Level::Info,
        progress_bar: None,
      },
    );
    let result = file_fetcher.fetch_bypass_permissions(&specifier).await;
    assert!(result.is_ok());
    let cache_key = http_cache.cache_item_key(&redirected_specifier).unwrap();
    (
      http_cache.read_modified_time(&cache_key).unwrap(),
      http_cache.read_headers(&cache_key).unwrap().unwrap(),
      http_cache.read_download_time(&cache_key).unwrap().unwrap(),
    )
  };
  // Second fetch targets the redirect destination directly and should be
  // served from the cache populated above.
  let metadata_file_modified_02 = {
    let file_fetcher = create_cli_file_fetcher(
      Default::default(),
      http_cache.clone().into(),
      Arc::new(HttpClientProvider::new(None, None)),
      MemoryFilesRc::default(),
      CliSys::default(),
      CreateCliFileFetcherOptions {
        allow_remote: true,
        cache_setting: CacheSetting::Use,
        download_log_level: log::Level::Info,
        progress_bar: None,
      },
    );
    let result = file_fetcher
      .fetch_bypass_permissions(&redirected_specifier)
      .await;
    assert!(result.is_ok());
    let cache_key = http_cache.cache_item_key(&redirected_specifier).unwrap();
    (
      http_cache.read_modified_time(&cache_key).unwrap(),
      http_cache.read_headers(&cache_key).unwrap().unwrap(),
      http_cache.read_download_time(&cache_key).unwrap().unwrap(),
    )
  };
  assert_eq!(metadata_file_modified_01, metadata_file_modified_02);
}
/// A two-hop redirect chain must succeed with `max_redirect = 2` and fail
/// with `max_redirect = 1`, both for live fetches and when re-reading the
/// already-populated cache.
#[tokio::test]
async fn test_fetcher_limits_redirects() {
  let _http_server_guard = test_util::http_server();
  let (file_fetcher, _) = setup(CacheSetting::Use, None);
  // Port 4548 redirects to 4546, which redirects to 4545 (two hops).
  let specifier =
    resolve_url("http://localhost:4548/subdir/redirects/redirect1.js")
      .unwrap();
  let result = file_fetcher
    .fetch_with_options_and_max_redirect(
      &specifier,
      FetchPermissionsOptionRef::AllowAll,
      Default::default(),
      2,
    )
    .await;
  assert!(result.is_ok());
  let result = file_fetcher
    .fetch_with_options_and_max_redirect(
      &specifier,
      FetchPermissionsOptionRef::AllowAll,
      Default::default(),
      1,
    )
    .await;
  assert!(result.is_err());
  // The same limits apply when resolving purely from the cache.
  let result = file_fetcher.fetch_cached_remote(&specifier, 2);
  assert!(result.is_ok());
  let result = file_fetcher.fetch_cached_remote(&specifier, 1);
  assert!(result.is_err());
}
/// A same-host redirect whose `location` header is a relative path: the
/// relative value is stored in the cache as-is and resolved against the
/// original URL when followed.
#[tokio::test]
async fn test_fetch_same_host_redirect() {
  let _http_server_guard = test_util::http_server();
  let (file_fetcher, _, _, http_cache) =
    setup_with_blob_store_and_cache(CacheSetting::Use, None);
  let specifier = resolve_url(
    "http://localhost:4550/REDIRECT/subdir/redirects/redirect1.js",
  )
  .unwrap();
  let redirected_specifier =
    resolve_url("http://localhost:4550/subdir/redirects/redirect1.js")
      .unwrap();
  let result = file_fetcher.fetch_bypass_permissions(&specifier).await;
  assert!(result.is_ok());
  let file = result.unwrap();
  assert_eq!(file.url, redirected_specifier);
  assert_eq!(
    get_text_from_cache(http_cache.as_ref(), &specifier),
    "",
    "redirected files should have empty cached contents"
  );
  // The relative `location` header is cached verbatim.
  assert_eq!(
    get_location_header_from_cache(http_cache.as_ref(), &specifier),
    Some("/subdir/redirects/redirect1.js".to_string()),
  );
  assert_eq!(
    get_text_from_cache(http_cache.as_ref(), &redirected_specifier),
    "export const redirect = 1;\n"
  );
  assert_eq!(
    get_location_header_from_cache(
      http_cache.as_ref(),
      &redirected_specifier
    ),
    None
  );
}
/// With `allow_remote: false` (i.e. `--no-remote`), fetching a remote
/// specifier must fail with a `NoRemote` error carrying the expected
/// message.
#[tokio::test]
async fn test_fetch_no_remote() {
  let _http_server_guard = test_util::http_server();
  let temp_dir = TempDir::new();
  let location = temp_dir.path().join("remote").to_path_buf();
  let file_fetcher = create_cli_file_fetcher(
    Default::default(),
    Arc::new(GlobalHttpCache::new(CliSys::default(), location)).into(),
    Arc::new(HttpClientProvider::new(None, None)),
    MemoryFilesRc::default(),
    CliSys::default(),
    CreateCliFileFetcherOptions {
      allow_remote: false,
      cache_setting: CacheSetting::Use,
      download_log_level: log::Level::Info,
      progress_bar: None,
    },
  );
  let specifier =
    resolve_url("http://localhost:4545/run/002_hello.ts").unwrap();
  let result = file_fetcher.fetch_bypass_permissions(&specifier).await;
  assert!(result.is_err());
  let err = result.unwrap_err();
  // Unwrap the nested error kinds down to the cache-dir-level error.
  let err = match err.into_kind() {
    FetchErrorKind::FetchNoFollow(err) => err,
    FetchErrorKind::TooManyRedirects(_) => unreachable!(),
  };
  let err = match err.into_kind() {
    deno_resolver::file_fetcher::FetchNoFollowErrorKind::FetchNoFollow(
      err,
    ) => err,
    _ => unreachable!(),
  };
  let err = err.into_kind();
  match &err {
    deno_cache_dir::file_fetcher::FetchNoFollowErrorKind::NoRemote {
      ..
    } => {
      assert_eq!(
        err.to_string(),
        "A remote specifier was requested: \"http://localhost:4545/run/002_hello.ts\", but --no-remote is specified."
      );
    }
    _ => unreachable!(),
  }
}
/// With `CacheSetting::Only` (i.e. `--cached-only`), fetching an uncached
/// remote specifier fails with `NotCached`; once a second fetcher sharing
/// the same cache directory has downloaded it, the cached-only fetcher
/// succeeds.
#[tokio::test]
async fn test_fetch_cache_only() {
  let _http_server_guard = test_util::http_server();
  let temp_dir = TempDir::new();
  let location = temp_dir.path().join("remote").to_path_buf();
  // Cached-only fetcher: may never hit the network.
  let file_fetcher_01 = create_cli_file_fetcher(
    Default::default(),
    Arc::new(GlobalHttpCache::new(CliSys::default(), location.clone()))
      .into(),
    Arc::new(HttpClientProvider::new(None, None)),
    MemoryFilesRc::default(),
    CliSys::default(),
    CreateCliFileFetcherOptions {
      allow_remote: true,
      cache_setting: CacheSetting::Only,
      download_log_level: log::Level::Info,
      progress_bar: None,
    },
  );
  // Normal fetcher used to populate the shared cache directory.
  let file_fetcher_02 = create_cli_file_fetcher(
    Default::default(),
    Arc::new(GlobalHttpCache::new(CliSys::default(), location)).into(),
    Arc::new(HttpClientProvider::new(None, None)),
    MemoryFilesRc::default(),
    CliSys::default(),
    CreateCliFileFetcherOptions {
      allow_remote: true,
      cache_setting: CacheSetting::Use,
      download_log_level: log::Level::Info,
      progress_bar: None,
    },
  );
  let specifier =
    resolve_url("http://localhost:4545/run/002_hello.ts").unwrap();
  let result = file_fetcher_01.fetch_bypass_permissions(&specifier).await;
  // Unwrap the nested error kinds down to the cache-dir-level error.
  let err = match result.unwrap_err().into_kind() {
    FetchErrorKind::FetchNoFollow(err) => err,
    FetchErrorKind::TooManyRedirects(_) => unreachable!(),
  };
  let err = match err.into_kind() {
    deno_resolver::file_fetcher::FetchNoFollowErrorKind::FetchNoFollow(
      err,
    ) => err,
    _ => unreachable!(),
  };
  let err = err.into_kind();
  match &err {
    deno_cache_dir::file_fetcher::FetchNoFollowErrorKind::NotCached {
      ..
    } => {
      assert_eq!(
        err.to_string(),
        "Specifier not found in cache: \"http://localhost:4545/run/002_hello.ts\", --cached-only is specified."
      );
    }
    _ => unreachable!(),
  }
  // Populate the cache, then the cached-only fetcher succeeds.
  let result = file_fetcher_02.fetch_bypass_permissions(&specifier).await;
  assert!(result.is_ok());
  let result = file_fetcher_01.fetch_bypass_permissions(&specifier).await;
  assert!(result.is_ok());
}
/// Local files are read from disk on every fetch rather than being served
/// from a cache, so rewriting the file changes the next fetch's result.
#[tokio::test]
async fn test_fetch_local_bypasses_file_cache() {
  let (file_fetcher, temp_dir) = setup(CacheSetting::Use, None);
  let module_path = temp_dir.path().join("mod.ts");
  let specifier = ModuleSpecifier::from_file_path(&module_path).unwrap();

  // First fetch observes the initial contents.
  module_path.write(r#"console.log("hello deno");"#);
  let fetched = file_fetcher.fetch_bypass_permissions(&specifier).await;
  assert!(fetched.is_ok());
  let decoded = TextDecodedFile::decode(fetched.unwrap()).unwrap();
  assert_eq!(&*decoded.source, r#"console.log("hello deno");"#);

  // After rewriting the file, the new contents must be returned.
  module_path.write(r#"console.log("goodbye deno");"#);
  let fetched = file_fetcher.fetch_bypass_permissions(&specifier).await;
  assert!(fetched.is_ok());
  let decoded = TextDecodedFile::decode(fetched.unwrap()).unwrap();
  assert_eq!(&*decoded.source, r#"console.log("goodbye deno");"#);
}
/// With `RespectHeaders`, an endpoint that forbids caching (the `/dynamic`
/// fixture returns a different body per request) must be re-fetched by a
/// new fetcher instance instead of being served from the cache.
#[tokio::test]
async fn test_respect_cache_revalidates() {
  let _g = test_util::http_server();
  let temp_dir = TempDir::new();
  let (file_fetcher, _) =
    setup(CacheSetting::RespectHeaders, Some(temp_dir.clone()));
  let specifier =
    ModuleSpecifier::parse("http://localhost:4545/dynamic").unwrap();
  let result = file_fetcher.fetch_bypass_permissions(&specifier).await;
  assert!(result.is_ok());
  let file = result.unwrap();
  let first = file.source;
  // A second fetcher over the same cache dir must revalidate and get a
  // fresh (different) body.
  let (file_fetcher, _) =
    setup(CacheSetting::RespectHeaders, Some(temp_dir.clone()));
  let result = file_fetcher.fetch_bypass_permissions(&specifier).await;
  assert!(result.is_ok());
  let file = result.unwrap();
  let second = file.source;
  assert_ne!(first, second);
}
#[tokio::test]
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | true |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/worker.rs | cli/worker.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::cell::RefCell;
use std::path::PathBuf;
use std::rc::Rc;
use std::sync::Arc;
use deno_ast::ModuleSpecifier;
use deno_core::Extension;
use deno_core::OpState;
use deno_core::error::CoreError;
use deno_core::error::JsError;
use deno_core::futures::FutureExt;
use deno_core::v8;
use deno_error::JsErrorBox;
use deno_lib::worker::LibMainWorker;
use deno_lib::worker::LibMainWorkerFactory;
use deno_lib::worker::ResolveNpmBinaryEntrypointError;
use deno_npm_installer::PackageCaching;
use deno_npm_installer::graph::NpmCachingStrategy;
use deno_runtime::WorkerExecutionMode;
use deno_runtime::coverage::CoverageCollector;
use deno_runtime::deno_permissions::PermissionsContainer;
use deno_runtime::worker::MainWorker;
use deno_semver::npm::NpmPackageReqReference;
use sys_traits::EnvCurrentDir;
use tokio::select;
use crate::args::CliLockfile;
use crate::npm::CliNpmInstaller;
use crate::npm::CliNpmResolver;
use crate::sys::CliSys;
use crate::tools::run::hmr::HmrRunner;
use crate::tools::run::hmr::HmrRunnerState;
use crate::util::file_watcher::WatcherCommunicator;
use crate::util::file_watcher::WatcherRestartMode;
use crate::util::progress_bar::ProgressBar;
/// Factory callback that produces the per-worker HMR runner state.
pub type CreateHmrRunnerCb = Box<dyn Fn() -> HmrRunnerState + Send + Sync>;

/// CLI-specific options used when constructing main workers.
pub struct CliMainWorkerOptions {
  /// When set, workers are created with hot-module-replacement support.
  pub create_hmr_runner: Option<CreateHmrRunnerCb>,
  /// Directory to write coverage profiles to, when coverage is enabled.
  pub maybe_coverage_dir: Option<PathBuf>,
  pub default_npm_caching_strategy: NpmCachingStrategy,
  pub needs_test_modules: bool,
  /// Initial working directory override for the worker, when set.
  pub maybe_initial_cwd: Option<Arc<ModuleSpecifier>>,
}
/// Data shared between the factory and workers.
struct SharedState {
  // See the corresponding fields on `CliMainWorkerOptions`.
  pub create_hmr_runner: Option<CreateHmrRunnerCb>,
  pub maybe_coverage_dir: Option<PathBuf>,
  // Used to switch the file watcher back to automatic restarts when an
  // HMR-driven run fails.
  pub maybe_file_watcher_communicator: Option<Arc<WatcherCommunicator>>,
  pub maybe_initial_cwd: Option<Arc<ModuleSpecifier>>,
}
/// CLI wrapper around a `LibMainWorker` that layers on coverage
/// collection, HMR, and watcher-aware execution.
pub struct CliMainWorker {
  worker: LibMainWorker,
  shared: Arc<SharedState>,
}
impl CliMainWorker {
  /// Consumes the wrapper and returns the underlying `MainWorker`.
  #[inline]
  pub fn into_main_worker(self) -> MainWorker {
    self.worker.into_main_worker()
  }

  /// Drives the event loop once so the REPL starts from a settled state.
  pub async fn setup_repl(&mut self) -> Result<(), CoreError> {
    self.worker.run_event_loop(false).await?;
    Ok(())
  }

  /// Executes the main module to completion and returns the exit code.
  ///
  /// Dispatches the standard life-cycle events (load, beforeunload,
  /// process beforeExit/exit, unload) around the event loop, and wires up
  /// optional coverage collection and HMR when configured.
  pub async fn run(&mut self) -> Result<i32, CoreError> {
    let mut maybe_coverage_collector = self.maybe_setup_coverage_collector();
    let mut maybe_hmr_runner = self.maybe_setup_hmr_runner();

    // WARNING: Remember to update cli/lib/worker.rs to align with
    // changes made here so that they affect deno_compile as well.

    log::debug!("main_module {}", self.worker.main_module());

    // Run preload modules first if they were defined
    self.worker.execute_preload_modules().await?;
    self.execute_main_module().await?;
    self.worker.dispatch_load_event()?;
    loop {
      if let Some(hmr_runner) = maybe_hmr_runner.as_mut() {
        // Race the HMR runner against the event loop; whichever finishes
        // (or errors) first determines the result.
        let hmr_future = hmr_runner.run().boxed_local();
        let event_loop_future = self.worker.run_event_loop(false).boxed_local();

        let result;
        select! {
          hmr_result = hmr_future => {
            result = hmr_result;
          },
          event_loop_result = event_loop_future => {
            result = event_loop_result;
          }
        }
        if let Err(e) = result {
          // HMR failed; fall back to automatically restarting the watcher.
          // The communicator is expected to be present whenever HMR is
          // configured.
          self
            .shared
            .maybe_file_watcher_communicator
            .as_ref()
            .unwrap()
            .change_restart_mode(WatcherRestartMode::Automatic);
          return Err(e);
        }
      } else {
        // TODO(bartlomieju): this might not be needed anymore
        self
          .worker
          .run_event_loop(maybe_coverage_collector.is_none())
          .await?;
      }

      // Keep looping while either web (`beforeunload`) or node
      // (`process.on("beforeExit")`) listeners schedule more work.
      let web_continue = self.worker.dispatch_beforeunload_event()?;
      if !web_continue {
        let node_continue = self.worker.dispatch_process_beforeexit_event()?;
        if !node_continue {
          break;
        }
      }
    }
    self.worker.dispatch_unload_event()?;
    self.worker.dispatch_process_exit_event()?;
    if let Some(coverage_collector) = maybe_coverage_collector.as_mut() {
      coverage_collector.stop_collecting()?;
    }
    if let Some(hmr_runner) = maybe_hmr_runner.as_mut() {
      hmr_runner.stop();
    }
    Ok(self.worker.exit_code())
  }

  /// Runs the main module for a `deno run --watch` invocation, ensuring
  /// the unload event still fires if the run future is cancelled.
  pub async fn run_for_watcher(self) -> Result<(), CoreError> {
    /// The FileWatcherModuleExecutor provides module execution with safe dispatching of life-cycle events by tracking the
    /// state of any pending events and emitting accordingly on drop in the case of a future
    /// cancellation.
    struct FileWatcherModuleExecutor {
      inner: CliMainWorker,
      // True between dispatching the load event and dispatching unload;
      // checked by `Drop` to emit unload on cancellation.
      pending_unload: bool,
    }

    impl FileWatcherModuleExecutor {
      pub fn new(worker: CliMainWorker) -> FileWatcherModuleExecutor {
        FileWatcherModuleExecutor {
          inner: worker,
          pending_unload: false,
        }
      }

      /// Execute the given main module emitting load and unload events before and after execution
      /// respectively.
      pub async fn execute(&mut self) -> Result<(), CoreError> {
        self.inner.execute_main_module().await?;
        self.inner.worker.dispatch_load_event()?;
        self.pending_unload = true;
        let result = loop {
          match self.inner.worker.run_event_loop(false).await {
            Ok(()) => {}
            Err(error) => break Err(error),
          }
          // Same continuation protocol as `CliMainWorker::run`.
          let web_continue = self.inner.worker.dispatch_beforeunload_event()?;
          if !web_continue {
            let node_continue =
              self.inner.worker.dispatch_process_beforeexit_event()?;
            if !node_continue {
              break Ok(());
            }
          }
        };
        self.pending_unload = false;

        result?;

        self.inner.worker.dispatch_unload_event()?;
        self.inner.worker.dispatch_process_exit_event()?;

        Ok(())
      }
    }

    impl Drop for FileWatcherModuleExecutor {
      fn drop(&mut self) {
        // If `execute` was cancelled mid-run, still emit the unload event
        // (errors are ignored since we are already tearing down).
        if self.pending_unload {
          let _ = self.inner.worker.dispatch_unload_event();
        }
      }
    }

    let mut executor = FileWatcherModuleExecutor::new(self);
    executor.execute().await
  }

  #[inline]
  pub async fn execute_main_module(&mut self) -> Result<(), CoreError> {
    self.worker.execute_main_module().await
  }

  #[inline]
  pub async fn execute_side_module(&mut self) -> Result<(), CoreError> {
    self.worker.execute_side_module().await
  }

  #[inline]
  pub async fn execute_preload_modules(&mut self) -> Result<(), CoreError> {
    self.worker.execute_preload_modules().await
  }

  pub fn op_state(&mut self) -> Rc<RefCell<OpState>> {
    self.worker.js_runtime().op_state()
  }

  /// Creates and starts an HMR runner when one was configured, hooked up
  /// to a fresh inspector session.
  pub fn maybe_setup_hmr_runner(&mut self) -> Option<HmrRunner> {
    let setup_hmr_runner = self.shared.create_hmr_runner.as_ref()?;
    let hmr_runner_state = setup_hmr_runner();
    let state = hmr_runner_state.clone();
    let callback = Box::new(move |message| hmr_runner_state.callback(message));
    let session = self.worker.create_inspector_session(callback);

    let mut hmr_runner = HmrRunner::new(state, session);
    hmr_runner.start();
    Some(hmr_runner)
  }

  /// Creates and starts a coverage collector when a coverage directory
  /// was configured.
  pub fn maybe_setup_coverage_collector(
    &mut self,
  ) -> Option<CoverageCollector> {
    let coverage_dir = self.shared.maybe_coverage_dir.as_ref()?;
    let mut coverage_collector =
      CoverageCollector::new(self.worker.js_runtime(), coverage_dir.clone());
    coverage_collector.start_collecting();
    Some(coverage_collector)
  }

  /// Executes a static script in the worker's runtime, returning the
  /// resulting value.
  pub fn execute_script_static(
    &mut self,
    name: &'static str,
    source_code: &'static str,
  ) -> Result<v8::Global<v8::Value>, Box<JsError>> {
    self.worker.js_runtime().execute_script(name, source_code)
  }
}
/// Errors that can occur while creating a worker via
/// `CliMainWorkerFactory`. All variants delegate display and JS error
/// classification to the wrapped error.
#[derive(Debug, thiserror::Error, deno_error::JsError)]
pub enum CreateCustomWorkerError {
  #[class(inherit)]
  #[error(transparent)]
  Io(#[from] std::io::Error),
  #[class(inherit)]
  #[error(transparent)]
  Core(#[from] CoreError),
  #[class(inherit)]
  #[error(transparent)]
  ResolvePkgFolderFromDenoReq(
    #[from] deno_resolver::npm::ResolvePkgFolderFromDenoReqError,
  ),
  #[class(inherit)]
  #[error(transparent)]
  UrlParse(#[from] deno_core::url::ParseError),
  #[class(inherit)]
  #[error(transparent)]
  ResolveNpmBinaryEntrypoint(#[from] ResolveNpmBinaryEntrypointError),
  #[class(inherit)]
  #[error(transparent)]
  NpmPackageReq(JsErrorBox),
  #[class(inherit)]
  #[error(transparent)]
  LockfileWrite(#[from] deno_resolver::lockfile::LockfileWriteError),
}
/// Factory for `CliMainWorker`s, bundling the services and options shared
/// by every worker it creates.
pub struct CliMainWorkerFactory {
  lib_main_worker_factory: LibMainWorkerFactory<CliSys>,
  maybe_lockfile: Option<Arc<CliLockfile>>,
  npm_installer: Option<Arc<CliNpmInstaller>>,
  npm_resolver: CliNpmResolver,
  progress_bar: ProgressBar,
  // Default permissions applied when callers don't supply their own.
  root_permissions: PermissionsContainer,
  shared: Arc<SharedState>,
  sys: CliSys,
  default_npm_caching_strategy: NpmCachingStrategy,
  needs_test_modules: bool,
}
impl CliMainWorkerFactory {
  /// Creates a new factory; `options` is split between direct fields and the
  /// `SharedState` handed to each created worker.
  #[allow(clippy::too_many_arguments)]
  pub fn new(
    lib_main_worker_factory: LibMainWorkerFactory<CliSys>,
    maybe_file_watcher_communicator: Option<Arc<WatcherCommunicator>>,
    maybe_lockfile: Option<Arc<CliLockfile>>,
    npm_installer: Option<Arc<CliNpmInstaller>>,
    npm_resolver: CliNpmResolver,
    progress_bar: ProgressBar,
    sys: CliSys,
    options: CliMainWorkerOptions,
    root_permissions: PermissionsContainer,
  ) -> Self {
    Self {
      lib_main_worker_factory,
      maybe_lockfile,
      npm_installer,
      npm_resolver,
      progress_bar,
      root_permissions,
      sys,
      shared: Arc::new(SharedState {
        create_hmr_runner: options.create_hmr_runner,
        maybe_coverage_dir: options.maybe_coverage_dir,
        maybe_file_watcher_communicator,
        maybe_initial_cwd: options.maybe_initial_cwd,
      }),
      default_npm_caching_strategy: options.default_npm_caching_strategy,
      needs_test_modules: options.needs_test_modules,
    }
  }

  /// Creates a main worker using the factory's root permissions and no
  /// custom extensions, stdio overrides, or preconfigured runtime.
  pub async fn create_main_worker(
    &self,
    mode: WorkerExecutionMode,
    main_module: ModuleSpecifier,
    preload_modules: Vec<ModuleSpecifier>,
    require_modules: Vec<ModuleSpecifier>,
  ) -> Result<CliMainWorker, CreateCustomWorkerError> {
    self
      .create_custom_worker(
        mode,
        main_module,
        preload_modules,
        require_modules,
        self.root_permissions.clone(),
        vec![],
        Default::default(),
        None,
      )
      .await
  }

  /// Like [`Self::create_main_worker`], but allows reusing an
  /// already-constructed (unconfigured) runtime.
  pub async fn create_main_worker_with_unconfigured_runtime(
    &self,
    mode: WorkerExecutionMode,
    main_module: ModuleSpecifier,
    preload_modules: Vec<ModuleSpecifier>,
    require_modules: Vec<ModuleSpecifier>,
    unconfigured_runtime: Option<deno_runtime::UnconfiguredRuntime>,
  ) -> Result<CliMainWorker, CreateCustomWorkerError> {
    self
      .create_custom_worker(
        mode,
        main_module,
        preload_modules,
        require_modules,
        self.root_permissions.clone(),
        vec![],
        Default::default(),
        unconfigured_runtime,
      )
      .await
  }

  /// Creates a worker with full control over permissions, extensions, stdio,
  /// and the underlying runtime.
  ///
  /// When `main_module` is an `npm:` specifier, the referenced package is
  /// installed (if an installer is available), its binary entrypoint is
  /// resolved to a file path, and the lockfile is persisted.
  #[allow(clippy::too_many_arguments)]
  pub async fn create_custom_worker(
    &self,
    mode: WorkerExecutionMode,
    main_module: ModuleSpecifier,
    preload_modules: Vec<ModuleSpecifier>,
    require_modules: Vec<ModuleSpecifier>,
    permissions: PermissionsContainer,
    custom_extensions: Vec<Extension>,
    stdio: deno_runtime::deno_io::Stdio,
    unconfigured_runtime: Option<deno_runtime::UnconfiguredRuntime>,
  ) -> Result<CliMainWorker, CreateCustomWorkerError> {
    let main_module = match NpmPackageReqReference::from_specifier(&main_module)
    {
      Ok(package_ref) => {
        if let Some(npm_installer) = &self.npm_installer {
          let _clear_guard = self.progress_bar.deferred_keep_initialize_alive();
          let reqs = &[package_ref.req().clone()];
          npm_installer
            .add_package_reqs(
              reqs,
              if matches!(
                self.default_npm_caching_strategy,
                NpmCachingStrategy::Lazy
              ) {
                PackageCaching::Only(reqs.into())
              } else {
                PackageCaching::All
              },
            )
            .await
            .map_err(CreateCustomWorkerError::NpmPackageReq)?;
        }
        // use a fake referrer that can be used to discover the package.json if necessary
        let referrer =
          ModuleSpecifier::from_directory_path(self.sys.env_current_dir()?)
            .unwrap()
            .join("package.json")?;
        let package_folder =
          self.npm_resolver.resolve_pkg_folder_from_deno_module_req(
            package_ref.req(),
            &referrer,
          )?;
        let main_module =
          self.lib_main_worker_factory.resolve_npm_binary_entrypoint(
            &package_folder,
            package_ref.sub_path(),
          )?;
        if let Some(lockfile) = &self.maybe_lockfile {
          // For npm binary commands, ensure that the lockfile gets updated
          // so that we can re-use the npm resolution the next time it runs
          // for better performance
          lockfile.write_if_changed()?;
        }
        main_module
      }
      _ => main_module,
    };
    let mut worker = self.lib_main_worker_factory.create_custom_worker(
      mode,
      main_module,
      preload_modules,
      require_modules,
      permissions,
      custom_extensions,
      stdio,
      unconfigured_runtime,
    )?;
    if self.needs_test_modules {
      // Lazily loads each embedded extension module under the `ext:cli/` scheme.
      macro_rules! test_file {
        ($($file:literal),*) => {
          $(worker.js_runtime().lazy_load_es_module_with_code(
            concat!("ext:cli/", $file),
            deno_core::ascii_str_include!(concat!("js/", $file)),
          )?;)*
        }
      }
      test_file!(
        "40_test_common.js",
        "40_test.js",
        "40_bench.js",
        "40_jupyter.js",
        // TODO(bartlomieju): probably shouldn't include these files here?
        "40_lint_selector.js",
        "40_lint.js"
      );
    }
    // Make the configured initial cwd visible to ops via the op state.
    if let Some(initial_cwd) = &self.shared.maybe_initial_cwd {
      let op_state = worker.js_runtime().op_state();
      op_state
        .borrow_mut()
        .put(deno_core::error::InitialCwd(initial_cwd.clone()));
    }
    Ok(CliMainWorker {
      worker,
      shared: self.shared.clone(),
    })
  }
}
#[allow(clippy::print_stdout)]
#[allow(clippy::print_stderr)]
#[cfg(test)]
mod tests {
  use std::rc::Rc;
  use deno_core::FsModuleLoader;
  use deno_core::resolve_path;
  use deno_resolver::npm::DenoInNpmPackageChecker;
  use deno_runtime::deno_fs::RealFs;
  use deno_runtime::deno_permissions::Permissions;
  use deno_runtime::permissions::RuntimePermissionDescriptorParser;
  use deno_runtime::worker::WorkerOptions;
  use deno_runtime::worker::WorkerServiceOptions;
  use super::*;

  /// Builds a minimal `MainWorker` with no-prompt permissions, a filesystem
  /// module loader, and the CLI snapshot — enough to execute local modules.
  fn create_test_worker() -> MainWorker {
    // The main module here is a placeholder; tests execute other modules.
    let main_module =
      resolve_path("./hello.js", &std::env::current_dir().unwrap()).unwrap();
    let fs = Arc::new(RealFs);
    let permission_desc_parser = Arc::new(
      RuntimePermissionDescriptorParser::new(crate::sys::CliSys::default()),
    );
    let options = WorkerOptions {
      startup_snapshot: deno_snapshots::CLI_SNAPSHOT,
      ..Default::default()
    };
    MainWorker::bootstrap_from_options::<
      DenoInNpmPackageChecker,
      CliNpmResolver,
      CliSys,
    >(
      &main_module,
      WorkerServiceOptions {
        deno_rt_native_addon_loader: None,
        module_loader: Rc::new(FsModuleLoader),
        permissions: PermissionsContainer::new(
          permission_desc_parser,
          Permissions::none_without_prompt(),
        ),
        blob_store: Default::default(),
        broadcast_channel: Default::default(),
        feature_checker: Default::default(),
        node_services: Default::default(),
        npm_process_state_provider: Default::default(),
        root_cert_store_provider: Default::default(),
        fetch_dns_resolver: Default::default(),
        shared_array_buffer_store: Default::default(),
        compiled_wasm_module_store: Default::default(),
        v8_code_cache: Default::default(),
        fs,
        bundle_provider: None,
      },
      options,
    )
  }

  #[tokio::test]
  async fn execute_mod_esm_imports_a() {
    let p = test_util::testdata_path().join("runtime/esm_imports_a.js");
    let module_specifier = ModuleSpecifier::from_file_path(&p).unwrap();
    let mut worker = create_test_worker();
    let result = worker.execute_main_module(&module_specifier).await;
    if let Err(err) = result {
      eprintln!("execute_mod err {err:?}");
    }
    if let Err(e) = worker.run_event_loop(false).await {
      panic!("Future got unexpected error: {e:?}");
    }
  }

  #[tokio::test]
  async fn execute_mod_circular() {
    let p = std::path::PathBuf::from(env!("CARGO_MANIFEST_DIR"))
      .parent()
      .unwrap()
      .join("tests/circular1.js");
    let module_specifier = ModuleSpecifier::from_file_path(&p).unwrap();
    let mut worker = create_test_worker();
    let result = worker.execute_main_module(&module_specifier).await;
    if let Err(err) = result {
      eprintln!("execute_mod err {err:?}");
    }
    if let Err(e) = worker.run_event_loop(false).await {
      panic!("Future got unexpected error: {e:?}");
    }
  }

  #[tokio::test]
  async fn execute_mod_resolve_error() {
    // "./does-not-exist" resolves to a file that is missing, so executing
    // the module should return an error.
    let mut worker = create_test_worker();
    let module_specifier =
      resolve_path("./does-not-exist", &std::env::current_dir().unwrap())
        .unwrap();
    let result = worker.execute_main_module(&module_specifier).await;
    assert!(result.is_err());
  }

  #[tokio::test]
  async fn execute_mod_002_hello() {
    // This assumes cwd is project root (an assumption made throughout the
    // tests).
    // NOTE(review): the test name says 002 but loads 001_hello.js; confirm
    // which is intended.
    let mut worker = create_test_worker();
    let p = test_util::testdata_path().join("run/001_hello.js");
    let module_specifier = ModuleSpecifier::from_file_path(&p).unwrap();
    let result = worker.execute_main_module(&module_specifier).await;
    assert!(result.is_ok());
  }
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/registry.rs | cli/registry.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use deno_core::error::AnyError;
use deno_core::serde_json;
use deno_core::url::Url;
use deno_runtime::deno_fetch;
use serde::de::DeserializeOwned;
use crate::http_util;
use crate::http_util::HttpClient;
/// Response to starting a device-flow style authorization with the registry.
#[derive(serde::Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct CreateAuthorizationResponse {
  // URL the user must open to approve the authorization.
  pub verification_url: String,
  // Short code the user enters/confirms on the verification page.
  pub code: String,
  // Token used by the CLI to poll for the final access token.
  pub exchange_token: String,
  // How often (in seconds) the CLI should poll for completion.
  pub poll_interval: u64,
}

/// Response to successfully exchanging an authorization for an access token.
#[derive(serde::Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ExchangeAuthorizationResponse {
  pub token: String,
  pub user: User,
}

/// Minimal registry user info returned alongside a token.
#[derive(serde::Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct User {
  pub name: String,
}

/// OIDC token response (used for CI-based publishing).
#[derive(serde::Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct OidcTokenResponse {
  pub value: String,
}

/// Error details attached to a failed publishing task.
#[derive(serde::Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct PublishingTaskError {
  #[allow(dead_code)]
  pub code: String,
  pub message: String,
}

/// A registry-side publishing task, polled until it completes or errors.
#[derive(serde::Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct PublishingTask {
  pub id: String,
  pub status: String,
  pub error: Option<PublishingTaskError>,
}

/// Package metadata; only the latest version is needed by the CLI.
#[derive(serde::Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct Package {
  pub latest_version: Option<String>,
}
/// Structured error payload returned by the registry API.
#[derive(serde::Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ApiError {
  pub code: String,
  pub message: String,
  /// Any additional, endpoint-specific fields of the error body.
  #[serde(flatten)]
  pub data: serde_json::Value,
  /// Tracing id taken from the `x-deno-ray` response header; not part of the
  /// JSON body (`#[serde(skip)]`), so callers set it after deserialization.
  #[serde(skip)]
  pub x_deno_ray: Option<String>,
}

impl std::fmt::Display for ApiError {
  fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
    write!(f, "{} ({})", self.message, self.code)?;
    if let Some(x_deno_ray) = &self.x_deno_ray {
      // Leading space so the ray id does not run into the closing
      // parenthesis of "message (code)".
      write!(f, " [x-deno-ray: {}]", x_deno_ray)?;
    }
    Ok(())
  }
}

impl std::fmt::Debug for ApiError {
  fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
    // Debug deliberately matches Display so the user-facing message is shown
    // wherever the error is printed via `{:?}`.
    std::fmt::Display::fmt(self, f)
  }
}

impl std::error::Error for ApiError {}
/// Deserializes a registry API response body into `T`.
///
/// Non-success statuses are converted into an [`ApiError`], preferring the
/// structured error body when it parses and synthesizing an "unknown" error
/// otherwise. The `x-deno-ray` header (a support/tracing id) is attached to
/// every returned error.
pub async fn parse_response<T: DeserializeOwned>(
  response: http::Response<deno_fetch::ResBody>,
) -> Result<T, ApiError> {
  let status = response.status();
  let x_deno_ray = response
    .headers()
    .get("x-deno-ray")
    .and_then(|value| value.to_str().ok())
    .map(|s| s.to_string());
  // Reading the body can fail (e.g. connection reset mid-body); surface that
  // as an ApiError instead of panicking.
  let text = match http_util::body_to_string(response).await {
    Ok(text) => text,
    Err(err) => {
      return Err(ApiError {
        code: "unknown".to_string(),
        message: format!("Failed to read response body: {}", err),
        x_deno_ray,
        data: serde_json::json!({}),
      });
    }
  };
  if !status.is_success() {
    match serde_json::from_str::<ApiError>(&text) {
      Ok(mut err) => {
        err.x_deno_ray = x_deno_ray;
        return Err(err);
      }
      Err(_) => {
        // The body was not a structured error; fall back to status + raw text.
        let err = ApiError {
          code: "unknown".to_string(),
          message: format!("{}: {}", status, text),
          x_deno_ray,
          data: serde_json::json!({}),
        };
        return Err(err);
      }
    }
  }
  serde_json::from_str(&text).map_err(|err| ApiError {
    code: "unknown".to_string(),
    message: format!("Failed to parse response: {}, response: '{}'", err, text),
    x_deno_ray,
    data: serde_json::json!({}),
  })
}
/// Builds the registry API URL string for a package within a scope.
///
/// NOTE(review): the path is appended directly to the URL's string form, so
/// this assumes `registry_api_url` ends with a trailing slash — confirm with
/// callers.
pub fn get_package_api_url(
  registry_api_url: &Url,
  scope: &str,
  package: &str,
) -> String {
  let mut url = registry_api_url.to_string();
  url.push_str("scopes/");
  url.push_str(scope);
  url.push_str("/packages/");
  url.push_str(package);
  url
}
/// Fetches a package's metadata endpoint from the registry API.
///
/// Returns the raw HTTP response; callers are expected to feed it through
/// [`parse_response`] (status is not checked here).
pub async fn get_package(
  client: &HttpClient,
  registry_api_url: &Url,
  scope: &str,
  package: &str,
) -> Result<http::Response<deno_fetch::ResBody>, AnyError> {
  let package_url = get_package_api_url(registry_api_url, scope, package);
  let response = client.get(package_url.parse()?)?.send().await?;
  Ok(response)
}
/// Suggests a replacement specifier for imports that have an equivalent
/// package on npm or JSR.
///
/// Handles two hosts: `esm.sh/<pkg>` maps to `npm:<pkg>`, and
/// `deno.land/std[@ver]/<module>/<export>.ts` maps to
/// `jsr:@std/<module>@1[/<export>]` (with `mod.ts` treated as the root export
/// and underscores converted to dashes). The result is returned as a quoted
/// specifier string ready to splice into source text; `None` when no
/// alternative is known.
pub fn get_jsr_alternative(imported: &Url) -> Option<String> {
  if matches!(imported.host_str(), Some("esm.sh")) {
    let module = imported.path_segments()?.next()?;
    // The "gh/" namespace mirrors GitHub repos, which have no npm package.
    if module == "gh" {
      return None;
    }
    return Some(format!("\"npm:{module}\""));
  }
  if !imported.as_str().starts_with("https://deno.land/") {
    return None;
  }
  let mut segments = imported.path_segments()?;
  let first = segments.next()?;
  if first != "std" && !first.starts_with("std@") {
    return None;
  }
  let module = segments.next()?;
  let export = segments
    .next()
    .filter(|s| *s != "mod.ts")
    .map(|s| s.strip_suffix(".ts").unwrap_or(s).replace("_", "-"));
  let suffix = export.map(|s| format!("/{}", s)).unwrap_or_default();
  Some(format!("\"jsr:@std/{}@1{}\"", module, suffix))
}
#[cfg(test)]
mod test {
  use super::*;

  #[test]
  fn test_jsr_alternative() {
    // (input URL, expected replacement specifier) pairs.
    let cases: &[(&str, Option<&str>)] = &[
      ("https://esm.sh/ts-morph", Some("\"npm:ts-morph\"")),
      (
        "https://deno.land/std/path/mod.ts",
        Some("\"jsr:@std/path@1\""),
      ),
      (
        "https://deno.land/std/path/join.ts",
        Some("\"jsr:@std/path@1/join\""),
      ),
      (
        "https://deno.land/std@0.229.0/path/join.ts",
        Some("\"jsr:@std/path@1/join\""),
      ),
      (
        "https://deno.land/std@0.229.0/path/something_underscore.ts",
        Some("\"jsr:@std/path@1/something-underscore\""),
      ),
    ];
    for (input, expected) in cases {
      let imported = Url::parse(input).unwrap();
      assert_eq!(
        get_jsr_alternative(&imported),
        expected.map(|s| s.to_string()),
        "input: {input}"
      );
    }
  }
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/integration_tests_runner.rs | cli/integration_tests_runner.rs | // Copyright 2018-2025 the Deno authors. MIT license.
/// Intentionally empty entrypoint.
pub fn main() {
  // this file exists to cause the executable to be built when running cargo test
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/type_checker.rs | cli/type_checker.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::collections::HashMap;
use std::collections::HashSet;
use std::collections::VecDeque;
use std::path::PathBuf;
use std::rc::Rc;
use std::sync::Arc;
use deno_ast::MediaType;
use deno_ast::ModuleSpecifier;
use deno_config::deno_json::CompilerOptionTypesDeserializeError;
use deno_core::url::Url;
use deno_error::JsErrorBox;
use deno_graph::Module;
use deno_graph::ModuleGraph;
use deno_lib::util::hash::FastInsecureHasher;
use deno_resolver::deno_json::CompilerOptionsData;
use deno_resolver::deno_json::CompilerOptionsParseError;
use deno_resolver::deno_json::CompilerOptionsResolver;
use deno_resolver::deno_json::JsxImportSourceConfigResolver;
use deno_resolver::deno_json::ToMaybeJsxImportSourceConfigError;
use deno_resolver::graph::maybe_additional_sloppy_imports_message;
use deno_semver::npm::NpmPackageNvReference;
use deno_terminal::colors;
use indexmap::IndexMap;
use once_cell::sync::Lazy;
use regex::Regex;
use crate::args::CliOptions;
use crate::args::CompilerOptions;
use crate::args::DenoSubcommand;
use crate::args::TsTypeLib;
use crate::args::TypeCheckMode;
use crate::cache::CacheDBHash;
use crate::cache::Caches;
use crate::cache::TypeCheckCache;
use crate::graph_util::BuildFastCheckGraphOptions;
use crate::graph_util::ModuleGraphBuilder;
use crate::graph_util::module_error_for_tsc_diagnostic;
use crate::node::CliNodeResolver;
use crate::node::CliPackageJsonResolver;
use crate::npm::CliNpmResolver;
use crate::sys::CliSys;
use crate::tsc;
use crate::tsc::Diagnostics;
use crate::tsc::TypeCheckingCjsTracker;
/// Terminal error for a run whose type checking produced diagnostics.
///
/// When `can_skip` is true (any subcommand other than `deno check`), the
/// message appends a hint that `--no-check` skips type checking.
#[derive(Debug, thiserror::Error, deno_error::JsError)]
#[class(type)]
#[error("Type checking failed.{}", if self.can_skip {
  color_print::cstr!(
    "\n\n <y>info:</y> The program failed type-checking, but it still might work correctly.\n <c>hint:</c> Re-run with <u>--no-check</u> to skip type-checking.",
  )
} else {
  ""
})]
pub struct FailedTypeCheckingError {
  can_skip: bool,
}
/// Errors surfaced by the type checker; all transparent wrappers around the
/// underlying failure.
#[derive(Debug, thiserror::Error, deno_error::JsError)]
pub enum CheckError {
  /// Diagnostics were reported — the user-facing "type checking failed".
  #[class(inherit)]
  #[error(transparent)]
  FailedTypeChecking(#[from] FailedTypeCheckingError),
  /// Could not derive a JSX import source config from compiler options.
  #[class(inherit)]
  #[error(transparent)]
  ToMaybeJsxImportSourceConfig(#[from] ToMaybeJsxImportSourceConfigError),
  /// The tsc execution itself failed (not a diagnostic).
  #[class(inherit)]
  #[error(transparent)]
  TscExec(#[from] tsc::ExecError),
  /// `compilerOptions.types` could not be deserialized.
  #[class(inherit)]
  #[error(transparent)]
  CompilerOptionTypesDeserialize(#[from] CompilerOptionTypesDeserializeError),
  /// Compiler options could not be parsed.
  #[class(inherit)]
  #[error(transparent)]
  CompilerOptionsParse(#[from] CompilerOptionsParseError),
  /// Catch-all for other boxed JS errors.
  #[class(inherit)]
  #[error(transparent)]
  Other(#[from] JsErrorBox),
}
/// Options for performing a check of a module graph. Note that the decision to
/// emit or not is determined by the `compiler_options` settings.
pub struct CheckOptions {
  /// Whether to build the fast check type graph if necessary.
  ///
  /// Note: For perf reasons, the fast check type graph is only
  /// built if type checking is necessary.
  pub build_fast_check_graph: bool,
  /// Default type library to type check with.
  pub lib: TsTypeLib,
  /// If true, valid `.tsbuildinfo` files will be ignored and type checking
  /// will always occur.
  pub reload: bool,
  /// Mode to type check with (all, local-only, or none).
  pub type_check_mode: TypeCheckMode,
}
/// Type checks module graphs by feeding their roots to tsc, with caching of
/// both successful check hashes and `.tsbuildinfo` data.
pub struct TypeChecker {
  // Provides the type-check cache database.
  caches: Arc<Caches>,
  cjs_tracker: Arc<TypeCheckingCjsTracker>,
  cli_options: Arc<CliOptions>,
  // Used to build the fast-check graph before checking, when requested.
  module_graph_builder: Arc<ModuleGraphBuilder>,
  node_resolver: Arc<CliNodeResolver>,
  npm_resolver: CliNpmResolver,
  package_json_resolver: Arc<CliPackageJsonResolver>,
  sys: CliSys,
  compiler_options_resolver: Arc<CompilerOptionsResolver>,
  // Optional V8 code cache shared with tsc execution.
  code_cache: Option<Arc<crate::cache::CodeCache>>,
  // When set, delegates checking to the tsgo binary at this path.
  tsgo_path: Option<PathBuf>,
}
impl TypeChecker {
  #[allow(clippy::too_many_arguments)]
  pub fn new(
    caches: Arc<Caches>,
    cjs_tracker: Arc<TypeCheckingCjsTracker>,
    cli_options: Arc<CliOptions>,
    module_graph_builder: Arc<ModuleGraphBuilder>,
    node_resolver: Arc<CliNodeResolver>,
    npm_resolver: CliNpmResolver,
    package_json_resolver: Arc<CliPackageJsonResolver>,
    sys: CliSys,
    compiler_options_resolver: Arc<CompilerOptionsResolver>,
    code_cache: Option<Arc<crate::cache::CodeCache>>,
    tsgo_path: Option<PathBuf>,
  ) -> Self {
    Self {
      caches,
      cjs_tracker,
      cli_options,
      module_graph_builder,
      node_resolver,
      npm_resolver,
      package_json_resolver,
      sys,
      compiler_options_resolver,
      code_cache,
      tsgo_path,
    }
  }

  /// Type check the module graph.
  ///
  /// It is expected that it is determined if a check and/or emit is validated
  /// before the function is called.
  ///
  /// Logs warnings and diagnostics as it iterates the per-folder results;
  /// returns the (possibly fast-check-augmented) graph on success, or a
  /// `FailedTypeCheckingError` when any folder had diagnostics.
  #[allow(clippy::result_large_err)]
  pub fn check(
    &self,
    graph: ModuleGraph,
    options: CheckOptions,
  ) -> Result<Arc<ModuleGraph>, CheckError> {
    let mut diagnostics = self.check_diagnostics(graph, options)?;
    let mut failed = false;
    for result in diagnostics.by_ref() {
      let mut diagnostics = result?;
      diagnostics.emit_warnings();
      if diagnostics.has_diagnostic() {
        failed = true;
        log::error!("{}\n", diagnostics);
      }
    }
    if failed {
      Err(
        FailedTypeCheckingError {
          // `deno check` users asked for checking explicitly, so no
          // "--no-check" hint is shown for that subcommand.
          can_skip: !matches!(
            self.cli_options.sub_command(),
            DenoSubcommand::Check(_)
          ),
        }
        .into(),
      )
    } else {
      Ok(diagnostics.into_graph())
    }
  }

  /// Type check the module graph returning its diagnostics.
  ///
  /// It is expected that it is determined if a check and/or emit is validated
  /// before the function is called.
  ///
  /// Returns a lazy iterator that runs tsc once per compiler-options group.
  #[allow(clippy::result_large_err)]
  pub fn check_diagnostics(
    &self,
    mut graph: ModuleGraph,
    options: CheckOptions,
  ) -> Result<DiagnosticsByFolderIterator<'_>, CheckError> {
    // Hash of the npm resolution state; used to invalidate cached check
    // results when the installed packages change. None for byonm.
    fn check_state_hash(resolver: &CliNpmResolver) -> Option<u64> {
      match resolver {
        CliNpmResolver::Byonm(_) => {
          // not feasible and probably slower to compute
          None
        }
        CliNpmResolver::Managed(resolver) => {
          // we should probably go further and check all the individual npm packages
          let mut package_reqs = resolver.resolution().package_reqs();
          package_reqs.sort_by(|a, b| a.0.cmp(&b.0)); // determinism
          let mut hasher = FastInsecureHasher::new_without_deno_version();
          // ensure the cache gets busted when turning nodeModulesDir on or off
          // as this could cause changes in resolution
          hasher.write_hashable(resolver.root_node_modules_path().is_some());
          for (pkg_req, pkg_nv) in package_reqs {
            hasher.write_hashable(&pkg_req);
            hasher.write_hashable(&pkg_nv);
          }
          Some(hasher.finish())
        }
      }
    }
    if !options.type_check_mode.is_true() || graph.roots.is_empty() {
      return Ok(DiagnosticsByFolderIterator(
        DiagnosticsByFolderIteratorInner::Empty(Arc::new(graph)),
      ));
    }
    log::debug!("Type checking");
    // add fast check to the graph before getting the roots
    if options.build_fast_check_graph {
      self.module_graph_builder.build_fast_check_graph(
        &mut graph,
        BuildFastCheckGraphOptions {
          workspace_fast_check: deno_graph::WorkspaceFastCheckOption::Disabled,
        },
      )?;
    }
    let graph = Arc::new(graph);
    // split the roots by what we can send to the ts compiler all at once
    let grouped_roots =
      self.group_roots_by_compiler_options(&graph, options.lib)?;
    Ok(DiagnosticsByFolderIterator(
      DiagnosticsByFolderIteratorInner::Real(DiagnosticsByFolderRealIterator {
        graph,
        sys: &self.sys,
        cjs_tracker: &self.cjs_tracker,
        jsx_import_source_config_resolver: Arc::new(
          JsxImportSourceConfigResolver::from_compiler_options_resolver(
            &self.compiler_options_resolver,
          )?,
        ),
        node_resolver: &self.node_resolver,
        npm_resolver: &self.npm_resolver,
        package_json_resolver: &self.package_json_resolver,
        compiler_options_resolver: &self.compiler_options_resolver,
        log_level: self.cli_options.log_level(),
        npm_check_state_hash: check_state_hash(&self.npm_resolver),
        type_check_cache: TypeCheckCache::new(
          self.caches.type_checking_cache_db(),
        ),
        groups: grouped_roots,
        current_group_index: 0,
        options,
        seen_diagnotics: Default::default(),
        code_cache: self.code_cache.clone(),
        tsgo_path: self.tsgo_path.clone(),
        initial_cwd: self.cli_options.initial_cwd().to_path_buf(),
        current_dir: deno_path_util::url_from_directory_path(
          self.cli_options.initial_cwd(),
        )
        .map_err(|e| CheckError::Other(JsErrorBox::from_err(e)))?,
      }),
    ))
  }

  /// Groups the roots based on the compiler options, which includes the
  /// resolved CompilerOptions and resolved compilerOptions.types
  #[allow(clippy::result_large_err)]
  fn group_roots_by_compiler_options<'a>(
    &'a self,
    graph: &ModuleGraph,
    lib: TsTypeLib,
  ) -> Result<Vec<CheckGroup<'a>>, CheckError> {
    let group_count = self.compiler_options_resolver.size();
    let mut imports_for_specifier = HashMap::with_capacity(group_count);
    let mut groups_by_key = IndexMap::with_capacity(group_count);
    for root in &graph.roots {
      let compiler_options_data =
        self.compiler_options_resolver.for_specifier(root);
      let compiler_options =
        compiler_options_data.compiler_options_for_lib(lib)?;
      // Memoize the resolved config imports per source so they are computed
      // once per config file rather than once per root.
      let imports = imports_for_specifier
        .entry(compiler_options_data.sources.last().map(|s| &s.specifier))
        .or_insert_with(|| {
          Rc::new(resolve_graph_imports_for_compiler_options_data(
            graph,
            compiler_options_data,
          ))
        })
        .clone();
      // Roots sharing identical compiler options AND config imports can be
      // checked together in a single tsc invocation.
      let group_key = (compiler_options, imports.clone());
      let group = groups_by_key.entry(group_key).or_insert_with(|| {
        let dir = self.cli_options.workspace().resolve_member_dir(root);
        CheckGroup {
          roots: Default::default(),
          compiler_options,
          imports,
          // this is slightly hacky. It's used as the referrer for resolving
          // npm imports in the key
          referrer: dir
            .member_or_root_deno_json()
            .map(|d| d.specifier.clone())
            .unwrap_or_else(|| dir.dir_url().as_ref().clone()),
        }
      });
      group.roots.push(root.clone());
    }
    Ok(groups_by_key.into_values().collect())
  }
}
/// Resolves the graph imports contributed by a config's sources.
///
/// This relies on 'graph imports' strictly referring to tsconfig `files` and
/// `compilerOptions.types` (which they currently do); if they were more
/// general than that, there would not be sufficient context to group them for
/// type checking. The result is sorted for deterministic grouping.
fn resolve_graph_imports_for_compiler_options_data(
  graph: &ModuleGraph,
  compiler_options: &CompilerOptionsData,
) -> Vec<Url> {
  let mut specifiers = Vec::new();
  for source in &compiler_options.sources {
    let Some(graph_import) = graph.imports.get(source.specifier.as_ref())
    else {
      continue;
    };
    for dep in graph_import.dependencies.values() {
      // Prefer the type dependency, falling back to the code dependency.
      let Some(dep_specifier) = dep.get_type().or_else(|| dep.get_code())
      else {
        continue;
      };
      specifiers.push(graph.resolve(dep_specifier).clone());
    }
  }
  specifiers.sort();
  specifiers
}
/// A set of roots that can be type checked together in one tsc invocation
/// because they share identical compiler options and config imports.
#[derive(Debug)]
struct CheckGroup<'a> {
  roots: Vec<Url>,
  // Config-file imports (tsconfig `files` / `compilerOptions.types`).
  imports: Rc<Vec<Url>>,
  // Referrer used for resolving npm imports within this group.
  referrer: Url,
  compiler_options: &'a Arc<CompilerOptions>,
}
/// Public iterator over per-group type-check diagnostics; wraps either the
/// trivial "nothing to check" case or the real per-folder iterator.
pub struct DiagnosticsByFolderIterator<'a>(
  DiagnosticsByFolderIteratorInner<'a>,
);

impl DiagnosticsByFolderIterator<'_> {
  /// Consumes the iterator and returns the (possibly fast-check-augmented)
  /// module graph it was checking.
  pub fn into_graph(self) -> Arc<ModuleGraph> {
    match self.0 {
      DiagnosticsByFolderIteratorInner::Empty(module_graph) => module_graph,
      DiagnosticsByFolderIteratorInner::Real(r) => r.graph,
    }
  }
}

impl Iterator for DiagnosticsByFolderIterator<'_> {
  type Item = Result<Diagnostics, CheckError>;
  fn next(&mut self) -> Option<Self::Item> {
    match &mut self.0 {
      // Empty variant never yields — the graph had no roots or checking was off.
      DiagnosticsByFolderIteratorInner::Empty(_) => None,
      DiagnosticsByFolderIteratorInner::Real(r) => r.next(),
    }
  }
}
// `Real` is large, but there is only ever one of these per check run.
#[allow(clippy::large_enum_variant)]
enum DiagnosticsByFolderIteratorInner<'a> {
  /// Nothing to check; still carries the graph for `into_graph`.
  Empty(Arc<ModuleGraph>),
  Real(DiagnosticsByFolderRealIterator<'a>),
}
/// Runs tsc once per `CheckGroup` and yields each group's diagnostics.
struct DiagnosticsByFolderRealIterator<'a> {
  graph: Arc<ModuleGraph>,
  sys: &'a CliSys,
  cjs_tracker: &'a Arc<TypeCheckingCjsTracker>,
  jsx_import_source_config_resolver: Arc<JsxImportSourceConfigResolver>,
  node_resolver: &'a Arc<CliNodeResolver>,
  npm_resolver: &'a CliNpmResolver,
  package_json_resolver: &'a Arc<CliPackageJsonResolver>,
  compiler_options_resolver: &'a CompilerOptionsResolver,
  type_check_cache: TypeCheckCache,
  groups: Vec<CheckGroup<'a>>,
  // Index of the next group to check; advanced by `next()`.
  current_group_index: usize,
  log_level: Option<log::Level>,
  npm_check_state_hash: Option<u64>,
  // Deduplicates located diagnostics across groups.
  // NOTE(review): field name has a typo ("diagnotics"); renaming would touch
  // several items, so it is only flagged here.
  seen_diagnotics: HashSet<String>,
  options: CheckOptions,
  code_cache: Option<Arc<crate::cache::CodeCache>>,
  tsgo_path: Option<PathBuf>,
  initial_cwd: PathBuf,
  // `initial_cwd` as a directory URL, used for relative display of roots.
  current_dir: Url,
}
impl Iterator for DiagnosticsByFolderRealIterator<'_> {
  type Item = Result<Diagnostics, CheckError>;
  fn next(&mut self) -> Option<Self::Item> {
    let check_group = self.groups.get(self.current_group_index)?;
    self.current_group_index += 1;
    let mut result = self.check_diagnostics_in_folder(check_group);
    if let Ok(diagnostics) = &mut result {
      // Drop diagnostics already reported for a previous group so the same
      // file/position/message is not shown once per config.
      diagnostics.retain(|d| {
        if let (Some(file_name), Some(start)) = (&d.file_name, &d.start) {
          let data = format!(
            "{}{}:{}:{}{}",
            d.code,
            file_name,
            start.line,
            start.character,
            d.message_text.as_deref().unwrap_or_default()
          );
          self.seen_diagnotics.insert(data)
        } else {
          // show these for each type of config
          true
        }
      });
    }
    Some(result)
  }
}
/// Converts the list of ambient module names to regex string
pub fn ambient_modules_to_regex_string(ambient_modules: &[String]) -> String {
let mut regex_string = String::with_capacity(ambient_modules.len() * 8);
regex_string.push_str("^(");
let last = ambient_modules.len() - 1;
for (idx, part) in ambient_modules.iter().enumerate() {
let trimmed = part.trim_matches('"');
let escaped = regex::escape(trimmed);
let regex = escaped.replace("\\*", ".*");
regex_string.push_str(®ex);
if idx != last {
regex_string.push('|');
}
}
regex_string.push_str(")$");
regex_string
}
impl DiagnosticsByFolderRealIterator<'_> {
  /// Type checks one group of roots, returning its diagnostics.
  ///
  /// Walks the graph to compute tsc root names and "missing module"
  /// diagnostics, short-circuits via the check-hash cache when possible,
  /// runs tsc, filters diagnostics (remote/ambient-module rules), and
  /// persists `.tsbuildinfo` plus the success hash.
  #[allow(clippy::too_many_arguments)]
  #[allow(clippy::result_large_err)]
  fn check_diagnostics_in_folder(
    &self,
    check_group: &CheckGroup,
  ) -> Result<Diagnostics, CheckError> {
    // Logs a "Check <path>" line for each root being checked.
    fn log_provided_roots(provided_roots: &[Url], current_dir: &Url) {
      for root in provided_roots {
        log::info!(
          "{} {}",
          colors::green("Check"),
          crate::util::path::relative_specifier_path_for_display(
            current_dir,
            root
          ),
        );
      }
    }
    // walk the graph
    let mut graph_walker = GraphWalker::new(
      &self.graph,
      self.sys,
      self.node_resolver,
      self.npm_resolver,
      self.compiler_options_resolver,
      self.npm_check_state_hash,
      check_group.compiler_options,
      self.options.type_check_mode,
    );
    for import in check_group.imports.iter() {
      graph_walker.add_config_import(import, &check_group.referrer);
    }
    for root in &check_group.roots {
      graph_walker.add_root(root);
    }
    let TscRoots {
      roots: root_names,
      missing_diagnostics,
      maybe_check_hash,
    } = graph_walker.into_tsc_roots();
    let mut missing_diagnostics = missing_diagnostics.filter(|d| {
      self.should_include_diagnostic(self.options.type_check_mode, d)
    });
    missing_diagnostics.apply_fast_check_source_maps(&self.graph);
    if root_names.is_empty() {
      // Nothing for tsc to do; only report missing-module diagnostics.
      if missing_diagnostics.has_diagnostic() {
        log_provided_roots(&check_group.roots, &self.current_dir);
      }
      return Ok(missing_diagnostics);
    }
    if !self.options.reload && !missing_diagnostics.has_diagnostic() {
      // do not type check if we know this is type checked
      if let Some(check_hash) = maybe_check_hash
        && self.type_check_cache.has_check_hash(check_hash)
      {
        log::debug!("Already type checked {}", &check_group.referrer);
        return Ok(Default::default());
      }
    }
    // log out the roots that we're checking
    log_provided_roots(&check_group.roots, &self.current_dir);
    // the first root will always either be the specifier that the user provided
    // or the first specifier in a directory
    let first_root = check_group
      .roots
      .first()
      .expect("must be at least one root");
    // while there might be multiple roots, we can't "merge" the build info, so we
    // try to retrieve the build info for first root, which is the most common use
    // case.
    let maybe_tsbuildinfo = if self.options.reload {
      None
    } else {
      self.type_check_cache.get_tsbuildinfo(first_root)
    };
    // to make tsc build info work, we need to consistently hash modules, so that
    // tsc can better determine if an emit is still valid or not, so we provide
    // that data here.
    let compiler_options_hash_data = FastInsecureHasher::new_deno_versioned()
      .write_hashable(check_group.compiler_options)
      .finish();
    // Coerce to the trait-object form expected by tsc::exec.
    let code_cache = self.code_cache.as_ref().map(|c| {
      let c: Arc<dyn deno_runtime::code_cache::CodeCache> = c.clone();
      c
    });
    let response = tsc::exec(
      tsc::Request {
        config: check_group.compiler_options.clone(),
        debug: self.log_level == Some(log::Level::Debug),
        graph: self.graph.clone(),
        jsx_import_source_config_resolver: self
          .jsx_import_source_config_resolver
          .clone(),
        hash_data: compiler_options_hash_data,
        maybe_npm: Some(tsc::RequestNpmState {
          cjs_tracker: self.cjs_tracker.clone(),
          node_resolver: self.node_resolver.clone(),
          npm_resolver: self.npm_resolver.clone(),
          package_json_resolver: self.package_json_resolver.clone(),
        }),
        maybe_tsbuildinfo,
        root_names,
        check_mode: self.options.type_check_mode,
        initial_cwd: self.initial_cwd.clone(),
      },
      code_cache,
      self.tsgo_path.as_deref(),
    )?;
    let ambient_modules = response.ambient_modules;
    log::debug!("Ambient Modules: {:?}", ambient_modules);
    // A bad ambient-module pattern only disables the filter (warn + None)
    // rather than failing the whole check.
    let ambient_modules_regex = if ambient_modules.is_empty() {
      None
    } else {
      regex::Regex::new(&ambient_modules_to_regex_string(&ambient_modules))
        .inspect_err(|e| {
          log::warn!("Failed to create regex for ambient modules: {}", e);
        })
        .ok()
    };
    let mut response_diagnostics = response.diagnostics.filter(|d| {
      self.should_include_diagnostic(self.options.type_check_mode, d)
    });
    response_diagnostics.apply_fast_check_source_maps(&self.graph);
    // Missing-module diagnostics whose specifier matches a declared ambient
    // module are not actually missing — drop them.
    let mut diagnostics = missing_diagnostics.filter(|d| {
      if let Some(ambient_modules_regex) = &ambient_modules_regex
        && let Some(missing_specifier) = &d.missing_specifier
      {
        return !ambient_modules_regex.is_match(missing_specifier);
      }
      true
    });
    diagnostics.extend(response_diagnostics);
    if let Some(tsbuildinfo) = response.maybe_tsbuildinfo {
      self
        .type_check_cache
        .set_tsbuildinfo(first_root, &tsbuildinfo);
    }
    // Only record the success hash when the group was diagnostic-free, so a
    // later identical run can skip checking entirely.
    if !diagnostics.has_diagnostic()
      && let Some(check_hash) = maybe_check_hash
    {
      self.type_check_cache.add_check_hash(check_hash);
    }
    log::debug!("{}", response.stats);
    Ok(diagnostics)
  }

  /// Whether a diagnostic should be surfaced given the check mode; remote
  /// diagnostics are only shown in `All` mode (and only eligible kinds).
  fn should_include_diagnostic(
    &self,
    type_check_mode: TypeCheckMode,
    d: &tsc::Diagnostic,
  ) -> bool {
    // this shouldn't check for duplicate diagnostics across folders because
    // we don't want to accidentally mark a folder as being successful and save
    // to the check cache if a previous folder caused a diagnostic
    if self.is_remote_diagnostic(d) {
      type_check_mode == TypeCheckMode::All && d.include_when_remote()
    } else {
      true
    }
  }

  /// A diagnostic is "remote" when it points at an http(s) module or a file
  /// inside an npm package.
  fn is_remote_diagnostic(&self, d: &tsc::Diagnostic) -> bool {
    let Some(file_name) = &d.file_name else {
      return false;
    };
    if file_name.starts_with("https://") || file_name.starts_with("http://") {
      return true;
    }
    // check if in an npm package
    let Ok(specifier) = ModuleSpecifier::parse(file_name) else {
      return false;
    };
    self.node_resolver.in_npm_package(&specifier)
  }
}
/// Result of walking the graph: what to hand to tsc plus early diagnostics.
struct TscRoots {
  // Root specifiers with their media types (tsc does not infer these).
  roots: Vec<(ModuleSpecifier, MediaType)>,
  // Diagnostics for modules that failed to load/resolve during the walk.
  missing_diagnostics: tsc::Diagnostics,
  // Hash identifying this exact check; None when it cannot be computed.
  maybe_check_hash: Option<CacheDBHash>,
}
/// Walks a module graph to collect the tsc roots, missing-module diagnostics,
/// and (optionally) a hash of everything that affects the check result.
struct GraphWalker<'a> {
  graph: &'a ModuleGraph,
  sys: &'a CliSys,
  node_resolver: &'a CliNodeResolver,
  npm_resolver: &'a CliNpmResolver,
  compiler_options_resolver: &'a CompilerOptionsResolver,
  // When present, fed with everything visited to produce the check hash.
  maybe_hasher: Option<FastInsecureHasher>,
  // Specifiers already enqueued/visited, to avoid re-walking shared deps.
  seen: HashSet<&'a Url>,
  // Work queue; the bool flag's meaning is set where items are pushed.
  pending: VecDeque<(&'a Url, bool)>,
  has_seen_node_builtin: bool,
  roots: Vec<(ModuleSpecifier, MediaType)>,
  missing_diagnostics: tsc::Diagnostics,
}
impl<'a> GraphWalker<'a> {
  /// Creates a walker; when `npm_cache_state_hash` is `Some`, a check hash is
  /// accumulated (seeded with the npm state, check mode, node-specifier flag,
  /// and compiler options) so identical runs can be skipped later.
  #[allow(clippy::too_many_arguments)]
  pub fn new(
    graph: &'a ModuleGraph,
    sys: &'a CliSys,
    node_resolver: &'a CliNodeResolver,
    npm_resolver: &'a CliNpmResolver,
    compiler_options_resolver: &'a CompilerOptionsResolver,
    npm_cache_state_hash: Option<u64>,
    compiler_options: &CompilerOptions,
    type_check_mode: TypeCheckMode,
  ) -> Self {
    let maybe_hasher = npm_cache_state_hash.map(|npm_cache_state_hash| {
      let mut hasher = FastInsecureHasher::new_deno_versioned();
      hasher.write_hashable(npm_cache_state_hash);
      hasher.write_u8(match type_check_mode {
        TypeCheckMode::All => 0,
        TypeCheckMode::Local => 1,
        TypeCheckMode::None => 2,
      });
      hasher.write_hashable(graph.has_node_specifier);
      hasher.write_hashable(compiler_options);
      hasher
    });
    Self {
      graph,
      sys,
      node_resolver,
      npm_resolver,
      compiler_options_resolver,
      maybe_hasher,
      // Pre-size for the worst case of visiting every specifier and import.
      seen: HashSet::with_capacity(
        graph.imports.len() + graph.specifiers_count(),
      ),
      pending: VecDeque::new(),
      has_seen_node_builtin: false,
      roots: Vec::with_capacity(graph.imports.len() + graph.specifiers_count()),
      missing_diagnostics: Default::default(),
    }
  }
pub fn add_config_import(&mut self, specifier: &'a Url, referrer: &Url) {
let specifier = self.graph.resolve(specifier);
if self.seen.insert(specifier) {
match NpmPackageNvReference::from_specifier(specifier) {
Ok(nv_ref) => match self.resolve_npm_nv_ref(&nv_ref, referrer) {
Some(resolved) => {
let mt = MediaType::from_specifier(&resolved);
self.roots.push((resolved, mt));
}
None => {
self
.missing_diagnostics
.push(tsc::Diagnostic::from_missing_error(
specifier.as_str(),
None,
maybe_additional_sloppy_imports_message(self.sys, specifier),
));
}
},
_ => {
self.pending.push_back((specifier, false));
self.resolve_pending();
}
}
}
}
pub fn add_root(&mut self, root: &'a Url) {
let specifier = self.graph.resolve(root);
if self.seen.insert(specifier) {
self.pending.push_back((specifier, false));
}
self.resolve_pending()
}
/// Transform the graph into root specifiers that we can feed `tsc`. We have to
/// provide the media type for root modules because `tsc` does not "resolve" the
/// media type like other modules, as well as a root specifier needs any
/// redirects resolved. We need to include all the emittable files in
/// the roots, so they get type checked and optionally emitted,
/// otherwise they would be ignored if only imported into JavaScript.
pub fn into_tsc_roots(mut self) -> TscRoots {
if self.has_seen_node_builtin && !self.roots.is_empty() {
// inject a specifier that will force node types to be resolved
self.roots.push((
ModuleSpecifier::parse("asset:///reference_types_node.d.ts").unwrap(),
MediaType::Dts,
));
}
TscRoots {
roots: self.roots,
missing_diagnostics: self.missing_diagnostics,
maybe_check_hash: self.maybe_hasher.map(|h| CacheDBHash::new(h.finish())),
}
}
fn resolve_pending(&mut self) {
while let Some((specifier, is_dynamic)) = self.pending.pop_front() {
let module = match self.graph.try_get(specifier) {
Ok(Some(module)) => module,
Ok(None) => continue,
Err(err) => {
if !is_dynamic
&& let Some(err) = module_error_for_tsc_diagnostic(self.sys, err)
{
self
.missing_diagnostics
.push(tsc::Diagnostic::from_missing_error(
err.specifier.as_str(),
err.maybe_range,
maybe_additional_sloppy_imports_message(
self.sys,
err.specifier,
),
));
}
continue;
}
};
if is_dynamic && !self.seen.insert(specifier) {
continue;
}
if let Some(entry) = self.maybe_get_check_entry(module) {
self.roots.push(entry);
}
let mut maybe_module_dependencies = None;
let mut maybe_types_dependency = None;
match module {
Module::Js(module) => {
maybe_module_dependencies =
Some(module.dependencies_prefer_fast_check());
maybe_types_dependency = module
.maybe_types_dependency
.as_ref()
.and_then(|d| d.dependency.ok());
}
Module::Wasm(module) => {
maybe_module_dependencies = Some(&module.dependencies);
}
Module::Json(_) | Module::Npm(_) => {}
Module::External(module) => {
// NPM files for `"nodeModulesDir": "manual"`.
let media_type = MediaType::from_specifier(&module.specifier);
if media_type.is_declaration() {
self.roots.push((module.specifier.clone(), media_type));
}
}
Module::Node(_) => {
if !self.has_seen_node_builtin {
self.has_seen_node_builtin = true;
}
}
}
if module.media_type().is_declaration() {
let compiler_options_data = self
.compiler_options_resolver
.for_specifier(module.specifier());
if compiler_options_data.skip_lib_check() {
continue;
}
}
if let Some(deps) = maybe_module_dependencies {
for dep in deps.values() {
// walk both the code and type dependencies
for resolution in [&dep.maybe_type, &dep.maybe_code] {
match resolution {
deno_graph::Resolution::Ok(resolution) => {
self.handle_specifier(&resolution.specifier, dep.is_dynamic);
}
deno_graph::Resolution::Err(_) | deno_graph::Resolution::None => {
}
}
}
if dep.is_dynamic {
continue;
}
// only surface the code error if there's no type
let dep_to_check_error = if dep.maybe_type.is_none() {
&dep.maybe_code
} else {
&dep.maybe_type
};
if let deno_graph::Resolution::Err(resolution_error) =
dep_to_check_error
&& let Some(diagnostic) =
tsc::Diagnostic::maybe_from_resolution_error(resolution_error)
{
self.missing_diagnostics.push(diagnostic);
}
}
}
if let Some(dep) = maybe_types_dependency {
self.handle_specifier(&dep.specifier, false);
}
}
}
fn maybe_get_check_entry(
&mut self,
module: &deno_graph::Module,
) -> Option<(ModuleSpecifier, MediaType)> {
match module {
Module::Js(module) => {
let result = match module.media_type {
MediaType::TypeScript
| MediaType::Tsx
| MediaType::Mts
| MediaType::Cts
| MediaType::Dts
| MediaType::Dmts
| MediaType::Dcts => {
Some((module.specifier.clone(), module.media_type))
}
MediaType::JavaScript
| MediaType::Mjs
| MediaType::Cjs
| MediaType::Jsx => {
if self
.compiler_options_resolver
.for_specifier(&module.specifier)
.check_js()
|| has_ts_check(module.media_type, &module.source.text)
{
Some((module.specifier.clone(), module.media_type))
} else {
None
}
}
MediaType::Json
| MediaType::Jsonc
| MediaType::Json5
| MediaType::Wasm
| MediaType::Css
| MediaType::Html
| MediaType::SourceMap
| MediaType::Sql
| MediaType::Unknown => None,
};
if result.is_some()
&& let Some(hasher) = &mut self.maybe_hasher
{
hasher.write_str(module.specifier.as_str());
hasher.write_str(
// the fast check module will only be set when publishing
module
.fast_check_module()
.map(|s| s.source.as_ref())
.unwrap_or(&module.source.text),
);
}
result
}
Module::Node(_) => {
// the @types/node package will be in the resolved
// snapshot so don't bother including it in the hash
None
}
Module::Npm(_) => {
// don't bother adding this specifier to the hash
// because what matters is the resolved npm snapshot,
// which is hashed below
None
}
Module::Json(module) => {
if let Some(hasher) = &mut self.maybe_hasher {
hasher.write_str(module.specifier.as_str());
hasher.write_str(&module.source.text);
}
None
}
Module::Wasm(module) => {
if let Some(hasher) = &mut self.maybe_hasher {
hasher.write_str(module.specifier.as_str());
hasher.write_str(&module.source_dts);
}
Some((module.specifier.clone(), MediaType::Dmts))
}
Module::External(module) => {
if let Some(hasher) = &mut self.maybe_hasher {
hasher.write_str(module.specifier.as_str());
}
None
}
}
}
fn handle_specifier(
&mut self,
specifier: &'a ModuleSpecifier,
is_dynamic: bool,
) {
let specifier = self.graph.resolve(specifier);
if is_dynamic {
if !self.seen.contains(specifier) {
self.pending.push_back((specifier, true));
}
} else if self.seen.insert(specifier) {
self.pending.push_back((specifier, false));
}
}
fn resolve_npm_nv_ref(
&self,
nv_ref: &NpmPackageNvReference,
referrer: &ModuleSpecifier,
) -> Option<ModuleSpecifier> {
let pkg_dir = self
.npm_resolver
.as_managed()
.unwrap()
.resolve_pkg_folder_from_deno_module(nv_ref.nv())
.ok()?;
let resolved = self
.node_resolver
.resolve_package_subpath_from_deno_module(
&pkg_dir,
nv_ref.sub_path(),
Some(referrer),
node_resolver::ResolutionMode::Import,
node_resolver::NodeResolutionKind::Types,
)
.ok()?;
resolved.into_url().ok()
}
}
/// Matches the `@ts-check` pragma.
static TS_CHECK_RE: Lazy<Regex> =
lazy_regex::lazy_regex!(r#"(?i)^\s*@ts-check(?:\s+|$)"#);
fn has_ts_check(media_type: MediaType, file_text: &str) -> bool {
match &media_type {
MediaType::JavaScript
| MediaType::Mjs
| MediaType::Cjs
| MediaType::Jsx => get_leading_comments(file_text)
.iter()
.any(|text| TS_CHECK_RE.is_match(text)),
MediaType::TypeScript
| MediaType::Mts
| MediaType::Cts
| MediaType::Dts
| MediaType::Dcts
| MediaType::Dmts
| MediaType::Tsx
| MediaType::Json
| MediaType::Jsonc
| MediaType::Json5
| MediaType::Wasm
| MediaType::Css
| MediaType::Html
| MediaType::SourceMap
| MediaType::Sql
| MediaType::Unknown => false,
}
}
fn get_leading_comments(file_text: &str) -> Vec<String> {
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | true |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/resolver.rs | cli/resolver.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use deno_resolver::npm::DenoInNpmPackageChecker;
use node_resolver::DenoIsBuiltInNodeModuleChecker;
use crate::npm::CliNpmResolver;
use crate::sys::CliSys;
pub type CliCjsTracker =
deno_resolver::cjs::CjsTracker<DenoInNpmPackageChecker, CliSys>;
pub type CliIsCjsResolver =
deno_resolver::cjs::IsCjsResolver<DenoInNpmPackageChecker, CliSys>;
pub type CliNpmReqResolver = deno_resolver::npm::NpmReqResolver<
DenoInNpmPackageChecker,
DenoIsBuiltInNodeModuleChecker,
CliNpmResolver,
CliSys,
>;
pub type CliResolver = deno_resolver::graph::DenoResolver<
DenoInNpmPackageChecker,
DenoIsBuiltInNodeModuleChecker,
CliNpmResolver,
CliSys,
>;
pub fn on_resolve_diagnostic(
diagnostic: deno_resolver::graph::MappedResolutionDiagnosticWithPosition,
) {
log::warn!(
"{} {}\n at {}:{}",
deno_runtime::colors::yellow("Warning"),
diagnostic.diagnostic,
diagnostic.referrer,
diagnostic.start
);
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/graph_util.rs | cli/graph_util.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::borrow::Cow;
use std::collections::HashSet;
use std::path::PathBuf;
use std::sync::Arc;
use deno_config::deno_json;
use deno_config::deno_json::CompilerOptionTypesDeserializeError;
use deno_config::deno_json::NodeModulesDirMode;
use deno_config::workspace::JsrPackageConfig;
use deno_core::ModuleSpecifier;
use deno_core::error::AnyError;
use deno_core::parking_lot::Mutex;
use deno_core::serde_json;
use deno_error::JsErrorBox;
use deno_error::JsErrorClass;
use deno_graph::CheckJsOption;
use deno_graph::GraphKind;
use deno_graph::JsrLoadError;
use deno_graph::ModuleError;
use deno_graph::ModuleErrorKind;
use deno_graph::ModuleGraph;
use deno_graph::ModuleGraphError;
use deno_graph::ModuleLoadError;
use deno_graph::ResolutionError;
use deno_graph::SpecifierError;
use deno_graph::WorkspaceFastCheckOption;
use deno_graph::packages::JsrVersionResolver;
use deno_graph::source::Loader;
use deno_graph::source::ResolveError;
use deno_lib::util::result::downcast_ref_deno_resolve_error;
use deno_npm_installer::PackageCaching;
use deno_npm_installer::graph::NpmCachingStrategy;
use deno_path_util::url_to_file_path;
use deno_resolver::cache::ParsedSourceCache;
use deno_resolver::deno_json::CompilerOptionsResolver;
use deno_resolver::deno_json::JsxImportSourceConfigResolver;
use deno_resolver::deno_json::ToMaybeJsxImportSourceConfigError;
use deno_resolver::file_fetcher::GraphLoaderReporterRc;
use deno_resolver::graph::EnhanceGraphErrorMode;
use deno_resolver::graph::enhance_graph_error;
use deno_resolver::graph::enhanced_integrity_error_message;
use deno_resolver::graph::format_deno_graph_error;
use deno_resolver::npm::DenoInNpmPackageChecker;
use deno_runtime::deno_permissions::PermissionsContainer;
use deno_semver::SmallStackString;
use deno_semver::jsr::JsrDepPackageReq;
use import_map::ImportMapErrorKind;
use indexmap::IndexMap;
use node_resolver::errors::NodeJsErrorCode;
use sys_traits::FsMetadata;
use crate::args::CliLockfile;
use crate::args::CliOptions;
use crate::args::config_to_deno_graph_workspace_member;
use crate::args::jsr_url;
use crate::cache;
use crate::cache::GlobalHttpCache;
use crate::cache::ModuleInfoCache;
use crate::colors;
use crate::file_fetcher::CliDenoGraphLoader;
use crate::file_fetcher::CliFileFetcher;
use crate::npm::CliNpmGraphResolver;
use crate::npm::CliNpmInstaller;
use crate::npm::CliNpmResolver;
use crate::resolver::CliCjsTracker;
use crate::resolver::CliResolver;
use crate::sys::CliSys;
use crate::type_checker::CheckError;
use crate::type_checker::CheckOptions;
use crate::type_checker::TypeChecker;
use crate::util::file_watcher::WatcherCommunicator;
use crate::util::fs::canonicalize_path;
use crate::util::progress_bar::ProgressBar;
#[derive(Clone)]
pub struct GraphValidOptions<'a> {
pub check_js: CheckJsOption<'a>,
pub kind: GraphKind,
pub will_type_check: bool,
/// Whether to exit the process for integrity check errors such as
/// lockfile checksum mismatches and JSR integrity failures.
/// Otherwise, surfaces integrity errors as errors.
pub exit_integrity_errors: bool,
pub allow_unknown_media_types: bool,
pub allow_unknown_jsr_exports: bool,
}
/// Check if `roots` and their deps are available. Returns `Ok(())` if
/// so. Returns `Err(_)` if there is a known module graph or resolution
/// error statically reachable from `roots`.
///
/// It is preferable to use this over using deno_graph's API directly
/// because it will have enhanced error message information specifically
/// for the CLI.
pub fn graph_valid(
graph: &ModuleGraph,
sys: &CliSys,
roots: &[ModuleSpecifier],
options: GraphValidOptions,
) -> Result<(), JsErrorBox> {
if options.exit_integrity_errors {
graph_exit_integrity_errors(graph);
}
let mut errors = graph_walk_errors(
graph,
sys,
roots,
GraphWalkErrorsOptions {
check_js: options.check_js,
kind: options.kind,
will_type_check: options.will_type_check,
allow_unknown_media_types: options.allow_unknown_media_types,
allow_unknown_jsr_exports: options.allow_unknown_jsr_exports,
},
);
match errors.next() {
Some(error) => Err(error),
_ => {
// finally surface the npm resolution result
if let Err(err) = &graph.npm_dep_graph_result {
return Err(JsErrorBox::new(
err.get_class(),
format_deno_graph_error(err),
));
}
Ok(())
}
}
}
#[derive(Clone)]
pub struct GraphWalkErrorsOptions<'a> {
pub check_js: CheckJsOption<'a>,
pub kind: GraphKind,
pub will_type_check: bool,
pub allow_unknown_media_types: bool,
pub allow_unknown_jsr_exports: bool,
}
/// Walks the errors found in the module graph that should be surfaced to users
/// and enhances them with CLI information.
pub fn graph_walk_errors<'a>(
graph: &'a ModuleGraph,
sys: &'a CliSys,
roots: &'a [ModuleSpecifier],
options: GraphWalkErrorsOptions<'a>,
) -> impl Iterator<Item = JsErrorBox> + 'a {
fn should_ignore_error(
sys: &CliSys,
graph_kind: GraphKind,
allow_unknown_media_types: bool,
will_type_check: bool,
error: &ModuleGraphError,
) -> bool {
if (graph_kind == GraphKind::TypesOnly || allow_unknown_media_types)
&& matches!(
error.as_module_error_kind(),
Some(ModuleErrorKind::UnsupportedMediaType { .. })
)
{
return true;
}
// surface these as typescript diagnostics instead
will_type_check && has_module_graph_error_for_tsc_diagnostic(sys, error)
}
graph
.walk(
roots.iter(),
deno_graph::WalkOptions {
check_js: options.check_js,
kind: options.kind,
follow_dynamic: false,
prefer_fast_check_graph: false,
},
)
.errors()
.flat_map(move |error| {
if should_ignore_error(
sys,
graph.graph_kind(),
options.allow_unknown_media_types,
options.will_type_check,
&error,
) {
log::debug!("Ignoring: {}", error);
return None;
}
let is_root = match &error {
ModuleGraphError::ResolutionError(_)
| ModuleGraphError::TypesResolutionError(_) => false,
ModuleGraphError::ModuleError(error) => {
roots.contains(error.specifier())
}
};
if is_root
&& options.allow_unknown_jsr_exports
&& matches!(
error.as_module_error_kind(),
Some(ModuleErrorKind::Load {
err: ModuleLoadError::Jsr(JsrLoadError::UnknownExport { .. }),
..
})
)
{
return None;
}
let message = enhance_graph_error(
sys,
&error,
if is_root {
EnhanceGraphErrorMode::HideRange
} else {
EnhanceGraphErrorMode::ShowRange
},
);
Some(JsErrorBox::new(error.get_class(), message))
})
}
fn has_module_graph_error_for_tsc_diagnostic(
sys: &CliSys,
error: &ModuleGraphError,
) -> bool {
match error {
ModuleGraphError::ModuleError(error) => {
module_error_for_tsc_diagnostic(sys, error).is_some()
}
ModuleGraphError::ResolutionError(error) => {
resolution_error_for_tsc_diagnostic(error).is_some()
}
ModuleGraphError::TypesResolutionError(error) => {
resolution_error_for_tsc_diagnostic(error).is_some()
}
}
}
pub struct ModuleNotFoundGraphErrorRef<'a> {
pub specifier: &'a ModuleSpecifier,
pub maybe_range: Option<&'a deno_graph::Range>,
}
pub fn module_error_for_tsc_diagnostic<'a>(
sys: &CliSys,
error: &'a ModuleError,
) -> Option<ModuleNotFoundGraphErrorRef<'a>> {
match error.as_kind() {
ModuleErrorKind::Missing {
specifier,
maybe_referrer,
} => Some(ModuleNotFoundGraphErrorRef {
specifier,
maybe_range: maybe_referrer.as_ref(),
}),
ModuleErrorKind::Load {
specifier,
maybe_referrer,
err: ModuleLoadError::Loader(_),
} => {
if let Ok(path) = deno_path_util::url_to_file_path(specifier)
&& sys.fs_is_dir_no_err(path)
{
return Some(ModuleNotFoundGraphErrorRef {
specifier,
maybe_range: maybe_referrer.as_ref(),
});
}
None
}
_ => None,
}
}
#[derive(Debug)]
pub struct ResolutionErrorRef<'a> {
pub specifier: &'a str,
pub range: &'a deno_graph::Range,
pub is_module_not_found: bool,
}
pub fn resolution_error_for_tsc_diagnostic(
error: &ResolutionError,
) -> Option<ResolutionErrorRef<'_>> {
fn is_module_not_found_code(code: NodeJsErrorCode) -> bool {
match code {
NodeJsErrorCode::ERR_INVALID_MODULE_SPECIFIER
| NodeJsErrorCode::ERR_INVALID_PACKAGE_CONFIG
| NodeJsErrorCode::ERR_INVALID_PACKAGE_TARGET
| NodeJsErrorCode::ERR_UNKNOWN_FILE_EXTENSION
| NodeJsErrorCode::ERR_UNSUPPORTED_DIR_IMPORT
| NodeJsErrorCode::ERR_UNSUPPORTED_ESM_URL_SCHEME
| NodeJsErrorCode::ERR_INVALID_FILE_URL_PATH
| NodeJsErrorCode::ERR_PACKAGE_IMPORT_NOT_DEFINED
| NodeJsErrorCode::ERR_PACKAGE_PATH_NOT_EXPORTED => false,
NodeJsErrorCode::ERR_MODULE_NOT_FOUND
| NodeJsErrorCode::ERR_TYPES_NOT_FOUND
| NodeJsErrorCode::ERR_UNKNOWN_BUILTIN_MODULE => true,
}
}
match error {
ResolutionError::InvalidDowngrade { .. }
| ResolutionError::InvalidJsrHttpsTypesImport { .. }
| ResolutionError::InvalidLocalImport { .. } => None,
ResolutionError::InvalidSpecifier { error, range } => match error {
SpecifierError::InvalidUrl(..) => None,
SpecifierError::ImportPrefixMissing { specifier, .. } => {
Some(ResolutionErrorRef {
specifier,
range,
is_module_not_found: false,
})
}
},
ResolutionError::ResolverError {
error,
specifier,
range,
} => match error.as_ref() {
ResolveError::Specifier(error) => match error {
SpecifierError::InvalidUrl(..) => None,
SpecifierError::ImportPrefixMissing { specifier, .. } => {
Some(ResolutionErrorRef {
specifier,
range,
is_module_not_found: false,
})
}
},
ResolveError::ImportMap(error) => match error.as_kind() {
ImportMapErrorKind::JsonParse(_)
| ImportMapErrorKind::ImportMapNotObject
| ImportMapErrorKind::ImportsFieldNotObject
| ImportMapErrorKind::ScopesFieldNotObject
| ImportMapErrorKind::ScopePrefixNotObject(_)
| ImportMapErrorKind::BlockedByNullEntry(_)
| ImportMapErrorKind::SpecifierResolutionFailure { .. }
| ImportMapErrorKind::SpecifierBacktracksAbovePrefix { .. } => None,
ImportMapErrorKind::UnmappedBareSpecifier(specifier, _) => {
Some(ResolutionErrorRef {
specifier,
range,
is_module_not_found: false,
})
}
},
ResolveError::Other(error) => {
let is_module_not_found_error = downcast_ref_deno_resolve_error(error)
.and_then(|err| err.maybe_node_code())
.map(is_module_not_found_code)
.unwrap_or(false);
is_module_not_found_error.then(|| ResolutionErrorRef {
specifier,
range,
is_module_not_found: true,
})
}
},
}
}
pub fn graph_exit_integrity_errors(graph: &ModuleGraph) {
for error in graph.module_errors() {
exit_for_integrity_error(error);
}
}
fn exit_for_integrity_error(err: &ModuleError) {
if let Some(err_message) = enhanced_integrity_error_message(err) {
log::error!("{} {}", colors::red("error:"), err_message);
deno_runtime::exit(10);
}
}
pub struct CreateGraphOptions<'a> {
pub graph_kind: GraphKind,
pub roots: Vec<ModuleSpecifier>,
pub is_dynamic: bool,
/// Specify `None` to use the default CLI loader.
pub loader: Option<&'a mut dyn Loader>,
pub npm_caching: NpmCachingStrategy,
}
pub struct CreatePublishGraphOptions<'a> {
pub packages: &'a [JsrPackageConfig],
pub build_fast_check_graph: bool,
pub validate_graph: bool,
}
pub struct ModuleGraphCreator {
options: Arc<CliOptions>,
module_graph_builder: Arc<ModuleGraphBuilder>,
type_checker: Arc<TypeChecker>,
}
impl ModuleGraphCreator {
pub fn new(
options: Arc<CliOptions>,
module_graph_builder: Arc<ModuleGraphBuilder>,
type_checker: Arc<TypeChecker>,
) -> Self {
Self {
options,
module_graph_builder,
type_checker,
}
}
pub async fn create_graph(
&self,
graph_kind: GraphKind,
roots: Vec<ModuleSpecifier>,
npm_caching: NpmCachingStrategy,
) -> Result<deno_graph::ModuleGraph, AnyError> {
let mut cache = self
.module_graph_builder
.create_graph_loader_with_root_permissions();
self
.create_graph_with_loader(graph_kind, roots, &mut cache, npm_caching)
.await
}
pub async fn create_graph_with_loader(
&self,
graph_kind: GraphKind,
roots: Vec<ModuleSpecifier>,
loader: &mut dyn Loader,
npm_caching: NpmCachingStrategy,
) -> Result<ModuleGraph, AnyError> {
self
.create_graph_with_options(CreateGraphOptions {
is_dynamic: false,
graph_kind,
roots,
loader: Some(loader),
npm_caching,
})
.await
}
pub async fn create_publish_graph(
&self,
options: CreatePublishGraphOptions<'_>,
) -> Result<ModuleGraph, AnyError> {
struct PublishLoader(CliDenoGraphLoader);
impl Loader for PublishLoader {
fn load(
&self,
specifier: &deno_ast::ModuleSpecifier,
options: deno_graph::source::LoadOptions,
) -> deno_graph::source::LoadFuture {
if matches!(specifier.scheme(), "bun" | "virtual" | "cloudflare") {
Box::pin(std::future::ready(Ok(Some(
deno_graph::source::LoadResponse::External {
specifier: specifier.clone(),
},
))))
} else if matches!(specifier.scheme(), "http" | "https")
&& !specifier.as_str().starts_with(jsr_url().as_str())
{
// mark non-JSR remote modules as external so we don't need --allow-import
// permissions as these will error out later when publishing
Box::pin(std::future::ready(Ok(Some(
deno_graph::source::LoadResponse::External {
specifier: specifier.clone(),
},
))))
} else {
self.0.load(specifier, options)
}
}
}
fn graph_has_external_remote(graph: &ModuleGraph) -> bool {
// Earlier on, we marked external non-JSR modules as external.
// If the graph contains any of those, it would cause type checking
// to crash, so since publishing is going to fail anyway, skip type
// checking.
graph.modules().any(|module| match module {
deno_graph::Module::External(external_module) => {
matches!(external_module.specifier.scheme(), "http" | "https")
}
_ => false,
})
}
let mut roots = Vec::new();
for package_config in options.packages {
roots.extend(package_config.config_file.resolve_export_value_urls()?);
}
let loader = self
.module_graph_builder
.create_graph_loader_with_root_permissions();
let mut publish_loader = PublishLoader(loader);
let mut graph = self
.create_graph_with_options(CreateGraphOptions {
is_dynamic: false,
graph_kind: deno_graph::GraphKind::All,
roots,
loader: Some(&mut publish_loader),
npm_caching: self.options.default_npm_caching_strategy(),
})
.await?;
if options.validate_graph {
self.graph_valid(&graph)?;
}
if self.options.type_check_mode().is_true()
&& !graph_has_external_remote(&graph)
{
self.type_check_graph(graph.clone())?;
}
if options.build_fast_check_graph {
let fast_check_workspace_members = options
.packages
.iter()
.map(|p| config_to_deno_graph_workspace_member(&p.config_file))
.collect::<Result<Vec<_>, _>>()?;
self.module_graph_builder.build_fast_check_graph(
&mut graph,
BuildFastCheckGraphOptions {
workspace_fast_check: WorkspaceFastCheckOption::Enabled(
&fast_check_workspace_members,
),
},
)?;
}
Ok(graph)
}
pub async fn create_graph_with_options(
&self,
options: CreateGraphOptions<'_>,
) -> Result<ModuleGraph, AnyError> {
let mut graph = ModuleGraph::new(options.graph_kind);
self
.module_graph_builder
.build_graph_with_npm_resolution(
&mut graph,
BuildGraphWithNpmOptions {
request: BuildGraphRequest::Roots(options.roots),
is_dynamic: options.is_dynamic,
loader: options.loader,
npm_caching: options.npm_caching,
},
)
.await?;
Ok(graph)
}
pub async fn create_graph_and_maybe_check(
&self,
roots: Vec<ModuleSpecifier>,
) -> Result<Arc<deno_graph::ModuleGraph>, AnyError> {
let graph_kind = self.options.type_check_mode().as_graph_kind();
let graph = self
.create_graph_with_options(CreateGraphOptions {
is_dynamic: false,
graph_kind,
roots,
loader: None,
npm_caching: self.options.default_npm_caching_strategy(),
})
.await?;
self.graph_valid(&graph)?;
if self.options.type_check_mode().is_true() {
// provide the graph to the type checker, then get it back after it's done
let graph = self.type_check_graph(graph)?;
Ok(graph)
} else {
Ok(Arc::new(graph))
}
}
pub fn graph_valid(&self, graph: &ModuleGraph) -> Result<(), JsErrorBox> {
self.module_graph_builder.graph_valid(graph)
}
#[allow(clippy::result_large_err)]
fn type_check_graph(
&self,
graph: ModuleGraph,
) -> Result<Arc<ModuleGraph>, CheckError> {
self.type_checker.check(
graph,
CheckOptions {
build_fast_check_graph: true,
lib: self.options.ts_type_lib_window(),
reload: self.options.reload_flag(),
type_check_mode: self.options.type_check_mode(),
},
)
}
}
pub struct BuildFastCheckGraphOptions<'a> {
/// Whether to do fast check on workspace members. This
/// is mostly only useful when publishing.
pub workspace_fast_check: deno_graph::WorkspaceFastCheckOption<'a>,
}
#[derive(Debug, thiserror::Error, deno_error::JsError)]
pub enum BuildGraphWithNpmResolutionError {
#[class(inherit)]
#[error(transparent)]
CompilerOptionTypesDeserialize(#[from] CompilerOptionTypesDeserializeError),
#[class(inherit)]
#[error(transparent)]
SerdeJson(#[from] serde_json::Error),
#[class(inherit)]
#[error(transparent)]
ToMaybeJsxImportSourceConfig(#[from] ToMaybeJsxImportSourceConfigError),
#[class(inherit)]
#[error(transparent)]
NodeModulesDirParse(#[from] deno_json::NodeModulesDirParseError),
#[class(inherit)]
#[error(transparent)]
Other(#[from] JsErrorBox),
#[class(generic)]
#[error(
"Resolving npm specifier entrypoints this way is currently not supported with \"nodeModules\": \"manual\". In the meantime, try with --node-modules-dir=auto instead"
)]
UnsupportedNpmSpecifierEntrypointResolutionWay,
}
pub enum BuildGraphRequest {
Roots(Vec<ModuleSpecifier>),
Reload(Vec<ModuleSpecifier>),
}
pub struct BuildGraphWithNpmOptions<'a> {
pub request: BuildGraphRequest,
pub is_dynamic: bool,
/// Specify `None` to use the default CLI loader.
pub loader: Option<&'a mut dyn Loader>,
pub npm_caching: NpmCachingStrategy,
}
pub struct ModuleGraphBuilder {
caches: Arc<cache::Caches>,
cjs_tracker: Arc<CliCjsTracker>,
cli_options: Arc<CliOptions>,
file_fetcher: Arc<CliFileFetcher>,
global_http_cache: Arc<GlobalHttpCache>,
in_npm_pkg_checker: DenoInNpmPackageChecker,
jsr_version_resolver: Arc<JsrVersionResolver>,
lockfile: Option<Arc<CliLockfile>>,
maybe_reporter: Option<Arc<dyn deno_graph::source::Reporter>>,
module_info_cache: Arc<ModuleInfoCache>,
npm_graph_resolver: Arc<CliNpmGraphResolver>,
npm_installer: Option<Arc<CliNpmInstaller>>,
npm_resolver: CliNpmResolver,
parsed_source_cache: Arc<ParsedSourceCache>,
progress_bar: ProgressBar,
resolver: Arc<CliResolver>,
root_permissions_container: PermissionsContainer,
sys: CliSys,
compiler_options_resolver: Arc<CompilerOptionsResolver>,
load_reporter: Option<GraphLoaderReporterRc>,
}
impl ModuleGraphBuilder {
#[allow(clippy::too_many_arguments)]
pub fn new(
caches: Arc<cache::Caches>,
cjs_tracker: Arc<CliCjsTracker>,
cli_options: Arc<CliOptions>,
file_fetcher: Arc<CliFileFetcher>,
global_http_cache: Arc<GlobalHttpCache>,
in_npm_pkg_checker: DenoInNpmPackageChecker,
jsr_version_resolver: Arc<JsrVersionResolver>,
lockfile: Option<Arc<CliLockfile>>,
maybe_reporter: Option<Arc<dyn deno_graph::source::Reporter>>,
module_info_cache: Arc<ModuleInfoCache>,
npm_graph_resolver: Arc<CliNpmGraphResolver>,
npm_installer: Option<Arc<CliNpmInstaller>>,
npm_resolver: CliNpmResolver,
parsed_source_cache: Arc<ParsedSourceCache>,
progress_bar: ProgressBar,
resolver: Arc<CliResolver>,
root_permissions_container: PermissionsContainer,
sys: CliSys,
compiler_options_resolver: Arc<CompilerOptionsResolver>,
load_reporter: Option<GraphLoaderReporterRc>,
) -> Self {
Self {
caches,
cjs_tracker,
cli_options,
file_fetcher,
global_http_cache,
in_npm_pkg_checker,
jsr_version_resolver,
lockfile,
maybe_reporter,
module_info_cache,
npm_graph_resolver,
npm_installer,
npm_resolver,
parsed_source_cache,
progress_bar,
resolver,
root_permissions_container,
sys,
compiler_options_resolver,
load_reporter,
}
}
pub async fn build_graph_with_npm_resolution(
&self,
graph: &mut ModuleGraph,
options: BuildGraphWithNpmOptions<'_>,
) -> Result<(), BuildGraphWithNpmResolutionError> {
enum MutLoaderRef<'a> {
Borrowed(&'a mut dyn Loader),
Owned(CliDenoGraphLoader),
}
impl MutLoaderRef<'_> {
pub fn as_mut_loader(&mut self) -> &mut dyn Loader {
match self {
Self::Borrowed(loader) => *loader,
Self::Owned(loader) => loader,
}
}
}
let _clear_guard = self.progress_bar.deferred_keep_initialize_alive();
let analyzer = self.module_info_cache.as_module_analyzer();
let mut loader = match options.loader {
Some(loader) => MutLoaderRef::Borrowed(loader),
None => {
MutLoaderRef::Owned(self.create_graph_loader_with_root_permissions())
}
};
let jsx_import_source_config_resolver =
JsxImportSourceConfigResolver::from_compiler_options_resolver(
&self.compiler_options_resolver,
)?;
let graph_resolver = self.resolver.as_graph_resolver(
self.cjs_tracker.as_ref(),
&jsx_import_source_config_resolver,
);
let maybe_reporter = self.maybe_reporter.as_deref();
let mut locker = self.lockfile.as_ref().map(|l| l.as_deno_graph_locker());
self
.build_graph_with_npm_resolution_and_build_options(
graph,
options.request,
loader.as_mut_loader(),
deno_graph::BuildOptions {
skip_dynamic_deps: self.cli_options.unstable_lazy_dynamic_imports()
&& graph.graph_kind() == GraphKind::CodeOnly,
is_dynamic: options.is_dynamic,
passthrough_jsr_specifiers: false,
executor: Default::default(),
file_system: &self.sys,
jsr_metadata_store: None,
jsr_url_provider: &CliJsrUrlProvider,
jsr_version_resolver: Cow::Borrowed(
self.jsr_version_resolver.as_ref(),
),
npm_resolver: Some(self.npm_graph_resolver.as_ref()),
module_analyzer: &analyzer,
module_info_cacher: self.module_info_cache.as_ref(),
reporter: maybe_reporter,
resolver: Some(&graph_resolver),
locker: locker.as_mut().map(|l| l as _),
unstable_bytes_imports: self.cli_options.unstable_raw_imports(),
unstable_text_imports: self.cli_options.unstable_raw_imports(),
},
options.npm_caching,
)
.await?;
Ok(())
}
async fn build_graph_with_npm_resolution_and_build_options<'a>(
&self,
graph: &mut ModuleGraph,
request: BuildGraphRequest,
loader: &'a mut dyn deno_graph::source::Loader,
options: deno_graph::BuildOptions<'a>,
npm_caching: NpmCachingStrategy,
) -> Result<(), BuildGraphWithNpmResolutionError> {
// ensure an "npm install" is done if the user has explicitly
// opted into using a node_modules directory
if self
.cli_options
.specified_node_modules_dir()?
.map(|m| m == NodeModulesDirMode::Auto)
.unwrap_or(false)
&& let Some(npm_installer) = &self.npm_installer
{
let already_done = npm_installer
.ensure_top_level_package_json_install()
.await?;
if !already_done && matches!(npm_caching, NpmCachingStrategy::Eager) {
npm_installer.cache_packages(PackageCaching::All).await?;
}
}
// fill the graph with the information from the lockfile
let is_first_execution = graph.roots.is_empty();
if is_first_execution {
// populate the information from the lockfile
if let Some(lockfile) = &self.lockfile {
lockfile.fill_graph(graph)
}
}
let initial_redirects_len = graph.redirects.len();
let initial_package_deps_len = graph.packages.package_deps_sum();
let initial_package_mappings_len = graph.packages.mappings().len();
match request {
BuildGraphRequest::Roots(roots) => {
if roots.iter().any(|r| r.scheme() == "npm")
&& self.npm_resolver.is_byonm()
{
return Err(BuildGraphWithNpmResolutionError::UnsupportedNpmSpecifierEntrypointResolutionWay);
}
let imports = if graph.graph_kind().include_types() {
// Resolve all the imports from every config file. We'll separate
// them later based on the folder we're type checking.
let mut imports_by_referrer = IndexMap::<_, Vec<_>>::with_capacity(
self.compiler_options_resolver.size(),
);
for (_, compiler_options_data, maybe_files) in
self.compiler_options_resolver.entries()
{
if let Some((referrer, files)) = maybe_files {
imports_by_referrer
.entry(referrer.as_ref())
.or_default()
.extend(files.iter().map(|f| f.relative_specifier.clone()));
}
for (referrer, types) in
compiler_options_data.compiler_options_types().as_ref()
{
imports_by_referrer
.entry(referrer)
.or_default()
.extend(types.iter().cloned());
}
}
imports_by_referrer
.into_iter()
.map(|(referrer, imports)| deno_graph::ReferrerImports {
referrer: referrer.clone(),
imports,
})
.collect()
} else {
Vec::new()
};
graph.build(roots, imports, loader, options).await;
}
BuildGraphRequest::Reload(urls) => {
graph.reload(urls, loader, options).await
}
}
let has_redirects_changed = graph.redirects.len() != initial_redirects_len;
let has_jsr_package_deps_changed =
graph.packages.package_deps_sum() != initial_package_deps_len;
let has_jsr_package_mappings_changed =
graph.packages.mappings().len() != initial_package_mappings_len;
if (has_redirects_changed
|| has_jsr_package_deps_changed
|| has_jsr_package_mappings_changed)
&& let Some(lockfile) = &self.lockfile
{
let mut lockfile = lockfile.lock();
// https redirects
if has_redirects_changed {
let graph_redirects = graph.redirects.iter().filter(|(from, _)| {
!matches!(from.scheme(), "npm" | "file" | "deno")
});
for (from, to) in graph_redirects {
lockfile.insert_redirect(from.to_string(), to.to_string());
}
}
// jsr package mappings
if has_jsr_package_mappings_changed {
for (from, to) in graph.packages.mappings() {
lockfile.insert_package_specifier(
JsrDepPackageReq::jsr(from.clone()),
to.version.to_custom_string::<SmallStackString>(),
);
}
}
// jsr packages
if has_jsr_package_deps_changed {
for (nv, deps) in graph.packages.packages_with_deps() {
lockfile.add_package_deps(nv, deps.cloned());
}
}
}
Ok(())
}
pub fn build_fast_check_graph(
&self,
graph: &mut ModuleGraph,
options: BuildFastCheckGraphOptions,
) -> Result<(), ToMaybeJsxImportSourceConfigError> {
if !graph.graph_kind().include_types() {
return Ok(());
}
log::debug!("Building fast check graph");
let fast_check_cache = if matches!(
options.workspace_fast_check,
deno_graph::WorkspaceFastCheckOption::Disabled
) {
Some(cache::FastCheckCache::new(self.caches.fast_check_db()))
} else {
None
};
let parser = self.parsed_source_cache.as_capturing_parser();
let jsx_import_source_config_resolver =
JsxImportSourceConfigResolver::from_compiler_options_resolver(
&self.compiler_options_resolver,
)?;
let graph_resolver = self.resolver.as_graph_resolver(
self.cjs_tracker.as_ref(),
&jsx_import_source_config_resolver,
);
graph.build_fast_check_type_graph(
deno_graph::BuildFastCheckTypeGraphOptions {
es_parser: Some(&parser),
fast_check_cache: fast_check_cache.as_ref().map(|c| c as _),
fast_check_dts: false,
jsr_url_provider: &CliJsrUrlProvider,
resolver: Some(&graph_resolver),
workspace_fast_check: options.workspace_fast_check,
},
);
Ok(())
}
/// Creates the default loader used for creating a graph.
pub fn create_graph_loader_with_root_permissions(
&self,
) -> CliDenoGraphLoader {
self.create_graph_loader_with_permissions(
self.root_permissions_container.clone(),
)
}
pub fn create_graph_loader_with_permissions(
&self,
permissions: PermissionsContainer,
) -> CliDenoGraphLoader {
CliDenoGraphLoader::new(
self.file_fetcher.clone(),
self.global_http_cache.clone(),
self.in_npm_pkg_checker.clone(),
self.sys.clone(),
deno_resolver::file_fetcher::DenoGraphLoaderOptions {
file_header_overrides: self.cli_options.resolve_file_header_overrides(),
permissions: Some(permissions),
reporter: self.load_reporter.clone(),
},
)
}
/// Check if `roots` and their deps are available. Returns `Ok(())` if
/// so. Returns `Err(_)` if there is a known module graph or resolution
/// error statically reachable from `roots` and not a dynamic import.
pub fn graph_valid(&self, graph: &ModuleGraph) -> Result<(), JsErrorBox> {
self.graph_roots_valid(
graph,
&graph.roots.iter().cloned().collect::<Vec<_>>(),
false,
false,
)
}
pub fn graph_roots_valid(
&self,
graph: &ModuleGraph,
roots: &[ModuleSpecifier],
allow_unknown_media_types: bool,
allow_unknown_jsr_exports: bool,
) -> Result<(), JsErrorBox> {
let will_type_check = self.cli_options.type_check_mode().is_true();
graph_valid(
graph,
&self.sys,
roots,
GraphValidOptions {
kind: if will_type_check {
GraphKind::All
} else {
GraphKind::CodeOnly
},
will_type_check,
check_js: CheckJsOption::Custom(
self.compiler_options_resolver.as_ref(),
),
exit_integrity_errors: true,
allow_unknown_media_types,
allow_unknown_jsr_exports,
},
)
}
}
/// Gets if any of the specified root's "file:" dependents are in the
/// provided changed set.
pub fn has_graph_root_local_dependent_changed(
graph: &ModuleGraph,
root: &ModuleSpecifier,
canonicalized_changed_paths: &HashSet<PathBuf>,
) -> bool {
let mut dependent_specifiers = graph.walk(
std::iter::once(root),
deno_graph::WalkOptions {
follow_dynamic: true,
kind: GraphKind::All,
prefer_fast_check_graph: true,
check_js: CheckJsOption::True,
},
);
while let Some((s, _)) = dependent_specifiers.next() {
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | true |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/main.rs | cli/main.rs | // Copyright 2018-2025 the Deno authors. MIT license.
mod args;
mod cache;
mod cdp;
mod factory;
mod file_fetcher;
mod graph_container;
mod graph_util;
mod http_util;
mod jsr;
mod lsp;
mod module_loader;
mod node;
mod npm;
mod ops;
mod registry;
mod resolver;
mod standalone;
mod task_runner;
mod tools;
mod tsc;
mod type_checker;
mod util;
mod worker;
pub mod sys {
#[allow(clippy::disallowed_types)] // ok, definition
pub type CliSys = sys_traits::impls::RealSys;
}
use std::collections::HashMap;
use std::env;
use std::future::Future;
use std::io::IsTerminal;
use std::io::Write as _;
use std::ops::Deref;
use std::path::PathBuf;
use std::sync::Arc;
use args::TaskFlags;
use deno_core::anyhow::Context;
use deno_core::error::AnyError;
use deno_core::futures::FutureExt;
use deno_core::unsync::JoinHandle;
use deno_lib::util::result::any_and_jserrorbox_downcast_ref;
use deno_lib::util::result::js_error_downcast_ref;
use deno_lib::worker::LibWorkerFactoryRoots;
use deno_resolver::npm::ByonmResolvePkgFolderFromDenoReqError;
use deno_resolver::npm::ResolvePkgFolderFromDenoReqError;
use deno_runtime::UnconfiguredRuntime;
use deno_runtime::WorkerExecutionMode;
use deno_runtime::fmt_errors::format_js_error;
use deno_runtime::tokio_util::create_and_run_current_thread_with_maybe_metrics;
use deno_telemetry::OtelConfig;
use deno_terminal::colors;
use factory::CliFactory;
const MODULE_NOT_FOUND: &str = "Module not found";
const UNSUPPORTED_SCHEME: &str = "Unsupported scheme";
use self::util::draw_thread::DrawThread;
use crate::args::CompletionsFlags;
use crate::args::DenoSubcommand;
use crate::args::Flags;
use crate::args::flags_from_vec_with_initial_cwd;
use crate::args::get_default_v8_flags;
use crate::util::display;
use crate::util::v8::get_v8_flags_from_env;
use crate::util::v8::init_v8_flags;
use crate::util::watch_env_tracker::WatchEnvTracker;
use crate::util::watch_env_tracker::load_env_variables_from_env_files;
#[cfg(feature = "dhat-heap")]
#[global_allocator]
static ALLOC: dhat::Alloc = dhat::Alloc;
/// Ensures that all subcommands return an i32 exit code and an [`AnyError`] error type.
trait SubcommandOutput {
fn output(self) -> Result<i32, AnyError>;
}
impl SubcommandOutput for Result<i32, AnyError> {
fn output(self) -> Result<i32, AnyError> {
self
}
}
impl SubcommandOutput for Result<(), AnyError> {
fn output(self) -> Result<i32, AnyError> {
self.map(|_| 0)
}
}
impl SubcommandOutput for Result<(), std::io::Error> {
fn output(self) -> Result<i32, AnyError> {
self.map(|_| 0).map_err(|e| e.into())
}
}
/// Ensure that the subcommand runs in a task, rather than being directly executed. Since some of these
/// futures are very large, this prevents the stack from getting blown out from passing them by value up
/// the callchain (especially in debug mode when Rust doesn't have a chance to elide copies!).
#[inline(always)]
fn spawn_subcommand<F: Future<Output = T> + 'static, T: SubcommandOutput>(
f: F,
) -> JoinHandle<Result<i32, AnyError>> {
// the boxed_local() is important in order to get windows to not blow the stack in debug
deno_core::unsync::spawn(
async move { f.map(|r| r.output()).await }.boxed_local(),
)
}
async fn run_subcommand(
flags: Arc<Flags>,
unconfigured_runtime: Option<UnconfiguredRuntime>,
roots: LibWorkerFactoryRoots,
) -> Result<i32, AnyError> {
let handle = match flags.subcommand.clone() {
DenoSubcommand::Add(add_flags) => spawn_subcommand(async {
tools::pm::add(flags, add_flags, tools::pm::AddCommandName::Add).await
}),
DenoSubcommand::Audit(audit_flags) => {
spawn_subcommand(async { tools::pm::audit(flags, audit_flags).await })
}
DenoSubcommand::ApproveScripts(approve_scripts_flags) => {
spawn_subcommand(async move {
tools::pm::approve_scripts(flags, approve_scripts_flags).await
})
}
DenoSubcommand::Remove(remove_flags) => {
spawn_subcommand(async { tools::pm::remove(flags, remove_flags).await })
}
DenoSubcommand::Bench(bench_flags) => spawn_subcommand(async {
if bench_flags.watch.is_some() {
tools::bench::run_benchmarks_with_watch(flags, bench_flags)
.boxed_local()
.await
} else {
tools::bench::run_benchmarks(flags, bench_flags).await
}
}),
DenoSubcommand::Bundle(bundle_flags) => spawn_subcommand(async {
log::warn!(
"β οΈ {} is experimental and subject to changes",
colors::cyan("deno bundle")
);
tools::bundle::bundle(flags, bundle_flags).await
}),
DenoSubcommand::Deploy(deploy_flags) => spawn_subcommand(async move {
tools::deploy::deploy(Arc::unwrap_or_clone(flags), deploy_flags).await
}),
DenoSubcommand::Doc(doc_flags) => {
spawn_subcommand(async { tools::doc::doc(flags, doc_flags).await })
}
DenoSubcommand::Eval(eval_flags) => spawn_subcommand(async {
tools::run::eval_command(flags, eval_flags).await
}),
DenoSubcommand::Cache(cache_flags) => spawn_subcommand(async move {
tools::installer::install_from_entrypoints(
flags,
self::args::InstallEntrypointsFlags {
entrypoints: cache_flags.files,
lockfile_only: false,
},
)
.await
}),
DenoSubcommand::Check(check_flags) => {
spawn_subcommand(
async move { tools::check::check(flags, check_flags).await },
)
}
DenoSubcommand::Clean(clean_flags) => {
spawn_subcommand(
async move { tools::clean::clean(flags, clean_flags).await },
)
}
DenoSubcommand::Compile(compile_flags) => spawn_subcommand(async {
if compile_flags.eszip {
tools::compile::compile_eszip(flags, compile_flags)
.boxed_local()
.await
} else {
tools::compile::compile(flags, compile_flags).await
}
}),
DenoSubcommand::Coverage(coverage_flags) => spawn_subcommand(async move {
let reporter =
crate::tools::coverage::reporter::create(coverage_flags.r#type.clone());
tools::coverage::cover_files(
flags,
coverage_flags.files.include,
coverage_flags.files.ignore,
coverage_flags.include,
coverage_flags.exclude,
coverage_flags.output,
&[&*reporter],
)
}),
DenoSubcommand::Fmt(fmt_flags) => {
spawn_subcommand(
async move { tools::fmt::format(flags, fmt_flags).await },
)
}
DenoSubcommand::Init(init_flags) => {
spawn_subcommand(async { tools::init::init_project(init_flags).await })
}
DenoSubcommand::Info(info_flags) => {
spawn_subcommand(async { tools::info::info(flags, info_flags).await })
}
DenoSubcommand::Install(install_flags) => spawn_subcommand(async {
tools::installer::install_command(flags, install_flags).await
}),
DenoSubcommand::JSONReference(json_reference) => {
spawn_subcommand(async move {
display::write_to_stdout_ignore_sigpipe(
&deno_core::serde_json::to_vec_pretty(&json_reference.json).unwrap(),
)
})
}
DenoSubcommand::Jupyter(jupyter_flags) => spawn_subcommand(async {
tools::jupyter::kernel(flags, jupyter_flags).await
}),
DenoSubcommand::Uninstall(uninstall_flags) => spawn_subcommand(async {
tools::installer::uninstall(flags, uninstall_flags).await
}),
DenoSubcommand::Lsp => spawn_subcommand(async move {
if std::io::stderr().is_terminal() {
log::warn!(
"{} command is intended to be run by text editors and IDEs and shouldn't be run manually.
Visit https://docs.deno.com/runtime/getting_started/setup_your_environment/ for instruction
how to setup your favorite text editor.
Press Ctrl+C to exit.
", colors::cyan("deno lsp"));
}
lsp::start().await
}),
DenoSubcommand::Lint(lint_flags) => spawn_subcommand(async {
if lint_flags.rules {
tools::lint::print_rules_list(
lint_flags.json,
lint_flags.maybe_rules_tags,
);
Ok(())
} else {
tools::lint::lint(flags, lint_flags).await
}
}),
DenoSubcommand::Outdated(update_flags) => {
spawn_subcommand(
async move { tools::pm::outdated(flags, update_flags).await },
)
}
DenoSubcommand::Repl(repl_flags) => {
spawn_subcommand(async move { tools::repl::run(flags, repl_flags).await })
}
DenoSubcommand::X(x_flags) => spawn_subcommand(async move {
tools::x::run(flags, x_flags, unconfigured_runtime, roots).await
}),
DenoSubcommand::Run(run_flags) => spawn_subcommand(async move {
if run_flags.print_task_list {
let task_flags = TaskFlags {
cwd: None,
task: None,
is_run: true,
recursive: false,
filter: None,
eval: false,
};
let mut flags = flags.deref().clone();
flags.subcommand = DenoSubcommand::Task(task_flags.clone());
writeln!(
&mut std::io::stdout(),
"Please specify a {} or a {}.\n",
colors::bold("[SCRIPT_ARG]"),
colors::bold("task name")
)?;
std::io::stdout().flush()?;
tools::task::execute_script(Arc::new(flags), task_flags)
.await
.map(|_| 1)
} else if run_flags.is_stdin() {
// these futures are boxed to prevent stack overflows on Windows
tools::run::run_from_stdin(flags.clone(), unconfigured_runtime, roots)
.boxed_local()
.await
} else if flags.eszip {
tools::run::run_eszip(flags, run_flags, unconfigured_runtime, roots)
.boxed_local()
.await
} else {
let result = tools::run::run_script(
WorkerExecutionMode::Run,
flags.clone(),
run_flags.watch,
unconfigured_runtime,
roots.clone(),
)
.await;
match result {
Ok(v) => Ok(v),
Err(script_err) => {
if let Some(
worker::CreateCustomWorkerError::ResolvePkgFolderFromDenoReq(
ResolvePkgFolderFromDenoReqError::Byonm(
ByonmResolvePkgFolderFromDenoReqError::UnmatchedReq(_),
),
),
) = any_and_jserrorbox_downcast_ref::<
worker::CreateCustomWorkerError,
>(&script_err)
&& flags.node_modules_dir.is_none()
{
let mut flags = flags.deref().clone();
let watch = match &flags.subcommand {
DenoSubcommand::Run(run_flags) => run_flags.watch.clone(),
_ => unreachable!(),
};
flags.node_modules_dir =
Some(deno_config::deno_json::NodeModulesDirMode::None);
// use the current lockfile, but don't write it out
if flags.frozen_lockfile.is_none() {
flags.internal.lockfile_skip_write = true;
}
return tools::run::run_script(
WorkerExecutionMode::Run,
Arc::new(flags),
watch,
None,
roots,
)
.boxed_local()
.await;
}
let script_err_msg = script_err.to_string();
if should_fallback_on_run_error(script_err_msg.as_str()) {
if run_flags.bare {
let mut cmd = args::clap_root();
cmd.build();
let command_names = cmd
.get_subcommands()
.map(|command| command.get_name())
.collect::<Vec<_>>();
let suggestions =
args::did_you_mean(&run_flags.script, command_names);
if !suggestions.is_empty() && !run_flags.script.contains('.') {
let mut error =
clap::error::Error::<clap::error::DefaultFormatter>::new(
clap::error::ErrorKind::InvalidSubcommand,
)
.with_cmd(&cmd);
error.insert(
clap::error::ContextKind::SuggestedSubcommand,
clap::error::ContextValue::Strings(suggestions),
);
Err(error.into())
} else {
Err(script_err)
}
} else {
let mut new_flags = flags.deref().clone();
let task_flags = TaskFlags {
cwd: None,
task: Some(run_flags.script.clone()),
is_run: true,
recursive: false,
filter: None,
eval: false,
};
new_flags.subcommand = DenoSubcommand::Task(task_flags.clone());
let result = tools::task::execute_script(
Arc::new(new_flags),
task_flags.clone(),
)
.await;
match result {
Ok(v) => Ok(v),
Err(_) => {
// Return script error for backwards compatibility.
Err(script_err)
}
}
}
} else {
Err(script_err)
}
}
}
}
}),
DenoSubcommand::Serve(serve_flags) => spawn_subcommand(async move {
tools::serve::serve(flags, serve_flags, unconfigured_runtime, roots).await
}),
DenoSubcommand::Task(task_flags) => spawn_subcommand(async {
tools::task::execute_script(flags, task_flags).await
}),
DenoSubcommand::Test(test_flags) => {
spawn_subcommand(async {
if let Some(ref coverage_dir) = test_flags.coverage_dir {
if !test_flags.coverage_raw_data_only || test_flags.clean {
// Keeps coverage_dir contents only when --coverage-raw-data-only is set and --clean is not set
let _ = std::fs::remove_dir_all(coverage_dir);
}
std::fs::create_dir_all(coverage_dir)
.with_context(|| format!("Failed creating: {coverage_dir}"))?;
// this is set in order to ensure spawned processes use the same
// coverage directory
#[allow(clippy::undocumented_unsafe_blocks)]
unsafe {
env::set_var(
"DENO_COVERAGE_DIR",
PathBuf::from(coverage_dir).canonicalize()?,
)
};
}
if test_flags.watch.is_some() {
tools::test::run_tests_with_watch(flags, test_flags).await
} else {
tools::test::run_tests(flags, test_flags).await
}
})
}
DenoSubcommand::Completions(completions_flags) => {
spawn_subcommand(async move {
match completions_flags {
CompletionsFlags::Static(buf) => {
display::write_to_stdout_ignore_sigpipe(&buf)
.map_err(AnyError::from)
}
CompletionsFlags::Dynamic(f) => {
f()?;
Ok(())
}
}
})
}
DenoSubcommand::Types => spawn_subcommand(async move {
let types = tsc::get_types_declaration_file_text();
display::write_to_stdout_ignore_sigpipe(types.as_bytes())
}),
#[cfg(feature = "upgrade")]
DenoSubcommand::Upgrade(upgrade_flags) => spawn_subcommand(async {
tools::upgrade::upgrade(flags, upgrade_flags).await
}),
#[cfg(not(feature = "upgrade"))]
DenoSubcommand::Upgrade(_) => exit_with_message(
"This deno was built without the \"upgrade\" feature. Please upgrade using the installation method originally used to install Deno.",
1,
),
DenoSubcommand::Vendor => exit_with_message(
"β οΈ `deno vendor` was removed in Deno 2.\n\nSee the Deno 1.x to 2.x Migration Guide for migration instructions: https://docs.deno.com/runtime/manual/advanced/migrate_deprecations",
1,
),
DenoSubcommand::Publish(publish_flags) => spawn_subcommand(async {
tools::publish::publish(flags, publish_flags).await
}),
DenoSubcommand::Help(help_flags) => spawn_subcommand(async move {
use std::io::Write;
let mut stream = anstream::AutoStream::new(
std::io::stdout(),
if colors::use_color() {
anstream::ColorChoice::Auto
} else {
anstream::ColorChoice::Never
},
);
match stream.write_all(help_flags.help.ansi().to_string().as_bytes()) {
Ok(()) => Ok(()),
Err(e) => match e.kind() {
std::io::ErrorKind::BrokenPipe => Ok(()),
_ => Err(e),
},
}
}),
};
handle.await?
}
/// Determines whether a error encountered during `deno run`
/// should trigger fallback behavior, such as attempting to run a Deno task
/// with the same name.
///
/// Checks if the error message indicates a "module not found",
/// "unsupported scheme", or certain OS-level import failures (such as
/// "Is a directory" or "Access is denied"); if so, Deno will attempt to
/// interpret the original argument as a script name or task instead of a
/// file path.
///
/// See: https://github.com/denoland/deno/issues/28878
fn should_fallback_on_run_error(script_err: &str) -> bool {
if script_err.starts_with(MODULE_NOT_FOUND)
|| script_err.starts_with(UNSUPPORTED_SCHEME)
{
return true;
}
let re = lazy_regex::regex!(
r"Import 'file:///.+?' failed\.\n\s+0: .+ \(os error \d+\)"
);
re.is_match(script_err)
}
#[allow(clippy::print_stderr)]
fn setup_panic_hook() {
// This function does two things inside of the panic hook:
// - Tokio does not exit the process when a task panics, so we define a custom
// panic hook to implement this behaviour.
// - We print a message to stderr to indicate that this is a bug in Deno, and
// should be reported to us.
let orig_hook = std::panic::take_hook();
std::panic::set_hook(Box::new(move |panic_info| {
eprintln!("\n============================================================");
eprintln!("Deno has panicked. This is a bug in Deno. Please report this");
eprintln!("at https://github.com/denoland/deno/issues/new.");
eprintln!("If you can reliably reproduce this panic, include the");
eprintln!("reproduction steps and re-run with the RUST_BACKTRACE=1 env");
eprintln!("var set and include the backtrace in your report.");
eprintln!();
eprintln!("Platform: {} {}", env::consts::OS, env::consts::ARCH);
eprintln!("Version: {}", deno_lib::version::DENO_VERSION_INFO.deno);
eprintln!("Args: {:?}", env::args().collect::<Vec<_>>());
eprintln!();
// Panic traces are not supported for custom/development builds.
#[cfg(feature = "panic-trace")]
{
let info = &deno_lib::version::DENO_VERSION_INFO;
let version =
if info.release_channel == deno_lib::shared::ReleaseChannel::Canary {
format!("{}+{}", deno_lib::version::DENO_VERSION, info.git_hash)
} else {
info.deno.to_string()
};
let trace = deno_panic::trace();
eprintln!("View stack trace at:");
eprintln!(
"https://panic.deno.com/v{}/{}/{}",
version,
env!("TARGET"),
trace
);
}
orig_hook(panic_info);
deno_runtime::exit(1);
}));
fn error_handler(file: &str, line: i32, message: &str) {
// Override C++ abort with a rust panic, so we
// get our message above and a nice backtrace.
panic!("Fatal error in {file}:{line}: {message}");
}
deno_core::v8::V8::set_fatal_error_handler(error_handler);
}
fn exit_with_message(message: &str, code: i32) -> ! {
log::error!(
"{}: {}",
colors::red_bold("error"),
message.trim_start_matches("error: ")
);
deno_runtime::exit(code);
}
fn exit_for_error(error: AnyError, initial_cwd: Option<&std::path::Path>) -> ! {
let error_string = match js_error_downcast_ref(&error) {
Some(e) => {
let initial_cwd = initial_cwd
.and_then(|cwd| deno_path_util::url_from_directory_path(cwd).ok());
format_js_error(e, initial_cwd.as_ref())
}
None => format!("{error:?}"),
};
exit_with_message(&error_string, 1);
}
pub(crate) fn unstable_exit_cb(feature: &str, api_name: &str) {
log::error!(
"Unstable API '{api_name}'. The `--unstable-{}` flag must be provided.",
feature
);
deno_runtime::exit(70);
}
fn maybe_setup_permission_broker() {
let Ok(socket_path) = std::env::var("DENO_PERMISSION_BROKER_PATH") else {
return;
};
log::warn!(
"{} Permission broker is an experimental feature",
colors::yellow("Warning")
);
let broker =
deno_runtime::deno_permissions::broker::PermissionBroker::new(socket_path);
deno_runtime::deno_permissions::broker::set_broker(broker);
}
pub fn main() {
#[cfg(feature = "dhat-heap")]
let profiler = dhat::Profiler::new_heap();
setup_panic_hook();
init_logging(None, None);
util::unix::raise_fd_limit();
util::windows::ensure_stdio_open();
#[cfg(windows)]
{
deno_subprocess_windows::disable_stdio_inheritance();
colors::enable_ansi(); // For Windows 10
}
deno_runtime::deno_permissions::prompter::set_prompt_callbacks(
Box::new(util::draw_thread::DrawThread::hide),
Box::new(util::draw_thread::DrawThread::show),
);
maybe_setup_permission_broker();
rustls::crypto::aws_lc_rs::default_provider()
.install_default()
.unwrap();
let args: Vec<_> = env::args_os().collect();
let future = async move {
let roots = LibWorkerFactoryRoots::default();
#[cfg(unix)]
let (waited_unconfigured_runtime, waited_args, waited_cwd) =
match wait_for_start(&args, roots.clone()) {
Some(f) => match f.await {
Ok(v) => match v {
Some((u, a, c)) => (Some(u), Some(a), Some(c)),
None => (None, None, None),
},
Err(e) => {
panic!("Failure from control sock: {e}");
}
},
None => (None, None, None),
};
#[cfg(not(unix))]
let (waited_unconfigured_runtime, waited_args, waited_cwd) =
(None, None, None);
let args = waited_args.unwrap_or(args);
let initial_cwd = waited_cwd.map(Some).unwrap_or_else(|| {
match std::env::current_dir().with_context(|| "Failed getting cwd.") {
Ok(cwd) => Some(cwd),
Err(err) => {
log::error!("Failed getting cwd: {err}");
None
}
}
});
// NOTE(lucacasonato): due to new PKU feature introduced in V8 11.6 we need to
// initialize the V8 platform on a parent thread of all threads that will spawn
// V8 isolates.
let flags = match resolve_flags_and_init(args, initial_cwd.clone()).await {
Ok(flags) => flags,
Err(err) => return (Err(err), initial_cwd),
};
if waited_unconfigured_runtime.is_none() {
init_v8(&flags);
}
(
run_subcommand(Arc::new(flags), waited_unconfigured_runtime, roots).await,
initial_cwd,
)
};
let (result, initial_cwd) =
create_and_run_current_thread_with_maybe_metrics(future);
#[cfg(feature = "dhat-heap")]
drop(profiler);
match result {
Ok(exit_code) => deno_runtime::exit(exit_code),
Err(err) => exit_for_error(err, initial_cwd.as_deref()),
}
}
async fn resolve_flags_and_init(
args: Vec<std::ffi::OsString>,
initial_cwd: Option<std::path::PathBuf>,
) -> Result<Flags, AnyError> {
// this env var is used by clap to enable dynamic completions, it's set by the shell when
// executing deno to get dynamic completions.
if std::env::var("COMPLETE").is_ok() {
crate::args::handle_shell_completion()?;
deno_runtime::exit(0);
}
let mut flags =
match flags_from_vec_with_initial_cwd(args, initial_cwd.clone()) {
Ok(flags) => flags,
Err(err @ clap::Error { .. })
if err.kind() == clap::error::ErrorKind::DisplayVersion =>
{
// Ignore results to avoid BrokenPipe errors.
let _ = err.print();
deno_runtime::exit(0);
}
Err(err) => exit_for_error(AnyError::from(err), initial_cwd.as_deref()),
};
// preserve already loaded env variables
if flags.subcommand.watch_flags().is_some() {
WatchEnvTracker::snapshot();
}
let env_file_paths: Option<Vec<std::path::PathBuf>> = flags
.env_file
.as_ref()
.map(|files| files.iter().map(PathBuf::from).collect());
load_env_variables_from_env_files(env_file_paths.as_ref(), flags.log_level);
if deno_lib::args::has_flag_env_var("DENO_CONNECTED") {
flags.tunnel = true;
}
// Tunnel sets up env vars and OTEL, so connect before everything else.
if flags.tunnel && !matches!(flags.subcommand, DenoSubcommand::Deploy(_)) {
if let Err(err) = initialize_tunnel(&flags).await {
exit_for_error(
err.context("Failed to start with tunnel"),
initial_cwd.as_deref(),
);
}
// SAFETY: We're doing this before any threads are created.
unsafe {
std::env::set_var("DENO_CONNECTED", "1");
}
}
flags.unstable_config.fill_with_env();
if std::env::var("DENO_COMPAT").is_ok() {
flags.unstable_config.enable_node_compat();
}
if flags.node_conditions.is_empty()
&& let Ok(conditions) = std::env::var("DENO_CONDITIONS")
{
flags.node_conditions = conditions
.split(",")
.map(|c| c.trim().to_string())
.collect();
}
let otel_config = flags.otel_config();
init_logging(flags.log_level, Some(otel_config.clone()));
deno_telemetry::init(
deno_lib::version::otel_runtime_config(),
otel_config.clone(),
)?;
if flags.permission_set.is_some() {
log::warn!(
"{} Permissions in the config file is an experimental feature and may change in the future.",
colors::yellow("Warning")
);
}
// TODO(bartlomieju): remove in Deno v2.5 and hard error then.
if flags.unstable_config.legacy_flag_enabled {
log::warn!(
"{} The `--unstable` flag has been removed in Deno 2.0. Use granular `--unstable-*` flags instead.\nLearn more at: https://docs.deno.com/runtime/manual/tools/unstable_flags",
colors::yellow("Warning")
);
}
if let Ok(audit_path) = std::env::var("DENO_AUDIT_PERMISSIONS") {
let audit_file = deno_runtime::deno_permissions::AUDIT_FILE.set(
deno_core::parking_lot::Mutex::new(std::fs::File::create(audit_path)?),
);
if audit_file.is_err() {
log::warn!("β οΈ {}", colors::yellow("Audit file is already set"));
}
}
Ok(flags)
}
fn init_v8(flags: &Flags) {
let default_v8_flags = match flags.subcommand {
DenoSubcommand::Lsp => vec![
"--stack-size=1024".to_string(),
"--js-explicit-resource-management".to_string(),
// Using same default as VSCode:
// https://github.com/microsoft/vscode/blob/48d4ba271686e8072fc6674137415bc80d936bc7/extensions/typescript-language-features/src/configuration/configuration.ts#L213-L214
"--max-old-space-size=3072".to_string(),
],
_ => get_default_v8_flags(),
};
let env_v8_flags = get_v8_flags_from_env();
let is_single_threaded = env_v8_flags
.iter()
.chain(&flags.v8_flags)
.any(|flag| flag == "--single-threaded");
init_v8_flags(&default_v8_flags, &flags.v8_flags, env_v8_flags);
let v8_platform = if is_single_threaded {
Some(::deno_core::v8::Platform::new_single_threaded(true).make_shared())
} else {
None
};
// TODO(bartlomieju): remove last argument once Deploy no longer needs it
deno_core::JsRuntime::init_platform(
v8_platform,
/* import assertions enabled */ false,
);
}
fn init_logging(
maybe_level: Option<log::Level>,
otel_config: Option<OtelConfig>,
) {
deno_lib::util::logger::init(deno_lib::util::logger::InitLoggingOptions {
maybe_level,
otel_config,
// it was considered to hold the draw thread's internal lock
// across logging, but if outputting to stderr blocks then that
// could potentially block other threads that access the draw
// thread's state
on_log_start: DrawThread::hide,
on_log_end: DrawThread::show,
})
}
#[cfg(unix)]
#[allow(clippy::type_complexity)]
fn wait_for_start(
args: &[std::ffi::OsString],
roots: LibWorkerFactoryRoots,
) -> Option<
impl Future<
Output = Result<
Option<(UnconfiguredRuntime, Vec<std::ffi::OsString>, PathBuf)>,
AnyError,
>,
> + use<>,
> {
let startup_snapshot = deno_snapshots::CLI_SNAPSHOT?;
let addr = std::env::var("DENO_UNSTABLE_CONTROL_SOCK").ok()?;
#[allow(clippy::undocumented_unsafe_blocks)]
unsafe {
std::env::remove_var("DENO_UNSTABLE_CONTROL_SOCK")
};
let argv0 = args[0].clone();
Some(async move {
use tokio::io::AsyncBufReadExt;
use tokio::io::AsyncRead;
use tokio::io::AsyncWrite;
use tokio::io::AsyncWriteExt;
use tokio::io::BufReader;
use tokio::net::TcpListener;
use tokio::net::UnixSocket;
#[cfg(any(
target_os = "android",
target_os = "linux",
target_os = "macos"
))]
use tokio_vsock::VsockAddr;
#[cfg(any(
target_os = "android",
target_os = "linux",
target_os = "macos"
))]
use tokio_vsock::VsockListener;
init_v8(&Flags::default());
let unconfigured = deno_runtime::UnconfiguredRuntime::new::<
deno_resolver::npm::DenoInNpmPackageChecker,
crate::npm::CliNpmResolver,
crate::sys::CliSys,
>(deno_runtime::UnconfiguredRuntimeOptions {
startup_snapshot,
create_params: deno_lib::worker::create_isolate_create_params(
&crate::sys::CliSys::default(),
),
shared_array_buffer_store: Some(roots.shared_array_buffer_store.clone()),
compiled_wasm_module_store: Some(
roots.compiled_wasm_module_store.clone(),
),
additional_extensions: vec![],
});
let (rx, mut tx): (
Box<dyn AsyncRead + Unpin>,
Box<dyn AsyncWrite + Send + Unpin>,
) = match addr.split_once(':') {
Some(("tcp", addr)) => {
let listener = TcpListener::bind(addr).await?;
let (stream, _) = listener.accept().await?;
let (rx, tx) = stream.into_split();
(Box::new(rx), Box::new(tx))
}
Some(("unix", path)) => {
let socket = UnixSocket::new_stream()?;
socket.bind(path)?;
let listener = socket.listen(1)?;
let (stream, _) = listener.accept().await?;
let (rx, tx) = stream.into_split();
(Box::new(rx), Box::new(tx))
}
#[cfg(any(
target_os = "android",
target_os = "linux",
target_os = "macos"
))]
Some(("vsock", addr)) => {
let Some((cid, port)) = addr.split_once(':') else {
deno_core::anyhow::bail!("invalid vsock addr");
};
let cid = if cid == "-1" { u32::MAX } else { cid.parse()? };
let port = port.parse()?;
let addr = VsockAddr::new(cid, port);
let listener = VsockListener::bind(addr)?;
let (stream, _) = listener.accept().await?;
let (rx, tx) = stream.into_split();
(Box::new(rx), Box::new(tx))
}
_ => {
deno_core::anyhow::bail!("invalid control sock");
}
};
let mut buf = Vec::with_capacity(1024);
BufReader::new(rx).read_until(b'\n', &mut buf).await?;
tokio::spawn(async move {
deno_runtime::deno_http::SERVE_NOTIFIER.notified().await;
#[derive(deno_core::serde::Serialize)]
enum Event {
Serving,
}
let mut buf = deno_core::serde_json::to_vec(&Event::Serving).unwrap();
buf.push(b'\n');
let _ = tx.write_all(&buf).await;
});
#[derive(deno_core::serde::Deserialize)]
struct Start {
cwd: String,
args: Vec<String>,
env: Vec<(String, String)>,
}
let cmd: Start = deno_core::serde_json::from_slice(&buf)?;
std::env::set_current_dir(&cmd.cwd)?;
for (k, v) in cmd.env {
// SAFETY: We're doing this before any threads are created.
unsafe { std::env::set_var(k, v) };
}
let args = [argv0]
.into_iter()
.chain(cmd.args.into_iter().map(Into::into))
.collect();
Ok(Some((unconfigured, args, PathBuf::from(cmd.cwd))))
})
}
/// Result of the external tunnel authentication flow, deserialized from
/// the JSON the auth helper writes.
#[derive(serde::Deserialize)]
struct AuthTunnelOutput {
  // assumes org/app are the tunnel organization and application slugs,
  // and token is the resulting auth token — TODO confirm against writer
  org: String,
  app: String,
  token: String,
}
async fn auth_tunnel(
no_config: bool,
env_token: Option<String>,
) -> Result<AuthTunnelOutput, deno_core::anyhow::Error> {
let file = tempfile::NamedTempFile::new()?;
let mut args = vec![];
if let Some(token) = &env_token {
args.push("--token".to_string());
args.push(token.clone());
}
if no_config {
args.push("--really-no-config".into());
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | true |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/http_util.rs | cli/http_util.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::sync::Arc;
use boxed_error::Boxed;
use deno_cache_dir::file_fetcher::RedirectHeaderParseError;
use deno_core::error::AnyError;
use deno_core::futures::StreamExt;
use deno_core::serde;
use deno_core::serde_json;
use deno_core::url::Url;
use deno_error::JsError;
use deno_error::JsErrorBox;
use deno_lib::version::DENO_VERSION_INFO;
use deno_runtime::deno_fetch;
use deno_runtime::deno_fetch::CreateHttpClientOptions;
use deno_runtime::deno_fetch::ResBody;
use deno_runtime::deno_fetch::create_http_client;
use deno_runtime::deno_tls::RootCertStoreProvider;
use http::HeaderMap;
use http::StatusCode;
use http::header::CONTENT_LENGTH;
use http::header::HeaderName;
use http::header::HeaderValue;
use http_body_util::BodyExt;
use once_cell::sync::OnceCell;
use thiserror::Error;
use crate::util::progress_bar::UpdateGuard;
/// Error produced when sending a bare request via [`HttpClient::send`].
#[derive(Debug, Error)]
pub enum SendError {
  /// The underlying fetch client failed while sending the request.
  #[error(transparent)]
  Send(#[from] deno_fetch::ClientSendError),
  /// The URL could not be converted into an `http::Uri`.
  #[error(transparent)]
  InvalidUri(#[from] http::uri::InvalidUri),
}
/// Lazily creates and caches a single `deno_fetch::Client` (resolving the
/// root cert store on first use) and hands out [`HttpClient`] wrappers
/// backed by cheap clones of it.
pub struct HttpClientProvider {
  options: CreateHttpClientOptions,
  root_cert_store_provider: Option<Arc<dyn RootCertStoreProvider>>,
  // Initialized on the first successful `get_or_create` call.
  client: OnceCell<deno_fetch::Client>,
}
impl std::fmt::Debug for HttpClientProvider {
  /// Manual `Debug` impl: only `options` is printed because the cached
  /// client and the cert-store provider are not usefully printable.
  fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
    // Fixed: previously printed "HttpClient", which is the name of a
    // different type in this module and made debug logs ambiguous.
    f.debug_struct("HttpClientProvider")
      .field("options", &self.options)
      .finish()
  }
}
impl HttpClientProvider {
  /// Creates a provider; no HTTP client is built until `get_or_create`.
  pub fn new(
    root_cert_store_provider: Option<Arc<dyn RootCertStoreProvider>>,
    unsafely_ignore_certificate_errors: Option<Vec<String>>,
  ) -> Self {
    Self {
      options: CreateHttpClientOptions {
        unsafely_ignore_certificate_errors,
        ..Default::default()
      },
      root_cert_store_provider,
      client: OnceCell::new(),
    }
  }
  /// Returns an [`HttpClient`] backed by the shared client, building the
  /// client (and resolving the root cert store) on the first call only.
  /// If the first initialization fails, a later call retries it.
  pub fn get_or_create(&self) -> Result<HttpClient, JsErrorBox> {
    let client = self.client.get_or_try_init(|| {
      create_http_client(
        DENO_VERSION_INFO.user_agent,
        CreateHttpClientOptions {
          root_cert_store: match &self.root_cert_store_provider {
            Some(provider) => Some(provider.get_or_try_init()?.clone()),
            None => None,
          },
          ..self.options.clone()
        },
      )
      .map_err(JsErrorBox::from_err)
    })?;
    Ok(HttpClient::new(client.clone()))
  }
}
/// A non-success, non-404/304 HTTP status, optionally carrying the
/// (trimmed, non-empty) response body text for diagnostics.
#[derive(Debug, Error, JsError)]
#[class(type)]
#[error("Bad response: {:?}{}", .status_code, .response_text.as_ref().map(|s| format!("\n\n{}", s)).unwrap_or_else(String::new))]
pub struct BadResponseError {
  pub status_code: StatusCode,
  pub response_text: Option<String>,
}
/// Boxed wrapper around [`DownloadErrorKind`] to keep `Result` payloads
/// pointer-sized (see the `Boxed` derive).
#[derive(Debug, Boxed, JsError)]
pub struct DownloadError(pub Box<DownloadErrorKind>);
/// All the ways a download through [`HttpClient`] can fail.
#[derive(Debug, Error, JsError)]
pub enum DownloadErrorKind {
  /// Transport-level failure from the fetch client.
  #[class(inherit)]
  #[error(transparent)]
  Fetch(deno_fetch::ClientSendError),
  #[class(inherit)]
  #[error(transparent)]
  UrlParse(#[from] deno_core::url::ParseError),
  #[class(generic)]
  #[error(transparent)]
  HttpParse(#[from] http::Error),
  #[class(inherit)]
  #[error(transparent)]
  Json(#[from] serde_json::Error),
  #[class(generic)]
  #[error(transparent)]
  ToStr(#[from] http::header::ToStrError),
  /// The `Location` header of a redirect could not be parsed.
  #[class(inherit)]
  #[error(transparent)]
  RedirectHeaderParse(RedirectHeaderParseError),
  /// More redirects than the follow limit allows.
  #[class(type)]
  #[error("Too many redirects.")]
  TooManyRedirects,
  #[class(inherit)]
  #[error(transparent)]
  BadResponse(#[from] BadResponseError),
  #[class("Http")]
  #[error("Not Found.")]
  NotFound,
  /// A 304 arrived where the caller did not expect conditional semantics.
  #[class("Http")]
  #[error("Received unhandled Not Modified response.")]
  UnhandledNotModified,
  #[class(inherit)]
  #[error(transparent)]
  Other(JsErrorBox),
}
/// Outcome of a download: a fully-buffered success, a 404, or a 304.
#[derive(Debug)]
pub enum HttpClientResponse {
  Success {
    headers: HeaderMap<HeaderValue>,
    // Entire response body, already collected into memory.
    body: Vec<u8>,
  },
  NotFound,
  NotModified,
}
impl HttpClientResponse {
  /// Consumes the response, returning the body of a success and turning
  /// `NotFound`/`NotModified` into the corresponding download errors.
  pub fn into_bytes(self) -> Result<Vec<u8>, DownloadError> {
    let err_kind = match self {
      Self::Success { body, .. } => return Ok(body),
      Self::NotFound => DownloadErrorKind::NotFound,
      Self::NotModified => DownloadErrorKind::UnhandledNotModified,
    };
    Err(err_kind.into_box())
  }
  /// Like [`Self::into_bytes`], but maps a 404 to `Ok(None)` instead of
  /// an error; a 304 is still an error.
  pub fn into_maybe_bytes(self) -> Result<Option<Vec<u8>>, DownloadError> {
    match self {
      Self::NotModified => {
        Err(DownloadErrorKind::UnhandledNotModified.into_box())
      }
      Self::Success { body, .. } => Ok(Some(body)),
      Self::NotFound => Ok(None),
    }
  }
}
/// Thin wrapper over a `deno_fetch::Client` with download/redirect helpers.
/// Obtain instances from [`HttpClientProvider::get_or_create`].
#[derive(Debug)]
pub struct HttpClient {
  client: deno_fetch::Client,
}
impl HttpClient {
  // DO NOT make this public. You should always be creating one of these from
  // the HttpClientProvider
  fn new(client: deno_fetch::Client) -> Self {
    Self { client }
  }
  /// Builds a GET request for `url` with an empty body.
  pub fn get(&self, url: Url) -> Result<RequestBuilder, http::Error> {
    let body = deno_fetch::ReqBody::empty();
    let mut req = http::Request::new(body);
    *req.uri_mut() = url.as_str().parse()?;
    Ok(RequestBuilder {
      client: self.client.clone(),
      req,
    })
  }
  /// Builds a POST request for `url` with the given body.
  pub fn post(
    &self,
    url: Url,
    body: deno_fetch::ReqBody,
  ) -> Result<RequestBuilder, http::Error> {
    let mut req = http::Request::new(body);
    *req.method_mut() = http::Method::POST;
    *req.uri_mut() = url.as_str().parse()?;
    Ok(RequestBuilder {
      client: self.client.clone(),
      req,
    })
  }
  /// Builds a POST request whose body is `ser` serialized as JSON, with
  /// `Content-Type: application/json` set.
  pub fn post_json<S>(
    &self,
    url: Url,
    ser: &S,
  ) -> Result<RequestBuilder, DownloadError>
  where
    S: serde::Serialize,
  {
    let json = deno_core::serde_json::to_vec(ser)?;
    let body = deno_fetch::ReqBody::full(json.into());
    let builder = self.post(url, body)?;
    Ok(builder.header(
      http::header::CONTENT_TYPE,
      "application/json".parse().map_err(http::Error::from)?,
    ))
  }
  /// Sends a single GET request with the given headers; does NOT follow
  /// redirects (see `get_redirected_response` for that).
  pub async fn send(
    &self,
    url: &Url,
    headers: HeaderMap,
  ) -> Result<http::Response<ResBody>, SendError> {
    let body = deno_fetch::ReqBody::empty();
    let mut request = http::Request::new(body);
    *request.uri_mut() = http::Uri::try_from(url.as_str())?;
    *request.headers_mut() = headers;
    self
      .client
      .clone()
      .send(request)
      .await
      .map_err(SendError::Send)
  }
  /// Downloads `url` and decodes the body as UTF-8.
  pub async fn download_text(&self, url: Url) -> Result<String, AnyError> {
    let bytes = self.download(url).await?;
    Ok(String::from_utf8(bytes)?)
  }
  /// Downloads `url`, following redirects; 404 and 304 become errors.
  pub async fn download(&self, url: Url) -> Result<Vec<u8>, DownloadError> {
    let response = self.download_inner(url, &Default::default(), None).await?;
    response.into_bytes()
  }
  /// Downloads `url` with progress reporting, retrying on transport
  /// failures and bad (non-success) responses.
  pub async fn download_with_progress_and_retries(
    &self,
    url: Url,
    headers: &HeaderMap,
    progress_guard: &UpdateGuard,
  ) -> Result<HttpClientResponse, DownloadError> {
    crate::util::retry::retry(
      || self.download_inner(url.clone(), headers, Some(progress_guard)),
      |e| {
        // Only these kinds are considered transient and worth retrying.
        matches!(
          e.as_kind(),
          DownloadErrorKind::BadResponse(_) | DownloadErrorKind::Fetch(_)
        )
      },
    )
    .await
  }
  /// Returns the final URL after following redirects from `url`.
  pub async fn get_redirected_url(
    &self,
    url: Url,
    headers: &HeaderMap<HeaderValue>,
  ) -> Result<Url, AnyError> {
    let (_, url) = self.get_redirected_response(url, headers).await?;
    Ok(url)
  }
  /// Fetches `url` (following redirects) and classifies the result:
  /// 404 -> NotFound, 304 -> NotModified, other non-success -> error
  /// (capturing the body text), success -> buffered body + headers.
  async fn download_inner(
    &self,
    url: Url,
    headers: &HeaderMap<HeaderValue>,
    progress_guard: Option<&UpdateGuard>,
  ) -> Result<HttpClientResponse, DownloadError> {
    let (response, _) = self.get_redirected_response(url, headers).await?;
    if response.status() == 404 {
      return Ok(HttpClientResponse::NotFound);
    } else if response.status() == 304 {
      return Ok(HttpClientResponse::NotModified);
    } else if !response.status().is_success() {
      let status = response.status();
      let maybe_response_text = body_to_string(response).await.ok();
      return Err(
        DownloadErrorKind::BadResponse(BadResponseError {
          status_code: status,
          response_text: maybe_response_text
            .map(|s| s.trim().to_string())
            .filter(|s| !s.is_empty()),
        })
        .into_box(),
      );
    }
    get_response_body_with_progress(response, progress_guard)
      .await
      .map(|(headers, body)| HttpClientResponse::Success { headers, body })
      .map_err(|err| DownloadErrorKind::Other(err).into_box())
  }
  /// GETs `url`, following up to 5 additional redirect hops (a 304 is not
  /// treated as a redirect). Returns the final response and final URL, or
  /// `TooManyRedirects` if the limit is exceeded.
  async fn get_redirected_response(
    &self,
    mut url: Url,
    headers: &HeaderMap<HeaderValue>,
  ) -> Result<(http::Response<deno_fetch::ResBody>, Url), DownloadError> {
    let mut req = self.get(url.clone())?.build();
    *req.headers_mut() = headers.clone();
    let mut response = self
      .client
      .clone()
      .send(req)
      .await
      .map_err(|e| DownloadErrorKind::Fetch(e).into_box())?;
    let status = response.status();
    if status.is_redirection() && status != http::StatusCode::NOT_MODIFIED {
      for _ in 0..5 {
        let new_url = resolve_redirect_from_response(&url, &response)?;
        let mut req = self.get(new_url.clone())?.build();
        let mut headers = headers.clone();
        // SECURITY: Do NOT forward auth headers to a new origin
        if new_url.origin() != url.origin() {
          headers.remove(http::header::AUTHORIZATION);
        }
        *req.headers_mut() = headers;
        let new_response = self
          .client
          .clone()
          .send(req)
          .await
          .map_err(|e| DownloadErrorKind::Fetch(e).into_box())?;
        let status = new_response.status();
        if status.is_redirection() {
          response = new_response;
          url = new_url;
        } else {
          return Ok((new_response, new_url));
        }
      }
      Err(DownloadErrorKind::TooManyRedirects.into_box())
    } else {
      Ok((response, url))
    }
  }
}
/// Collects a response body into memory, reporting byte-level progress to
/// `progress_guard` when the total size is known (from the body's size
/// hint or the `Content-Length` header); otherwise collects without
/// progress updates. Returns the response headers and the body bytes.
pub async fn get_response_body_with_progress(
  response: http::Response<deno_fetch::ResBody>,
  progress_guard: Option<&UpdateGuard>,
) -> Result<(HeaderMap, Vec<u8>), JsErrorBox> {
  use http_body::Body as _;
  if let Some(progress_guard) = progress_guard {
    let mut total_size = response.body().size_hint().exact();
    if total_size.is_none() {
      // Fall back to Content-Length when the body has no exact size hint.
      total_size = response
        .headers()
        .get(CONTENT_LENGTH)
        .and_then(|val| val.to_str().ok())
        .and_then(|s| s.parse::<u64>().ok());
    }
    if let Some(total_size) = total_size {
      progress_guard.set_total_size(total_size);
      let mut current_size = 0;
      let mut data = Vec::with_capacity(total_size as usize);
      let (parts, body) = response.into_parts();
      let mut stream = body.into_data_stream();
      // Stream chunk by chunk so progress can be updated incrementally.
      while let Some(item) = stream.next().await {
        let bytes = item?;
        current_size += bytes.len() as u64;
        progress_guard.set_position(current_size);
        data.extend(bytes.into_iter());
      }
      return Ok((parts.headers, data));
    }
  }
  // No progress reporting possible/requested: collect in one go.
  let (parts, body) = response.into_parts();
  let bytes = body.collect().await?.to_bytes();
  Ok((parts.headers, bytes.into()))
}
/// Resolves the target URL of a redirect response's `Location` header
/// relative to the request URL. Callers must only pass 3xx responses.
fn resolve_redirect_from_response<B>(
  request_url: &Url,
  response: &http::Response<B>,
) -> Result<Url, DownloadError> {
  debug_assert!(response.status().is_redirection());
  deno_cache_dir::file_fetcher::resolve_redirect_from_headers(
    request_url,
    response.headers(),
  )
  .map_err(|err| DownloadErrorKind::RedirectHeaderParse(*err).into_box())
}
/// Collects an HTTP body to completion and decodes it as UTF-8.
pub async fn body_to_string<B>(body: B) -> Result<String, AnyError>
where
  B: http_body::Body,
  AnyError: From<B::Error>,
{
  let collected = body.collect().await?.to_bytes();
  let text = std::str::from_utf8(&collected)?;
  Ok(text.to_owned())
}
/// Collects an HTTP body to completion and deserializes it as JSON into `D`.
pub async fn body_to_json<B, D>(body: B) -> Result<D, AnyError>
where
  B: http_body::Body,
  AnyError: From<B::Error>,
  D: serde::de::DeserializeOwned,
{
  let bytes = body.collect().await?.to_bytes();
  let val = deno_core::serde_json::from_slice(&bytes)?;
  Ok(val)
}
/// A pending request plus the client that will send it; produced by
/// [`HttpClient::get`]/[`HttpClient::post`].
pub struct RequestBuilder {
  client: deno_fetch::Client,
  req: http::Request<deno_fetch::ReqBody>,
}
impl RequestBuilder {
  /// Appends (not replaces) a header on the pending request.
  pub fn header(mut self, name: HeaderName, value: HeaderValue) -> Self {
    self.req.headers_mut().append(name, value);
    self
  }
  /// Sends the request with the stored client.
  pub async fn send(
    self,
  ) -> Result<http::Response<deno_fetch::ResBody>, AnyError> {
    self.client.send(self.req).await.map_err(Into::into)
  }
  /// Extracts the raw `http::Request`, discarding the client.
  pub fn build(self) -> http::Request<deno_fetch::ReqBody> {
    self.req
  }
}
#[allow(clippy::print_stdout)]
#[allow(clippy::print_stderr)]
#[cfg(test)]
mod test {
  // NOTE: most tests here depend on the local `test_util` HTTP servers,
  // and two tests intentionally hit public internet hosts (with an
  // offline-detection fallback so CI without internet doesn't fail).
  use std::collections::HashSet;
  use std::hash::RandomState;
  use deno_runtime::deno_tls::rustls::RootCertStore;
  use super::*;
  #[tokio::test]
  async fn test_http_client_download_redirect() {
    let _http_server_guard = test_util::http_server();
    let client = HttpClientProvider::new(None, None).get_or_create().unwrap();
    // make a request to the redirect server
    let text = client
      .download_text(
        Url::parse("http://localhost:4546/subdir/redirects/redirect1.js")
          .unwrap(),
      )
      .await
      .unwrap();
    assert_eq!(text, "export const redirect = 1;\n");
    // now make one to the infinite redirects server
    let err = client
      .download_text(
        Url::parse("http://localhost:4549/subdir/redirects/redirect1.js")
          .unwrap(),
      )
      .await
      .err()
      .unwrap();
    assert_eq!(err.to_string(), "Too many redirects.");
  }
  #[tokio::test]
  async fn test_fetch_with_cafile_string() {
    let _http_server_guard = test_util::http_server();
    let url = Url::parse("https://localhost:5545/assets/fixture.json").unwrap();
    let client = HttpClient::new(
      create_http_client(
        DENO_VERSION_INFO.user_agent,
        CreateHttpClientOptions {
          ca_certs: vec![
            std::fs::read(test_util::testdata_path().join("tls/RootCA.pem"))
              .unwrap(),
          ],
          ..Default::default()
        },
      )
      .unwrap(),
    );
    let response = client.send(&url, Default::default()).await.unwrap();
    assert!(response.status().is_success());
    let (parts, body) = response.into_parts();
    let headers = parts.headers;
    let body = body.collect().await.unwrap().to_bytes();
    assert!(!body.is_empty());
    assert_eq!(headers.get("content-type").unwrap(), "application/json");
    assert_eq!(headers.get("etag"), None);
    assert_eq!(headers.get("x-typescript-types"), None);
  }
  // Public hosts used by the two default-cert-store tests below.
  static PUBLIC_HTTPS_URLS: &[&str] = &[
    "https://deno.com/",
    "https://example.com/",
    "https://github.com/",
    "https://www.w3.org/",
  ];
  /// This test depends on external servers, so we need to be careful to avoid mistaking an offline machine with a
  /// test failure.
  #[tokio::test]
  async fn test_fetch_with_default_certificate_store() {
    let urls: HashSet<_, RandomState> =
      HashSet::from_iter(PUBLIC_HTTPS_URLS.iter());
    // Rely on the randomization of hashset iteration
    for url in urls {
      // Relies on external http server with a valid mozilla root CA cert.
      let url = Url::parse(url).unwrap();
      eprintln!("Attempting to fetch {url}...");
      let client = HttpClient::new(
        create_http_client(
          DENO_VERSION_INFO.user_agent,
          CreateHttpClientOptions::default(),
        )
        .unwrap(),
      );
      let result = client.send(&url, Default::default()).await;
      match result {
        Ok(response) if response.status().is_success() => {
          return; // success
        }
        _ => {
          // keep going
        }
      }
    }
    // Use 1.1.1.1 and 8.8.8.8 as our last-ditch internet check
    if std::net::TcpStream::connect("8.8.8.8:80").is_err()
      && std::net::TcpStream::connect("1.1.1.1:80").is_err()
    {
      return;
    }
    panic!(
      "None of the expected public URLs were available but internet appears to be available"
    );
  }
  #[tokio::test]
  async fn test_fetch_with_empty_certificate_store() {
    let root_cert_store = RootCertStore::empty();
    let urls: HashSet<_, RandomState> =
      HashSet::from_iter(PUBLIC_HTTPS_URLS.iter());
    // Rely on the randomization of hashset iteration
    let url = urls.into_iter().next().unwrap();
    // Relies on external http server with a valid mozilla root CA cert.
    let url = Url::parse(url).unwrap();
    eprintln!("Attempting to fetch {url}...");
    let client = HttpClient::new(
      create_http_client(
        DENO_VERSION_INFO.user_agent,
        CreateHttpClientOptions {
          root_cert_store: Some(root_cert_store),
          ..Default::default()
        },
      )
      .unwrap(),
    );
    let result = client.send(&url, HeaderMap::new()).await;
    // With no trusted roots the TLS handshake (or the request) must fail.
    assert!(result.is_err() || !result.unwrap().status().is_success());
  }
  #[tokio::test]
  async fn test_fetch_with_cafile_gzip() {
    let _http_server_guard = test_util::http_server();
    let url =
      Url::parse("https://localhost:5545/run/import_compression/gziped")
        .unwrap();
    let client = HttpClient::new(
      create_http_client(
        DENO_VERSION_INFO.user_agent,
        CreateHttpClientOptions {
          ca_certs: vec![
            std::fs::read(
              test_util::testdata_path()
                .join("tls/RootCA.pem")
                .to_string(),
            )
            .unwrap(),
          ],
          ..Default::default()
        },
      )
      .unwrap(),
    );
    let response = client.send(&url, Default::default()).await.unwrap();
    assert!(response.status().is_success());
    let (parts, body) = response.into_parts();
    let headers = parts.headers;
    let body = body.collect().await.unwrap().to_bytes().to_vec();
    assert_eq!(String::from_utf8(body).unwrap(), "console.log('gzip')");
    assert_eq!(
      headers.get("content-type").unwrap(),
      "application/javascript"
    );
    assert_eq!(headers.get("etag"), None);
    assert_eq!(headers.get("x-typescript-types"), None);
  }
  #[tokio::test]
  async fn test_fetch_with_cafile_with_etag() {
    let _http_server_guard = test_util::http_server();
    let url = Url::parse("https://localhost:5545/etag_script.ts").unwrap();
    let client = HttpClient::new(
      create_http_client(
        DENO_VERSION_INFO.user_agent,
        CreateHttpClientOptions {
          ca_certs: vec![
            std::fs::read(
              test_util::testdata_path()
                .join("tls/RootCA.pem")
                .to_string(),
            )
            .unwrap(),
          ],
          ..Default::default()
        },
      )
      .unwrap(),
    );
    let response = client.send(&url, Default::default()).await.unwrap();
    assert!(response.status().is_success());
    let (parts, body) = response.into_parts();
    let headers = parts.headers;
    let body = body.collect().await.unwrap().to_bytes().to_vec();
    assert!(!body.is_empty());
    assert_eq!(String::from_utf8(body).unwrap(), "console.log('etag')");
    assert_eq!(
      headers.get("content-type").unwrap(),
      "application/typescript"
    );
    assert_eq!(headers.get("etag").unwrap(), "33a64df551425fcc55e");
    assert_eq!(headers.get("x-typescript-types"), None);
    // A matching If-None-Match should yield 304 Not Modified.
    let mut headers = HeaderMap::new();
    headers.insert("If-None-Match", "33a64df551425fcc55e".parse().unwrap());
    let res = client.send(&url, headers).await.unwrap();
    assert_eq!(res.status(), StatusCode::NOT_MODIFIED);
  }
  #[tokio::test]
  async fn test_fetch_with_cafile_brotli() {
    let _http_server_guard = test_util::http_server();
    let url =
      Url::parse("https://localhost:5545/run/import_compression/brotli")
        .unwrap();
    let client = HttpClient::new(
      create_http_client(
        DENO_VERSION_INFO.user_agent,
        CreateHttpClientOptions {
          ca_certs: vec![
            std::fs::read(
              test_util::testdata_path()
                .join("tls/RootCA.pem")
                .to_string(),
            )
            .unwrap(),
          ],
          ..Default::default()
        },
      )
      .unwrap(),
    );
    let response = client.send(&url, Default::default()).await.unwrap();
    assert!(response.status().is_success());
    let (parts, body) = response.into_parts();
    let headers = parts.headers;
    let body = body.collect().await.unwrap().to_bytes().to_vec();
    assert!(!body.is_empty());
    assert_eq!(String::from_utf8(body).unwrap(), "console.log('brotli');");
    assert_eq!(
      headers.get("content-type").unwrap(),
      "application/javascript"
    );
    assert_eq!(headers.get("etag"), None);
    assert_eq!(headers.get("x-typescript-types"), None);
  }
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/graph_container.rs | cli/graph_container.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::sync::Arc;
use deno_ast::ModuleSpecifier;
use deno_config::glob::FilePatterns;
use deno_config::glob::PathOrPatternSet;
use deno_core::error::AnyError;
use deno_core::parking_lot::RwLock;
use deno_graph::ModuleGraph;
use deno_runtime::colors;
use deno_runtime::deno_permissions::PermissionsContainer;
use crate::args::CliOptions;
use crate::module_loader::ModuleLoadPreparer;
use crate::module_loader::PrepareModuleLoadOptions;
use crate::util::fs::collect_specifiers;
use crate::util::path::is_script_ext;
/// Shared access to a `ModuleGraph` with single-writer/multi-reader
/// update semantics.
pub trait ModuleGraphContainer: Clone + 'static {
  /// Acquires a permit to modify the module graph without other code
  /// having the chance to modify it. In the meantime, other code may
  /// still read from the existing module graph.
  async fn acquire_update_permit(&self) -> impl ModuleGraphUpdatePermit;
  /// Gets a copy of the graph.
  fn graph(&self) -> Arc<ModuleGraph>;
}
/// A permit for updating the module graph. When complete and
/// everything looks fine, calling `.commit()` will store the
/// new graph in the ModuleGraphContainer.
pub trait ModuleGraphUpdatePermit {
  /// Gets the module graph for mutation.
  fn graph_mut(&mut self) -> &mut ModuleGraph;
  /// Saves the mutated module graph in the container.
  /// Dropping the permit without calling this discards the mutations.
  fn commit(self);
}
/// Holds the `ModuleGraph` for the main worker.
#[derive(Clone)]
pub struct MainModuleGraphContainer {
  // Allow only one request to update the graph data at a time,
  // but allow other requests to read from it at any time even
  // while another request is updating the data.
  update_queue: Arc<deno_core::unsync::sync::TaskQueue>,
  // Current published graph; readers clone the inner Arc, a committer
  // swaps in a new Arc.
  inner: Arc<RwLock<Arc<ModuleGraph>>>,
  cli_options: Arc<CliOptions>,
  module_load_preparer: Arc<ModuleLoadPreparer>,
  root_permissions: PermissionsContainer,
}
/// Options for [`MainModuleGraphContainer::check_specifiers`].
#[derive(Default, Debug)]
pub struct CheckSpecifiersOptions<'a> {
  // Forces all checked files to be treated as this extension (e.g. "ts").
  pub ext_overwrite: Option<&'a String>,
  pub allow_unknown_media_types: bool,
}
/// Options for [`MainModuleGraphContainer::collect_specifiers`].
pub struct CollectSpecifiersOptions {
  /// Whether to include paths that are specified even if they're ignored.
  pub include_ignored_specified: bool,
}
impl MainModuleGraphContainer {
  /// Creates a container holding an empty graph of the configured kind.
  pub fn new(
    cli_options: Arc<CliOptions>,
    module_load_preparer: Arc<ModuleLoadPreparer>,
    root_permissions: PermissionsContainer,
  ) -> Self {
    Self {
      update_queue: Default::default(),
      inner: Arc::new(RwLock::new(Arc::new(ModuleGraph::new(
        cli_options.graph_kind(),
      )))),
      cli_options,
      module_load_preparer,
      root_permissions,
    }
  }
  /// Loads (and type checks, per options) `specifiers` into the graph
  /// under an update permit, committing the new graph on success.
  pub async fn check_specifiers(
    &self,
    specifiers: &[ModuleSpecifier],
    options: CheckSpecifiersOptions<'_>,
  ) -> Result<(), AnyError> {
    let mut graph_permit = self.acquire_update_permit().await;
    let graph = graph_permit.graph_mut();
    self
      .module_load_preparer
      .prepare_module_load(
        graph,
        specifiers,
        PrepareModuleLoadOptions {
          is_dynamic: false,
          lib: self.cli_options.ts_type_lib_window(),
          permissions: self.root_permissions.clone(),
          ext_overwrite: options.ext_overwrite,
          allow_unknown_media_types: options.allow_unknown_media_types,
          skip_graph_roots_validation: false,
        },
      )
      .await?;
    graph_permit.commit();
    Ok(())
  }
  /// Helper around prepare_module_load that loads and type checks
  /// the provided files.
  pub async fn load_and_type_check_files(
    &self,
    files: &[String],
    options: CollectSpecifiersOptions,
  ) -> Result<(), AnyError> {
    let specifiers = self.collect_specifiers(files, options)?;
    if specifiers.is_empty() {
      // Not an error: warn and continue with an empty set.
      log::warn!("{} No matching files found.", colors::yellow("Warning"));
    }
    self.check_specifiers(&specifiers, Default::default()).await
  }
  /// Expands the CLI file/glob arguments into script specifiers, relative
  /// to the initial cwd and honoring configured excludes.
  pub fn collect_specifiers(
    &self,
    files: &[String],
    options: CollectSpecifiersOptions,
  ) -> Result<Vec<ModuleSpecifier>, AnyError> {
    let excludes = self.cli_options.workspace().resolve_config_excludes()?;
    let include_patterns =
      PathOrPatternSet::from_include_relative_path_or_patterns(
        self.cli_options.initial_cwd(),
        files,
      )?;
    let file_patterns = FilePatterns {
      base: self.cli_options.initial_cwd().to_path_buf(),
      include: Some(include_patterns),
      exclude: excludes,
    };
    collect_specifiers(
      crate::util::fs::CollectSpecifiersOptions {
        file_patterns,
        vendor_folder: self
          .cli_options
          .vendor_dir_path()
          .map(ToOwned::to_owned),
        include_ignored_specified: options.include_ignored_specified,
      },
      // Only files with script-like extensions are collected.
      |e| is_script_ext(e.path),
    )
  }
}
impl ModuleGraphContainer for MainModuleGraphContainer {
  async fn acquire_update_permit(&self) -> impl ModuleGraphUpdatePermit {
    // Waits until no other updater holds the queue.
    let permit = self.update_queue.acquire().await;
    MainModuleGraphUpdatePermit {
      permit,
      inner: self.inner.clone(),
      // Deep-copies the current graph so mutations don't affect readers
      // until `commit()` swaps the new graph in.
      graph: (**self.inner.read()).clone(),
    }
  }
  fn graph(&self) -> Arc<ModuleGraph> {
    self.inner.read().clone()
  }
}
/// A permit for updating the module graph. When complete and
/// everything looks fine, calling `.commit()` will store the
/// new graph in the ModuleGraphContainer.
pub struct MainModuleGraphUpdatePermit<'a> {
  // Held for the permit's lifetime to exclude other updaters.
  permit: deno_core::unsync::sync::TaskQueuePermit<'a>,
  inner: Arc<RwLock<Arc<ModuleGraph>>>,
  // Private working copy of the graph being mutated.
  graph: ModuleGraph,
}
impl ModuleGraphUpdatePermit for MainModuleGraphUpdatePermit<'_> {
  /// Exposes the working copy of the graph for mutation.
  fn graph_mut(&mut self) -> &mut ModuleGraph {
    &mut self.graph
  }
  /// Publishes the mutated graph and releases the update permit.
  fn commit(self) {
    let Self {
      permit,
      inner,
      graph,
    } = self;
    *inner.write() = Arc::new(graph);
    // Release the task-queue permit only after the graph is published.
    drop(permit);
  }
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/npm.rs | cli/npm.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::borrow::Cow;
use std::collections::HashSet;
use std::path::PathBuf;
use std::rc::Rc;
use std::sync::Arc;
use dashmap::DashMap;
use deno_core::error::AnyError;
use deno_core::serde_json;
use deno_core::url::Url;
use deno_error::JsErrorBox;
use deno_lib::version::DENO_VERSION_INFO;
use deno_npm::NpmResolutionPackage;
use deno_npm::npm_rc::ResolvedNpmRc;
use deno_npm::registry::NpmPackageInfo;
use deno_npm::registry::NpmPackageVersionInfosIterator;
use deno_npm::resolution::NpmResolutionSnapshot;
use deno_npm::resolution::NpmVersionResolver;
use deno_npm_cache::NpmCacheHttpClientBytesResponse;
use deno_npm_cache::NpmCacheHttpClientResponse;
use deno_npm_installer::BinEntries;
use deno_npm_installer::CachedNpmPackageExtraInfoProvider;
use deno_npm_installer::ExpectedExtraInfo;
use deno_npm_installer::lifecycle_scripts::LIFECYCLE_SCRIPTS_RUNNING_ENV_VAR;
use deno_npm_installer::lifecycle_scripts::LifecycleScriptsExecutor;
use deno_npm_installer::lifecycle_scripts::LifecycleScriptsExecutorOptions;
use deno_npm_installer::lifecycle_scripts::PackageWithScript;
use deno_npm_installer::lifecycle_scripts::is_broken_default_install_script;
use deno_resolver::npm::ByonmNpmResolverCreateOptions;
use deno_resolver::npm::ManagedNpmResolverRc;
use deno_runtime::deno_io::FromRawIoHandle;
use deno_semver::package::PackageNv;
use deno_semver::package::PackageReq;
use deno_task_shell::KillSignal;
use crate::file_fetcher::CliFileFetcher;
use crate::http_util::HttpClientProvider;
use crate::sys::CliSys;
use crate::task_runner::TaskStdio;
use crate::util::progress_bar::ProgressBar;
use crate::util::progress_bar::ProgressMessagePrompt;
// Type aliases binding the generic npm installer/cache/resolver machinery
// to the CLI's concrete HTTP client, progress bar, and sys implementations.
pub type CliNpmInstallerFactory = deno_npm_installer::NpmInstallerFactory<
  CliNpmCacheHttpClient,
  ProgressBar,
  CliSys,
>;
pub type CliNpmInstaller =
  deno_npm_installer::NpmInstaller<CliNpmCacheHttpClient, CliSys>;
pub type CliNpmCache = deno_npm_cache::NpmCache<CliSys>;
pub type CliNpmRegistryInfoProvider =
  deno_npm_cache::RegistryInfoProvider<CliNpmCacheHttpClient, CliSys>;
pub type CliNpmResolver = deno_resolver::npm::NpmResolver<CliSys>;
pub type CliManagedNpmResolver = deno_resolver::npm::ManagedNpmResolver<CliSys>;
pub type CliNpmResolverCreateOptions =
  deno_resolver::npm::NpmResolverCreateOptions<CliSys>;
pub type CliByonmNpmResolverCreateOptions =
  ByonmNpmResolverCreateOptions<CliSys>;
pub type CliNpmGraphResolver = deno_npm_installer::graph::NpmDenoGraphResolver<
  CliNpmCacheHttpClient,
  CliSys,
>;
/// HTTP client used by the npm cache for registry/tarball downloads,
/// wired to the CLI's shared client provider and progress bar.
#[derive(Debug)]
pub struct CliNpmCacheHttpClient {
  http_client_provider: Arc<HttpClientProvider>,
  progress_bar: ProgressBar,
}
impl CliNpmCacheHttpClient {
  /// Creates the npm-cache HTTP client from the shared provider and the
  /// progress bar used to report download progress.
  pub fn new(
    http_client_provider: Arc<HttpClientProvider>,
    progress_bar: ProgressBar,
  ) -> Self {
    Self {
      http_client_provider,
      progress_bar,
    }
  }
}
#[async_trait::async_trait(?Send)]
impl deno_npm_cache::NpmCacheHttpClient for CliNpmCacheHttpClient {
  /// Downloads `url` with progress + retries, translating the CLI's HTTP
  /// response/error types into the npm cache's types. `maybe_auth` is sent
  /// as an `Authorization` header and `maybe_etag` as `If-None-Match`.
  async fn download_with_retries_on_any_tokio_runtime(
    &self,
    url: Url,
    maybe_auth: Option<String>,
    maybe_etag: Option<String>,
  ) -> Result<NpmCacheHttpClientResponse, deno_npm_cache::DownloadError> {
    let guard = self.progress_bar.update(url.as_str());
    let client = self.http_client_provider.get_or_create().map_err(|err| {
      deno_npm_cache::DownloadError {
        status_code: None,
        error: err,
      }
    })?;
    let mut headers = http::HeaderMap::new();
    if let Some(auth) = maybe_auth {
      headers.append(
        http::header::AUTHORIZATION,
        http::header::HeaderValue::try_from(auth).unwrap(),
      );
    }
    if let Some(etag) = maybe_etag {
      headers.append(
        http::header::IF_NONE_MATCH,
        http::header::HeaderValue::try_from(etag).unwrap(),
      );
    }
    client
      .download_with_progress_and_retries(url, &headers, &guard)
      .await
      .map(|response| match response {
        crate::http_util::HttpClientResponse::Success { headers, body } => {
          NpmCacheHttpClientResponse::Bytes(NpmCacheHttpClientBytesResponse {
            // Preserve the ETag (if valid UTF-8) for later conditional
            // requests.
            etag: headers
              .get(http::header::ETAG)
              .and_then(|e| e.to_str().map(|t| t.to_string()).ok()),
            bytes: body,
          })
        }
        crate::http_util::HttpClientResponse::NotFound => {
          NpmCacheHttpClientResponse::NotFound
        }
        crate::http_util::HttpClientResponse::NotModified => {
          NpmCacheHttpClientResponse::NotModified
        }
      })
      .map_err(|err| {
        use crate::http_util::DownloadErrorKind::*;
        // Only BadResponse carries an HTTP status; everything else maps
        // to a status-less download error.
        let status_code = match err.as_kind() {
          Fetch { .. }
          | UrlParse { .. }
          | HttpParse { .. }
          | Json { .. }
          | ToStr { .. }
          | RedirectHeaderParse { .. }
          | TooManyRedirects
          | UnhandledNotModified
          | NotFound
          | Other(_) => None,
          BadResponse(bad_response_error) => {
            Some(bad_response_error.status_code.as_u16())
          }
        };
        deno_npm_cache::DownloadError {
          status_code,
          error: JsErrorBox::from_err(err),
        }
      })
  }
}
/// Resolves npm package requirements to concrete versions by fetching
/// registry metadata, memoizing both the req->nv resolutions and the
/// per-package registry info.
#[derive(Debug)]
pub struct NpmFetchResolver {
  // Memoized resolution results (None = could not resolve).
  nv_by_req: DashMap<PackageReq, Option<PackageNv>>,
  // Memoized registry info per package name (None = fetch failed).
  info_by_name: DashMap<String, Option<Arc<NpmPackageInfo>>>,
  file_fetcher: Arc<CliFileFetcher>,
  npmrc: Arc<ResolvedNpmRc>,
  version_resolver: Arc<NpmVersionResolver>,
}
impl NpmFetchResolver {
  /// Creates a resolver with empty memoization caches.
  pub fn new(
    file_fetcher: Arc<CliFileFetcher>,
    npmrc: Arc<ResolvedNpmRc>,
    version_resolver: Arc<NpmVersionResolver>,
  ) -> Self {
    Self {
      nv_by_req: Default::default(),
      info_by_name: Default::default(),
      file_fetcher,
      npmrc,
      version_resolver,
    }
  }
  /// Resolves a package requirement to the best matching name+version,
  /// returning `Ok(None)` when the package info can't be fetched.
  /// Results (including failures) are cached per requirement.
  pub async fn req_to_nv(
    &self,
    req: &PackageReq,
  ) -> Result<Option<PackageNv>, AnyError> {
    if let Some(nv) = self.nv_by_req.get(req) {
      return Ok(nv.value().clone());
    }
    let maybe_get_nv = || async {
      let name = &req.name;
      let Some(package_info) = self.package_info(name).await else {
        return Result::<Option<PackageNv>, AnyError>::Ok(None);
      };
      let version_resolver =
        self.version_resolver.get_for_package(&package_info);
      let version_info = version_resolver.resolve_best_package_version_info(
        &req.version_req,
        // No already-selected versions to prefer.
        Vec::new().into_iter(),
      )?;
      Ok(Some(PackageNv {
        name: name.clone(),
        version: version_info.version.clone(),
      }))
    };
    let nv = maybe_get_nv().await?;
    self.nv_by_req.insert(req.clone(), nv.clone());
    Ok(nv)
  }
  /// Fetches (and caches) the registry info for `name`; returns `None`
  /// on any fetch/auth/parse failure.
  pub async fn package_info(&self, name: &str) -> Option<Arc<NpmPackageInfo>> {
    if let Some(info) = self.info_by_name.get(name) {
      return info.value().clone();
    }
    // todo(#27198): use RegistryInfoProvider instead
    let fetch_package_info = || async {
      let info_url = deno_npm_cache::get_package_url(&self.npmrc, name);
      let registry_config = self.npmrc.get_registry_config(name);
      // TODO(bartlomieju): this should error out, not use `.ok()`.
      let maybe_auth_header =
        deno_npm_cache::maybe_auth_header_value_for_npm_registry(
          registry_config,
        )
        .map_err(AnyError::from)
        .and_then(|value| match value {
          Some(value) => Ok(Some((
            http::header::AUTHORIZATION,
            http::HeaderValue::try_from(value.into_bytes())?,
          ))),
          None => Ok(None),
        })
        .ok()?;
      let file = self
        .file_fetcher
        .fetch_bypass_permissions_with_maybe_auth(&info_url, maybe_auth_header)
        .await
        .ok()?;
      serde_json::from_slice::<NpmPackageInfo>(&file.source).ok()
    };
    let info = fetch_package_info().await.map(Arc::new);
    self.info_by_name.insert(name.to_string(), info.clone());
    info
  }
  /// Iterates the versions of `package_info` applicable under the
  /// configured version resolver.
  pub fn applicable_version_infos<'a>(
    &'a self,
    package_info: &'a NpmPackageInfo,
  ) -> NpmPackageVersionInfosIterator<'a> {
    self
      .version_resolver
      .get_for_package(package_info)
      .applicable_version_infos()
  }
}
/// Name of the env var npm-compatible tooling reads to discover which
/// package manager / user agent initiated the current process.
pub static NPM_CONFIG_USER_AGENT_ENV_VAR: &str = "npm_config_user_agent";
pub fn get_npm_config_user_agent() -> String {
format!(
"deno/{} npm/? deno/{} {} {}",
DENO_VERSION_INFO.deno,
DENO_VERSION_INFO.deno,
std::env::consts::OS,
std::env::consts::ARCH
)
}
/// Errors that can occur while running npm package lifecycle scripts
/// (preinstall/install/postinstall) via the deno task runner.
#[derive(Debug, thiserror::Error, deno_error::JsError)]
pub enum DenoTaskLifecycleScriptsError {
  /// Generic I/O failure while preparing or running scripts.
  #[class(inherit)]
  #[error(transparent)]
  Io(#[from] std::io::Error),
  /// Failure while setting up `node_modules/.bin` entries.
  #[class(inherit)]
  #[error(transparent)]
  BinEntries(#[from] deno_npm_installer::BinEntriesError),
  /// Could not create the temp file used to hand the npm resolution
  /// state to child processes.
  #[class(inherit)]
  #[error(
    "failed to create npm process state tempfile for running lifecycle scripts"
  )]
  CreateNpmProcessState(#[source] std::io::Error),
  /// The task runner itself failed to execute a script.
  #[class(generic)]
  #[error(transparent)]
  Task(AnyError),
  /// One or more packages' scripts exited with a non-zero exit code.
  #[class(generic)]
  #[error("failed to run scripts for packages: {}", .0.join(", "))]
  RunScripts(Vec<String>),
}
/// `LifecycleScriptsExecutor` implementation that runs npm lifecycle
/// scripts through deno's built-in task runner.
pub struct DenoTaskLifeCycleScriptsExecutor {
  // surfaces "<pkg>: running '<script>' script" progress messages
  progress_bar: ProgressBar,
  // resolves package ids to their folders within node_modules
  npm_resolver: ManagedNpmResolverRc<CliSys>,
}
#[async_trait::async_trait(?Send)]
impl LifecycleScriptsExecutor for DenoTaskLifeCycleScriptsExecutor {
  /// Runs the preinstall/install/postinstall scripts of each package in
  /// `options.packages_with_scripts` sequentially, then re-links the
  /// packages' bin entries. Returns `RunScripts` listing every package
  /// whose script chain failed (a failure does not abort the others).
  async fn execute(
    &self,
    options: LifecycleScriptsExecutorOptions<'_>,
  ) -> Result<(), AnyError> {
    let mut failed_packages = Vec::new();
    let sys = CliSys::default();
    let mut bin_entries = BinEntries::new(&sys);
    // get custom commands for each bin available in the node_modules dir (essentially
    // the scripts that are in `node_modules/.bin`)
    let base = self
      .resolve_baseline_custom_commands(
        options.extra_info_provider,
        &mut bin_entries,
        options.snapshot,
        options.system_packages,
      )
      .await;
    // we don't run with signals forwarded because once signals
    // are setup then they're process wide.
    let kill_signal = KillSignal::default();
    let _drop_signal = kill_signal.clone().drop_guard();
    let mut env_vars = crate::task_runner::real_env_vars();
    // so the subprocess can detect that it is running as part of a lifecycle script,
    // and avoid trying to set up node_modules again
    env_vars.insert(LIFECYCLE_SCRIPTS_RUNNING_ENV_VAR.into(), "1".into());
    // we want to pass the current state of npm resolution down to the deno subprocess
    // (that may be running as part of the script). we do this with an inherited temp file
    //
    // SAFETY: we are sharing a single temp file across all of the scripts. the file position
    // will be shared among these, which is okay since we run only one script at a time.
    // However, if we concurrently run scripts in the future we will
    // have to have multiple temp files.
    let temp_file_fd = deno_runtime::deno_process::npm_process_state_tempfile(
      options.process_state.as_bytes(),
    )
    .map_err(DenoTaskLifecycleScriptsError::CreateNpmProcessState)?;
    // SAFETY: fd/handle is valid
    let _temp_file = unsafe { std::fs::File::from_raw_io_handle(temp_file_fd) }; // make sure the file gets closed
    env_vars.insert(
      deno_runtime::deno_process::NPM_RESOLUTION_STATE_FD_ENV_VAR_NAME.into(),
      (temp_file_fd as usize).to_string().into(),
    );
    for PackageWithScript {
      package,
      scripts,
      package_folder,
    } in options.packages_with_scripts
    {
      // add custom commands for binaries from the package's dependencies. this will take precedence over the
      // baseline commands, so if the package relies on a bin that conflicts with one higher in the dependency tree, the
      // correct bin will be used.
      let custom_commands = self
        .resolve_custom_commands_from_deps(
          options.extra_info_provider,
          base.clone(),
          package,
          options.snapshot,
        )
        .await;
      // npm's standard lifecycle script ordering
      for script_name in ["preinstall", "install", "postinstall"] {
        if let Some(script) = scripts.get(script_name) {
          if script_name == "install"
            && is_broken_default_install_script(&sys, script, package_folder)
          {
            continue;
          }
          let _guard = self.progress_bar.update_with_prompt(
            ProgressMessagePrompt::Initialize,
            &format!("{}: running '{script_name}' script", package.id.nv),
          );
          let crate::task_runner::TaskResult {
            exit_code,
            stderr,
            stdout,
          } =
            crate::task_runner::run_task(crate::task_runner::RunTaskOptions {
              task_name: script_name,
              script,
              cwd: package_folder.clone(),
              env_vars: env_vars.clone(),
              custom_commands: custom_commands.clone(),
              init_cwd: options.init_cwd,
              argv: &[],
              root_node_modules_dir: Some(options.root_node_modules_dir_path),
              stdio: Some(crate::task_runner::TaskIo {
                stderr: TaskStdio::piped(),
                stdout: TaskStdio::piped(),
              }),
              kill_signal: kill_signal.clone(),
            })
            .await
            .map_err(DenoTaskLifecycleScriptsError::Task)?;
          // both streams were requested piped above, so always `Some`
          let stdout = stdout.unwrap();
          let stderr = stderr.unwrap();
          if exit_code != 0 {
            log::warn!(
              "error: script '{}' in '{}' failed with exit code {}{}{}",
              script_name,
              package.id.nv,
              exit_code,
              if !stdout.trim_ascii().is_empty() {
                format!(
                  "\nstdout:\n{}\n",
                  String::from_utf8_lossy(&stdout).trim()
                )
              } else {
                String::new()
              },
              if !stderr.trim_ascii().is_empty() {
                format!(
                  "\nstderr:\n{}\n",
                  String::from_utf8_lossy(&stderr).trim()
                )
              } else {
                String::new()
              },
            );
            failed_packages.push(&package.id.nv);
            // assume if earlier script fails, later ones will fail too
            break;
          }
        }
      }
      (options.on_ran_pkg_scripts)(package)?;
    }
    // re-set up bin entries for the packages which we've run scripts for.
    // lifecycle scripts can create files that are linked to by bin entries,
    // and the only reliable way to handle this is to re-link bin entries
    // (this is what PNPM does as well)
    let package_ids = options
      .packages_with_scripts
      .iter()
      .map(|p| &p.package.id)
      .collect::<HashSet<_>>();
    bin_entries.finish_only(
      options.snapshot,
      &options.root_node_modules_dir_path.join(".bin"),
      |outcome| outcome.warn_if_failed(),
      &package_ids,
    )?;
    if failed_packages.is_empty() {
      Ok(())
    } else {
      Err(
        DenoTaskLifecycleScriptsError::RunScripts(
          failed_packages
            .iter()
            .map(|p| p.to_string())
            .collect::<Vec<_>>(),
        )
        .into(),
      )
    }
  }
}
impl DenoTaskLifeCycleScriptsExecutor {
pub fn new(
npm_resolver: ManagedNpmResolverRc<CliSys>,
progress_bar: ProgressBar,
) -> Self {
Self {
npm_resolver,
progress_bar,
}
}
// take in all (non copy) packages from snapshot,
// and resolve the set of available binaries to create
// custom commands available to the task runner
async fn resolve_baseline_custom_commands<'a>(
&self,
extra_info_provider: &CachedNpmPackageExtraInfoProvider,
bin_entries: &mut BinEntries<'a, CliSys>,
snapshot: &'a NpmResolutionSnapshot,
packages: &'a [NpmResolutionPackage],
) -> crate::task_runner::TaskCustomCommands {
let mut custom_commands = crate::task_runner::TaskCustomCommands::new();
custom_commands
.insert("npx".to_string(), Rc::new(crate::task_runner::NpxCommand));
custom_commands
.insert("npm".to_string(), Rc::new(crate::task_runner::NpmCommand));
custom_commands
.insert("node".to_string(), Rc::new(crate::task_runner::NodeCommand));
custom_commands.insert(
"node-gyp".to_string(),
Rc::new(crate::task_runner::NodeGypCommand),
);
// TODO: this recreates the bin entries which could be redoing some work, but the ones
// we compute earlier in `sync_resolution_with_fs` may not be exhaustive (because we skip
// doing it for packages that are set up already.
// realistically, scripts won't be run very often so it probably isn't too big of an issue.
self
.resolve_custom_commands_from_packages(
extra_info_provider,
bin_entries,
custom_commands,
snapshot,
packages,
)
.await
}
// resolves the custom commands from an iterator of packages
// and adds them to the existing custom commands.
// note that this will overwrite any existing custom commands
async fn resolve_custom_commands_from_packages<
'a,
P: IntoIterator<Item = &'a NpmResolutionPackage>,
>(
&self,
extra_info_provider: &CachedNpmPackageExtraInfoProvider,
bin_entries: &mut BinEntries<'a, CliSys>,
mut commands: crate::task_runner::TaskCustomCommands,
snapshot: &'a NpmResolutionSnapshot,
packages: P,
) -> crate::task_runner::TaskCustomCommands {
for package in packages {
let Ok(package_path) = self
.npm_resolver
.resolve_pkg_folder_from_pkg_id(&package.id)
else {
continue;
};
let extra = if let Some(extra) = &package.extra {
Cow::Borrowed(extra)
} else {
let Ok(extra) = extra_info_provider
.get_package_extra_info(
&package.id.nv,
&package_path,
ExpectedExtraInfo::from_package(package),
)
.await
else {
continue;
};
Cow::Owned(extra)
};
if extra.bin.is_some() {
bin_entries.add(package, &extra, package_path);
}
}
let bins: Vec<(String, PathBuf)> = bin_entries.collect_bin_files(snapshot);
for (bin_name, script_path) in bins {
commands.insert(
bin_name.clone(),
Rc::new(crate::task_runner::NodeModulesFileRunCommand {
command_name: bin_name,
path: script_path,
}),
);
}
commands
}
// resolves the custom commands from the dependencies of a package
// and adds them to the existing custom commands.
// note that this will overwrite any existing custom commands.
async fn resolve_custom_commands_from_deps(
&self,
extra_info_provider: &CachedNpmPackageExtraInfoProvider,
baseline: crate::task_runner::TaskCustomCommands,
package: &NpmResolutionPackage,
snapshot: &NpmResolutionSnapshot,
) -> crate::task_runner::TaskCustomCommands {
let sys = CliSys::default();
let mut bin_entries = BinEntries::new(&sys);
self
.resolve_custom_commands_from_packages(
extra_info_provider,
&mut bin_entries,
baseline,
snapshot,
package
.dependencies
.values()
.map(|id| snapshot.package_from_id(id).unwrap()),
)
.await
}
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/jsr.rs | cli/jsr.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::sync::Arc;
use dashmap::DashMap;
use deno_core::serde_json;
use deno_graph::JsrPackageReqNotFoundError;
use deno_graph::packages::JsrPackageInfo;
use deno_graph::packages::JsrPackageVersionInfo;
use deno_graph::packages::JsrPackageVersionResolver;
use deno_graph::packages::JsrVersionResolver;
use deno_semver::package::PackageName;
use deno_semver::package::PackageNv;
use deno_semver::package::PackageReq;
use crate::args::jsr_url;
use crate::file_fetcher::CliFileFetcher;
/// This is similar to a subset of `JsrCacheResolver` which fetches rather than
/// just reads the cache. Keep in sync!
#[derive(Debug)]
pub struct JsrFetchResolver {
  // cache of resolved requirement -> concrete name/version (None = not found)
  nv_by_req: DashMap<PackageReq, Option<PackageNv>>,
  /// The `module_graph` field of the version infos should be forcibly absent.
  /// It can be large and we don't want to store it.
  info_by_nv: DashMap<PackageNv, Option<Arc<JsrPackageVersionInfo>>>,
  // cache of package name -> registry meta.json info (None = fetch failed)
  info_by_name: DashMap<String, Option<Arc<JsrPackageInfo>>>,
  file_fetcher: Arc<CliFileFetcher>,
  jsr_version_resolver: Arc<JsrVersionResolver>,
}
impl JsrFetchResolver {
  /// Creates a resolver with empty caches.
  pub fn new(
    file_fetcher: Arc<CliFileFetcher>,
    jsr_version_resolver: Arc<JsrVersionResolver>,
  ) -> Self {
    Self {
      nv_by_req: Default::default(),
      info_by_nv: Default::default(),
      info_by_name: Default::default(),
      file_fetcher,
      jsr_version_resolver,
    }
  }
  /// Returns a per-package version resolver for the given package info.
  pub fn version_resolver_for_package<'a>(
    &'a self,
    name: &PackageName,
    info: &'a JsrPackageInfo,
  ) -> JsrPackageVersionResolver<'a> {
    self.jsr_version_resolver.get_for_package(name, info)
  }
  /// Resolves a package requirement to a concrete name/version pair,
  /// consulting and populating the `nv_by_req` cache. When resolution
  /// fails against possibly-stale cached package info, the info is
  /// force-refreshed once and resolution retried before erroring.
  pub async fn req_to_nv(
    &self,
    req: &PackageReq,
  ) -> Result<Option<PackageNv>, JsrPackageReqNotFoundError> {
    if let Some(nv) = self.nv_by_req.get(req) {
      return Ok(nv.value().clone());
    }
    let maybe_get_nv = || async {
      let name = req.name.clone();
      let package_info = self.package_info(&name).await;
      let Some(package_info) = package_info else {
        log::debug!("no package info found for jsr:{name}");
        return Ok(None);
      };
      // Find the first matching version of the package.
      let version_resolver = self
        .jsr_version_resolver
        .get_for_package(&req.name, &package_info);
      // empty iterator: no preferred versions to bias towards
      let version =
        version_resolver.resolve_version(req, Vec::new().into_iter());
      let version = if let Ok(version) = version {
        version.version.clone()
      } else {
        // the cached info may be out of date; bypass the cache and retry
        let package_info = self.force_refresh_package_info(&name).await;
        let Some(package_info) = package_info else {
          log::debug!("no package info found for jsr:{name}");
          return Ok(None);
        };
        let version_resolver = self
          .jsr_version_resolver
          .get_for_package(&req.name, &package_info);
        version_resolver
          .resolve_version(req, Vec::new().into_iter())?
          .version
          .clone()
      };
      Ok(Some(PackageNv { name, version }))
    };
    let nv = maybe_get_nv().await?;
    self.nv_by_req.insert(req.clone(), nv.clone());
    Ok(nv)
  }
  /// Re-fetches a package's `meta.json`, bypassing the HTTP cache, and
  /// replaces the `info_by_name` entry. Returns `None` (leaving the
  /// cache untouched) when the fetch or deserialization fails.
  pub async fn force_refresh_package_info(
    &self,
    name: &str,
  ) -> Option<Arc<JsrPackageInfo>> {
    let meta_url = self.meta_url(name)?;
    let file_fetcher = self.file_fetcher.clone();
    let file = file_fetcher
      .fetch_with_options(
        &meta_url,
        deno_resolver::file_fetcher::FetchPermissionsOptionRef::AllowAll,
        deno_resolver::file_fetcher::FetchOptions {
          maybe_cache_setting: Some(
            &deno_cache_dir::file_fetcher::CacheSetting::ReloadAll,
          ),
          ..Default::default()
        },
      )
      .await
      .ok()?;
    let info = serde_json::from_slice::<JsrPackageInfo>(&file.source).ok()?;
    let info = Arc::new(info);
    self
      .info_by_name
      .insert(name.to_string(), Some(info.clone()));
    Some(info)
  }
  /// URL of the registry `meta.json` for a package name.
  fn meta_url(&self, name: &str) -> Option<deno_core::url::Url> {
    jsr_url().join(&format!("{}/meta.json", name)).ok()
  }
  // todo(dsherret): this should return error messages and only `None` when the package
  // doesn't exist
  /// Fetches (or returns the cached) `meta.json` info for a package.
  /// Failures are collapsed into `None` and cached as such.
  pub async fn package_info(&self, name: &str) -> Option<Arc<JsrPackageInfo>> {
    if let Some(info) = self.info_by_name.get(name) {
      return info.value().clone();
    }
    let fetch_package_info = || async {
      let meta_url = self.meta_url(name)?;
      let file = self
        .file_fetcher
        .fetch_bypass_permissions(&meta_url)
        .await
        .ok()?;
      serde_json::from_slice::<JsrPackageInfo>(&file.source).ok()
    };
    let info = fetch_package_info().await.map(Arc::new);
    self.info_by_name.insert(name.to_string(), info.clone());
    info
  }
  /// Fetches (or returns the cached) `<version>_meta.json` info for a
  /// concrete package version. The stored info intentionally omits the
  /// module graph (see `partial_jsr_package_version_info_from_slice`).
  pub async fn package_version_info(
    &self,
    nv: &PackageNv,
  ) -> Option<Arc<JsrPackageVersionInfo>> {
    if let Some(info) = self.info_by_nv.get(nv) {
      return info.value().clone();
    }
    let fetch_package_version_info = || async {
      let meta_url = jsr_url()
        .join(&format!("{}/{}_meta.json", &nv.name, &nv.version))
        .ok()?;
      let file_fetcher = self.file_fetcher.clone();
      let file = file_fetcher
        .fetch_bypass_permissions(&meta_url)
        .await
        .ok()?;
      partial_jsr_package_version_info_from_slice(&file.source).ok()
    };
    let info = fetch_package_version_info().await.map(Arc::new);
    self.info_by_nv.insert(nv.clone(), info.clone());
    info
  }
}
/// This is a roundabout way of deserializing `JsrPackageVersionInfo`,
/// because we only want the `exports` field and `module_graph` is large.
pub fn partial_jsr_package_version_info_from_slice(
  slice: &[u8],
) -> serde_json::Result<JsrPackageVersionInfo> {
  let mut raw = serde_json::from_slice::<serde_json::Value>(slice)?;
  // pull out just the `exports` entry; everything else is discarded
  // (non-object input or a missing key both yield the default value)
  let exports = match raw.as_object_mut() {
    Some(obj) => obj.remove("exports").unwrap_or_default(),
    None => Default::default(),
  };
  Ok(JsrPackageVersionInfo {
    // not used by the LSP (only caching checks this in deno_graph)
    manifest: Default::default(),
    exports,
    module_graph_1: None,
    module_graph_2: None,
    lockfile_checksum: None,
  })
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/factory.rs | cli/factory.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::borrow::Cow;
use std::future::Future;
use std::path::Path;
use std::path::PathBuf;
use std::sync::Arc;
use deno_bundle_runtime::BundlePlatform;
use deno_cache_dir::GlobalOrLocalHttpCache;
use deno_cache_dir::npm::NpmCacheDir;
use deno_config::workspace::WorkspaceDirectory;
use deno_core::anyhow::Context;
use deno_core::error::AnyError;
use deno_core::futures::FutureExt;
use deno_core::serde_json;
use deno_core::url::Url;
use deno_error::JsErrorBox;
use deno_graph::packages::JsrVersionResolver;
use deno_lib::args::CaData;
use deno_lib::args::get_root_cert_store;
use deno_lib::args::npm_process_state;
use deno_lib::npm::NpmRegistryReadPermissionChecker;
use deno_lib::npm::NpmRegistryReadPermissionCheckerMode;
use deno_lib::npm::create_npm_process_state_provider;
use deno_lib::worker::LibMainWorkerFactory;
use deno_lib::worker::LibMainWorkerOptions;
use deno_lib::worker::LibWorkerFactoryRoots;
use deno_npm::npm_rc::ResolvedNpmRc;
use deno_npm::resolution::NpmVersionResolver;
use deno_npm_cache::NpmCacheSetting;
use deno_npm_installer::NpmInstallerFactoryOptions;
use deno_npm_installer::lifecycle_scripts::LifecycleScriptsExecutor;
use deno_npm_installer::lifecycle_scripts::NullLifecycleScriptsExecutor;
use deno_npm_installer::process_state::NpmProcessStateKind;
use deno_resolver::cache::ParsedSourceCache;
use deno_resolver::cjs::IsCjsResolutionMode;
use deno_resolver::deno_json::CompilerOptionsOverrides;
use deno_resolver::deno_json::CompilerOptionsResolver;
use deno_resolver::factory::ConfigDiscoveryOption;
use deno_resolver::factory::NpmProcessStateOptions;
use deno_resolver::factory::ResolverFactoryOptions;
use deno_resolver::factory::SpecifiedImportMapProvider;
use deno_resolver::import_map::WorkspaceExternalImportMapLoader;
use deno_resolver::loader::MemoryFiles;
use deno_resolver::npm::DenoInNpmPackageChecker;
use deno_resolver::workspace::WorkspaceResolver;
use deno_runtime::FeatureChecker;
use deno_runtime::deno_fs;
use deno_runtime::deno_fs::RealFs;
use deno_runtime::deno_permissions::Permissions;
use deno_runtime::deno_permissions::PermissionsContainer;
use deno_runtime::deno_tls::RootCertStoreProvider;
use deno_runtime::deno_tls::rustls::RootCertStore;
use deno_runtime::deno_web::BlobStore;
use deno_runtime::inspector_server::InspectorServer;
use deno_runtime::permissions::RuntimePermissionDescriptorParser;
use node_resolver::NodeConditionOptions;
use node_resolver::NodeResolverOptions;
use node_resolver::cache::NodeResolutionThreadLocalCache;
use once_cell::sync::OnceCell;
use sys_traits::EnvCurrentDir;
use crate::args::BundleFlags;
use crate::args::CliLockfile;
use crate::args::CliOptions;
use crate::args::ConfigFlag;
use crate::args::DenoSubcommand;
use crate::args::Flags;
use crate::args::InstallFlags;
use crate::cache::Caches;
use crate::cache::CodeCache;
use crate::cache::DenoDir;
use crate::cache::GlobalHttpCache;
use crate::cache::ModuleInfoCache;
use crate::cache::SqliteNodeAnalysisCache;
use crate::file_fetcher::CliFileFetcher;
use crate::file_fetcher::CreateCliFileFetcherOptions;
use crate::file_fetcher::TextDecodedFile;
use crate::file_fetcher::create_cli_file_fetcher;
use crate::graph_container::MainModuleGraphContainer;
use crate::graph_util::FileWatcherReporter;
use crate::graph_util::ModuleGraphBuilder;
use crate::graph_util::ModuleGraphCreator;
use crate::http_util::HttpClientProvider;
use crate::module_loader::CliEmitter;
use crate::module_loader::CliModuleLoaderFactory;
use crate::module_loader::EszipModuleLoader;
use crate::module_loader::ModuleLoadPreparer;
use crate::node::CliNodeResolver;
use crate::node::CliPackageJsonResolver;
use crate::npm::CliNpmCache;
use crate::npm::CliNpmCacheHttpClient;
use crate::npm::CliNpmGraphResolver;
use crate::npm::CliNpmInstaller;
use crate::npm::CliNpmInstallerFactory;
use crate::npm::CliNpmResolver;
use crate::npm::DenoTaskLifeCycleScriptsExecutor;
use crate::resolver::CliCjsTracker;
use crate::resolver::CliResolver;
use crate::resolver::on_resolve_diagnostic;
use crate::standalone::binary::DenoCompileBinaryWriter;
use crate::sys::CliSys;
use crate::tools::installer::BinNameResolver;
use crate::tools::lint::LintRuleProvider;
use crate::tools::run::hmr::HmrRunnerState;
use crate::tsc::TypeCheckingCjsTracker;
use crate::type_checker::TypeChecker;
use crate::util::file_watcher::WatcherCommunicator;
use crate::util::progress_bar::ProgressBar;
use crate::util::progress_bar::ProgressBarStyle;
use crate::worker::CliMainWorkerFactory;
use crate::worker::CliMainWorkerOptions;
/// Lazily builds (and caches) the root TLS certificate store from the
/// CLI-supplied certificate options.
struct CliRootCertStoreProvider {
  // lazily-initialized store; built at most once
  cell: OnceCell<RootCertStore>,
  maybe_root_path: Option<PathBuf>,
  maybe_ca_stores: Option<Vec<String>>,
  maybe_ca_data: Option<CaData>,
}
impl CliRootCertStoreProvider {
pub fn new(
maybe_root_path: Option<PathBuf>,
maybe_ca_stores: Option<Vec<String>>,
maybe_ca_data: Option<CaData>,
) -> Self {
Self {
cell: Default::default(),
maybe_root_path,
maybe_ca_stores,
maybe_ca_data,
}
}
}
impl RootCertStoreProvider for CliRootCertStoreProvider {
  /// Builds the root certificate store on first call and returns the
  /// cached store thereafter.
  fn get_or_try_init(&self) -> Result<&RootCertStore, JsErrorBox> {
    let build_store = || {
      get_root_cert_store(
        self.maybe_root_path.clone(),
        self.maybe_ca_stores.clone(),
        self.maybe_ca_data.clone(),
      )
    };
    self
      .cell
      .get_or_try_init(build_store)
      .map_err(JsErrorBox::from_err)
  }
}
/// Lazily creates the `EszipModuleLoader` for `deno run` when the eszip
/// option is enabled; otherwise yields nothing.
#[derive(Debug)]
struct EszipModuleLoaderProvider {
  cli_options: Arc<CliOptions>,
  // initialized at most once on the first successful `get()`
  deferred: once_cell::sync::OnceCell<Arc<EszipModuleLoader>>,
}
impl EszipModuleLoaderProvider {
  /// Returns the eszip module loader, creating it on first call.
  /// Yields `None` unless the eszip option is enabled and the current
  /// subcommand is `deno run`.
  pub async fn get(&self) -> Result<Option<&Arc<EszipModuleLoader>>, AnyError> {
    if !self.cli_options.eszip() {
      return Ok(None);
    }
    let DenoSubcommand::Run(run_flags) = self.cli_options.sub_command() else {
      return Ok(None);
    };
    if self.deferred.get().is_none() {
      let loader = EszipModuleLoader::create(
        &run_flags.script,
        self.cli_options.initial_cwd(),
      )
      .await?;
      _ = self.deferred.set(Arc::new(loader));
    }
    Ok(Some(self.deferred.get().unwrap()))
  }
}
/// Supplies the user-specified import map (from flags/config, an eszip
/// bundle, or the workspace's external import map file).
#[derive(Debug)]
struct CliSpecifiedImportMapProvider {
  cli_options: Arc<CliOptions>,
  file_fetcher: Arc<CliFileFetcher>,
  eszip_module_loader_provider: Arc<EszipModuleLoaderProvider>,
  workspace_external_import_map_loader:
    Arc<WorkspaceExternalImportMapLoader<CliSys>>,
}
#[async_trait::async_trait(?Send)]
impl SpecifiedImportMapProvider for CliSpecifiedImportMapProvider {
  /// Resolves the import map the user specified, if any. When a
  /// specifier was given, its JSON is loaded from the eszip bundle (if
  /// running one) or fetched; otherwise the workspace's external import
  /// map file is used as a fallback.
  async fn get(
    &self,
  ) -> Result<Option<deno_resolver::workspace::SpecifiedImportMap>, AnyError>
  {
    // Loads the JSON value of an import map from a data URL or by
    // fetching the specifier.
    async fn resolve_import_map_value_from_specifier(
      specifier: &Url,
      file_fetcher: &CliFileFetcher,
    ) -> Result<serde_json::Value, AnyError> {
      if specifier.scheme() == "data" {
        let data_url_text =
          deno_media_type::data_url::RawDataUrl::parse(specifier)?.decode()?;
        Ok(serde_json::from_str(&data_url_text)?)
      } else {
        let file = TextDecodedFile::decode(
          file_fetcher.fetch_bypass_permissions(specifier).await?,
        )?;
        Ok(serde_json::from_str(&file.source)?)
      }
    }
    let maybe_import_map_specifier =
      self.cli_options.resolve_specified_import_map_specifier()?;
    match maybe_import_map_specifier {
      Some(specifier) => {
        let value = match self.eszip_module_loader_provider.get().await? {
          Some(eszip) => eszip.load_import_map_value(&specifier)?,
          None => resolve_import_map_value_from_specifier(
            &specifier,
            &self.file_fetcher,
          )
          .await
          .with_context(|| {
            format!("Unable to load '{}' import map", specifier)
          })?,
        };
        Ok(Some(deno_resolver::workspace::SpecifiedImportMap {
          base_url: specifier,
          value,
        }))
      }
      None => {
        if let Some(import_map) =
          self.workspace_external_import_map_loader.get_or_load()?
        {
          let path_url = deno_path_util::url_from_file_path(&import_map.path)?;
          Ok(Some(deno_resolver::workspace::SpecifiedImportMap {
            base_url: path_url,
            value: import_map.value.clone(),
          }))
        } else {
          Ok(None)
        }
      }
    }
  }
}
pub type CliWorkspaceFactory = deno_resolver::factory::WorkspaceFactory<CliSys>;
pub type CliResolverFactory = deno_resolver::factory::ResolverFactory<CliSys>;
/// Single-threaded lazily-initialized cell used for the factory's
/// memoized services (see `CliFactoryServices`).
pub struct Deferred<T>(once_cell::unsync::OnceCell<T>);
impl<T> Default for Deferred<T> {
fn default() -> Self {
Self(once_cell::unsync::OnceCell::default())
}
}
impl<T: std::fmt::Debug> std::fmt::Debug for Deferred<T> {
  /// Formats as `Deferred(<inner cell>)`.
  fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
    let mut tuple = f.debug_tuple("Deferred");
    tuple.field(&self.0);
    tuple.finish()
  }
}
impl<T> Deferred<T> {
  /// Returns the value, initializing it via `create` on first call;
  /// an initialization error leaves the cell empty.
  #[inline(always)]
  pub fn get_or_try_init(
    &self,
    create: impl FnOnce() -> Result<T, AnyError>,
  ) -> Result<&T, AnyError> {
    self.0.get_or_try_init(create)
  }
  /// Returns the value, initializing it via `create` on first call.
  #[inline(always)]
  pub fn get_or_init(&self, create: impl FnOnce() -> T) -> &T {
    self.0.get_or_init(create)
  }
  /// Async variant: awaits `create` only when the cell is still empty.
  pub async fn get_or_try_init_async(
    &self,
    // some futures passed here are boxed because it was discovered
    // that they were called a lot, causing other futures to get
    // really big causing stack overflows on Windows
    create: impl Future<Output = Result<T, AnyError>>,
  ) -> Result<&T, AnyError> {
    if self.0.get().is_none() {
      // todo(dsherret): it would be more ideal if this enforced a
      // single executor and then we could make some initialization
      // concurrent
      let val = create.await?;
      _ = self.0.set(val);
    }
    Ok(self.0.get().unwrap())
  }
}
/// All lazily-created services memoized by `CliFactory`. Each
/// `Deferred` field is constructed on first access; `memory_files` is
/// the one eagerly-created (default) member.
#[derive(Default)]
struct CliFactoryServices {
  blob_store: Deferred<Arc<BlobStore>>,
  caches: Deferred<Arc<Caches>>,
  cli_options: Deferred<Arc<CliOptions>>,
  code_cache: Deferred<Arc<CodeCache>>,
  eszip_module_loader_provider: Deferred<Arc<EszipModuleLoaderProvider>>,
  feature_checker: Deferred<Arc<FeatureChecker>>,
  file_fetcher: Deferred<Arc<CliFileFetcher>>,
  fs: Deferred<Arc<dyn deno_fs::FileSystem>>,
  http_client_provider: Deferred<Arc<HttpClientProvider>>,
  main_graph_container: Deferred<Arc<MainModuleGraphContainer>>,
  graph_reporter: Deferred<Option<Arc<dyn deno_graph::source::Reporter>>>,
  maybe_inspector_server: Deferred<Option<Arc<InspectorServer>>>,
  memory_files: Arc<MemoryFiles>,
  module_graph_builder: Deferred<Arc<ModuleGraphBuilder>>,
  module_graph_creator: Deferred<Arc<ModuleGraphCreator>>,
  module_info_cache: Deferred<Arc<ModuleInfoCache>>,
  module_load_preparer: Deferred<Arc<ModuleLoadPreparer>>,
  npm_installer_factory: Deferred<CliNpmInstallerFactory>,
  permission_desc_parser:
    Deferred<Arc<RuntimePermissionDescriptorParser<CliSys>>>,
  resolver_factory: Deferred<Arc<CliResolverFactory>>,
  root_cert_store_provider: Deferred<Arc<dyn RootCertStoreProvider>>,
  root_permissions_container: Deferred<PermissionsContainer>,
  text_only_progress_bar: Deferred<ProgressBar>,
  type_checker: Deferred<Arc<TypeChecker>>,
  workspace_factory: Deferred<Arc<CliWorkspaceFactory>>,
  install_reporter:
    Deferred<Option<Arc<crate::tools::installer::InstallReporter>>>,
}
/// Values a caller may force before services are created (otherwise
/// they are discovered from the environment/config).
#[derive(Debug, Default)]
struct CliFactoryOverrides {
  initial_cwd: Option<PathBuf>,
  workspace_directory: Option<Arc<WorkspaceDirectory>>,
}
/// Lazily constructs and memoizes the CLI's service graph from parsed
/// flags; services are created on first use and shared thereafter.
pub struct CliFactory {
  // present only when created via `from_flags_for_watcher`
  watcher_communicator: Option<Arc<WatcherCommunicator>>,
  flags: Arc<Flags>,
  services: CliFactoryServices,
  overrides: CliFactoryOverrides,
}
impl CliFactory {
pub fn from_flags(flags: Arc<Flags>) -> Self {
Self {
flags,
watcher_communicator: None,
services: Default::default(),
overrides: Default::default(),
}
}
pub fn from_flags_for_watcher(
flags: Arc<Flags>,
watcher_communicator: Arc<WatcherCommunicator>,
) -> Self {
CliFactory {
watcher_communicator: Some(watcher_communicator),
flags,
services: Default::default(),
overrides: Default::default(),
}
}
pub fn set_initial_cwd(&mut self, initial_cwd: PathBuf) {
self.overrides.initial_cwd = Some(initial_cwd);
}
pub fn set_workspace_dir(&mut self, dir: Arc<WorkspaceDirectory>) {
self.overrides.workspace_directory = Some(dir);
}
  /// The resolved lockfile, if the workspace uses one.
  pub async fn maybe_lockfile(
    &self,
  ) -> Result<Option<&Arc<CliLockfile>>, AnyError> {
    self.npm_installer_factory()?.maybe_lockfile().await
  }
  /// Lazily resolves the CLI options from the flags plus the discovered
  /// workspace directory.
  pub fn cli_options(&self) -> Result<&Arc<CliOptions>, AnyError> {
    self.services.cli_options.get_or_try_init(|| {
      let workspace_factory = self.workspace_factory()?;
      let workspace_directory = workspace_factory.workspace_directory()?;
      CliOptions::from_flags(
        self.flags.clone(),
        workspace_factory.initial_cwd().clone(),
        workspace_directory.clone(),
      )
      .map(Arc::new)
    })
  }
  /// The deno cache directory, created on demand.
  pub fn deno_dir(&self) -> Result<&DenoDir, AnyError> {
    Ok(
      self
        .workspace_factory()?
        .deno_dir_provider()
        .get_or_create()?,
    )
  }
  /// Lazily creates the caches, pre-opening the databases the current
  /// subcommand is likely to need so first use is fast.
  pub fn caches(&self) -> Result<&Arc<Caches>, AnyError> {
    self.services.caches.get_or_try_init(|| {
      let cli_options = self.cli_options()?;
      let caches = Arc::new(Caches::new(
        self.workspace_factory()?.deno_dir_provider().clone(),
      ));
      // Warm up the caches we know we'll likely need based on the CLI mode
      match cli_options.sub_command() {
        DenoSubcommand::Run(_)
        | DenoSubcommand::Serve(_)
        | DenoSubcommand::Bench(_)
        | DenoSubcommand::Test(_)
        | DenoSubcommand::Check(_) => {
          _ = caches.dep_analysis_db();
          _ = caches.node_analysis_db();
          if cli_options.type_check_mode().is_true() {
            _ = caches.fast_check_db();
            _ = caches.type_checking_cache_db();
          }
          if cli_options.code_cache_enabled() {
            _ = caches.code_cache_db();
          }
        }
        _ => {}
      }
      Ok(caches)
    })
  }
  /// Shared in-memory blob store (for `blob:` URLs).
  pub fn blob_store(&self) -> &Arc<BlobStore> {
    self.services.blob_store.get_or_init(Default::default)
  }
  /// Resolver for mapping npm package names to binary names on install.
  pub fn bin_name_resolver(&self) -> Result<BinNameResolver<'_>, AnyError> {
    let http_client = self.http_client_provider();
    let npm_api = self.npm_installer_factory()?.registry_info_provider()?;
    Ok(BinNameResolver::new(
      http_client,
      npm_api.as_ref(),
      self.npm_version_resolver()?,
    ))
  }
  /// Root TLS certificate store built from the CLI cert flags.
  pub fn root_cert_store_provider(&self) -> &Arc<dyn RootCertStoreProvider> {
    self.services.root_cert_store_provider.get_or_init(|| {
      Arc::new(CliRootCertStoreProvider::new(
        None,
        self.flags.ca_stores.clone(),
        self.flags.ca_data.clone(),
      ))
    })
  }
  /// Progress bar that renders textual output only.
  pub fn text_only_progress_bar(&self) -> &ProgressBar {
    self
      .services
      .text_only_progress_bar
      .get_or_init(|| ProgressBar::new(ProgressBarStyle::TextOnly))
  }
  /// Global (user-wide) HTTP cache.
  pub fn global_http_cache(&self) -> Result<&Arc<GlobalHttpCache>, AnyError> {
    Ok(self.workspace_factory()?.global_http_cache()?)
  }
  /// HTTP cache for remote modules (global or local, per workspace).
  pub fn http_cache(
    &self,
  ) -> Result<&GlobalOrLocalHttpCache<CliSys>, AnyError> {
    Ok(self.workspace_factory()?.http_cache()?)
  }
  /// HTTP client provider honoring the CLI TLS/certificate flags.
  pub fn http_client_provider(&self) -> &Arc<HttpClientProvider> {
    self.services.http_client_provider.get_or_init(|| {
      Arc::new(HttpClientProvider::new(
        Some(self.root_cert_store_provider().clone()),
        self.flags.unsafely_ignore_certificate_errors.clone(),
      ))
    })
  }
  /// Provider that lazily loads an eszip bundle for `deno run`, if any.
  fn eszip_module_loader_provider(
    &self,
  ) -> Result<&Arc<EszipModuleLoaderProvider>, AnyError> {
    self
      .services
      .eszip_module_loader_provider
      .get_or_try_init(|| {
        Ok(Arc::new(EszipModuleLoaderProvider {
          cli_options: self.cli_options()?.clone(),
          deferred: Default::default(),
        }))
      })
  }
  /// File fetcher for local/remote/data/blob specifiers.
  pub fn file_fetcher(&self) -> Result<&Arc<CliFileFetcher>, AnyError> {
    self.services.file_fetcher.get_or_try_init(|| {
      let cli_options = self.cli_options()?;
      Ok(Arc::new(create_cli_file_fetcher(
        self.blob_store().clone(),
        self.http_cache()?.clone(),
        self.http_client_provider().clone(),
        self.services.memory_files.clone(),
        self.sys(),
        CreateCliFileFetcherOptions {
          allow_remote: !cli_options.no_remote(),
          cache_setting: cli_options.cache_setting(),
          download_log_level: log::Level::Info,
          progress_bar: Some(self.text_only_progress_bar().clone()),
        },
      )))
    })
  }
  /// The real (OS-backed) file system implementation.
  pub fn fs(&self) -> &Arc<dyn deno_fs::FileSystem> {
    self.services.fs.get_or_init(|| Arc::new(RealFs))
  }
  /// In-memory file overlay shared with the file fetcher.
  pub fn memory_files(&self) -> &Arc<MemoryFiles> {
    &self.services.memory_files
  }
  /// System abstraction used throughout the CLI.
  pub fn sys(&self) -> CliSys {
    CliSys::default() // very cheap to make
  }
  /// Checker for whether a specifier belongs to an npm package.
  pub fn in_npm_pkg_checker(
    &self,
  ) -> Result<&DenoInNpmPackageChecker, AnyError> {
    self.resolver_factory()?.in_npm_package_checker()
  }
  /// Downloads (if necessary) and returns the path to the tsgo binary
  /// when the unstable tsgo option is enabled; `None` otherwise.
  pub async fn tsgo_path(&self) -> Result<Option<&PathBuf>, AnyError> {
    if self.cli_options()?.unstable_tsgo() {
      Ok(Some(
        crate::tsc::ensure_tsgo(
          self.deno_dir()?,
          self.http_client_provider().clone(),
        )
        .await?,
      ))
    } else {
      Ok(None)
    }
  }
  /// Version resolver for jsr packages.
  pub fn jsr_version_resolver(
    &self,
  ) -> Result<&Arc<JsrVersionResolver>, AnyError> {
    self.resolver_factory()?.jsr_version_resolver()
  }
  pub fn npm_cache(&self) -> Result<&Arc<CliNpmCache>, AnyError> {
    self.npm_installer_factory()?.npm_cache()
  }
  pub fn npm_cache_dir(&self) -> Result<&Arc<NpmCacheDir>, AnyError> {
    Ok(self.workspace_factory()?.npm_cache_dir()?)
  }
  /// The resolved `.npmrc` configuration for the workspace.
  pub fn npmrc(&self) -> Result<&Arc<ResolvedNpmRc>, AnyError> {
    Ok(self.workspace_factory()?.npmrc()?)
  }
  /// Resolver used by deno_graph for npm specifiers.
  pub async fn npm_graph_resolver(
    &self,
  ) -> Result<&Arc<CliNpmGraphResolver>, AnyError> {
    self
      .npm_installer_factory()?
      .npm_deno_graph_resolver()
      .await
  }
  /// The npm installer, but only when npm resolution is managed.
  pub async fn npm_installer_if_managed(
    &self,
  ) -> Result<Option<&Arc<CliNpmInstaller>>, AnyError> {
    self
      .npm_installer_factory()?
      .npm_installer_if_managed()
      .await
  }
  /// Lazily builds the npm installer factory, wiring in the lifecycle
  /// scripts executor (deno-task based when npm resolution is managed,
  /// otherwise a no-op), progress reporting, and caching strategy.
  pub fn npm_installer_factory(
    &self,
  ) -> Result<&CliNpmInstallerFactory, AnyError> {
    self.services.npm_installer_factory.get_or_try_init(|| {
      let cli_options = self.cli_options()?;
      let resolver_factory = self.resolver_factory()?;
      Ok(CliNpmInstallerFactory::new(
        resolver_factory.clone(),
        Arc::new(CliNpmCacheHttpClient::new(
          self.http_client_provider().clone(),
          self.text_only_progress_bar().clone(),
        )),
        match resolver_factory.npm_resolver()?.as_managed() {
          Some(managed_npm_resolver) => {
            Arc::new(DenoTaskLifeCycleScriptsExecutor::new(
              managed_npm_resolver.clone(),
              self.text_only_progress_bar().clone(),
            )) as Arc<dyn LifecycleScriptsExecutor>
          }
          None => Arc::new(NullLifecycleScriptsExecutor),
        },
        self.text_only_progress_bar().clone(),
        self
          .install_reporter()?
          .cloned()
          .map(|r| r as Arc<dyn deno_npm_installer::InstallReporter>),
        NpmInstallerFactoryOptions {
          cache_setting: NpmCacheSetting::from_cache_setting(
            &cli_options.cache_setting(),
          ),
          caching_strategy: cli_options.default_npm_caching_strategy(),
          lifecycle_scripts_config: cli_options.lifecycle_scripts_config(),
          resolve_npm_resolution_snapshot: Box::new(|| {
            deno_lib::args::resolve_npm_resolution_snapshot(&CliSys::default())
          }),
        },
      ))
    })
  }
/// Returns the resolver for npm package versions.
pub fn npm_version_resolver(
  &self,
) -> Result<&Arc<NpmVersionResolver>, AnyError> {
  self.resolver_factory()?.npm_version_resolver()
}
/// Lazily creates an install reporter, but only for subcommands that
/// install packages (`install` local, `add`, `cache`); `Ok(None)` otherwise.
pub fn install_reporter(
  &self,
) -> Result<Option<&Arc<crate::tools::installer::InstallReporter>>, AnyError>
{
  self
    .services
    .install_reporter
    .get_or_try_init(|| match self.cli_options()?.sub_command() {
      DenoSubcommand::Install(InstallFlags::Local(_))
      | DenoSubcommand::Add(_)
      | DenoSubcommand::Cache(_) => Ok(Some(Arc::new(
        crate::tools::installer::InstallReporter::new(),
      ))),
      _ => Ok(None),
    })
    .map(|opt| opt.as_ref())
}
/// Returns the npm installer, creating it on first use.
pub async fn npm_installer(&self) -> Result<&Arc<CliNpmInstaller>, AnyError> {
  self.npm_installer_factory()?.npm_installer().await
}
/// Returns the npm resolver, first making sure managed npm resolution
/// (if any) has been initialized.
pub async fn npm_resolver(&self) -> Result<&CliNpmResolver, AnyError> {
  self.initialize_npm_resolution_if_managed().await?;
  self.resolver_factory()?.npm_resolver()
}
/// Lazily creates the workspace factory. The initial cwd is taken from
/// overrides first, then flags, then the process working directory.
fn workspace_factory(&self) -> Result<&Arc<CliWorkspaceFactory>, AnyError> {
  self.services.workspace_factory.get_or_try_init(|| {
    let initial_cwd = match self.overrides.initial_cwd.clone() {
      Some(v) => v,
      None => {
        if let Some(initial_cwd) = self.flags.initial_cwd.clone() {
          initial_cwd
        } else {
          self
            .sys()
            .env_current_dir()
            .with_context(|| "Failed getting cwd.")?
        }
      }
    };
    let options = new_workspace_factory_options(&initial_cwd, &self.flags);
    let mut factory =
      CliWorkspaceFactory::new(self.sys(), initial_cwd, options);
    if let Some(workspace_dir) = &self.overrides.workspace_directory {
      factory.set_workspace_directory(workspace_dir.clone());
    }
    Ok(Arc::new(factory))
  })
}
/// Returns the workspace resolver after ensuring managed npm resolution
/// has been initialized.
pub async fn workspace_resolver(
  &self,
) -> Result<&Arc<WorkspaceResolver<CliSys>>, AnyError> {
  self.initialize_npm_resolution_if_managed().await?;
  self.resolver_factory()?.workspace_resolver().await
}
/// Returns the main Deno resolver after ensuring managed npm resolution
/// has been initialized.
pub async fn resolver(&self) -> Result<&Arc<CliResolver>, AnyError> {
  self.initialize_npm_resolution_if_managed().await?;
  self.resolver_factory()?.deno_resolver().await
}
/// Returns the optional graph load reporter. Install subcommands report
/// via the install reporter; everything else reports file-watcher progress
/// when a watcher communicator is present.
pub fn graph_reporter(
  &self,
) -> Result<&Option<Arc<dyn deno_graph::source::Reporter>>, AnyError> {
  match self.cli_options()?.sub_command() {
    DenoSubcommand::Install(_) => {
      self.services.graph_reporter.get_or_try_init(|| {
        self.install_reporter().map(|opt| {
          opt.map(|r| r.clone() as Arc<dyn deno_graph::source::Reporter>)
        })
      })
    }
    _ => Ok(self.services.graph_reporter.get_or_init(|| {
      self
        .watcher_communicator
        .as_ref()
        .map(|i| FileWatcherReporter::new(i.clone()))
        .map(|i| Arc::new(i) as Arc<dyn deno_graph::source::Reporter>)
    })),
  }
}
/// Lazily builds the module info cache backed by the dependency-analysis DB.
pub fn module_info_cache(&self) -> Result<&Arc<ModuleInfoCache>, AnyError> {
  self.services.module_info_cache.get_or_try_init(|| {
    Ok(Arc::new(ModuleInfoCache::new(
      self.caches()?.dep_analysis_db(),
      self.resolver_factory()?.parsed_source_cache().clone(),
    )))
  })
}
/// Lazily builds the V8 code cache backed by its DB.
pub fn code_cache(&self) -> Result<&Arc<CodeCache>, AnyError> {
  self.services.code_cache.get_or_try_init(|| {
    Ok(Arc::new(CodeCache::new(self.caches()?.code_cache_db())))
  })
}
/// Returns the shared parsed-source cache.
pub fn parsed_source_cache(
  &self,
) -> Result<&Arc<ParsedSourceCache>, AnyError> {
  Ok(self.resolver_factory()?.parsed_source_cache())
}
/// Returns the emitter used to transpile modules.
pub fn emitter(&self) -> Result<&Arc<CliEmitter>, AnyError> {
  self.resolver_factory()?.emitter()
}
/// Builds a lint rule provider over the workspace resolver.
/// Note: constructs a new provider on every call (not cached).
pub async fn lint_rule_provider(&self) -> Result<LintRuleProvider, AnyError> {
  Ok(LintRuleProvider::new(Some(
    self.workspace_resolver().await?.clone(),
  )))
}
/// Returns the Node.js resolver after ensuring managed npm resolution.
pub async fn node_resolver(&self) -> Result<&Arc<CliNodeResolver>, AnyError> {
  self.initialize_npm_resolution_if_managed().await?;
  self.resolver_factory()?.node_resolver()
}
/// Initializes npm resolution when it is managed by Deno; a no-op otherwise.
async fn initialize_npm_resolution_if_managed(&self) -> Result<(), AnyError> {
  self
    .npm_installer_factory()?
    .initialize_npm_resolution_if_managed()
    .await
}
/// Returns the package.json resolver.
pub fn pkg_json_resolver(
  &self,
) -> Result<&Arc<CliPackageJsonResolver>, AnyError> {
  Ok(self.resolver_factory()?.pkg_json_resolver())
}
/// Returns the resolver for compiler (tsconfig) options.
pub fn compiler_options_resolver(
  &self,
) -> Result<&Arc<CompilerOptionsResolver>, AnyError> {
  self.resolver_factory()?.compiler_options_resolver()
}
/// Lazily builds the type checker, wiring caches, resolvers and — when
/// enabled — the code cache and the optional tsgo binary path.
pub async fn type_checker(&self) -> Result<&Arc<TypeChecker>, AnyError> {
  self
    .services
    .type_checker
    .get_or_try_init_async(
      async {
        let cli_options = self.cli_options()?;
        Ok(Arc::new(TypeChecker::new(
          self.caches()?.clone(),
          Arc::new(TypeCheckingCjsTracker::new(
            self.cjs_tracker()?.clone(),
            self.module_info_cache()?.clone(),
          )),
          cli_options.clone(),
          self.module_graph_builder().await?.clone(),
          self.node_resolver().await?.clone(),
          self.npm_resolver().await?.clone(),
          self.resolver_factory()?.pkg_json_resolver().clone(),
          self.sys(),
          self.compiler_options_resolver()?.clone(),
          // Code cache is only passed through when the user enabled it.
          if cli_options.code_cache_enabled() {
            Some(self.code_cache()?.clone())
          } else {
            None
          },
          self.tsgo_path().await?.cloned(),
        )))
      }
      .boxed_local(),
    )
    .await
}
/// Lazily builds the module graph builder, aggregating nearly every other
/// service (caches, fetchers, npm/jsr resolvers, reporters, permissions).
pub async fn module_graph_builder(
  &self,
) -> Result<&Arc<ModuleGraphBuilder>, AnyError> {
  self
    .services
    .module_graph_builder
    .get_or_try_init_async(
      async {
        let cli_options = self.cli_options()?;
        Ok(Arc::new(ModuleGraphBuilder::new(
          self.caches()?.clone(),
          self.cjs_tracker()?.clone(),
          cli_options.clone(),
          self.file_fetcher()?.clone(),
          self.global_http_cache()?.clone(),
          self.in_npm_pkg_checker()?.clone(),
          self.jsr_version_resolver()?.clone(),
          self.maybe_lockfile().await?.cloned(),
          self.graph_reporter()?.clone(),
          self.module_info_cache()?.clone(),
          self.npm_graph_resolver().await?.clone(),
          self.npm_installer_if_managed().await?.cloned(),
          self.npm_resolver().await?.clone(),
          self.resolver_factory()?.parsed_source_cache().clone(),
          self.text_only_progress_bar().clone(),
          self.resolver().await?.clone(),
          self.root_permissions_container()?.clone(),
          self.sys(),
          self.compiler_options_resolver()?.clone(),
          self.install_reporter()?.cloned().map(|r| {
            r as Arc<dyn deno_resolver::file_fetcher::GraphLoaderReporter>
          }),
        )))
      }
      .boxed_local(),
    )
    .await
}
/// Lazily builds the module graph creator on top of the graph builder and
/// the type checker.
pub async fn module_graph_creator(
  &self,
) -> Result<&Arc<ModuleGraphCreator>, AnyError> {
  self
    .services
    .module_graph_creator
    .get_or_try_init_async(
      async {
        let cli_options = self.cli_options()?;
        Ok(Arc::new(ModuleGraphCreator::new(
          cli_options.clone(),
          self.module_graph_builder().await?.clone(),
          self.type_checker().await?.clone(),
        )))
      }
      .boxed_local(),
    )
    .await
}
/// Lazily builds the container holding the main module graph.
pub async fn main_module_graph_container(
  &self,
) -> Result<&Arc<MainModuleGraphContainer>, AnyError> {
  self
    .services
    .main_graph_container
    .get_or_try_init_async(
      async {
        Ok(Arc::new(MainModuleGraphContainer::new(
          self.cli_options()?.clone(),
          self.module_load_preparer().await?.clone(),
          self.root_permissions_container()?.clone(),
        )))
      }
      .boxed_local(),
    )
    .await
}
/// Lazily resolves the optional V8 inspector server from CLI options.
///
/// Returns `Ok(&None)` when no inspector flag was provided; resolution
/// errors are propagated to the caller.
pub fn maybe_inspector_server(
  &self,
) -> Result<&Option<Arc<InspectorServer>>, AnyError> {
  self.services.maybe_inspector_server.get_or_try_init(|| {
    let cli_options = self.cli_options()?;
    // `?` propagates the error directly; the previous `match` only
    // re-wrapped both arms unchanged.
    Ok(cli_options.resolve_inspector_server()?.map(Arc::new))
  })
}
/// Lazily builds the module load preparer (lockfile, graph builder,
/// progress bar, type checker).
pub async fn module_load_preparer(
  &self,
) -> Result<&Arc<ModuleLoadPreparer>, AnyError> {
  self
    .services
    .module_load_preparer
    .get_or_try_init_async(
      async {
        let cli_options = self.cli_options()?;
        Ok(Arc::new(ModuleLoadPreparer::new(
          cli_options.clone(),
          self.maybe_lockfile().await?.cloned(),
          self.module_graph_builder().await?.clone(),
          self.text_only_progress_bar().clone(),
          self.type_checker().await?.clone(),
        )))
      }
      .boxed_local(),
    )
    .await
}
/// Returns the CJS/ESM module-kind tracker.
pub fn cjs_tracker(&self) -> Result<&Arc<CliCjsTracker>, AnyError> {
  self.resolver_factory()?.cjs_tracker()
}
/// Lazily builds the permission descriptor parser.
pub fn permission_desc_parser(
  &self,
) -> Result<&Arc<RuntimePermissionDescriptorParser<CliSys>>, AnyError> {
  self.services.permission_desc_parser.get_or_try_init(|| {
    Ok(Arc::new(RuntimePermissionDescriptorParser::new(self.sys())))
  })
}
/// Lazily builds the unstable-feature checker, enabling exactly the
/// features requested via CLI flags.
pub fn feature_checker(&self) -> Result<&Arc<FeatureChecker>, AnyError> {
  self.services.feature_checker.get_or_try_init(|| {
    let cli_options = self.cli_options()?;
    let mut checker = FeatureChecker::default();
    checker.set_exit_cb(Box::new(crate::unstable_exit_cb));
    let unstable_features = cli_options.unstable_features();
    for feature in deno_runtime::UNSTABLE_FEATURES {
      if unstable_features.contains(&feature.name) {
        checker.enable_feature(feature.name);
      }
    }
    Ok(Arc::new(checker))
  })
}
/// Builds a writer for `deno compile` binaries; borrows most collaborators
/// from the factory, so the writer is tied to `&self`'s lifetime.
pub async fn create_compile_binary_writer(
  &self,
) -> Result<DenoCompileBinaryWriter<'_>, AnyError> {
  let cli_options = self.cli_options()?;
  Ok(DenoCompileBinaryWriter::new(
    self.resolver_factory()?.cjs_module_export_analyzer()?,
    self.cjs_tracker()?,
    self.cli_options()?,
    self.deno_dir()?,
    self.emitter()?,
    self.file_fetcher()?,
    self.http_client_provider(),
    self.npm_resolver().await?,
    self.workspace_resolver().await?.as_ref(),
    cli_options.npm_system_info(),
  ))
}
/// Lazily builds the root permissions container from the CLI permission
/// options.
pub fn root_permissions_container(
  &self,
) -> Result<&PermissionsContainer, AnyError> {
  self
    .services
    .root_permissions_container
    .get_or_try_init(|| {
      let desc_parser = self.permission_desc_parser()?.clone();
      let permissions = Permissions::from_options(
        desc_parser.as_ref(),
        &self.cli_options()?.permissions_options()?,
      )?;
      Ok(PermissionsContainer::new(desc_parser, permissions))
    })
}
/// Returns the loader for a workspace-external import map, if configured.
fn workspace_external_import_map_loader(
  &self,
) -> Result<&Arc<WorkspaceExternalImportMapLoader<CliSys>>, AnyError> {
  Ok(
    self
      .workspace_factory()?
      .workspace_external_import_map_loader()?,
  )
}
/// Builds the main worker factory with default (empty) roots.
pub async fn create_cli_main_worker_factory(
  &self,
) -> Result<CliMainWorkerFactory, AnyError> {
  self
    .create_cli_main_worker_factory_with_roots(Default::default())
    .await
}
pub async fn create_module_loader_factory(
&self,
) -> Result<CliModuleLoaderFactory, AnyError> {
let cli_options = self.cli_options()?;
let cli_npm_resolver = self.npm_resolver().await?.clone();
let in_npm_pkg_checker = self.in_npm_pkg_checker()?;
let workspace_factory = self.workspace_factory()?;
let resolver_factory = self.resolver_factory()?;
let npm_installer_factory = self.npm_installer_factory()?;
let cjs_tracker = self.cjs_tracker()?.clone();
let npm_registry_permission_checker = {
let mode = if resolver_factory.use_byonm()? {
NpmRegistryReadPermissionCheckerMode::Byonm
} else if let Some(node_modules_dir) =
workspace_factory.node_modules_dir_path()?
{
NpmRegistryReadPermissionCheckerMode::Local(
node_modules_dir.to_path_buf(),
)
} else {
NpmRegistryReadPermissionCheckerMode::Global(
self.npm_cache_dir()?.root_dir().to_path_buf(),
)
};
Arc::new(NpmRegistryReadPermissionChecker::new(self.sys(), mode))
};
let maybe_eszip_loader =
self.eszip_module_loader_provider()?.get().await?.cloned();
let module_loader_factory = CliModuleLoaderFactory::new(
cli_options,
cjs_tracker,
if cli_options.code_cache_enabled() {
Some(self.code_cache()?.clone())
} else {
None
},
self.emitter()?.clone(),
self.file_fetcher()?.clone(),
npm_installer_factory
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | true |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/task_runner.rs | cli/task_runner.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::collections::HashMap;
use std::ffi::OsStr;
use std::ffi::OsString;
use std::path::Path;
use std::path::PathBuf;
use std::rc::Rc;
use deno_core::anyhow::Context;
use deno_core::error::AnyError;
use deno_core::futures::future::LocalBoxFuture;
use deno_task_shell::ExecutableCommand;
use deno_task_shell::ExecuteResult;
use deno_task_shell::KillSignal;
use deno_task_shell::ShellCommand;
use deno_task_shell::ShellCommandContext;
use deno_task_shell::ShellPipeReader;
use deno_task_shell::ShellPipeWriter;
use tokio::task::JoinHandle;
use tokio::task::LocalSet;
use tokio_util::sync::CancellationToken;
use crate::node::CliNodeResolver;
use crate::npm::CliManagedNpmResolver;
use crate::npm::CliNpmResolver;
/// Appends the extra CLI arguments to `script`, quoting each argument.
///
/// Each argument is wrapped in double quotes with embedded `"` escaped and
/// `$` escaped to sanitize command substitution. The result is trimmed so a
/// script with no extra arguments carries no stray whitespace.
pub fn get_script_with_args(script: &str, argv: &[String]) -> String {
  let mut combined = String::from(script);
  for arg in argv {
    let escaped = arg.replace('"', "\\\"").replace('$', "\\$");
    combined.push_str(" \"");
    combined.push_str(&escaped);
    combined.push('"');
  }
  combined.trim().to_owned()
}
/// One stdio stream for a task: an optional capture-side reader plus the
/// writer handed to the shell. The reader is only present for piped streams.
pub struct TaskStdio(Option<ShellPipeReader>, ShellPipeWriter);
impl TaskStdio {
  /// Stream that writes straight to the process stdout (nothing captured).
  pub fn stdout() -> Self {
    Self(None, ShellPipeWriter::stdout())
  }
  /// Stream that writes straight to the process stderr (nothing captured).
  pub fn stderr() -> Self {
    Self(None, ShellPipeWriter::stderr())
  }
  /// Stream backed by an in-memory pipe so the output can be captured.
  pub fn piped() -> Self {
    let (r, w) = deno_task_shell::pipe();
    Self(Some(r), w)
  }
}
/// The stdout/stderr configuration for a task run.
pub struct TaskIo {
  pub stdout: TaskStdio,
  pub stderr: TaskStdio,
}
impl Default for TaskIo {
  // By default output is forwarded to the real stdout/stderr, uncaptured.
  fn default() -> Self {
    Self {
      stdout: TaskStdio::stdout(),
      stderr: TaskStdio::stderr(),
    }
  }
}
/// Everything needed to run a single task script.
pub struct RunTaskOptions<'a> {
  // Name used only for error messages.
  pub task_name: &'a str,
  // The raw shell script text to parse and execute.
  pub script: &'a str,
  // Working directory the shell starts in.
  pub cwd: PathBuf,
  // Directory used to seed the INIT_CWD env var when unset.
  pub init_cwd: &'a Path,
  pub env_vars: HashMap<OsString, OsString>,
  // Extra arguments appended (quoted) to the script.
  pub argv: &'a [String],
  pub custom_commands: HashMap<String, Rc<dyn ShellCommand>>,
  // When set, `<dir>/.bin` is prepended to PATH.
  pub root_node_modules_dir: Option<&'a Path>,
  // Defaults to passthrough stdout/stderr when `None`.
  pub stdio: Option<TaskIo>,
  pub kill_signal: KillSignal,
}
pub type TaskCustomCommands = HashMap<String, Rc<dyn ShellCommand>>;
/// Outcome of a task run; stdout/stderr are only populated for piped stdio.
pub struct TaskResult {
  pub exit_code: i32,
  pub stdout: Option<Vec<u8>>,
  pub stderr: Option<Vec<u8>>,
}
/// Parses and executes a task script in the built-in deno_task_shell.
///
/// Extra arguments are quoted and appended to the script, the environment is
/// prepared (INIT_CWD, npm user agent, node_modules/.bin on PATH), and a
/// `deno` command pointing at the current executable is registered unless
/// the caller already supplied one. Returns the exit code plus any captured
/// stdout/stderr for piped streams.
pub async fn run_task(
  mut opts: RunTaskOptions<'_>,
) -> Result<TaskResult, AnyError> {
  let script = get_script_with_args(opts.script, opts.argv);
  let seq_list = deno_task_shell::parser::parse(&script)
    .with_context(|| format!("Error parsing script '{}'.", opts.task_name))?;
  let env_vars =
    prepare_env_vars(opts.env_vars, opts.init_cwd, opts.root_node_modules_dir);
  if !opts.custom_commands.contains_key("deno") {
    opts
      .custom_commands
      .insert("deno".to_string(), Rc::new(DenoCommand::default()));
  }
  let state = deno_task_shell::ShellState::new(
    env_vars,
    opts.cwd,
    opts.custom_commands,
    opts.kill_signal,
  );
  let stdio = opts.stdio.unwrap_or_default();
  let (
    TaskStdio(stdout_read, stdout_write),
    TaskStdio(stderr_read, stderr_write),
  ) = (stdio.stdout, stdio.stderr);
  // Drains a pipe into a buffer on a blocking thread; pipe_to is blocking
  // I/O so it must not run on the async executor.
  fn read(reader: ShellPipeReader) -> JoinHandle<Result<Vec<u8>, AnyError>> {
    tokio::task::spawn_blocking(move || {
      let mut buf = Vec::new();
      reader.pipe_to(&mut buf)?;
      Ok(buf)
    })
  }
  // Readers exist only for piped streams, so capture tasks are optional.
  let stdout = stdout_read.map(read);
  let stderr = stderr_read.map(read);
  let local = LocalSet::new();
  let future = async move {
    let exit_code = deno_task_shell::execute_with_pipes(
      seq_list,
      state,
      ShellPipeReader::stdin(),
      stdout_write,
      stderr_write,
    )
    .await;
    Ok::<_, AnyError>(TaskResult {
      exit_code,
      stdout: if let Some(stdout) = stdout {
        Some(stdout.await??)
      } else {
        None
      },
      stderr: if let Some(stderr) = stderr {
        Some(stderr.await??)
      } else {
        None
      },
    })
  };
  // NOTE(review): run on a LocalSet — presumably the shell futures are
  // !Send; confirm against deno_task_shell before changing.
  local.run_until(future).await
}
/// Prepares the task environment: seeds `INIT_CWD` and the npm user-agent
/// variable when absent, and prepends `<node_modules>/.bin` to `PATH` when a
/// node_modules directory is available. Returns the augmented map.
fn prepare_env_vars(
  mut env_vars: HashMap<OsString, OsString>,
  initial_cwd: &Path,
  node_modules_dir: Option<&Path>,
) -> HashMap<OsString, OsString> {
  // npm sets INIT_CWD to the invocation directory; mirror that unless the
  // caller already provided a value.
  env_vars
    .entry(OsString::from("INIT_CWD"))
    .or_insert_with(|| initial_cwd.to_path_buf().into_os_string());
  // Advertise a deno-flavored npm user agent unless one is already set.
  env_vars
    .entry(OsString::from(crate::npm::NPM_CONFIG_USER_AGENT_ENV_VAR))
    .or_insert_with(|| crate::npm::get_npm_config_user_agent().into());
  // Make locally installed package binaries take precedence on PATH.
  if let Some(dir) = node_modules_dir {
    prepend_to_path(&mut env_vars, dir.join(".bin").into_os_string());
  }
  env_vars
}
/// Puts `value` at the front of the `PATH` entry in `env_vars`, inserting a
/// platform-appropriate separator. An absent or empty `PATH` becomes exactly
/// `value`.
fn prepend_to_path(
  env_vars: &mut HashMap<OsString, OsString>,
  value: OsString,
) {
  if let Some(existing) = env_vars.get_mut(OsStr::new("PATH")) {
    if existing.is_empty() {
      *existing = value;
    } else {
      // Build "<value><sep><existing>" and swap it in.
      let mut combined = value;
      combined.push(if cfg!(windows) { ";" } else { ":" });
      combined.push(existing.as_os_str());
      *existing = combined;
    }
  } else {
    env_vars.insert(OsString::from("PATH"), value);
  }
}
/// Snapshots the process environment. On Windows the variable names are
/// uppercased so lookups behave case-insensitively; elsewhere names are kept
/// as-is.
pub fn real_env_vars() -> HashMap<OsString, OsString> {
  let mut vars = HashMap::new();
  for (key, value) in std::env::vars_os() {
    let key = if cfg!(windows) {
      key.to_ascii_uppercase()
    } else {
      key
    };
    vars.insert(key, value);
  }
  vars
}
// WARNING: Do not depend on this env var in user code. It's not stable API.
pub(crate) static USE_PKG_JSON_HIDDEN_ENV_VAR_NAME: &str =
  "DENO_INTERNAL_TASK_USE_PKG_JSON";
/// Shell command that intercepts `npm …` invocations inside task scripts.
pub struct NpmCommand;
impl ShellCommand for NpmCommand {
  fn execute(
    &self,
    mut context: ShellCommandContext,
  ) -> LocalBoxFuture<'static, ExecuteResult> {
    // `npm run <script>` without any flags is rewritten to `deno task
    // <script>` executed by the current deno binary.
    if context.args.first().and_then(|s| s.to_str()) == Some("run")
      && context.args.len() >= 2
      // for now, don't run any npm scripts that have a flag because
      // we don't handle stuff like `--workspaces` properly
      && !context.args.iter().any(|s| s.to_string_lossy().starts_with('-'))
    {
      // run with deno task instead
      let mut args: Vec<OsString> = Vec::with_capacity(context.args.len());
      args.push("task".into());
      args.extend(context.args.into_iter().skip(1));
      let mut state = context.state;
      // Flag the child process (see USE_PKG_JSON_HIDDEN_ENV_VAR_NAME above).
      state.apply_env_var(
        OsStr::new(USE_PKG_JSON_HIDDEN_ENV_VAR_NAME),
        OsStr::new("1"),
      );
      return ExecutableCommand::new(
        "deno".to_string(),
        std::env::current_exe()
          .and_then(|p| p.canonicalize())
          .unwrap(),
      )
      .execute(ShellCommandContext {
        args,
        state,
        ..context
      });
    }
    // fallback to running the real npm command
    let npm_path = match context.state.resolve_command_path(OsStr::new("npm")) {
      Ok(path) => path,
      Err(err) => {
        // npm not found: report on stderr and exit with the resolver's code.
        let _ = context.stderr.write_line(&format!("{}", err));
        return Box::pin(std::future::ready(ExecuteResult::from_exit_code(
          err.exit_code(),
        )));
      }
    };
    ExecutableCommand::new("npm".to_string(), npm_path).execute(context)
  }
}
/// Shell command mapping `deno` to the currently running deno executable.
pub struct DenoCommand(ExecutableCommand);
impl Default for DenoCommand {
  fn default() -> Self {
    Self(ExecutableCommand::new(
      "deno".to_string(),
      // Panics if the current executable path cannot be resolved; that is
      // treated as an unrecoverable environment failure here.
      std::env::current_exe()
        .and_then(|p| p.canonicalize())
        .unwrap(),
    ))
  }
}
impl ShellCommand for DenoCommand {
  fn execute(
    &self,
    context: ShellCommandContext,
  ) -> LocalBoxFuture<'static, ExecuteResult> {
    self.0.execute(context)
  }
}
/// Shell command that redirects `node <file>` invocations to
/// `deno run -A` with Node-compat unstable flags enabled.
pub struct NodeCommand;
impl ShellCommand for NodeCommand {
  fn execute(
    &self,
    context: ShellCommandContext,
  ) -> LocalBoxFuture<'static, ExecuteResult> {
    // continue to use Node if the first argument is a flag
    // or there are no arguments provided for some reason
    if context.args.is_empty()
      || ({
        let first_arg = context.args[0].to_string_lossy();
        first_arg.starts_with('-') // has a flag
      })
    {
      return ExecutableCommand::new("node".to_string(), PathBuf::from("node"))
        .execute(context);
    }
    // Rebuild the argument list as `deno run -A <compat flags> <args…>`.
    let mut args: Vec<OsString> = Vec::with_capacity(7 + context.args.len());
    args.extend([
      "run".into(),
      "-A".into(),
      "--unstable-bare-node-builtins".into(),
      "--unstable-detect-cjs".into(),
      "--unstable-sloppy-imports".into(),
      "--unstable-unsafe-proto".into(),
    ]);
    args.extend(context.args);
    let mut state = context.state;
    // Flag the child process (see USE_PKG_JSON_HIDDEN_ENV_VAR_NAME).
    state.apply_env_var(
      OsStr::new(USE_PKG_JSON_HIDDEN_ENV_VAR_NAME),
      OsStr::new("1"),
    );
    ExecutableCommand::new(
      "deno".to_string(),
      std::env::current_exe()
        .and_then(|p| p.canonicalize())
        .unwrap(),
    )
    .execute(ShellCommandContext {
      args,
      state,
      ..context
    })
  }
}
/// Shell command for `node-gyp`: runs the real binary when it is on PATH,
/// otherwise emits a warning and succeeds with exit code 0.
pub struct NodeGypCommand;
impl ShellCommand for NodeGypCommand {
  fn execute(
    &self,
    context: ShellCommandContext,
  ) -> LocalBoxFuture<'static, ExecuteResult> {
    // at the moment this shell command is just to give a warning if node-gyp is not found
    // in the future, we could try to run/install node-gyp for the user with deno
    if context
      .state
      .resolve_command_path(OsStr::new("node-gyp"))
      .is_err()
    {
      log::warn!(
        "{} node-gyp was used in a script, but was not listed as a dependency. Either add it as a dependency or install it globally (e.g. `npm install -g node-gyp`)",
        crate::colors::yellow("Warning")
      );
      // Deliberately succeed so the surrounding script keeps running.
      Box::pin(std::future::ready(ExecuteResult::from_exit_code(0)))
    } else {
      ExecutableCommand::new(
        "node-gyp".to_string(),
        "node-gyp".to_string().into(),
      )
      .execute(context)
    }
  }
}
/// Shell command for `npx`: resolves `npx <cmd>` against the registered
/// custom commands first, then falls back to the real `npx` binary.
pub struct NpxCommand;
impl ShellCommand for NpxCommand {
  fn execute(
    &self,
    mut context: ShellCommandContext,
  ) -> LocalBoxFuture<'static, ExecuteResult> {
    if let Some(first_arg) = context.args.first().cloned() {
      match context.state.resolve_custom_command(&first_arg) {
        Some(command) => {
          // Delegate with the command name stripped from the args.
          let context = ShellCommandContext {
            args: context.args.into_iter().skip(1).collect::<Vec<_>>(),
            ..context
          };
          command.execute(context)
        }
        _ => {
          // can't find the command, so fallback to running the real npx command
          let npx_path =
            match context.state.resolve_command_path(OsStr::new("npx")) {
              Ok(npx) => npx,
              Err(err) => {
                let _ = context.stderr.write_line(&format!("{}", err));
                return Box::pin(std::future::ready(
                  ExecuteResult::from_exit_code(err.exit_code()),
                ));
              }
            };
          ExecutableCommand::new("npx".to_string(), npx_path).execute(context)
        }
      }
    } else {
      // `npx` with no arguments is an error, matching npx's own behavior.
      let _ = context.stderr.write_line("npx: missing command");
      Box::pin(std::future::ready(ExecuteResult::from_exit_code(1)))
    }
  }
}
/// Runs a module in the node_modules folder.
#[derive(Clone)]
pub struct NodeModulesFileRunCommand {
  // Binary name as it appears in node_modules/.bin.
  pub command_name: String,
  // Resolved path of the JS entry file to execute.
  pub path: PathBuf,
}
impl ShellCommand for NodeModulesFileRunCommand {
  fn execute(
    &self,
    mut context: ShellCommandContext,
  ) -> LocalBoxFuture<'static, ExecuteResult> {
    // Execute the file via `deno run --ext=js -A <path> <args…>` using the
    // current deno executable.
    let mut args: Vec<OsString> = vec![
      "run".into(),
      "--ext=js".into(),
      "-A".into(),
      self.path.clone().into_os_string(),
    ];
    args.extend(context.args);
    let executable_command = deno_task_shell::ExecutableCommand::new(
      "deno".to_string(),
      std::env::current_exe()
        .and_then(|p| p.canonicalize())
        .unwrap(),
    );
    // set this environment variable so that the launched process knows the npm command name
    context.state.apply_env_var(
      OsStr::new("DENO_INTERNAL_NPM_CMD_NAME"),
      OsStr::new(&self.command_name),
    );
    executable_command.execute(ShellCommandContext { args, ..context })
  }
}
/// Builds the custom shell-command table for task scripts based on the npm
/// resolution mode, always adding the `npm` interception command.
pub fn resolve_custom_commands(
  node_resolver: &CliNodeResolver,
  npm_resolver: &CliNpmResolver,
) -> Result<HashMap<String, Rc<dyn ShellCommand>>, AnyError> {
  let mut commands = match npm_resolver {
    CliNpmResolver::Byonm(npm_resolver) => {
      // NOTE(review): `unwrap` assumes a byonm resolver always has a root
      // node_modules path — confirm against CliNpmResolver's invariants.
      let node_modules_dir = npm_resolver.root_node_modules_path().unwrap();
      let bin_dir = node_modules_dir.join(".bin");
      resolve_npm_commands_from_bin_dir(&bin_dir, node_resolver)
    }
    CliNpmResolver::Managed(npm_resolver) => {
      resolve_managed_npm_commands(node_resolver, npm_resolver)?
    }
  };
  commands.insert("npm".to_string(), Rc::new(NpmCommand));
  Ok(commands)
}
/// Builds a shell command for every binary discovered in a
/// `node_modules/.bin` directory; each command runs its resolved file via
/// the current deno executable.
pub fn resolve_npm_commands_from_bin_dir(
  bin_dir: &Path,
  node_resolver: &CliNodeResolver,
) -> HashMap<String, Rc<dyn ShellCommand>> {
  let mut commands: HashMap<String, Rc<dyn ShellCommand>> = HashMap::new();
  for (command_name, path) in
    node_resolver.resolve_npm_commands_from_bin_dir(bin_dir)
  {
    let run_command = NodeModulesFileRunCommand {
      command_name: command_name.clone(),
      path: path.path().to_path_buf(),
    };
    commands.insert(command_name, Rc::new(run_command));
  }
  commands
}
/// Builds shell commands for every binary declared by the top-level packages
/// of a managed npm resolution, plus an `npx` fallback command.
fn resolve_managed_npm_commands(
  node_resolver: &CliNodeResolver,
  npm_resolver: &CliManagedNpmResolver,
) -> Result<HashMap<String, Rc<dyn ShellCommand>>, AnyError> {
  let mut result = HashMap::new();
  for id in npm_resolver.resolution().top_level_packages() {
    let package_folder = npm_resolver.resolve_pkg_folder_from_pkg_id(&id)?;
    let bins =
      node_resolver.resolve_npm_binary_commands_for_package(&package_folder)?;
    // Later packages overwrite earlier ones on bin-name collisions.
    result.extend(bins.into_iter().map(|(command_name, path)| {
      (
        command_name.clone(),
        Rc::new(NodeModulesFileRunCommand {
          command_name,
          path: path.path().to_path_buf(),
        }) as Rc<dyn ShellCommand>,
      )
    }));
  }
  // Keep a package-provided `npx` if one exists; otherwise add ours.
  if !result.contains_key("npx") {
    result.insert("npx".to_string(), Rc::new(NpxCommand));
  }
  Ok(result)
}
/// Runs a deno task future forwarding any signals received
/// to the process.
///
/// Signal listeners and ctrl+c listening will be setup.
pub async fn run_future_forwarding_signals<TOutput>(
  kill_signal: KillSignal,
  future: impl std::future::Future<Output = TOutput>,
) -> TOutput {
  // Spawns a background task that stops as soon as `token` is cancelled.
  fn spawn_future_with_cancellation(
    future: impl std::future::Future<Output = ()> + 'static,
    token: CancellationToken,
  ) {
    deno_core::unsync::spawn(async move {
      tokio::select! {
        _ = future => {}
        _ = token.cancelled() => {}
      }
    });
  }
  let token = CancellationToken::new();
  // Both guards fire when this function returns, tearing down the signal
  // listeners and the kill signal.
  let _token_drop_guard = token.clone().drop_guard();
  let _drop_guard = kill_signal.clone().drop_guard();
  spawn_future_with_cancellation(
    listen_ctrl_c(kill_signal.clone()),
    token.clone(),
  );
  // Full signal forwarding is unix-only; windows gets only ctrl+c handling.
  #[cfg(unix)]
  spawn_future_with_cancellation(
    listen_and_forward_all_signals(kill_signal),
    token,
  );
  future.await
}
/// Loops on ctrl+c events, forwarding each as SIGINT to the task's kill
/// signal (except on windows, where the OS already delivered it).
async fn listen_ctrl_c(kill_signal: KillSignal) {
  while let Ok(()) = deno_signals::ctrl_c().await {
    // On windows, ctrl+c is sent to the process group, so the signal would
    // have already been sent to the child process. We still want to listen
    // for ctrl+c here to keep the process alive when receiving it, but no
    // need to forward the signal because it's already been sent.
    if !cfg!(windows) {
      kill_signal.send(deno_task_shell::SignalKind::SIGINT)
    }
  }
}
/// Listens for every forwardable unix signal and relays each occurrence to
/// the task's kill signal until the listener streams close.
#[cfg(unix)]
async fn listen_and_forward_all_signals(kill_signal: KillSignal) {
  use deno_core::futures::FutureExt;
  use deno_signals::SIGNAL_NUMS;
  // listen and forward every signal we support
  let mut futures = Vec::with_capacity(SIGNAL_NUMS.len());
  for signo in SIGNAL_NUMS.iter().copied() {
    if signo == libc::SIGKILL || signo == libc::SIGSTOP {
      continue; // skip, can't listen to these
    }
    let kill_signal = kill_signal.clone();
    futures.push(
      async move {
        // A failure to register the listener silently skips that signal.
        let Ok(mut stream) = deno_signals::signal_stream(signo) else {
          return;
        };
        let signal_kind: deno_task_shell::SignalKind = signo.into();
        while let Some(()) = stream.recv().await {
          kill_signal.send(signal_kind);
        }
      }
      .boxed_local(),
    )
  }
  deno_core::futures::future::join_all(futures).await;
}
#[cfg(test)]
mod test {
  use super::*;
  // Covers insertion into an empty map, prepending with the platform
  // separator, and replacing an empty existing PATH value.
  #[test]
  fn test_prepend_to_path() {
    let mut env_vars = HashMap::new();
    prepend_to_path(&mut env_vars, "/example".into());
    assert_eq!(
      env_vars,
      HashMap::from([("PATH".into(), "/example".into())])
    );
    prepend_to_path(&mut env_vars, "/example2".into());
    let separator = if cfg!(windows) { ";" } else { ":" };
    assert_eq!(
      env_vars,
      HashMap::from([(
        "PATH".into(),
        format!("/example2{}/example", separator).into()
      )])
    );
    // An empty PATH entry is overwritten rather than prepended to.
    env_vars.get_mut(OsStr::new("PATH")).unwrap().clear();
    prepend_to_path(&mut env_vars, "/example".into());
    assert_eq!(
      env_vars,
      HashMap::from([("PATH".into(), "/example".into())])
    );
  }
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/util/unix.rs | cli/util/unix.rs | // Copyright 2018-2025 the Deno authors. MIT license.
/// Raise soft file descriptor limit to hard file descriptor limit.
/// This is the difference between `ulimit -n` and `ulimit -n -H`.
///
/// Best-effort: any `getrlimit`/`setrlimit` failure is silently ignored,
/// and on non-unix targets this is a no-op.
pub fn raise_fd_limit() {
  #[cfg(unix)]
  // TODO(bartlomieju):
  #[allow(clippy::undocumented_unsafe_blocks)]
  unsafe {
    let mut limits = libc::rlimit {
      rlim_cur: 0,
      rlim_max: 0,
    };
    if 0 != libc::getrlimit(libc::RLIMIT_NOFILE, &mut limits) {
      return;
    }
    // Soft limit already unlimited; nothing to raise.
    if limits.rlim_cur == libc::RLIM_INFINITY {
      return;
    }
    // No hard limit? Do a binary search for the effective soft limit.
    if limits.rlim_max == libc::RLIM_INFINITY {
      let mut min = limits.rlim_cur;
      let mut max = 1 << 20;
      while min + 1 < max {
        limits.rlim_cur = min + (max - min) / 2;
        match libc::setrlimit(libc::RLIMIT_NOFILE, &limits) {
          // setrlimit accepted the midpoint: search higher.
          0 => min = limits.rlim_cur,
          // Rejected: search lower.
          _ => max = limits.rlim_cur,
        }
      }
      return;
    }
    // Raise the soft limit to the hard limit.
    if limits.rlim_cur < limits.rlim_max {
      limits.rlim_cur = limits.rlim_max;
      libc::setrlimit(libc::RLIMIT_NOFILE, &limits);
    }
  }
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/util/file_watcher.rs | cli/util/file_watcher.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::cell::RefCell;
use std::collections::HashSet;
use std::future::Future;
use std::io::IsTerminal;
use std::path::PathBuf;
use std::rc::Rc;
use std::sync::Arc;
use std::time::Duration;
use deno_config::glob::PathOrPatternSet;
use deno_core::error::AnyError;
use deno_core::futures::FutureExt;
use deno_core::parking_lot::Mutex;
use deno_core::url::Url;
use deno_lib::util::result::js_error_downcast_ref;
use deno_runtime::fmt_errors::format_js_error;
use deno_signals;
use log::info;
use notify::Error as NotifyError;
use notify::RecommendedWatcher;
use notify::RecursiveMode;
use notify::Watcher;
use notify::event::Event as NotifyEvent;
use notify::event::EventKind;
use tokio::select;
use tokio::sync::broadcast::error::RecvError;
use tokio::sync::mpsc;
use tokio::sync::mpsc::UnboundedReceiver;
use tokio::sync::mpsc::error::SendError;
use tokio::time::sleep;
use crate::args::Flags;
use crate::colors;
use crate::util::fs::canonicalize_path;
// ANSI sequence: cursor home + clear screen + clear scrollback.
const CLEAR_SCREEN: &str = "\x1B[H\x1B[2J\x1B[3J";
// Quiet period used to coalesce bursts of file events into one batch.
const DEBOUNCE_INTERVAL: Duration = Duration::from_millis(200);
/// Receiver that batches changed paths, emitting once events go quiet.
struct DebouncedReceiver {
  // The `recv()` call could be used in a tokio `select!` macro,
  // and so we store this state on the struct to ensure we don't
  // lose items if a `recv()` never completes
  received_items: HashSet<PathBuf>,
  receiver: UnboundedReceiver<Vec<PathBuf>>,
}
impl DebouncedReceiver {
  /// Creates the receiver together with the sender side of its channel.
  fn new_with_sender() -> (Arc<mpsc::UnboundedSender<Vec<PathBuf>>>, Self) {
    let (sender, receiver) = mpsc::unbounded_channel();
    (
      Arc::new(sender),
      Self {
        receiver,
        received_items: HashSet::new(),
      },
    )
  }
  /// Waits for at least one batch of paths, keeps absorbing further batches
  /// until `DEBOUNCE_INTERVAL` passes without new ones, then returns the
  /// deduplicated set. Returns `None` when the channel closes.
  async fn recv(&mut self) -> Option<Vec<PathBuf>> {
    // Block for the first batch only when nothing is carried over from a
    // previously-cancelled recv().
    if self.received_items.is_empty() {
      self
        .received_items
        .extend(self.receiver.recv().await?.into_iter());
    }
    loop {
      select! {
        items = self.receiver.recv() => {
          self.received_items.extend(items?);
        }
        _ = sleep(DEBOUNCE_INTERVAL) => {
          return Some(self.received_items.drain().collect());
        }
      }
    }
  }
}
/// Awaits one watcher iteration, logging any error (JS errors are formatted
/// with source context). Returns `true` on success, `false` on error.
async fn error_handler<F>(watch_future: F, initial_cwd: Option<&Url>) -> bool
where
  F: Future<Output = Result<(), AnyError>>,
{
  let result = watch_future.await;
  if let Err(err) = result {
    // JS errors get the pretty formatter; everything else uses Debug output.
    let error_string = match js_error_downcast_ref(&err) {
      Some(e) => format_js_error(e, initial_cwd),
      None => format!("{err:?}"),
    };
    log::error!(
      "{}: {}",
      colors::red_bold("error"),
      // Avoid printing a doubled "error:" prefix.
      error_string.trim_start_matches("error: ")
    );
    false
  } else {
    true
  }
}
/// Display settings for the watcher's terminal output.
pub struct PrintConfig {
  // Colored banner prefix shown in watcher log lines.
  banner: &'static str,
  /// Printing watcher status to terminal.
  job_name: &'static str,
  /// Determine whether to clear the terminal screen; applicable to TTY environments only.
  clear_screen: bool,
  pub print_finished: bool,
}
impl PrintConfig {
  /// By default `PrintConfig` uses "Watcher" as a banner name that will
  /// be printed in color. If you need to customize it, use
  /// `PrintConfig::new_with_banner` instead.
  pub fn new(job_name: &'static str, clear_screen: bool) -> Self {
    Self {
      banner: "Watcher",
      job_name,
      clear_screen,
      print_finished: true,
    }
  }
  /// Same as `new` but with a caller-provided banner text.
  pub fn new_with_banner(
    banner: &'static str,
    job_name: &'static str,
    clear_screen: bool,
  ) -> Self {
    Self {
      banner,
      job_name,
      clear_screen,
      print_finished: true,
    }
  }
}
fn create_print_after_restart_fn(clear_screen: bool) -> impl Fn() {
move || {
#[allow(clippy::print_stderr)]
if clear_screen && std::io::stderr().is_terminal() {
eprint!("{}", CLEAR_SCREEN);
}
}
}
#[derive(Debug)]
pub struct WatcherCommunicatorOptions {
/// Send a list of paths that should be watched for changes.
pub paths_to_watch_tx: tokio::sync::mpsc::UnboundedSender<Vec<PathBuf>>,
/// Listen for a list of paths that were changed.
pub changed_paths_rx: tokio::sync::broadcast::Receiver<Option<Vec<PathBuf>>>,
pub changed_paths_tx: tokio::sync::broadcast::Sender<Option<Vec<PathBuf>>>,
/// Send a message to force a restart.
pub restart_tx: tokio::sync::mpsc::UnboundedSender<()>,
pub restart_mode: WatcherRestartMode,
pub banner: String,
}
/// An interface to interact with Deno's CLI file watcher.
#[derive(Debug)]
pub struct WatcherCommunicator {
  /// Send a list of paths that should be watched for changes.
  paths_to_watch_tx: tokio::sync::mpsc::UnboundedSender<Vec<PathBuf>>,
  /// Listen for a list of paths that were changed.
  changed_paths_rx: tokio::sync::broadcast::Receiver<Option<Vec<PathBuf>>>,
  /// Broadcast changed paths to subscribers (used in manual restart mode).
  changed_paths_tx: tokio::sync::broadcast::Sender<Option<Vec<PathBuf>>>,
  /// Send a message to force a restart.
  restart_tx: tokio::sync::mpsc::UnboundedSender<()>,
  /// Current restart behavior; behind a mutex so it can be switched at runtime.
  restart_mode: Mutex<WatcherRestartMode>,
  /// Colored banner prefix used by `print`.
  banner: String,
}
impl WatcherCommunicator {
pub fn new(options: WatcherCommunicatorOptions) -> Self {
Self {
paths_to_watch_tx: options.paths_to_watch_tx,
changed_paths_rx: options.changed_paths_rx,
changed_paths_tx: options.changed_paths_tx,
restart_tx: options.restart_tx,
restart_mode: Mutex::new(options.restart_mode),
banner: options.banner,
}
}
pub fn watch_paths(
&self,
paths: Vec<PathBuf>,
) -> Result<(), SendError<Vec<PathBuf>>> {
if paths.is_empty() {
return Ok(());
}
self.paths_to_watch_tx.send(paths)
}
pub fn force_restart(&self) -> Result<(), SendError<()>> {
// Change back to automatic mode, so that HMR can set up watching
// from scratch.
*self.restart_mode.lock() = WatcherRestartMode::Automatic;
self.restart_tx.send(())
}
pub async fn watch_for_changed_paths(
&self,
) -> Result<Option<Vec<PathBuf>>, RecvError> {
let mut rx = self.changed_paths_rx.resubscribe();
rx.recv().await
}
pub fn change_restart_mode(&self, restart_mode: WatcherRestartMode) {
*self.restart_mode.lock() = restart_mode;
}
pub fn send(
&self,
paths: Option<Vec<PathBuf>>,
) -> Result<(), SendError<Option<Vec<PathBuf>>>> {
match *self.restart_mode.lock() {
WatcherRestartMode::Automatic => {
self.restart_tx.send(()).map_err(|_| SendError(None))
}
WatcherRestartMode::Manual => self
.changed_paths_tx
.send(paths)
.map(|_| ())
.map_err(|e| SendError(e.0)),
}
}
pub fn print(&self, msg: String) {
log::info!("{} {}", self.banner, colors::gray(msg));
}
pub fn show_path_changed(&self, changed_paths: Option<Vec<PathBuf>>) {
if let Some(paths) = changed_paths {
if !paths.is_empty() {
self.print(format!("Restarting! File change detected: {:?}", paths[0]))
} else {
self.print("Restarting! File change detected.".to_string())
}
}
}
}
/// Creates a file watcher.
///
/// - `operation` is the actual operation we want to run every time the watcher detects file
///   changes. For example, in the case where we would like to bundle, then `operation` would
///   have the logic for it like bundling the code.
pub async fn watch_func<O, F>(
  flags: Arc<Flags>,
  print_config: PrintConfig,
  operation: O,
) -> Result<(), AnyError>
where
  O: FnMut(
    Arc<Flags>,
    Arc<WatcherCommunicator>,
    Option<Vec<PathBuf>>,
  ) -> Result<F, AnyError>,
  F: Future<Output = Result<(), AnyError>>,
{
  // Convenience wrapper: restarts happen automatically on file change.
  watch_recv(flags, print_config, WatcherRestartMode::Automatic, operation)
    .boxed_local()
    .await
}
/// Controls how the watcher reacts when a watched path changes.
#[derive(Clone, Copy, Debug)]
pub enum WatcherRestartMode {
  /// When a file path changes the process is restarted.
  Automatic,
  /// When a file path changes the caller will trigger a restart, using
  /// `WatcherInterface.restart_tx`.
  Manual,
}
/// Creates a file watcher.
///
/// - `operation` is the actual operation we want to run every time the watcher detects file
///   changes. For example, in the case where we would like to bundle, then `operation` would
///   have the logic for it like bundling the code.
pub async fn watch_recv<O, F>(
  mut flags: Arc<Flags>,
  print_config: PrintConfig,
  restart_mode: WatcherRestartMode,
  mut operation: O,
) -> Result<(), AnyError>
where
  O: FnMut(
    Arc<Flags>,
    Arc<WatcherCommunicator>,
    Option<Vec<PathBuf>>,
  ) -> Result<F, AnyError>,
  F: Future<Output = Result<(), AnyError>>,
{
  // Cwd at startup, used when formatting JS errors from the operation.
  let initial_cwd = std::env::current_dir()
    .ok()
    .and_then(|path| deno_path_util::url_from_directory_path(&path).ok());
  let exclude_set = flags.resolve_watch_exclude_set()?;
  // Channel wiring between the OS watcher, the running operation, and this
  // control loop: path registrations, forced restarts, changed-path events.
  let (paths_to_watch_tx, mut paths_to_watch_rx) =
    tokio::sync::mpsc::unbounded_channel();
  let (restart_tx, mut restart_rx) = tokio::sync::mpsc::unbounded_channel();
  let (changed_paths_tx, changed_paths_rx) = tokio::sync::broadcast::channel(4);
  // Debounces bursts of raw filesystem events into batches.
  let (watcher_sender, mut watcher_receiver) =
    DebouncedReceiver::new_with_sender();
  let PrintConfig {
    banner,
    job_name,
    clear_screen,
    print_finished,
  } = print_config;
  let print_after_restart = create_print_after_restart_fn(clear_screen);
  let watcher_communicator =
    Arc::new(WatcherCommunicator::new(WatcherCommunicatorOptions {
      paths_to_watch_tx: paths_to_watch_tx.clone(),
      changed_paths_rx: changed_paths_rx.resubscribe(),
      changed_paths_tx,
      restart_tx: restart_tx.clone(),
      restart_mode,
      banner: colors::intense_blue(banner).to_string(),
    }));
  info!("{} {} started.", colors::intense_blue(banner), job_name);
  // Most recent batch of changed paths, handed to `operation` on restart.
  let changed_paths = Rc::new(RefCell::new(None));
  let changed_paths_ = changed_paths.clone();
  let watcher_ = watcher_communicator.clone();
  // Background task: forward debounced change batches to the communicator
  // and remember them for the next `operation` invocation.
  deno_core::unsync::spawn(async move {
    loop {
      let received_changed_paths = watcher_receiver.recv().await;
      changed_paths_
        .borrow_mut()
        .clone_from(&received_changed_paths);
      // TODO(bartlomieju): should we fail on sending changed paths?
      let _ = watcher_.send(received_changed_paths);
    }
  });
  loop {
    // We may need to give the runtime a tick to settle, as cancellations may need to propagate
    // to tasks. We choose yielding 10 times to the runtime as a decent heuristic. If watch tests
    // start to fail, this may need to be increased.
    for _ in 0..10 {
      tokio::task::yield_now().await;
    }
    // A fresh OS watcher each iteration; register any pending paths with it.
    let mut watcher = new_watcher(watcher_sender.clone())?;
    consume_paths_to_watch(&mut watcher, &mut paths_to_watch_rx, &exclude_set);
    // Keeps registering newly requested paths while the operation runs.
    let receiver_future = async {
      loop {
        let maybe_paths = paths_to_watch_rx.recv().await;
        add_paths_to_watcher(&mut watcher, &maybe_paths.unwrap(), &exclude_set);
      }
    };
    let operation_future = error_handler(
      operation(
        flags.clone(),
        watcher_communicator.clone(),
        changed_paths.borrow_mut().take(),
      )?,
      initial_cwd.as_ref(),
    );
    // don't reload dependencies after the first run
    if flags.reload {
      flags = Arc::new(Flags {
        reload: false,
        ..Arc::unwrap_or_clone(flags)
      });
    }
    select! {
      _ = receiver_future => {},
      _ = deno_signals::ctrl_c() => {
        return Ok(());
      },
      _ = restart_rx.recv() => {
        print_after_restart();
        continue;
      },
      success = operation_future => {
        consume_paths_to_watch(&mut watcher, &mut paths_to_watch_rx, &exclude_set);
        if print_finished {
          // TODO(bartlomieju): print exit code here?
          info!(
            "{} {} {}. Restarting on file change...",
            colors::intense_blue(banner),
            job_name,
            if success {
              "finished"
            } else {
              "failed"
            }
          );
        }
      },
    }
    // Re-create the registration future; the previous one was dropped by the
    // `select!` above.
    let receiver_future = async {
      loop {
        let maybe_paths = paths_to_watch_rx.recv().await;
        add_paths_to_watcher(&mut watcher, &maybe_paths.unwrap(), &exclude_set);
      }
    };
    // If we got this far, it means that the `operation` has finished; let's wait
    // and see if there are any new paths to watch received or any of the already
    // watched paths has changed.
    select! {
      _ = receiver_future => {},
      _ = deno_signals::ctrl_c() => {
        return Ok(());
      },
      _ = restart_rx.recv() => {
        print_after_restart();
        continue;
      },
    }
  }
}
/// Constructs a filesystem watcher that forwards the canonicalized paths of
/// create/modify/remove events through `sender`.
fn new_watcher(
  sender: Arc<mpsc::UnboundedSender<Vec<PathBuf>>>,
) -> Result<RecommendedWatcher, AnyError> {
  let event_handler = move |res: Result<NotifyEvent, NotifyError>| {
    let Ok(event) = res else {
      return;
    };
    let is_relevant = matches!(
      event.kind,
      EventKind::Create(_) | EventKind::Modify(_) | EventKind::Remove(_)
    );
    if !is_relevant {
      return;
    }
    // Canonicalize so downstream comparisons use consistent absolute paths;
    // paths that fail to canonicalize are dropped.
    let paths: Vec<PathBuf> = event
      .paths
      .iter()
      .filter_map(|path| canonicalize_path(path).ok())
      .collect();
    sender.send(paths).unwrap();
  };
  let watcher = Watcher::new(event_handler, Default::default())?;
  Ok(watcher)
}
/// Registers `paths` (recursively) with `watcher`, skipping any that match
/// the exclude set.
fn add_paths_to_watcher(
  watcher: &mut RecommendedWatcher,
  paths: &[PathBuf],
  paths_to_exclude: &PathOrPatternSet,
) {
  let watched_paths: Vec<PathBuf> = paths
    .iter()
    .filter(|path| !paths_to_exclude.matches_path(path.as_path()))
    .cloned()
    .collect();
  for path in &watched_paths {
    // Ignore any error e.g. `PathNotFound`
    let _ = watcher.watch(path, RecursiveMode::Recursive);
  }
  log::debug!("Watching paths: {:?}", watched_paths);
}
fn consume_paths_to_watch(
watcher: &mut RecommendedWatcher,
receiver: &mut UnboundedReceiver<Vec<PathBuf>>,
exclude_set: &PathOrPatternSet,
) {
loop {
match receiver.try_recv() {
Ok(paths) => {
add_paths_to_watcher(watcher, &paths, exclude_set);
}
Err(e) => match e {
mpsc::error::TryRecvError::Empty => {
break;
}
// there must be at least one receiver alive
_ => unreachable!(),
},
}
}
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/util/path.rs | cli/util/path.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::borrow::Cow;
use std::path::Path;
use deno_ast::MediaType;
use deno_ast::ModuleSpecifier;
use deno_config::glob::PathGlobMatch;
use deno_config::glob::PathOrPattern;
use deno_config::glob::PathOrPatternSet;
/// Checks if the path has an extension Deno supports for script execution.
pub fn is_script_ext(path: &Path) -> bool {
  path
    .extension()
    .and_then(|ext| ext.to_str())
    .map(|ext| {
      // Extension comparison is case-insensitive.
      matches!(
        ext.to_lowercase().as_str(),
        "ts" | "tsx" | "js" | "jsx" | "mjs" | "mts" | "cjs" | "cts"
      )
    })
    .unwrap_or(false)
}
/// Checks if the path has an extension Deno supports for importing.
pub fn is_importable_ext(path: &Path) -> bool {
  let Some(ext) = path.extension().and_then(|e| e.to_str()) else {
    return false;
  };
  // Importable files are the script extensions plus `json` and `wasm`;
  // comparison is case-insensitive.
  matches!(
    ext.to_lowercase().as_str(),
    "ts"
      | "tsx"
      | "js"
      | "jsx"
      | "mjs"
      | "mts"
      | "cjs"
      | "cts"
      | "json"
      | "wasm"
  )
}
/// Get the extension of a file in lowercase.
///
/// Returns `None` when the path has no extension or it is not valid UTF-8.
pub fn get_extension(file_path: &Path) -> Option<String> {
  let ext = file_path.extension()?;
  let ext = ext.to_str()?;
  Some(ext.to_lowercase())
}
/// TypeScript figures out the type of file based on the extension, but we take
/// other factors into account like the file headers. The hack here is to map the
/// specifier passed to TypeScript to a new specifier with the file extension.
///
/// Returns `None` when the extension already implies the correct media type.
pub fn mapped_specifier_for_tsc(
  specifier: &ModuleSpecifier,
  media_type: MediaType,
) -> Option<String> {
  let ext_media_type = MediaType::from_specifier(specifier);
  if media_type != ext_media_type {
    // we can't just add on the extension because typescript considers
    // all .d.*.ts files as declaration files in TS 5.0+
    // NOTE(review): the `!= MediaType::Dts` check is redundant given the
    // `== MediaType::TypeScript` check on the next line — confirm before
    // simplifying.
    if media_type != MediaType::Dts
      && media_type == MediaType::TypeScript
      && specifier
        .path()
        .split('/')
        .next_back()
        .map(|last| last.contains(".d."))
        .unwrap_or(false)
    {
      // Mask the `.d.` marker in the file name (e.g. `foo.d.css.ts` ->
      // `foo$d$css.ts`) so tsc doesn't treat the module as a declaration file.
      let mut path_parts = specifier
        .path()
        .split('/')
        .map(ToOwned::to_owned)
        .collect::<Vec<_>>();
      let last_part = path_parts.last_mut().unwrap();
      *last_part = last_part.replace(".d.", "$d$");
      let mut specifier = specifier.clone();
      specifier.set_path(&path_parts.join("/"));
      Some(format!("{}{}", specifier, media_type.as_ts_extension()))
    } else {
      Some(format!("{}{}", specifier, media_type.as_ts_extension()))
    }
  } else {
    None
  }
}
/// `from.make_relative(to)` but with fixes.
pub fn relative_specifier(
  from: &ModuleSpecifier,
  to: &ModuleSpecifier,
) -> Option<String> {
  // workaround for url crate not adding a trailing slash for a directory
  // it seems to be fixed once a version greater than 2.2.2 is released
  if to.path().ends_with('/') && from == to {
    return Some("./".to_string());
  }
  let relative = from.make_relative(to)?;
  // Ensure the result always reads as an explicit relative specifier.
  let prefixed = if relative.starts_with("../") || relative.starts_with("./") {
    relative
  } else {
    format!("./{relative}")
  };
  Some(to_percent_decoded_str(&prefixed))
}
/// Renders `to` relative to `from` for display purposes, falling back to the
/// absolute specifier when a relative form is impossible or deeply nested.
pub fn relative_specifier_path_for_display(
  from: &ModuleSpecifier,
  to: &ModuleSpecifier,
) -> String {
  if to.scheme() != "file" || from.scheme() != "file" {
    // Non-file specifiers are shown as-is (percent-decoded).
    return to_percent_decoded_str(to.as_str());
  }
  let relative = relative_specifier(from, to)
    .map(Cow::Owned)
    .unwrap_or_else(|| Cow::Borrowed(to.as_str()));
  // Deeply nested relative paths are harder to read than the absolute one.
  let display = if relative.starts_with("../../../") {
    to.as_str()
  } else {
    relative.trim_start_matches("./")
  };
  to_percent_decoded_str(display)
}
/// Slightly different behaviour than the default matching
/// where an exact path needs to be matched to be opted-in
/// rather than just a partial directory match.
///
/// This is used by the test and bench filtering.
pub fn matches_pattern_or_exact_path(
  path_or_pattern_set: &PathOrPatternSet,
  path: &Path,
) -> bool {
  // Iterate in reverse so that later entries take precedence; the first
  // conclusive match or negation wins.
  for p in path_or_pattern_set.inner().iter().rev() {
    match p {
      PathOrPattern::Path(p) => {
        // Plain paths only opt in on an exact match (no directory prefixes).
        if p == path {
          return true;
        }
      }
      PathOrPattern::NegatedPath(p) => {
        // Negations exclude the whole subtree.
        if path.starts_with(p) {
          return false;
        }
      }
      // Remote URLs can never match a local path.
      PathOrPattern::RemoteUrl(_) => {}
      PathOrPattern::Pattern(p) => match p.matches_path(path) {
        PathGlobMatch::Matched => return true,
        PathGlobMatch::MatchedNegated => return false,
        PathGlobMatch::NotMatched => {}
      },
    }
  }
  false
}
/// For decoding a percent-encoded string.
/// Could be used for module specifier string literals of local modules,
/// or local file paths, to display `non-ASCII` characters correctly.
/// # Examples
/// ```
/// use crate::util::path::to_percent_decoded_str;
///
/// let str = to_percent_decoded_str("file:///Users/path/to/%F0%9F%A6%95.ts");
/// assert_eq!(str, "file:///Users/path/to/🦕.ts");
/// ```
pub fn to_percent_decoded_str(s: &str) -> String {
  percent_encoding::percent_decode_str(s)
    .decode_utf8()
    // when decoding fails, fall back to the original string
    .map_or_else(|_| s.to_string(), |decoded| decoded.to_string())
}
#[cfg(test)]
mod test {
  use super::*;

  #[test]
  fn test_is_script_ext() {
    assert!(!is_script_ext(Path::new("tests/subdir/redirects")));
    assert!(!is_script_ext(Path::new("README.md")));
    assert!(is_script_ext(Path::new("lib/typescript.d.ts")));
    assert!(is_script_ext(Path::new("testdata/run/001_hello.js")));
    assert!(is_script_ext(Path::new("testdata/run/002_hello.ts")));
    assert!(is_script_ext(Path::new("foo.jsx")));
    assert!(is_script_ext(Path::new("foo.tsx")));
    // Extension matching is case-insensitive.
    assert!(is_script_ext(Path::new("foo.TS")));
    assert!(is_script_ext(Path::new("foo.TSX")));
    assert!(is_script_ext(Path::new("foo.JS")));
    assert!(is_script_ext(Path::new("foo.JSX")));
    assert!(is_script_ext(Path::new("foo.mjs")));
    assert!(is_script_ext(Path::new("foo.mts")));
    assert!(is_script_ext(Path::new("foo.cjs")));
    assert!(is_script_ext(Path::new("foo.cts")));
    assert!(!is_script_ext(Path::new("foo.json")));
    assert!(!is_script_ext(Path::new("foo.wasm")));
    assert!(!is_script_ext(Path::new("foo.mjsx")));
  }

  #[test]
  fn test_is_importable_ext() {
    assert!(!is_importable_ext(Path::new("tests/subdir/redirects")));
    assert!(!is_importable_ext(Path::new("README.md")));
    assert!(is_importable_ext(Path::new("lib/typescript.d.ts")));
    assert!(is_importable_ext(Path::new("testdata/run/001_hello.js")));
    assert!(is_importable_ext(Path::new("testdata/run/002_hello.ts")));
    assert!(is_importable_ext(Path::new("foo.jsx")));
    assert!(is_importable_ext(Path::new("foo.tsx")));
    assert!(is_importable_ext(Path::new("foo.TS")));
    assert!(is_importable_ext(Path::new("foo.TSX")));
    assert!(is_importable_ext(Path::new("foo.JS")));
    assert!(is_importable_ext(Path::new("foo.JSX")));
    assert!(is_importable_ext(Path::new("foo.mjs")));
    assert!(is_importable_ext(Path::new("foo.mts")));
    assert!(is_importable_ext(Path::new("foo.cjs")));
    assert!(is_importable_ext(Path::new("foo.cts")));
    // Unlike script extensions, `json` and `wasm` are importable.
    assert!(is_importable_ext(Path::new("foo.json")));
    assert!(is_importable_ext(Path::new("foo.wasm")));
    assert!(!is_importable_ext(Path::new("foo.mjsx")));
  }

  #[test]
  fn test_relative_specifier() {
    let fixtures: Vec<(&str, &str, Option<&str>)> = vec![
      ("file:///from", "file:///to", Some("./to")),
      ("file:///from", "file:///from/other", Some("./from/other")),
      ("file:///from", "file:///from/other/", Some("./from/other/")),
      ("file:///from", "file:///other/from", Some("./other/from")),
      ("file:///from/", "file:///other/from", Some("../other/from")),
      ("file:///from", "file:///other/from/", Some("./other/from/")),
      (
        "file:///from",
        "file:///to/other.txt",
        Some("./to/other.txt"),
      ),
      (
        "file:///from/test",
        "file:///to/other.txt",
        Some("../to/other.txt"),
      ),
      (
        "file:///from/other.txt",
        "file:///to/other.txt",
        Some("../to/other.txt"),
      ),
      (
        "https://deno.land/x/a/b/d.ts",
        "https://deno.land/x/a/b/c.ts",
        Some("./c.ts"),
      ),
      (
        "https://deno.land/x/a/b/d.ts",
        "https://deno.land/x/a/c.ts",
        Some("../c.ts"),
      ),
      (
        "https://deno.land/x/a/b/d.ts",
        "https://deno.land/x/a/b/c/d.ts",
        Some("./c/d.ts"),
      ),
      (
        "https://deno.land/x/a/b/c/",
        "https://deno.land/x/a/b/c/d.ts",
        Some("./d.ts"),
      ),
      (
        "https://deno.land/x/a/b/c/",
        "https://deno.land/x/a/b/c/d/e.ts",
        Some("./d/e.ts"),
      ),
      (
        "https://deno.land/x/a/b/c/f.ts",
        "https://deno.land/x/a/b/c/d/e.ts",
        Some("./d/e.ts"),
      ),
      (
        "https://deno.land/x/a/b/d.ts",
        "https://deno.land/x/a/c.ts?foo=bar",
        Some("../c.ts?foo=bar"),
      ),
      (
        "https://deno.land/x/a/b/d.ts?foo=bar",
        "https://deno.land/x/a/b/c.ts",
        Some("./c.ts"),
      ),
      ("file:///a/b/d.ts", "file:///a/b/c.ts", Some("./c.ts")),
      // Cross-scheme specifiers have no relative form.
      ("https://deno.land/x/a/b/c.ts", "file:///a/b/c.ts", None),
      (
        "https://deno.land/",
        "https://deno.land/x/a/b/c.ts",
        Some("./x/a/b/c.ts"),
      ),
      (
        "https://deno.land/x/d/e/f.ts",
        "https://deno.land/x/a/b/c.ts",
        Some("../../a/b/c.ts"),
      ),
    ];
    for (from_str, to_str, expected) in fixtures {
      let from = ModuleSpecifier::parse(from_str).unwrap();
      let to = ModuleSpecifier::parse(to_str).unwrap();
      let actual = relative_specifier(&from, &to);
      assert_eq!(
        actual.as_deref(),
        expected,
        "from: \"{from_str}\" to: \"{to_str}\""
      );
    }
  }

  #[test]
  fn test_to_percent_decoded_str() {
    // `%F0%9F%A6%95` is the percent-encoded UTF-8 for U+1F995 (🦕); the
    // expected literal below was previously corrupted by a bad encoding
    // round-trip, which would have made this assertion fail.
    let str = to_percent_decoded_str("%F0%9F%A6%95");
    assert_eq!(str, "🦕");
  }
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/util/archive.rs | cli/util/archive.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::fs;
use std::path::Path;
use std::path::PathBuf;
use std::process::Command;
use deno_core::anyhow::Context;
use deno_core::anyhow::bail;
use deno_core::error::AnyError;
/// Writes `archive_data` to `archive_path` and extracts it into `dest_path`
/// by shelling out: `tar.exe` on Windows, `unzip` elsewhere.
///
/// # Errors
/// Fails when the archive can't be written, the extraction tool is missing
/// or can't be spawned, or the extraction exits unsuccessfully.
fn unzip_with_shell(
  archive_path: &Path,
  archive_data: &[u8],
  dest_path: &Path,
) -> Result<(), AnyError> {
  fs::write(archive_path, archive_data)?;
  // Replaces a NotFound spawn error with a friendlier message.
  let not_found = |err: std::io::Error, msg: &str| {
    if err.kind() == std::io::ErrorKind::NotFound {
      std::io::Error::new(std::io::ErrorKind::NotFound, msg.to_string())
    } else {
      err
    }
  };
  let mut child = if cfg!(windows) {
    Command::new("tar.exe")
      .arg("xf")
      .arg(archive_path)
      .arg("-C")
      .arg(dest_path)
      .spawn()
      .map_err(|err| not_found(err, "`tar.exe` was not found in your PATH"))?
  } else {
    Command::new("unzip")
      .current_dir(dest_path)
      .arg(archive_path)
      .spawn()
      .map_err(|err| {
        not_found(
          err,
          "`unzip` was not found in your PATH, please install `unzip`",
        )
      })?
  };
  let unpack_status = child.wait()?;
  if !unpack_status.success() {
    bail!("Failed to unpack archive.");
  }
  Ok(())
}
/// Extracts the in-memory zip archive `archive_data` into `dest_path` using
/// the `zip` crate; `archive_name` is only used for error context.
fn unzip(
  archive_name: &str,
  archive_data: &[u8],
  dest_path: &Path,
) -> Result<(), AnyError> {
  let cursor = std::io::Cursor::new(archive_data);
  zip::ZipArchive::new(cursor)?
    .extract(dest_path)
    .with_context(|| format!("failed to extract archive: {archive_name}"))?;
  Ok(())
}
/// Arguments for `unpack_into_dir`.
pub struct UnpackArgs<'a> {
  /// Base name of the executable to extract (no extension).
  pub exe_name: &'a str,
  /// File name of the archive; its extension selects the unpack strategy.
  pub archive_name: &'a str,
  /// Raw bytes of the archive.
  pub archive_data: &'a [u8],
  /// Whether the target executable uses a Windows `.exe` extension.
  pub is_windows: bool,
  /// Directory the archive is unpacked into.
  pub dest_path: &'a Path,
}
pub fn unpack_into_dir(args: UnpackArgs) -> Result<PathBuf, AnyError> {
let UnpackArgs {
exe_name,
archive_name,
archive_data,
is_windows,
dest_path,
} = args;
let exe_ext = if is_windows { "exe" } else { "" };
let archive_path = dest_path.join(exe_name).with_extension("zip");
let exe_path = dest_path.join(exe_name).with_extension(exe_ext);
assert!(!exe_path.exists());
let archive_ext = Path::new(archive_name)
.extension()
.and_then(|ext| ext.to_str())
.unwrap();
match archive_ext {
"zip" => match unzip(archive_name, archive_data, dest_path) {
Ok(()) if !exe_path.exists() => {
log::warn!("unpacking via the zip crate didn't produce the executable");
// No error but didn't produce exe, fallback to shelling out
unzip_with_shell(&archive_path, archive_data, dest_path)?;
}
Ok(_) => {}
Err(e) => {
log::warn!("unpacking via zip crate failed: {e}");
// Fallback to shelling out
unzip_with_shell(&archive_path, archive_data, dest_path)?;
}
},
ext => bail!("Unsupported archive type: '{ext}'"),
}
assert!(exe_path.exists());
Ok(exe_path)
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/util/fs.rs | cli/util/fs.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::io::Error;
use std::io::ErrorKind;
use std::path::Path;
use std::path::PathBuf;
use deno_config::glob::FileCollector;
use deno_config::glob::FilePatterns;
use deno_config::glob::PathOrPattern;
use deno_config::glob::PathOrPatternSet;
use deno_config::glob::WalkEntry;
use deno_core::ModuleSpecifier;
use deno_core::anyhow::Context;
use deno_core::anyhow::anyhow;
use deno_core::error::AnyError;
use super::progress_bar::UpdateGuard;
use crate::sys::CliSys;
/// Creates a std::fs::File handling if the parent does not exist.
pub fn create_file(file_path: &Path) -> std::io::Result<std::fs::File> {
  match std::fs::File::create(file_path) {
    Ok(file) => Ok(file),
    Err(err) => {
      if err.kind() == ErrorKind::NotFound {
        // The parent directory may be missing; create it and retry once.
        let parent_dir_path = file_path.parent().unwrap();
        match std::fs::create_dir_all(parent_dir_path) {
          Ok(()) => {
            return std::fs::File::create(file_path)
              .map_err(|err| add_file_context_to_err(file_path, err));
          }
          Err(create_err) => {
            if !parent_dir_path.exists() {
              // Creating the parent failed and it still doesn't exist;
              // surface the directory error with a permission hint.
              return Err(Error::new(
                create_err.kind(),
                format!(
                  "{:#} (for '{}')\nCheck the permission of the directory.",
                  create_err,
                  parent_dir_path.display()
                ),
              ));
            }
            // Parent exists despite the failure (e.g. created concurrently);
            // fall through and report the original error with file context.
          }
        }
      }
      Err(add_file_context_to_err(file_path, err))
    }
  }
}
/// Wraps `err` so its message mentions the file path it relates to, while
/// preserving the original error kind.
fn add_file_context_to_err(file_path: &Path, err: Error) -> Error {
  let message = format!("{:#} (for '{}')", err, file_path.display());
  Error::new(err.kind(), message)
}
/// Similar to `std::fs::canonicalize()` but strips UNC prefixes on Windows.
pub fn canonicalize_path(path: &Path) -> Result<PathBuf, Error> {
  let canonical = path.canonicalize()?;
  Ok(deno_path_util::strip_unc_prefix(canonical))
}
/// Canonicalizes a path which might be non-existent by going up the
/// ancestors until it finds a directory that exists, canonicalizes
/// that path, then adds back the remaining path components.
///
/// Note: When using this, you should be aware that a symlink may
/// subsequently be created along this path by some other code.
pub fn canonicalize_path_maybe_not_exists(
  path: &Path,
) -> Result<PathBuf, Error> {
  // Thin wrapper delegating to the shared implementation with the CLI's
  // default filesystem.
  deno_path_util::fs::canonicalize_path_maybe_not_exists(
    &CliSys::default(),
    path,
  )
}
/// Options controlling which files `collect_specifiers` gathers.
pub struct CollectSpecifiersOptions {
  /// Include/exclude patterns describing the files to collect.
  pub file_patterns: FilePatterns,
  /// Vendor folder to skip while walking, if any.
  pub vendor_folder: Option<PathBuf>,
  /// Whether to include paths that are specified even if they're ignored.
  pub include_ignored_specified: bool,
}
/// Collects module specifiers that satisfy the given predicate as a file path, by recursively walking `include`.
/// Specifiers that start with http and https are left intact.
/// Note: This ignores all .git and node_modules folders.
pub fn collect_specifiers(
  options: CollectSpecifiersOptions,
  predicate: impl Fn(WalkEntry) -> bool,
) -> Result<Vec<ModuleSpecifier>, AnyError> {
  let CollectSpecifiersOptions {
    mut file_patterns,
    vendor_folder,
    include_ignored_specified: always_include_specified,
  } = options;
  // Remote URLs and explicitly specified files, emitted ahead of walked
  // files in their original order.
  let mut prepared = vec![];
  // break out the remote specifiers and explicitly specified paths
  if let Some(include_mut) = &mut file_patterns.include {
    let includes = std::mem::take(include_mut);
    let path_or_patterns = includes.into_path_or_patterns();
    let mut result = Vec::with_capacity(path_or_patterns.len());
    for path_or_pattern in path_or_patterns {
      match path_or_pattern {
        PathOrPattern::Path(path) => {
          if path.is_dir() {
            // Directories stay in the include set and get walked below.
            result.push(PathOrPattern::Path(path));
          } else if always_include_specified
            || !file_patterns.exclude.matches_path(&path)
          {
            // Explicitly specified files bypass the walk (and, when
            // requested, the exclude set).
            let url = specifier_from_file_path(&path)?;
            prepared.push(url);
          }
        }
        PathOrPattern::NegatedPath(path) => {
          // add it back
          result.push(PathOrPattern::NegatedPath(path));
        }
        PathOrPattern::RemoteUrl(remote_url) => {
          // Remote specifiers are passed through untouched.
          prepared.push(remote_url);
        }
        PathOrPattern::Pattern(pattern) => {
          // add it back
          result.push(PathOrPattern::Pattern(pattern));
        }
      }
    }
    *include_mut = PathOrPatternSet::new(result);
  }
  let collected_files = FileCollector::new(predicate)
    .ignore_git_folder()
    .ignore_node_modules()
    .set_vendor_folder(vendor_folder)
    .collect_file_patterns(&CliSys::default(), &file_patterns);
  // Walked files are sorted for deterministic output; explicitly specified
  // entries keep their original order in front.
  let mut collected_files_as_urls = collected_files
    .iter()
    .map(|f| specifier_from_file_path(f).unwrap())
    .collect::<Vec<ModuleSpecifier>>();
  collected_files_as_urls.sort();
  prepared.extend(collected_files_as_urls);
  Ok(prepared)
}
/// Asynchronously removes a directory and all its descendants, treating a
/// missing directory as success rather than an error.
pub async fn remove_dir_all_if_exists(path: &Path) -> std::io::Result<()> {
  match tokio::fs::remove_dir_all(path).await {
    Err(err) if err.kind() == std::io::ErrorKind::NotFound => Ok(()),
    other => other,
  }
}
/// Gets the total size (in bytes) of a directory by summing the lengths of
/// every file found while recursing into subdirectories.
pub fn dir_size(path: &Path) -> std::io::Result<u64> {
  let mut total = 0u64;
  for entry in std::fs::read_dir(path)? {
    let entry = entry?;
    let metadata = entry.metadata()?;
    if metadata.is_dir() {
      // Directories contribute the size of their contents, not their own
      // metadata length.
      total += dir_size(&entry.path())?;
    } else {
      total += metadata.len();
    }
  }
  Ok(total)
}
/// Converts a filesystem path into a `file:` URL specifier, erroring when the
/// path cannot be represented as a URL.
pub fn specifier_from_file_path(
  path: &Path,
) -> Result<ModuleSpecifier, AnyError> {
  match ModuleSpecifier::from_file_path(path) {
    Ok(specifier) => Ok(specifier),
    Err(()) => Err(anyhow!("Invalid file path '{}'", path.display())),
  }
}
/// Removes files and directories while accumulating statistics about what was
/// deleted, optionally reporting progress through an `UpdateGuard`.
#[derive(Default)]
pub struct FsCleaner {
  /// Number of files removed so far.
  pub files_removed: u64,
  /// Number of directories removed so far.
  pub dirs_removed: u64,
  /// Total size in bytes of removed files (when metadata was available).
  pub bytes_removed: u64,
  /// Progress bar handle updated as items are removed.
  pub progress_guard: Option<UpdateGuard>,
}
impl FsCleaner {
  /// Creates a cleaner with zeroed counters, optionally reporting progress
  /// through `progress_guard`.
  pub fn new(progress_guard: Option<UpdateGuard>) -> Self {
    Self {
      files_removed: 0,
      dirs_removed: 0,
      bytes_removed: 0,
      progress_guard,
    }
  }

  /// Recursively removes `path`, deepest entries first, tallying every file
  /// and directory that gets deleted.
  pub fn rm_rf(&mut self, path: &Path) -> Result<(), AnyError> {
    // `contents_first` yields children before their parent directory, so
    // files are counted individually before each (by-then empty) directory
    // is removed.
    for entry in walkdir::WalkDir::new(path).contents_first(true) {
      let entry = entry?;
      if entry.file_type().is_dir() {
        self.dirs_removed += 1;
        self.update_progress();
        std::fs::remove_dir_all(entry.path())?;
      } else {
        self.remove_file(entry.path(), entry.metadata().ok())?;
      }
    }
    Ok(())
  }

  /// Removes a single file (or, on Windows, a directory symlink), updating
  /// the counters.
  ///
  /// Note: counters are incremented before the removal is attempted, so they
  /// may overcount when a removal fails.
  pub fn remove_file(
    &mut self,
    path: &Path,
    meta: Option<std::fs::Metadata>,
  ) -> Result<(), AnyError> {
    if let Some(meta) = meta {
      self.bytes_removed += meta.len();
    }
    self.files_removed += 1;
    self.update_progress();
    match std::fs::remove_file(path)
      .with_context(|| format!("Failed to remove file: {}", path.display()))
    {
      Err(e) => {
        // On Windows, directory symlinks can't be removed with
        // `remove_file`; retry with `remove_dir` before giving up.
        if cfg!(windows)
          && let Ok(meta) = path.symlink_metadata()
          && meta.is_symlink()
        {
          std::fs::remove_dir(path).with_context(|| {
            format!("Failed to remove symlink: {}", path.display())
          })?;
          return Ok(());
        }
        Err(e)
      }
      _ => Ok(()),
    }
  }

  /// Pushes the current removal count to the progress bar, if any.
  fn update_progress(&self) {
    if let Some(pg) = &self.progress_guard {
      pg.set_position(self.files_removed + self.dirs_removed);
    }
  }
}
#[cfg(test)]
mod tests {
  use pretty_assertions::assert_eq;
  use test_util::PathRef;
  use test_util::TempDir;

  use super::*;

  #[test]
  fn test_collect_specifiers() {
    // Creates each of `files` (empty) inside `dir_path`.
    fn create_files(dir_path: &PathRef, files: &[&str]) {
      dir_path.create_dir_all();
      for f in files {
        dir_path.join(f).write("");
      }
    }

    // Fixture layout (tree characters repaired from an encoding-corrupted
    // comment):
    // dir.ts
    // ├── a.ts
    // ├── b.js
    // ├── child
    // │   ├── e.mjs
    // │   ├── f.mjsx
    // │   ├── .foo.TS
    // │   └── README.md
    // ├── c.tsx
    // ├── d.jsx
    // └── ignore
    //     ├── g.d.ts
    //     └── .gitignore
    let t = TempDir::new();
    let root_dir_path = t.path().join("dir.ts");
    let root_dir_files = ["a.ts", "b.js", "c.tsx", "d.jsx"];
    create_files(&root_dir_path, &root_dir_files);
    let child_dir_path = root_dir_path.join("child");
    let child_dir_files = ["e.mjs", "f.mjsx", ".foo.TS", "README.md"];
    create_files(&child_dir_path, &child_dir_files);
    let ignore_dir_path = root_dir_path.join("ignore");
    let ignore_dir_files = ["g.d.ts", ".gitignore"];
    create_files(&ignore_dir_path, &ignore_dir_files);

    let predicate = |e: WalkEntry| {
      // exclude dotfiles
      e.path
        .file_name()
        .and_then(|f| f.to_str())
        .map(|f| !f.starts_with('.'))
        .unwrap_or(false)
    };

    // Remote URLs are passed through, the `ignore` dir is excluded, and
    // dotfiles are filtered by the predicate.
    let result = collect_specifiers(
      CollectSpecifiersOptions {
        file_patterns: FilePatterns {
          base: root_dir_path.to_path_buf(),
          include: Some(
            PathOrPatternSet::from_include_relative_path_or_patterns(
              root_dir_path.as_path(),
              &[
                "http://localhost:8080".to_string(),
                "./".to_string(),
                "https://localhost:8080".to_string(),
              ],
            )
            .unwrap(),
          ),
          exclude: PathOrPatternSet::new(vec![PathOrPattern::Path(
            ignore_dir_path.to_path_buf(),
          )]),
        },
        vendor_folder: None,
        include_ignored_specified: false,
      },
      predicate,
    )
    .unwrap();

    let root_dir_url = ModuleSpecifier::from_file_path(&root_dir_path)
      .unwrap()
      .to_string();
    let expected = vec![
      "http://localhost:8080/".to_string(),
      "https://localhost:8080/".to_string(),
      format!("{root_dir_url}/a.ts"),
      format!("{root_dir_url}/b.js"),
      format!("{root_dir_url}/c.tsx"),
      format!("{root_dir_url}/child/README.md"),
      format!("{root_dir_url}/child/e.mjs"),
      format!("{root_dir_url}/child/f.mjsx"),
      format!("{root_dir_url}/d.jsx"),
    ];

    assert_eq!(
      result
        .into_iter()
        .map(|s| s.to_string())
        .collect::<Vec<_>>(),
      expected
    );

    // An include expressed as a `file:` URL should behave like a path.
    let scheme = if cfg!(target_os = "windows") {
      "file:///"
    } else {
      "file://"
    };
    let result = collect_specifiers(
      CollectSpecifiersOptions {
        file_patterns: FilePatterns {
          base: root_dir_path.to_path_buf(),
          include: Some(PathOrPatternSet::new(vec![
            PathOrPattern::new(&format!(
              "{}{}",
              scheme,
              root_dir_path.join("child").to_string().replace('\\', "/")
            ))
            .unwrap(),
          ])),
          exclude: Default::default(),
        },
        vendor_folder: None,
        include_ignored_specified: false,
      },
      predicate,
    )
    .unwrap();

    let expected = vec![
      format!("{root_dir_url}/child/README.md"),
      format!("{root_dir_url}/child/e.mjs"),
      format!("{root_dir_url}/child/f.mjsx"),
    ];

    assert_eq!(
      result
        .into_iter()
        .map(|s| s.to_string())
        .collect::<Vec<_>>(),
      expected
    );
  }
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/util/watch_env_tracker.rs | cli/util/watch_env_tracker.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::collections::HashMap;
use std::collections::HashSet;
use std::env;
use std::ffi::OsString;
use std::path::Path;
use std::path::PathBuf;
use std::sync::Arc;
use std::sync::Mutex;
use std::sync::OnceLock;
use deno_terminal::colors;
/// Mutable state shared behind the `WatchEnvTracker` handle: which variables
/// were loaded from env files, plus a snapshot of the process environment
/// taken before any env file was applied.
#[derive(Debug, Clone)]
struct WatchEnvTrackerInner {
  // Track all loaded variables and their values
  loaded_variables: HashSet<OsString>,
  // Track variables that are no longer present in any loaded file
  unused_variables: HashSet<OsString>,
  // Track original env vars that existed before we started
  original_env: HashMap<OsString, OsString>,
}

impl WatchEnvTrackerInner {
  fn new() -> Self {
    Self {
      loaded_variables: HashSet::new(),
      unused_variables: HashSet::new(),
      // Capture the original environment state
      original_env: env::vars_os().collect(),
    }
  }
}
/// Thread-safe, cloneable handle around the shared `WatchEnvTrackerInner`
/// state; clones observe the same tracking data.
#[derive(Debug, Clone)]
pub struct WatchEnvTracker {
  // Shared so clones see the same state; the mutex serializes updates.
  inner: Arc<Mutex<WatchEnvTrackerInner>>,
}
// Global singleton instance
static WATCH_ENV_TRACKER: OnceLock<WatchEnvTracker> = OnceLock::new();
impl WatchEnvTracker {
/// Get the global singleton instance
pub fn snapshot() -> &'static WatchEnvTracker {
WATCH_ENV_TRACKER.get_or_init(|| WatchEnvTracker {
inner: Arc::new(Mutex::new(WatchEnvTrackerInner::new())),
})
}
  // Consolidated error handling function
  /// Prints a human-readable warning describing why `file_path` could not be
  /// loaded as an env file. Suppressed when `log_level` is below `Info`
  /// (absent log level means "print").
  fn handle_dotenvy_error(
    error: dotenvy::Error,
    file_path: &Path,
    log_level: Option<log::Level>,
  ) {
    #[allow(clippy::print_stderr)]
    if log_level.map(|l| l >= log::Level::Info).unwrap_or(true) {
      match error {
        // A malformed line: report the offending value and parse position.
        dotenvy::Error::LineParse(line, index) => eprintln!(
          "{} Parsing failed within the specified environment file: {} at index: {} of the value: {}",
          colors::yellow("Warning"),
          file_path.display(),
          index,
          line
        ),
        dotenvy::Error::Io(_) => eprintln!(
          "{} The `--env-file` flag was used, but the environment file specified '{}' was not found.",
          colors::yellow("Warning"),
          file_path.display()
        ),
        dotenvy::Error::EnvVar(_) => eprintln!(
          "{} One or more of the environment variables isn't present or not unicode within the specified environment file: {}",
          colors::yellow("Warning"),
          file_path.display()
        ),
        // Catch-all for any other dotenvy failure.
        _ => eprintln!(
          "{} Unknown failure occurred with the specified environment file: {}",
          colors::yellow("Warning"),
          file_path.display()
        ),
      }
    }
  }
// Internal method that accepts an already-acquired lock to avoid deadlocks
fn load_env_file_inner(
&self,
file_path: PathBuf,
log_level: Option<log::Level>,
inner: &mut WatchEnvTrackerInner,
) {
// Check if file exists
if !file_path.exists() {
// Only show warning if logging is enabled
#[allow(clippy::print_stderr)]
if log_level.map(|l| l >= log::Level::Info).unwrap_or(true) {
eprintln!(
"{} The environment file specified '{}' was not found.",
colors::yellow("Warning"),
file_path.display()
);
}
return;
}
match dotenvy::from_path_iter(&file_path) {
Ok(iter) => {
for item in iter {
match item {
Ok((key, value)) => {
// Convert to OsString for consistency
let key_os = OsString::from(key);
let value_os = OsString::from(value);
// Check if this variable is already loaded from a previous file
if inner.loaded_variables.contains(&key_os) {
// Variable already exists from a previous file, skip it
#[allow(clippy::print_stderr)]
if log_level.map(|l| l >= log::Level::Debug).unwrap_or(false) {
eprintln!(
"{} Variable '{}' already loaded from '{}', skipping value from '{}'",
colors::yellow("Debug"),
key_os.to_string_lossy(),
inner
.loaded_variables
.get(&key_os)
.map(|k| k.to_string_lossy().to_string())
.unwrap_or_else(|| "unknown".to_string()),
file_path.display()
);
}
continue;
}
// Set the environment variable
// SAFETY: We're setting environment variables with valid UTF-8 strings
// from the .env file. Both key and value are guaranteed to be valid strings.
unsafe {
env::set_var(&key_os, &value_os);
}
// Track this variable
inner.loaded_variables.insert(key_os.clone());
inner.unused_variables.remove(&key_os);
}
Err(e) => {
Self::handle_dotenvy_error(e, &file_path, log_level);
}
}
}
}
Err(e) =>
{
#[allow(clippy::print_stderr)]
if log_level.map(|l| l >= log::Level::Info).unwrap_or(true) {
eprintln!(
"{} Failed to read {}: {}",
colors::yellow("Warning"),
file_path.display(),
e
);
}
}
}
}
/// Clean up variables that are no longer present in any loaded file
fn _cleanup_removed_variables(
&self,
inner: &mut WatchEnvTrackerInner,
log_level: Option<log::Level>,
) {
for var_name in inner.unused_variables.iter() {
if !inner.original_env.contains_key(var_name) {
// SAFETY: We're removing an environment variable that we previously set
unsafe {
env::remove_var(var_name);
}
#[allow(clippy::print_stderr)]
if log_level.map(|l| l >= log::Level::Debug).unwrap_or(false) {
eprintln!(
"{} Variable '{}' removed from environment as it's no longer present in any loaded file",
colors::yellow("Debug"),
var_name.to_string_lossy()
);
}
} else {
let original_value = inner.original_env.get(var_name).unwrap();
// SAFETY: We're setting an environment variable to a value we control
unsafe {
env::set_var(var_name, original_value);
}
#[allow(clippy::print_stderr)]
if log_level.map(|l| l >= log::Level::Debug).unwrap_or(false) {
eprintln!(
"{} Variable '{}' restored to original value as it's no longer present in any loaded file",
colors::yellow("Debug"),
var_name.to_string_lossy()
);
}
}
}
}
// Load multiple env files in reverse order (later files take precedence over earlier ones)
pub fn load_env_variables_from_env_files(
&self,
file_paths: Option<&Vec<PathBuf>>,
log_level: Option<log::Level>,
) {
let Some(env_file_names) = file_paths else {
return;
};
let mut inner = self.inner.lock().unwrap();
inner.unused_variables = std::mem::take(&mut inner.loaded_variables);
inner.loaded_variables = HashSet::new();
for env_file_name in env_file_names.iter().rev() {
self.load_env_file_inner(
env_file_name.to_path_buf(),
log_level,
&mut inner,
);
}
self._cleanup_removed_variables(&mut inner, log_level);
}
}
pub fn load_env_variables_from_env_files(
filename: Option<&Vec<PathBuf>>,
flags_log_level: Option<log::Level>,
) {
let Some(env_file_names) = filename else {
return;
};
for env_file_name in env_file_names.iter().rev() {
match dotenvy::from_filename(env_file_name) {
Ok(_) => (),
Err(error) => {
WatchEnvTracker::handle_dotenvy_error(
error,
env_file_name,
flags_log_level,
);
}
}
}
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/util/v8.rs | cli/util/v8.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use deno_lib::util::v8::construct_v8_flags;
pub mod convert;
/// Reads extra V8 flags from the `DENO_V8_FLAGS` environment variable.
///
/// The variable is a comma-separated list; an unset (or non-unicode)
/// variable yields an empty vector.
#[inline(always)]
pub fn get_v8_flags_from_env() -> Vec<String> {
  match std::env::var("DENO_V8_FLAGS") {
    Ok(flags) => flags.split(',').map(|s| s.to_string()).collect(),
    Err(_) => Vec::new(),
  }
}
/// Initializes V8 with the combined default, CLI, and environment flags.
///
/// If any flag is unrecognized by V8, the errors are logged and the
/// process exits with code 1; if `--help`/`-help` was requested, V8's help
/// output has been printed by `v8_set_flags` and the process exits with 0.
pub fn init_v8_flags(
  default_v8_flags: &[String],
  v8_flags: &[String],
  env_v8_flags: Vec<String>,
) {
  // Fast path: nothing to configure when no flags come from any source.
  let no_flags = default_v8_flags.is_empty()
    && v8_flags.is_empty()
    && env_v8_flags.is_empty();
  if no_flags {
    return;
  }
  // Detect a help request before the flag vectors are consumed below.
  let help_requested = env_v8_flags
    .iter()
    .chain(v8_flags)
    .any(|flag| matches!(flag.as_str(), "-help" | "--help"));
  // Keep in sync with `standalone.rs`.
  let combined_flags =
    construct_v8_flags(default_v8_flags, v8_flags, env_v8_flags);
  // `v8_set_flags` returns the args it did not consume; the first entry is
  // the program name, so skip it.
  let unrecognized: Vec<String> = deno_core::v8_set_flags(combined_flags)
    .into_iter()
    .skip(1)
    .collect();
  if !unrecognized.is_empty() {
    for f in unrecognized {
      log::error!("error: V8 did not recognize flag '{f}'");
    }
    log::error!("\nFor a list of V8 flags, use '--v8-flags=--help'");
    deno_runtime::exit(1);
  }
  if help_requested {
    deno_runtime::exit(0);
  }
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.