repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1 value | license stringclasses 7 values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2 classes |
|---|---|---|---|---|---|---|---|---|
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/tools/test/channel.rs | cli/tools/test/channel.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::fmt::Display;
use std::future::Future;
use std::future::poll_fn;
use std::io::Write;
use std::pin::Pin;
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering;
use std::task::Poll;
use std::task::ready;
use std::time::Duration;
use deno_core::parking_lot;
use deno_core::parking_lot::lock_api::RawMutex;
use deno_core::parking_lot::lock_api::RawMutexTimed;
use deno_runtime::deno_io::AsyncPipeRead;
use deno_runtime::deno_io::PipeRead;
use deno_runtime::deno_io::PipeWrite;
use deno_runtime::deno_io::pipe;
use tokio::io::AsyncRead;
use tokio::io::AsyncReadExt;
use tokio::io::ReadBuf;
use tokio::sync::mpsc::UnboundedReceiver;
use tokio::sync::mpsc::UnboundedSender;
use tokio::sync::mpsc::WeakUnboundedSender;
use tokio::sync::mpsc::error::SendError;
use super::TestEvent;
/// 8-byte sync marker that is unlikely to appear in normal output. Equivalent
/// to the string `"\u{200B}\0\u{200B}\0"` (two zero-width spaces, each
/// followed by a NUL byte).
const SYNC_MARKER: &[u8; 8] = &[226, 128, 139, 0, 226, 128, 139, 0];
/// The first half of [`SYNC_MARKER`]; used to detect a marker that may have
/// been split across two pipe reads.
const HALF_SYNC_MARKER: &[u8; 4] = &[226, 128, 139, 0];
/// Read-buffer size used when draining the stdout/stderr pipes.
const BUFFER_SIZE: usize = 4096;
/// The test channel has been closed and cannot be used to send further messages.
#[derive(Debug, Copy, Clone, Eq, PartialEq, deno_error::JsError)]
#[class(generic)]
pub struct ChannelClosedError;

// A tokio send failure always means the receiving side is gone, which is
// exactly what this error represents.
impl<T> From<SendError<T>> for ChannelClosedError {
  fn from(_: SendError<T>) -> Self {
    ChannelClosedError
  }
}

impl Display for ChannelClosedError {
  fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
    write!(f, "Test channel closed")
  }
}

impl std::error::Error for ChannelClosedError {}
/// Wrapper allowing a raw pointer to a locked [`parking_lot::RawMutex`] to be
/// sent to the drain thread. Dropping it unlocks the mutex, which signals the
/// sender (blocked re-acquiring the same mutex in `flush`) that the
/// corresponding phase has completed.
#[repr(transparent)]
struct SendMutex(*const parking_lot::RawMutex);
impl Drop for SendMutex {
  fn drop(&mut self) {
    // SAFETY: We know this was locked by the sender
    unsafe {
      (*self.0).unlock();
    }
  }
}
// SAFETY: This is a mutex, so it's safe to send a pointer to it
unsafe impl Send for SendMutex {}
/// Create a [`TestEventSenderFactory`] and [`TestEventReceiver`] pair. The [`TestEventSenderFactory`] may be
/// used to create [`TestEventSender`]s and stdio streams for multiple workers in the system. The [`TestEventReceiver`]
/// will be kept alive until the final [`TestEventSender`] is dropped.
pub fn create_test_event_channel() -> (TestEventSenderFactory, TestEventReceiver)
{
  let (sender, receiver) = tokio::sync::mpsc::unbounded_channel();
  let factory = TestEventSenderFactory {
    sender,
    worker_id: Default::default(),
  };
  let receiver = TestEventReceiver { receiver };
  (factory, receiver)
}
/// Create a [`TestEventWorkerSender`] and [`TestEventReceiver`] pair. The [`TestEventReceiver`]
/// will be kept alive until the [`TestEventSender`] is dropped.
pub fn create_single_test_event_channel()
-> (TestEventWorkerSender, TestEventReceiver) {
  // Build a full factory, take a single worker from it, and let the factory
  // drop so the receiver completes once that worker's sender is gone.
  let (factory, receiver) = create_test_event_channel();
  let worker = factory.worker();
  (worker, receiver)
}
/// Polls for the next [`TestEvent`] from any worker. Events from multiple worker
/// streams may be interleaved.
pub struct TestEventReceiver {
  // Receives `(worker_id, event)` pairs from every live sender/stream.
  receiver: UnboundedReceiver<(usize, TestEvent)>,
}
impl TestEventReceiver {
  /// Receive a single test event, or `None` if no workers are alive.
  pub async fn recv(&mut self) -> Option<(usize, TestEvent)> {
    self.receiver.recv().await
  }
}
/// One half of a worker's stdio: reads bytes from a pipe and forwards them to
/// the test event channel as [`TestEvent::Output`] messages tagged with the
/// worker id.
struct TestStream {
  // Worker id attached to every forwarded event.
  id: usize,
  // `None` once the stream has hit EOF/error or the event channel has closed.
  read_opt: Option<AsyncPipeRead>,
  sender: UnboundedSender<(usize, TestEvent)>,
}
impl TestStream {
  /// Wrap a [`PipeRead`] for asynchronous reading on behalf of worker `id`.
  ///
  /// # Errors
  /// Returns an I/O error if the pipe cannot be registered with the tokio
  /// runtime (eg: the runtime is shutting down).
  fn new(
    id: usize,
    pipe_reader: PipeRead,
    sender: UnboundedSender<(usize, TestEvent)>,
  ) -> std::io::Result<Self> {
    // This may fail if the tokio runtime is shutting down
    let read_opt = Some(pipe_reader.into_async()?);
    Ok(Self {
      id,
      read_opt,
      sender,
    })
  }

  /// Send a buffer to the test event channel. If the channel no longer exists, shut down the stream
  /// because we can't do anything.
  #[must_use = "If this returns false, don't keep reading because we cannot send"]
  fn send(&mut self, buffer: Vec<u8>) -> bool {
    if buffer.is_empty() {
      // Nothing to send; not a failure.
      true
    } else if self
      .sender
      .send((self.id, TestEvent::Output(buffer)))
      .is_err()
    {
      // Receiver is gone: drop the reader so `is_alive` reports false.
      self.read_opt.take();
      false
    } else {
      true
    }
  }

  /// Whether the underlying pipe is still open (and the channel still accepted
  /// the last send).
  fn is_alive(&self) -> bool {
    self.read_opt.is_some()
  }

  /// Cancellation-safe.
  #[inline]
  fn pipe(&mut self) -> impl Future<Output = ()> + '_ {
    poll_fn(|cx| self.poll_pipe(cx))
  }

  /// Attempt to read from a given stream, pushing all of the data in it into the given
  /// [`UnboundedSender`] before returning.
  fn poll_pipe(&mut self, cx: &mut std::task::Context) -> Poll<()> {
    let mut buffer = [0_u8; BUFFER_SIZE];
    let mut buf = ReadBuf::new(&mut buffer);
    let res = {
      // No more stream, we shouldn't hit this case.
      let Some(stream) = &mut self.read_opt else {
        unreachable!();
      };
      ready!(Pin::new(&mut *stream).poll_read(cx, &mut buf))
    };
    match res {
      Ok(_) => {
        let buf = buf.filled().to_vec();
        if buf.is_empty() {
          // The buffer may return empty in EOF conditions and never return an error,
          // so we need to treat this as EOF
          self.read_opt.take();
        } else {
          // Attempt to send the buffer, marking as not alive if the channel is closed
          _ = self.send(buf);
        }
      }
      Err(_) => {
        // Stream errored, so just return and mark this stream as not alive.
        _ = self.send(buf.filled().to_vec());
        self.read_opt.take();
      }
    }
    Poll::Ready(())
  }

  /// Read and "block" until the sync markers have been read.
  async fn read_until_sync_marker(&mut self) {
    let Some(file) = &mut self.read_opt else {
      return;
    };
    let mut flush = Vec::with_capacity(BUFFER_SIZE);
    loop {
      let mut buffer = [0_u8; BUFFER_SIZE];
      match file.read(&mut buffer).await {
        Err(_) | Ok(0) => {
          // EOF or error, just return. We make no guarantees about unflushed data at shutdown.
          self.read_opt.take();
          return;
        }
        Ok(read) => {
          flush.extend(&buffer[0..read]);
          // "ends_with" is cheaper, so check that first
          if flush.ends_with(HALF_SYNC_MARKER) {
            // We might have read the full sync marker.
            if flush.ends_with(SYNC_MARKER) {
              flush.truncate(flush.len() - SYNC_MARKER.len());
            } else {
              flush.truncate(flush.len() - HALF_SYNC_MARKER.len());
            }
            // Try to send our flushed buffer. If the channel is closed, this stream will
            // be marked as not alive.
            _ = self.send(flush);
            return;
          }
          // If we don't end with the marker, then we need to search the bytes we read plus four bytes
          // from before. There's still a possibility that the marker could be split because of a pipe
          // buffer that fills up, forcing the flush to be written across two writes and interleaving
          // data between, but that's a risk we take with this sync marker approach.
          let start =
            (flush.len() - read).saturating_sub(HALF_SYNC_MARKER.len());
          if let Some(offset) =
            memchr::memmem::find(&flush[start..], HALF_SYNC_MARKER)
          {
            // BUGFIX: `memmem::find` returns an offset relative to the
            // searched slice (`flush[start..]`). Truncate at the absolute
            // marker position `start + offset`; truncating at `offset`
            // alone would silently discard up to `start` bytes of real
            // output whenever the marker is found after the first read.
            flush.truncate(start + offset);
            // Try to send our flushed buffer. If the channel is closed, this stream will
            // be marked as not alive.
            _ = self.send(flush);
            return;
          }
        }
      }
    }
  }
}
/// A factory for creating [`TestEventSender`]s. This factory must be dropped
/// before the [`TestEventReceiver`] will complete.
pub struct TestEventSenderFactory {
  // Shared event channel; cloned into every sender created by this factory.
  sender: UnboundedSender<(usize, TestEvent)>,
  // Monotonic source of unique ids for workers and weak senders.
  worker_id: AtomicUsize,
}
impl TestEventSenderFactory {
  /// Create a [`TestEventWorkerSender`], along with a stdout/stderr stream.
  pub fn worker(&self) -> TestEventWorkerSender {
    // Unique, monotonically-increasing id for this worker.
    let id = self.worker_id.fetch_add(1, Ordering::AcqRel);
    let (stdout_reader, stdout_writer) = pipe().unwrap();
    let (stderr_reader, stderr_writer) = pipe().unwrap();
    // Channel used by `TestEventSender::flush` to hand the two sync-phase
    // mutexes over to the drain thread below.
    let (sync_sender, mut sync_receiver) =
      tokio::sync::mpsc::unbounded_channel::<(SendMutex, SendMutex)>();
    // Clones handed to the worker for its actual writes; the originals stay
    // with the sender so `flush` can inject the sync markers.
    let stdout = stdout_writer.try_clone().unwrap();
    let stderr = stderr_writer.try_clone().unwrap();
    let sender = self.sender.clone();
    // Each worker spawns its own output monitoring and serialization task. This task will
    // poll the stdout/stderr streams and interleave that data with `TestEvents` generated
    // by the test runner worker.
    //
    // Note that this _must_ be a separate thread! Flushing requires locking coordination
    // on two threads and if we're blocking-locked on the mutex we've sent down the sync_receiver,
    // there's no way for us to process the actual flush operation here.
    //
    // Creating a mini-runtime to flush the stdout/stderr is the easiest way to do this, but
    // there's no reason we couldn't do it with non-blocking I/O, other than the difficulty
    // of setting up an I/O reactor in Windows.
    std::thread::spawn(move || {
      let runtime = tokio::runtime::Builder::new_current_thread()
        .enable_io()
        .build()
        .unwrap();
      runtime.block_on(tokio::task::unconstrained(async move {
        let mut test_stdout =
          TestStream::new(id, stdout_reader, sender.clone())?;
        let mut test_stderr = TestStream::new(id, stderr_reader, sender)?;
        // This ensures that the stdout and stderr streams in the select! loop below cannot starve each
        // other.
        let mut alternate_stream_priority = false;
        // This function will be woken whenever a stream or the receiver is ready
        loop {
          alternate_stream_priority = !alternate_stream_priority;
          let (a, b) = if alternate_stream_priority {
            (&mut test_stdout, &mut test_stderr)
          } else {
            (&mut test_stderr, &mut test_stdout)
          };
          tokio::select! {
            biased; // We actually want to poll the channel first
            recv = sync_receiver.recv() => {
              match recv {
                // If the channel closed, we assume that all important data from the streams was synced,
                // so we just end this task immediately.
                None => { break },
                Some((mutex1, mutex2)) => {
                  // Two phase lock: mutex1 indicates that we are done our general read phase and are ready for
                  // the sync phase. mutex2 indicates that we have completed the sync phase. This prevents deadlock
                  // when the pipe is too full to accept the sync marker.
                  // (Dropping a `SendMutex` unlocks it, releasing the sender.)
                  drop(mutex1);
                  for stream in [&mut test_stdout, &mut test_stderr] {
                    if stream.is_alive() {
                      stream.read_until_sync_marker().await;
                    }
                  }
                  drop(mutex2);
                }
              }
            }
            // Poll stdout first if `alternate_stream_priority` is true, otherwise poll stderr first.
            // This is necessary because of the `biased` flag above to avoid starvation.
            _ = a.pipe(), if a.is_alive() => {},
            _ = b.pipe(), if b.is_alive() => {},
          }
        }
        Ok::<_, std::io::Error>(())
      }))?;
      Ok::<_, std::io::Error>(())
    });
    let sender = TestEventSender {
      id,
      sender: self.sender.clone(),
      sync_sender,
      stdout_writer,
      stderr_writer,
    };
    TestEventWorkerSender {
      sender,
      stdout,
      stderr,
    }
  }
  /// A [`TestEventWeakSender`] has a unique ID, but will not keep the [`TestEventReceiver`] alive.
  /// This may be useful to add a `SIGINT` or other break handler to tests that isn't part of a
  /// specific test, but handles the overall orchestration of running tests:
  ///
  /// ```nocompile
  /// let mut cancel_sender = test_event_sender_factory.weak_sender();
  /// let sigint_handler_handle = spawn(async move {
  ///   signal::ctrl_c().await.unwrap();
  ///   cancel_sender.send(TestEvent::Sigint).ok();
  /// });
  /// ```
  pub fn weak_sender(&self) -> TestEventWeakSender {
    TestEventWeakSender {
      id: self.worker_id.fetch_add(1, Ordering::AcqRel),
      sender: self.sender.downgrade(),
    }
  }
}
pub struct TestEventWeakSender {
pub id: usize,
sender: WeakUnboundedSender<(usize, TestEvent)>,
}
impl TestEventWeakSender {
pub fn send(&mut self, message: TestEvent) -> Result<(), ChannelClosedError> {
Ok(
self
.sender
.upgrade()
.ok_or(ChannelClosedError)?
.send((self.id, message))?,
)
}
}
/// Everything a single worker needs: its event sender plus the write ends of
/// the stdout/stderr pipes that are drained by the worker's monitor thread.
pub struct TestEventWorkerSender {
  pub sender: TestEventSender,
  pub stdout: PipeWrite,
  pub stderr: PipeWrite,
}
/// Sends messages from a given worker into the test stream. If multiple clones of
/// this sender are kept alive, the worker is kept alive.
///
/// Any unflushed bytes in the stdout or stderr stream associated with this sender
/// are not guaranteed to be sent on drop unless flush is explicitly called.
pub struct TestEventSender {
  pub id: usize,
  sender: UnboundedSender<(usize, TestEvent)>,
  // Hands the two flush-phase mutexes to the per-worker drain thread.
  sync_sender: UnboundedSender<(SendMutex, SendMutex)>,
  // Writer halves retained solely so `flush` can inject the sync markers.
  stdout_writer: PipeWrite,
  stderr_writer: PipeWrite,
}
impl TestEventSender {
  /// Send a `TestEvent`, draining pending stdio output first when the message
  /// requires strict ordering relative to captured output.
  pub fn send(&mut self, message: TestEvent) -> Result<(), ChannelClosedError> {
    // Certain messages require us to ensure that all output has been drained to ensure proper
    // interleaving of messages.
    if message.requires_stdio_sync() {
      self.flush()?;
    }
    Ok(self.sender.send((self.id, message))?)
  }
  /// Ensure that all output has been fully flushed by writing a sync marker into the
  /// stdout and stderr streams and waiting for it on the other side.
  ///
  /// Panics if the drain thread does not acknowledge either phase within 30
  /// seconds (treated as a deadlock in the test harness itself).
  pub fn flush(&mut self) -> Result<(), ChannelClosedError> {
    // Two phase lock: mutex1 indicates that we are done our general read phase and are ready for
    // the sync phase. mutex2 indicates that we have completed the sync phase. This prevents deadlock
    // when the pipe is too full to accept the sync marker.
    let mutex1 = parking_lot::RawMutex::INIT;
    mutex1.lock();
    let mutex2 = parking_lot::RawMutex::INIT;
    mutex2.lock();
    // The drain thread unlocks mutex1 (by dropping its `SendMutex`) when it is
    // ready to consume the markers, and mutex2 once they have been read.
    self
      .sync_sender
      .send((SendMutex(&mutex1 as _), SendMutex(&mutex2 as _)))?;
    if !mutex1.try_lock_for(Duration::from_secs(30)) {
      panic!(
        "Test flush deadlock 1, sender closed = {}",
        self.sync_sender.is_closed()
      );
    }
    // These writes may block until the drain side makes room in the pipe;
    // errors are deliberately ignored (closed stdio is handled via EOF).
    _ = self.stdout_writer.write_all(SYNC_MARKER);
    _ = self.stderr_writer.write_all(SYNC_MARKER);
    if !mutex2.try_lock_for(Duration::from_secs(30)) {
      panic!(
        "Test flush deadlock 2, sender closed = {}",
        self.sync_sender.is_closed()
      );
    }
    Ok(())
  }
}
#[allow(clippy::print_stdout)]
#[allow(clippy::print_stderr)]
#[cfg(test)]
mod tests {
use deno_core::unsync::spawn;
use deno_core::unsync::spawn_blocking;
use super::*;
use crate::tools::test::TestResult;
/// Test that output is correctly interleaved with messages.
#[tokio::test]
async fn spawn_worker() {
  test_util::timeout!(60);
  let (mut worker, mut receiver) = create_single_test_event_channel();
  // Collect every event; log a truncated debug form to keep test logs short.
  let recv_handle = spawn(async move {
    let mut queue = vec![];
    while let Some((_, message)) = receiver.recv().await {
      let msg_str = format!("{message:?}");
      if msg_str.len() > 50 {
        eprintln!("message = {}...", &msg_str[..50]);
      } else {
        eprintln!("message = {}", msg_str);
      }
      queue.push(message);
    }
    eprintln!("done");
    queue
  });
  // Large writes on either side of a StepWait, then an explicit flush.
  let send_handle = spawn_blocking(move || {
    worker.stdout.write_all(&[1; 100_000]).unwrap();
    eprintln!("Wrote bytes");
    worker.sender.send(TestEvent::StepWait(1)).unwrap();
    eprintln!("Sent");
    worker.stdout.write_all(&[2; 100_000]).unwrap();
    eprintln!("Wrote bytes 2");
    worker.sender.flush().unwrap();
    eprintln!("Done");
  });
  send_handle.await.unwrap();
  let messages = recv_handle.await.unwrap();
  // All 100k `1` bytes must arrive (possibly chunked) before the StepWait,
  // and all 100k `2` bytes after it.
  let mut expected = 1;
  let mut count = 0;
  for message in messages {
    match message {
      TestEvent::Output(vec) => {
        assert_eq!(vec[0], expected);
        count += vec.len();
      }
      TestEvent::StepWait(_) => {
        assert_eq!(count, 100_000);
        count = 0;
        expected = 2;
      }
      _ => unreachable!(),
    }
  }
  assert_eq!(expected, 2);
  assert_eq!(count, 100_000);
}
/// Test that flushing a large number of times doesn't hang.
#[tokio::test]
async fn test_flush_lots() {
  test_util::timeout!(240);
  let (mut worker, mut receiver) = create_single_test_event_channel();
  // Collect everything that arrives; no stdio output is ever written, so no
  // Output events should ever show up.
  let recv_task = spawn(async move {
    let mut received = vec![];
    while let Some((_, message)) = receiver.recv().await {
      assert!(!matches!(message, TestEvent::Output(..)));
      received.push(message);
    }
    eprintln!("Receiver closed");
    received
  });
  let send_task = spawn_blocking(move || {
    // Every StepWait triggers an implicit flush.
    for _ in 0..100000 {
      worker.sender.send(TestEvent::StepWait(1)).unwrap();
    }
    eprintln!("Sent all messages");
  });
  send_task.await.unwrap();
  let received = recv_task.await.unwrap();
  assert_eq!(received.len(), 100000);
}
/// Test that flushing around large, potentially pipe-filling writes doesn't hang.
#[tokio::test]
async fn test_flush_large() {
  test_util::timeout!(240);
  let (mut worker, mut receiver) = create_single_test_event_channel();
  let recv_handle = spawn(async move {
    let mut queue = vec![];
    while let Some((_, message)) = receiver.recv().await {
      // Only count StepWait events; the output bytes are irrelevant here.
      if let TestEvent::StepWait(..) = message {
        queue.push(());
      }
    }
    eprintln!("Receiver closed");
    queue
  });
  let send_handle = spawn_blocking(move || {
    for _ in 0..25000 {
      // Write one pipe buffer's worth of message here. We try a few different sizes of potentially
      // blocking writes.
      worker.stderr.write_all(&[0; 4 * 1024]).unwrap();
      worker.sender.send(TestEvent::StepWait(1)).unwrap();
      worker.stderr.write_all(&[0; 16 * 1024]).unwrap();
      worker.sender.send(TestEvent::StepWait(1)).unwrap();
      worker.stderr.write_all(&[0; 64 * 1024]).unwrap();
      worker.sender.send(TestEvent::StepWait(1)).unwrap();
      worker.stderr.write_all(&[0; 128 * 1024]).unwrap();
      worker.sender.send(TestEvent::StepWait(1)).unwrap();
    }
    eprintln!("Sent all messages");
  });
  send_handle.await.unwrap();
  let messages = recv_handle.await.unwrap();
  // 25000 iterations x 4 StepWait sends per iteration.
  assert_eq!(messages.len(), 100000);
}
/// Test that sending and flushing keep working after the worker's stdout and
/// stderr pipes are closed partway through the run.
#[tokio::test]
async fn test_flush_with_close() {
  test_util::timeout!(240);
  let (worker, mut receiver) = create_single_test_event_channel();
  let TestEventWorkerSender {
    mut sender,
    stderr,
    stdout,
  } = worker;
  let recv_handle = spawn(async move {
    let mut queue = vec![];
    while let Some((_, _)) = receiver.recv().await {
      queue.push(());
    }
    eprintln!("Receiver closed");
    queue
  });
  let send_handle = spawn_blocking(move || {
    // Wrap the pipes in Options so they can be dropped (closed) mid-loop.
    let mut stdout = Some(stdout);
    let mut stderr = Some(stderr);
    for i in 0..100000 {
      if i == 20000 {
        stdout.take();
      }
      if i == 40000 {
        stderr.take();
      }
      if i % 2 == 0 {
        if let Some(stdout) = &mut stdout {
          stdout.write_all(b"message").unwrap();
        }
      } else if let Some(stderr) = &mut stderr {
        stderr.write_all(b"message").unwrap();
      }
      sender.send(TestEvent::StepWait(1)).unwrap();
    }
    eprintln!("Sent all messages");
  });
  send_handle.await.unwrap();
  let messages = recv_handle.await.unwrap();
  // 100000 StepWait events + 10000 stdout outputs (even i < 20000)
  // + 20000 stderr outputs (odd i < 40000) = 130000.
  assert_eq!(messages.len(), 130000);
}
/// Test that large numbers of interleaved steps are routed properly.
#[tokio::test]
async fn test_interleave() {
  test_util::timeout!(60);
  const MESSAGE_COUNT: usize = 10_000;
  let (mut worker, mut receiver) = create_single_test_event_channel();
  let recv_handle = spawn(async move {
    let mut i = 0;
    // Events must alternate strictly: Output("%08x" of n), then Result(n).
    while let Some((_, message)) = receiver.recv().await {
      if i % 2 == 0 {
        let expected_text = format!("{:08x}", i / 2).into_bytes();
        let TestEvent::Output(text) = message else {
          panic!("Incorrect message: {message:?}");
        };
        assert_eq!(text, expected_text);
      } else {
        let TestEvent::Result(index, TestResult::Ok, 0) = message else {
          panic!("Incorrect message: {message:?}");
        };
        assert_eq!(index, i / 2);
      }
      i += 1;
    }
    eprintln!("Receiver closed");
    i
  });
  let send_handle: deno_core::unsync::JoinHandle<()> =
    spawn_blocking(move || {
      for i in 0..MESSAGE_COUNT {
        // Each Result send implicitly flushes the preceding stderr write.
        worker
          .stderr
          .write_all(format!("{i:08x}").as_str().as_bytes())
          .unwrap();
        worker
          .sender
          .send(TestEvent::Result(i, TestResult::Ok, 0))
          .unwrap();
      }
      eprintln!("Sent all messages");
    });
  send_handle.await.unwrap();
  let messages = recv_handle.await.unwrap();
  assert_eq!(messages, MESSAGE_COUNT * 2);
}
/// Data written and events sent before the sender is dropped must still be
/// delivered, in order, to a receiver that only starts reading afterwards.
#[tokio::test]
async fn test_sender_shutdown_before_receive() {
  test_util::timeout!(60);
  for _ in 0..10 {
    let (mut worker, mut receiver) = create_single_test_event_channel();
    worker.stderr.write_all(b"hello").unwrap();
    worker
      .sender
      .send(TestEvent::Result(0, TestResult::Ok, 0))
      .unwrap();
    drop(worker);
    // First the flushed stderr bytes...
    let (_, message) = receiver.recv().await.unwrap();
    match message {
      TestEvent::Output(text) => assert_eq!(text.as_slice(), b"hello"),
      message => panic!("Incorrect message: {message:?}"),
    }
    // ...then the result event...
    let (_, message) = receiver.recv().await.unwrap();
    if !matches!(message, TestEvent::Result(..)) {
      panic!("Incorrect message: {message:?}");
    }
    // ...then channel closure.
    assert!(receiver.recv().await.is_none());
  }
}
/// Ensure nothing panics if we're racing the runtime shutdown.
#[test]
fn test_runtime_shutdown() {
  test_util::timeout!(60);
  let runtime = tokio::runtime::Builder::new_current_thread()
    .enable_all()
    .build()
    .unwrap();
  runtime.block_on(async {
    let (mut worker, mut receiver) = create_single_test_event_channel();
    // Drain events until closure; ignore their contents.
    tokio::task::spawn(async move {
      while receiver.recv().await.is_some() {}
    });
    // Send may race runtime teardown; the result is intentionally ignored.
    tokio::task::spawn(async move {
      _ = worker.sender.send(TestEvent::Sigint);
    });
  });
}
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/tools/test/fmt.rs | cli/tools/test/fmt.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::borrow::Cow;
use std::ops::AddAssign;
use console_static_text::ansi::strip_ansi_codes;
use deno_core::stats::RuntimeActivity;
use deno_core::stats::RuntimeActivityDiff;
use deno_core::stats::RuntimeActivityTrace;
use deno_core::stats::RuntimeActivityType;
use deno_runtime::fmt_errors::format_js_error;
use phf::phf_map;
use super::*;
use crate::util::path::to_percent_decoded_str;
/// Render a path or URL for display: file URLs under `cwd` become `./`-style
/// relative paths (percent-decoded); unparseable input becomes `<anonymous>`;
/// anything else is echoed back unchanged.
pub fn to_relative_path_or_remote_url(cwd: &Url, path_or_url: &str) -> String {
  let url = match Url::parse(path_or_url) {
    Ok(url) => url,
    Err(_) => return "<anonymous>".to_string(),
  };
  if url.scheme() == "file" {
    if let Some(relative) = cwd.make_relative(&url) {
      let relative = if relative.starts_with("../") {
        relative
      } else {
        format!("./{relative}")
      };
      return to_percent_decoded_str(&relative);
    }
  }
  path_or_url.to_string()
}
/// Produce a copy of `js_error` with trailing internal (`ext:`/`[ext:`) stack
/// frames removed so failures point at user code, recursing into `cause` and
/// `aggregated` errors.
fn abbreviate_test_error(js_error: &JsError) -> JsError {
  let mut js_error = js_error.clone();
  let frames = std::mem::take(&mut js_error.frames);
  // check if there are any stack frames coming from user code
  // (frames with no file name count as user code)
  let should_filter = frames.iter().any(|f| {
    if let Some(file_name) = &f.file_name {
      !(file_name.starts_with("[ext:") || file_name.starts_with("ext:"))
    } else {
      true
    }
  });
  if should_filter {
    // Strip only the trailing run of internal frames (those below the last
    // user-code frame), preserving the order of everything else.
    let mut frames = frames
      .into_iter()
      .rev()
      .skip_while(|f| {
        if let Some(file_name) = &f.file_name {
          file_name.starts_with("[ext:") || file_name.starts_with("ext:")
        } else {
          false
        }
      })
      .collect::<Vec<_>>();
    frames.reverse();
    js_error.frames = frames;
  } else {
    // Stack is entirely internal; keep it intact rather than emptying it.
    js_error.frames = frames;
  }
  // Apply the same abbreviation recursively to chained/aggregated errors.
  js_error.cause = js_error
    .cause
    .as_ref()
    .map(|e| Box::new(abbreviate_test_error(e)));
  js_error.aggregated = js_error
    .aggregated
    .as_ref()
    .map(|es| es.iter().map(abbreviate_test_error).collect());
  js_error
}
// This function prettifies `JsError` and applies some changes specifically for
// test runner purposes:
//
// - hide stack traces if `options.hide_stacktraces` is set to `true`
//
// - filter out stack frames:
// - if stack trace consists of mixed user and internal code, the frames
// below the first user code frame are filtered out
// - if stack trace consists only of internal code it is preserved as is
/// Format a test failure's `JsError` for display, honoring the
/// `hide_stacktraces` and `strip_ascii_color` options.
pub fn format_test_error(
  js_error: &JsError,
  options: &TestFailureFormatOptions,
) -> String {
  // Trim internal-only stack frames and the "Uncaught " prefix first.
  let mut js_error = abbreviate_test_error(js_error);
  js_error.exception_message = js_error
    .exception_message
    .trim_start_matches("Uncaught ")
    .to_string();
  let message = match options.hide_stacktraces {
    true => js_error.exception_message,
    false => format_js_error(&js_error, options.initial_cwd.as_ref()),
  };
  match options.strip_ascii_color {
    true => strip_ansi_codes(&message).to_string(),
    false => message,
  }
}
/// Combine appeared/disappeared sanitizer activity into sorted messages and
/// de-duplicated, sorted trailer lines.
pub fn format_sanitizer_diff(
  diff: RuntimeActivityDiff,
) -> (Vec<String>, Vec<String>) {
  let (appeared_msgs, appeared_trailers) =
    format_sanitizer_accum(diff.appeared, true);
  let (disappeared_msgs, disappeared_trailers) =
    format_sanitizer_accum(diff.disappeared, false);
  let mut messages = appeared_msgs;
  messages.extend(disappeared_msgs);
  messages.sort();
  // BTreeSet both de-duplicates and orders the trailers.
  let mut trailers: BTreeSet<String> = appeared_trailers.into_iter().collect();
  trailers.extend(disappeared_trailers);
  (messages, trailers.into_iter().collect::<Vec<_>>())
}
/// Aggregate sanitizer activity into user-facing leak messages.
///
/// `appeared` selects the phrasing: `true` for activity that leaked during the
/// test, `false` for activity that completed during the test but started
/// before it. The second vector holds trailer lines (currently only the
/// `--trace-leaks` hint, emitted when some activity had no captured trace).
fn format_sanitizer_accum(
  activities: Vec<RuntimeActivity>,
  appeared: bool,
) -> (Vec<String>, Vec<String>) {
  // Aggregate the sanitizer information
  let mut accum = HashMap::new();
  for activity in activities {
    let item = format_sanitizer_accum_item(activity);
    accum.entry(item).or_insert(0).add_assign(1);
  }
  let mut output = vec![];
  let mut needs_trace_leaks = false;
  for ((item_type, item_name, trace), count) in accum.into_iter() {
    if item_type == RuntimeActivityType::Resource {
      let (name, action1, action2) = pretty_resource_name(&item_name);
      let hint = resource_close_hint(&item_name);
      let value = if appeared {
        format!(
          "{name} was {action1} during the test, but not {action2} during the test. {hint}"
        )
      } else {
        format!(
          "{name} was {action1} before the test started, but was {action2} during the test. \
           Do not close resources in a test that were not created during that test."
        )
      };
      output.push(value);
    } else if item_type == RuntimeActivityType::AsyncOp {
      // Singular vs plural phrasing based on the aggregated count.
      let (count_str, plural, tense) = if count == 1 {
        (Cow::Borrowed("An"), "", "was")
      } else {
        (Cow::Owned(count.to_string()), "s", "were")
      };
      let phrase = if appeared {
        "started in this test, but never completed"
      } else {
        "started before the test, but completed during the test. Async operations should not complete in a test if they were not started in that test"
      };
      // Use the friendly op description + fix hint when the op is known.
      let mut value = if let Some([operation, hint]) =
        OP_DETAILS.get(&item_name)
      {
        format!(
          "{count_str} async operation{plural} to {operation} {tense} {phrase}. This is often caused by not {hint}."
        )
      } else {
        format!(
          "{count_str} async call{plural} to {item_name} {tense} {phrase}."
        )
      };
      value += &if let Some(trace) = trace {
        format!(" The operation {tense} started here:\n{trace}")
      } else {
        needs_trace_leaks = true;
        String::new()
      };
      output.push(value);
    } else if item_type == RuntimeActivityType::Timer {
      let (count_str, plural, tense) = if count == 1 {
        (Cow::Borrowed("A"), "", "was")
      } else {
        (Cow::Owned(count.to_string()), "s", "were")
      };
      let phrase = if appeared {
        "started in this test, but never completed"
      } else {
        "started before the test, but completed during the test. Intervals and timers should not complete in a test if they were not started in that test"
      };
      let mut value = format!(
        "{count_str} timer{plural} {tense} {phrase}. This is often caused by not calling `clearTimeout`."
      );
      value += &if let Some(trace) = trace {
        format!(" The operation {tense} started here:\n{trace}")
      } else {
        needs_trace_leaks = true;
        String::new()
      };
      output.push(value);
    } else if item_type == RuntimeActivityType::Interval {
      let (count_str, plural, tense) = if count == 1 {
        (Cow::Borrowed("An"), "", "was")
      } else {
        (Cow::Owned(count.to_string()), "s", "were")
      };
      let phrase = if appeared {
        "started in this test, but never completed"
      } else {
        "started before the test, but completed during the test. Intervals and timers should not complete in a test if they were not started in that test"
      };
      let mut value = format!(
        "{count_str} interval{plural} {tense} {phrase}. This is often caused by not calling `clearInterval`."
      );
      value += &if let Some(trace) = trace {
        format!(" The operation {tense} started here:\n{trace}")
      } else {
        needs_trace_leaks = true;
        String::new()
      };
      output.push(value);
    } else {
      // The four variants above cover every RuntimeActivityType.
      unreachable!()
    }
  }
  if needs_trace_leaks {
    (output, vec!["To get more details where leaks occurred, run again with the --trace-leaks flag.".to_owned()])
  } else {
    (output, vec![])
  }
}
/// Collapse a [`RuntimeActivity`] into the `(type, name, trace)` key used to
/// aggregate identical leaks.
fn format_sanitizer_accum_item(
  activity: RuntimeActivity,
) -> (
  RuntimeActivityType,
  Cow<'static, str>,
  Option<RuntimeActivityTrace>,
) {
  // Capture the discriminant before `activity` is consumed by the match.
  let kind = activity.activity();
  match activity {
    RuntimeActivity::Resource(_, _, name) => (kind, name.into(), None),
    RuntimeActivity::Timer(_, trace) => (kind, "".into(), trace),
    RuntimeActivity::Interval(_, trace) => (kind, "".into(), trace),
    RuntimeActivity::AsyncOp(_, trace, name) => (kind, name.into(), trace),
  }
}
/// Map an internal resource name to a human-readable description plus the
/// verbs describing its creation and cleanup. Unknown names fall back to the
/// quoted raw name with generic "created"/"cleaned up" verbs.
fn pretty_resource_name(
  name: &str,
) -> (Cow<'static, str>, &'static str, &'static str) {
  // Known resource kinds and their open/close verbs.
  let known: Option<(&'static str, &'static str, &'static str)> = match name {
    "fsFile" => Some(("A file", "opened", "closed")),
    "fetchRequest" => Some(("A fetch request", "started", "finished")),
    "fetchRequestBody" => Some(("A fetch request body", "created", "closed")),
    "fetchResponse" => Some(("A fetch response body", "created", "consumed")),
    "httpClient" => Some(("An HTTP client", "created", "closed")),
    "dynamicLibrary" => Some(("A dynamic library", "loaded", "unloaded")),
    "httpConn" => Some(("An inbound HTTP connection", "accepted", "closed")),
    "httpStream" => Some(("An inbound HTTP request", "accepted", "closed")),
    "tcpStream" => Some(("A TCP connection", "opened/accepted", "closed")),
    "unixStream" => Some(("A Unix connection", "opened/accepted", "closed")),
    "tlsStream" => Some(("A TLS connection", "opened/accepted", "closed")),
    "tlsListener" => Some(("A TLS listener", "opened", "closed")),
    "unixListener" => Some(("A Unix listener", "opened", "closed")),
    "unixDatagram" => Some(("A Unix datagram", "opened", "closed")),
    "tcpListener" => Some(("A TCP listener", "opened", "closed")),
    "udpSocket" => Some(("A UDP socket", "opened", "closed")),
    "timer" => Some(("A timer", "started", "fired/cleared")),
    "textDecoder" => Some(("A text decoder", "created", "finished")),
    "messagePort" => Some(("A message port", "created", "closed")),
    "webSocketStream" => Some(("A WebSocket", "opened", "closed")),
    "fsEvents" => Some(("A file system watcher", "created", "closed")),
    "childStdin" => Some(("A child process stdin", "opened", "closed")),
    "childStdout" => Some(("A child process stdout", "opened", "closed")),
    "childStderr" => Some(("A child process stderr", "opened", "closed")),
    "child" => Some(("A child process", "started", "closed")),
    "signal" => Some(("A signal listener", "created", "fired/cleared")),
    "stdin" => Some(("The stdin pipe", "opened", "closed")),
    "stdout" => Some(("The stdout pipe", "opened", "closed")),
    "stderr" => Some(("The stderr pipe", "opened", "closed")),
    "compression" => Some(("A CompressionStream", "created", "closed")),
    _ => None,
  };
  match known {
    Some((pretty, action1, action2)) => {
      (Cow::Borrowed(pretty), action1, action2)
    }
    None => (Cow::Owned(format!("\"{name}\"")), "created", "cleaned up"),
  }
}
/// Returns a human-readable hint telling the user how to close the leaked
/// resource identified by `name` so that the resource sanitizer passes.
/// Unknown names fall through to a generic hint.
fn resource_close_hint(name: &str) -> &'static str {
  // Arms are kept in alphabetical order by resource name.
  match name {
    "child" => {
      "Close the child process by calling `proc.kill()` or `proc.close()`."
    }
    "childStderr" => {
      "Close the child process stderr by calling `proc.stderr.close()` or `await child.stderr.cancel()`."
    }
    "childStdin" => {
      "Close the child process stdin by calling `proc.stdin.close()`."
    }
    "childStdout" => {
      "Close the child process stdout by calling `proc.stdout.close()` or `await child.stdout.cancel()`."
    }
    "compression" => {
      "Close the compression stream by calling `await stream.writable.close()`."
    }
    "dynamicLibrary" => {
      "Unload the dynamic library by calling `dynamicLibrary.close()`."
    }
    "fetchRequest" => {
      "Await the promise returned from `fetch()` or abort the fetch with an abort signal."
    }
    "fetchRequestBody" => {
      "Terminate the request body `ReadableStream` by closing or erroring it."
    }
    "fetchResponse" => {
      "Consume or close the response body `ReadableStream`, e.g `await resp.text()` or `await resp.body.cancel()`."
    }
    "fsEvents" => "Close the file system watcher by calling `watcher.close()`.",
    "fsFile" => "Close the file handle by calling `file.close()`.",
    "httpClient" => "Close the HTTP client by calling `httpClient.close()`.",
    "httpConn" => {
      "Close the inbound HTTP connection by calling `httpConn.close()`."
    }
    "httpStream" => {
      "Close the inbound HTTP request by responding with `e.respondWith()` or closing the HTTP connection."
    }
    "messagePort" => "Close the message port by calling `messagePort.close()`.",
    "signal" => {
      "Clear the signal listener by calling `Deno.removeSignalListener`."
    }
    "stderr" => "Close the stderr pipe by calling `Deno.stderr.close()`.",
    "stdin" => "Close the stdin pipe by calling `Deno.stdin.close()`.",
    "stdout" => "Close the stdout pipe by calling `Deno.stdout.close()`.",
    "tcpListener" => "Close the TCP listener by calling `tcpListener.close()`.",
    "tcpStream" => "Close the TCP connection by calling `tcpConn.close()`.",
    "textDecoder" => {
      "Close the text decoder by calling `textDecoder.decode('')` or `await textDecoderStream.readable.cancel()`."
    }
    "timer" => "Clear the timer by calling `clearInterval` or `clearTimeout`.",
    "tlsListener" => "Close the TLS listener by calling `tlsListener.close()`.",
    "tlsStream" => "Close the TLS connection by calling `tlsConn.close()`.",
    "udpSocket" => "Close the UDP socket by calling `udpSocket.close()`.",
    "unixDatagram" => {
      "Close the Unix datagram socket by calling `unixDatagram.close()`."
    }
    "unixListener" => {
      "Close the Unix socket listener by calling `unixListener.close()`."
    }
    "unixStream" => {
      "Close the Unix socket connection by calling `unixConn.close()`."
    }
    "webSocketStream" => "Close the WebSocket by calling `webSocket.close()`.",
    _ => "Close the resource before the end of the test.",
  }
}
/// Maps async op names to a pair of user-facing descriptions used by the
/// async-op sanitizer: `[0]` describes what the op does ("started an async
/// call to ..."), `[1]` describes how the user completes or cancels it.
/// Ops not listed here fall back to generic wording elsewhere.
pub const OP_DETAILS: phf::Map<&'static str, [&'static str; 2]> = phf_map! {
  "op_blob_read_part" => ["read from a Blob or File", "awaiting the result of a Blob or File read"],
  "op_broadcast_recv" => ["receive a message from a BroadcastChannel", "closing the BroadcastChannel"],
  "op_broadcast_send" => ["send a message to a BroadcastChannel", "closing the BroadcastChannel"],
  "op_crypto_decrypt" => ["decrypt data", "awaiting the result of a `crypto.subtle.decrypt` call"],
  "op_crypto_derive_bits" => ["derive bits from a key", "awaiting the result of a `crypto.subtle.deriveBits` call"],
  "op_crypto_encrypt" => ["encrypt data", "awaiting the result of a `crypto.subtle.encrypt` call"],
  "op_crypto_generate_key" => ["generate a key", "awaiting the result of a `crypto.subtle.generateKey` call"],
  "op_crypto_sign_key" => ["sign data", "awaiting the result of a `crypto.subtle.sign` call"],
  "op_crypto_subtle_digest" => ["digest data", "awaiting the result of a `crypto.subtle.digest` call"],
  "op_crypto_verify_key" => ["verify data", "awaiting the result of a `crypto.subtle.verify` call"],
  "op_dns_resolve" => ["resolve a DNS name", "awaiting the result of a `Deno.resolveDns` call"],
  "op_fetch_send" => ["send a HTTP request", "awaiting the result of a `fetch` call"],
  "op_ffi_call_nonblocking" => ["do a non blocking ffi call", "awaiting the returned promise"],
  "op_ffi_call_ptr_nonblocking" => ["do a non blocking ffi call", "awaiting the returned promise"],
  "op_fs_chmod_async" => ["change the permissions of a file", "awaiting the result of a `Deno.chmod` call"],
  "op_fs_chown_async" => ["change the owner of a file", "awaiting the result of a `Deno.chown` call"],
  "op_fs_copy_file_async" => ["copy a file", "awaiting the result of a `Deno.copyFile` call"],
  "op_fs_events_poll" => ["get the next file system event", "breaking out of a for await loop looping over `Deno.FsEvents`"],
  "op_fs_file_sync_data_async" => ["flush pending data operations for a file to disk", "awaiting the result of a `Deno.FsFile.prototype.syncData` call"],
  "op_fs_file_stat_async" => ["get file metadata", "awaiting the result of a `Deno.FsFile.prototype.stat` call"],
  "op_fs_flock_async" => ["lock a file", "awaiting the result of a `Deno.FsFile.lock` call"],
  "op_fs_file_sync_async" => ["flush pending data operations for a file to disk", "awaiting the result of a `Deno.FsFile.sync` call"],
  "op_fs_file_truncate_async" => ["truncate a file", "awaiting the result of a `Deno.FsFile.prototype.truncate` call"],
  "op_fs_funlock_async" => ["unlock a file", "awaiting the result of a `Deno.FsFile.unlock` call"],
  "op_fs_link_async" => ["create a hard link", "awaiting the result of a `Deno.link` call"],
  "op_fs_lstat_async" => ["get file metadata", "awaiting the result of a `Deno.lstat` call"],
  "op_fs_make_temp_dir_async" => ["create a temporary directory", "awaiting the result of a `Deno.makeTempDir` call"],
  "op_fs_make_temp_file_async" => ["create a temporary file", "awaiting the result of a `Deno.makeTempFile` call"],
  "op_fs_mkdir_async" => ["create a directory", "awaiting the result of a `Deno.mkdir` call"],
  "op_fs_open_async" => ["open a file", "awaiting the result of a `Deno.open` call"],
  "op_fs_read_dir_async" => ["read a directory", "collecting all items in the async iterable returned from a `Deno.readDir` call"],
  "op_fs_read_file_async" => ["read a file", "awaiting the result of a `Deno.readFile` call"],
  "op_fs_read_file_text_async" => ["read a text file", "awaiting the result of a `Deno.readTextFile` call"],
  "op_fs_read_link_async" => ["read a symlink", "awaiting the result of a `Deno.readLink` call"],
  "op_fs_realpath_async" => ["resolve a path", "awaiting the result of a `Deno.realpath` call"],
  "op_fs_remove_async" => ["remove a file or directory", "awaiting the result of a `Deno.remove` call"],
  "op_fs_rename_async" => ["rename a file or directory", "awaiting the result of a `Deno.rename` call"],
  "op_fs_seek_async" => ["seek in a file", "awaiting the result of a `Deno.FsFile.prototype.seek` call"],
  "op_fs_stat_async" => ["get file metadata", "awaiting the result of a `Deno.stat` call"],
  "op_fs_symlink_async" => ["create a symlink", "awaiting the result of a `Deno.symlink` call"],
  "op_fs_truncate_async" => ["truncate a file", "awaiting the result of a `Deno.truncate` call"],
  "op_fs_utime_async" => ["change file timestamps", "awaiting the result of a `Deno.utime` call"],
  "op_fs_write_file_async" => ["write a file", "awaiting the result of a `Deno.writeFile` call"],
  "op_host_recv_ctrl" => ["receive a message from a web worker", "terminating a `Worker`"],
  "op_host_recv_message" => ["receive a message from a web worker", "terminating a `Worker`"],
  "op_http_accept" => ["accept a HTTP request", "closing a `Deno.HttpConn`"],
  "op_http_shutdown" => ["shutdown a HTTP connection", "awaiting `Deno.HttpEvent#respondWith`"],
  "op_http_upgrade_websocket" => ["upgrade a HTTP connection to a WebSocket", "awaiting `Deno.HttpEvent#respondWith`"],
  "op_http_write" => ["write HTTP response body", "awaiting `Deno.HttpEvent#respondWith`"],
  "op_http_write_headers" => ["write HTTP response headers", "awaiting `Deno.HttpEvent#respondWith`"],
  "op_message_port_recv_message" => ["receive a message from a MessagePort", "awaiting the result of not closing a `MessagePort`"],
  "op_net_accept_tcp" => ["accept a TCP stream", "closing a `Deno.Listener`"],
  "op_net_accept_tls" => ["accept a TLS stream", "closing a `Deno.TlsListener`"],
  "op_net_accept_unix" => ["accept a Unix stream", "closing a `Deno.Listener`"],
  "op_net_connect_tcp" => ["connect to a TCP server", "awaiting a `Deno.connect` call"],
  "op_net_connect_tls" => ["connect to a TLS server", "awaiting a `Deno.connectTls` call"],
  "op_net_connect_unix" => ["connect to a Unix server", "awaiting a `Deno.connect` call"],
  "op_net_recv_udp" => ["receive a datagram message via UDP", "awaiting the result of `Deno.DatagramConn#receive` call, or not breaking out of a for await loop looping over a `Deno.DatagramConn`"],
  "op_net_recv_unixpacket" => ["receive a datagram message via Unixpacket", "awaiting the result of `Deno.DatagramConn#receive` call, or not breaking out of a for await loop looping over a `Deno.DatagramConn`"],
  "op_net_send_udp" => ["send a datagram message via UDP", "awaiting the result of `Deno.DatagramConn#send` call"],
  "op_net_send_unixpacket" => ["send a datagram message via Unixpacket", "awaiting the result of `Deno.DatagramConn#send` call"],
  "op_run_status" => ["get the status of a subprocess", "awaiting the result of a `Deno.Process#status` call"],
  "op_signal_poll" => ["get the next signal", "un-registering a OS signal handler"],
  "op_spawn_wait" => ["wait for a subprocess to exit", "awaiting the result of a `Deno.Process#status` call"],
  "op_tls_handshake" => ["perform a TLS handshake", "awaiting a `Deno.TlsConn#handshake` call"],
  "op_tls_start" => ["start a TLS connection", "awaiting a `Deno.startTls` call"],
  "op_utime_async" => ["change file timestamps", "awaiting the result of a `Deno.utime` call"],
  "op_webgpu_buffer_get_map_async" => ["map a WebGPU buffer", "awaiting the result of a `GPUBuffer#mapAsync` call"],
  "op_webgpu_request_adapter" => ["request a WebGPU adapter", "awaiting the result of a `navigator.gpu.requestAdapter` call"],
  "op_webgpu_request_device" => ["request a WebGPU device", "awaiting the result of a `GPUAdapter#requestDevice` call"],
  "op_ws_close" => ["close a WebSocket", "awaiting until the `close` event is emitted on a `WebSocket`, or the `WebSocketStream#closed` promise resolves"],
  "op_ws_create" => ["create a WebSocket", "awaiting until the `open` event is emitted on a `WebSocket`, or the result of a `WebSocketStream#connection` promise"],
  "op_ws_next_event" => ["receive the next message on a WebSocket", "closing a `WebSocket` or `WebSocketStream`"],
  "op_ws_send_binary" => ["send a message on a WebSocket", "closing a `WebSocket` or `WebSocketStream`"],
  "op_ws_send_binary_ab" => ["send a message on a WebSocket", "closing a `WebSocket` or `WebSocketStream`"],
  "op_ws_send_ping" => ["send a message on a WebSocket", "closing a `WebSocket` or `WebSocketStream`"],
  "op_ws_send_text" => ["send a message on a WebSocket", "closing a `WebSocket` or `WebSocketStream`"],
};
#[cfg(test)]
mod tests {
  use deno_core::stats::RuntimeActivity;
  // Generates a #[test] that feeds the given activities through
  // `format_sanitizer_accum` and compares the rendered leak report
  // (leak bullet lines followed by trailer notes) against `$expected`.
  macro_rules! leak_format_test {
    ($name:ident, $appeared:literal, [$($activity:expr),*], $expected:literal) => {
      #[test]
      fn $name() {
        let (leaks, trailer_notes) = super::format_sanitizer_accum(vec![$($activity),*], $appeared);
        let mut output = String::new();
        for leak in leaks {
          output += &format!(" - {leak}\n");
        }
        for trailer in trailer_notes {
          output += &format!("{trailer}\n");
        }
        assert_eq!(output, $expected);
      }
    }
  }
  // Ops missing from OP_DETAILS should still produce a readable message.
  // https://github.com/denoland/deno/issues/13729
  // https://github.com/denoland/deno/issues/13938
  leak_format_test!(
    op_unknown,
    true,
    [RuntimeActivity::AsyncOp(0, None, "op_unknown")],
    " - An async call to op_unknown was started in this test, but never completed.\n\
    To get more details where leaks occurred, run again with the --trace-leaks flag.\n"
  );
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/tools/test/reporters/tap.rs | cli/tools/test/reporters/tap.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use deno_core::serde_json::json;
use deno_core::serde_json::{self};
use serde::Serialize;
use super::common;
use super::fmt::to_relative_path_or_remote_url;
use super::*;
/// Protocol header line printed once at the top of the TAP output.
const VERSION_HEADER: &str = "TAP version 14";
/// A test reporter for the Test Anything Protocol as defined at
/// https://testanything.org/tap-version-14-specification.html
pub struct TapTestReporter {
  /// Base directory used to render file paths relative in diagnostics.
  cwd: Url,
  /// When true, subtest results are deferred until their parent finishes.
  is_concurrent: bool,
  /// Whether the TAP version header has been printed yet.
  header: bool,
  /// Accumulated number of planned top-level tests (for the final plan line).
  planned: usize,
  /// Number of top-level test result lines printed so far.
  n: usize,
  /// Number of step (subtest) result lines printed for the current test.
  step_n: usize,
  /// Deferred step results keyed by root test id (concurrent mode only).
  step_results: HashMap<usize, Vec<(TestStepDescription, TestStepResult)>>,
  /// Controls how failures are rendered in diagnostics.
  failure_format_options: TestFailureFormatOptions,
}
#[allow(clippy::print_stdout)]
impl TapTestReporter {
  /// Creates a TAP reporter; `cwd` is the base used to relativize paths.
  pub fn new(
    cwd: Url,
    is_concurrent: bool,
    failure_format_options: TestFailureFormatOptions,
  ) -> TapTestReporter {
    TapTestReporter {
      cwd,
      is_concurrent,
      header: false,
      planned: 0,
      n: 0,
      step_n: 0,
      step_results: HashMap::new(),
      failure_format_options,
    }
  }
  /// Escapes the characters that carry meaning in a TAP test-point
  /// description: backslash, newline, carriage return, and `#`.
  fn escape_description(description: &str) -> String {
    let mut escaped = String::with_capacity(description.len());
    for ch in description.chars() {
      match ch {
        '\\' => escaped.push_str("\\\\"),
        '\n' => escaped.push_str("\\n"),
        '\r' => escaped.push_str("\\r"),
        '#' => escaped.push_str("\\#"),
        other => escaped.push(other),
      }
    }
    escaped
  }
  /// Prints a YAML diagnostic block for a failed test point.
  fn print_diagnostic(
    &self,
    indent: usize,
    failure: &TestFailure,
    location: DiagnosticLocation,
  ) {
    // Unspecified behaviour:
    // The diagnostic schema is not specified by the TAP spec,
    // but there is an example, so we use it.
    // YAML is a superset of JSON, so we can avoid a YAML dependency here.
    // This makes the output less readable though.
    let payload = serde_json::to_string(&json!({
      "message": failure.format(&self.failure_format_options),
      "severity": "fail".to_string(),
      "at": location,
    }))
    .expect("failed to serialize TAP diagnostic");
    println!("{:indent$} ---", "", indent = indent);
    println!("{:indent$} {}", "", payload, indent = indent);
    println!("{:indent$} ...", "", indent = indent);
  }
  /// Prints a single `ok`/`not ok` test-point line at the given indent.
  fn print_line(
    indent: usize,
    status: &str,
    step: usize,
    description: &str,
    directive: &str,
  ) {
    let escaped = Self::escape_description(description);
    println!(
      "{:indent$}{} {} - {}{}",
      "",
      status,
      step,
      escaped,
      directive,
      indent = indent
    );
  }
  /// Prints one subtest result, emitting the `# Subtest:` header before
  /// the first step of a test, plus a diagnostic block on failure.
  fn print_step_result(
    &mut self,
    desc: &TestStepDescription,
    result: &TestStepResult,
  ) {
    if self.step_n == 0 {
      println!("# Subtest: {}", desc.root_name)
    }
    let (status, directive) = match result {
      TestStepResult::Ok => ("ok", ""),
      TestStepResult::Ignored => ("ok", " # SKIP"),
      TestStepResult::Failed(_) => ("not ok", ""),
    };
    self.step_n += 1;
    Self::print_line(4, status, self.step_n, &desc.name, directive);
    if let TestStepResult::Failed(failure) = result {
      self.print_diagnostic(
        4,
        failure,
        DiagnosticLocation {
          file: to_relative_path_or_remote_url(&self.cwd, &desc.origin),
          line: desc.location.line_number,
        },
      );
    }
  }
}
#[allow(clippy::print_stdout)]
impl TestReporter for TapTestReporter {
  fn report_register(&mut self, _description: &TestDescription) {}
  /// Prints the TAP version header once and accumulates the planned
  /// top-level test count for the final `1..N` plan line.
  fn report_plan(&mut self, plan: &TestPlan) {
    if !self.header {
      println!("{}", VERSION_HEADER);
      self.header = true;
    }
    self.planned += plan.total;
    if !self.is_concurrent {
      // Unspecified behavior: Consumers tend to interpret a comment as a test suite name.
      // During concurrent execution these would not correspond to the actual test file, so skip them.
      println!(
        "# {}",
        to_relative_path_or_remote_url(&self.cwd, &plan.origin)
      )
    }
  }
  fn report_wait(&mut self, _description: &TestDescription) {
    // flush for faster feedback when line buffered
    std::io::stdout().flush().ok();
  }
  fn report_slow(&mut self, _description: &TestDescription, _elapsed: u64) {}
  fn report_output(&mut self, _output: &[u8]) {}
  /// Prints the result line for a top-level test. In concurrent mode this
  /// first flushes any deferred subtest results, then emits the subtest
  /// plan line, then the parent test's own `ok`/`not ok` line.
  fn report_result(
    &mut self,
    description: &TestDescription,
    result: &TestResult,
    _elapsed: u64,
  ) {
    if self.is_concurrent {
      // Subtests must appear immediately before their parent test, so the
      // results deferred in `report_step_result` are printed now.
      if let Some(results) = self.step_results.remove(&description.id) {
        for (desc, result) in &results {
          self.print_step_result(desc, result);
        }
      }
    }
    if self.step_n != 0 {
      // Plan line for the subtests printed above.
      println!(" 1..{}", self.step_n);
      self.step_n = 0;
    }
    let (status, directive) = match result {
      TestResult::Ok => ("ok", ""),
      TestResult::Ignored => ("ok", " # SKIP"),
      TestResult::Failed(_) => ("not ok", ""),
      TestResult::Cancelled => ("not ok", ""),
    };
    self.n += 1;
    Self::print_line(0, status, self.n, &description.name, directive);
    if let TestResult::Failed(failure) = result {
      self.print_diagnostic(
        0,
        failure,
        DiagnosticLocation {
          file: to_relative_path_or_remote_url(&self.cwd, &description.origin),
          line: description.location.line_number,
        },
      );
    }
  }
  // Note: parameter was previously misspelled `_errorr`.
  fn report_uncaught_error(&mut self, _origin: &str, _error: Box<JsError>) {}
  fn report_step_register(&mut self, _description: &TestStepDescription) {}
  fn report_step_wait(&mut self, _description: &TestStepDescription) {
    // flush for faster feedback when line buffered
    std::io::stdout().flush().ok();
  }
  /// Prints (or, in concurrent mode, defers) one subtest result.
  fn report_step_result(
    &mut self,
    desc: &TestStepDescription,
    result: &TestStepResult,
    _elapsed: u64,
    _tests: &IndexMap<usize, TestDescription>,
    _test_steps: &IndexMap<usize, TestStepDescription>,
  ) {
    if self.is_concurrent {
      // All subtests must be reported immediately before the parent test.
      // So during concurrent execution we need to defer printing the results.
      // TODO(SyrupThinker) This only outputs one level of subtests, it could support multiple.
      self
        .step_results
        .entry(desc.root_id)
        .or_default()
        .push((desc.clone(), result.clone()));
      return;
    }
    self.print_step_result(desc, result);
  }
  /// Prints the trailing plan line covering all planned top-level tests.
  fn report_summary(
    &mut self,
    _elapsed: &Duration,
    _tests: &IndexMap<usize, TestDescription>,
    _test_steps: &IndexMap<usize, TestStepDescription>,
  ) {
    println!("1..{}", self.planned);
  }
  /// Emits a TAP "Bail out!" line, then the shared SIGINT report.
  fn report_sigint(
    &mut self,
    tests_pending: &HashSet<usize>,
    tests: &IndexMap<usize, TestDescription>,
    test_steps: &IndexMap<usize, TestStepDescription>,
  ) {
    println!("Bail out! SIGINT received.");
    common::report_sigint(
      &mut std::io::stdout(),
      &self.cwd,
      tests_pending,
      tests,
      test_steps,
    );
  }
  fn report_completed(&mut self) {}
  /// TAP output is streamed as it happens, so there is nothing to flush.
  fn flush_report(
    &mut self,
    _elapsed: &Duration,
    _tests: &IndexMap<usize, TestDescription>,
    _test_steps: &IndexMap<usize, TestStepDescription>,
  ) -> anyhow::Result<()> {
    Ok(())
  }
}
/// Location serialized under the `"at"` key of a TAP failure diagnostic.
#[derive(Serialize)]
struct DiagnosticLocation {
  /// Path relative to the reporter's cwd, or a remote URL.
  file: String,
  /// Line number of the test definition.
  line: u32,
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/tools/test/reporters/junit.rs | cli/tools/test/reporters/junit.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::collections::VecDeque;
use console_static_text::ansi::strip_ansi_codes;
use deno_core::anyhow::Context;
use super::fmt::to_relative_path_or_remote_url;
use super::*;
/// A test reporter that collects results and writes them out as a
/// JUnit XML report when the run completes.
pub struct JunitTestReporter {
  /// Base directory used to render file paths relative.
  cwd: Url,
  /// Destination for the XML report; `"-"` means stdout.
  output_path: String,
  // Stores TestCases (i.e. Tests) by the Test ID
  cases: IndexMap<usize, quick_junit::TestCase>,
  // Stores nodes representing test cases in such a way that can be traversed
  // from child to parent to build the full test name that reflects the test
  // hierarchy.
  test_name_tree: TestNameTree,
  /// Controls how failures are rendered (e.g. ANSI stripping).
  failure_format_options: TestFailureFormatOptions,
}
impl JunitTestReporter {
pub fn new(
cwd: Url,
output_path: String,
failure_format_options: TestFailureFormatOptions,
) -> Self {
Self {
cwd,
output_path,
cases: IndexMap::new(),
test_name_tree: TestNameTree::new(),
failure_format_options,
}
}
fn convert_status(
status: &TestResult,
failure_format_options: &TestFailureFormatOptions,
) -> quick_junit::TestCaseStatus {
match status {
TestResult::Ok => quick_junit::TestCaseStatus::success(),
TestResult::Ignored => quick_junit::TestCaseStatus::skipped(),
TestResult::Failed(failure) => quick_junit::TestCaseStatus::NonSuccess {
kind: quick_junit::NonSuccessKind::Failure,
message: Some(failure.overview()),
ty: None,
description: Some(failure.format(failure_format_options).into_owned()),
reruns: vec![],
},
TestResult::Cancelled => quick_junit::TestCaseStatus::NonSuccess {
kind: quick_junit::NonSuccessKind::Error,
message: Some("Cancelled".to_string()),
ty: None,
description: None,
reruns: vec![],
},
}
}
fn convert_step_status(
status: &TestStepResult,
failure_format_options: &TestFailureFormatOptions,
) -> quick_junit::TestCaseStatus {
match status {
TestStepResult::Ok => quick_junit::TestCaseStatus::success(),
TestStepResult::Ignored => quick_junit::TestCaseStatus::skipped(),
TestStepResult::Failed(failure) => {
let message = if failure_format_options.strip_ascii_color {
strip_ansi_codes(&failure.overview()).to_string()
} else {
failure.overview()
};
quick_junit::TestCaseStatus::NonSuccess {
kind: quick_junit::NonSuccessKind::Failure,
message: Some(message),
ty: None,
description: Some(
failure.format(failure_format_options).into_owned(),
),
reruns: vec![],
}
}
}
}
}
impl TestReporter for JunitTestReporter {
  /// Registers a pending test case (status "skipped" until a result
  /// arrives), keyed by the test id, and records it in the name tree.
  fn report_register(&mut self, description: &TestDescription) {
    let mut case = quick_junit::TestCase::new(
      description.name.clone(),
      quick_junit::TestCaseStatus::skipped(),
    );
    case.classname = Some(to_relative_path_or_remote_url(
      &self.cwd,
      &description.location.file_name,
    ));
    case.extra.insert(
      String::from("line"),
      description.location.line_number.to_string(),
    );
    case.extra.insert(
      String::from("col"),
      description.location.column_number.to_string(),
    );
    self.cases.insert(description.id, case);
    self.test_name_tree.add_node(description.clone().into());
  }
  fn report_plan(&mut self, _plan: &TestPlan) {}
  fn report_slow(&mut self, _description: &TestDescription, _elapsed: u64) {}
  fn report_wait(&mut self, _description: &TestDescription) {}
  fn report_output(&mut self, _output: &[u8]) {
    /*
    TODO(skycoop): Right now I can't include stdout/stderr in the report because
    we have a global pair of output streams that don't differentiate between the
    output of different tests. This is a nice to have feature, so we can come
    back to it later
    */
  }
  /// Records the final status and duration (`elapsed` in ms) of a test.
  fn report_result(
    &mut self,
    description: &TestDescription,
    result: &TestResult,
    elapsed: u64,
  ) {
    if let Some(case) = self.cases.get_mut(&description.id) {
      case.status = Self::convert_status(result, &self.failure_format_options);
      case.set_time(Duration::from_millis(elapsed));
    }
  }
  fn report_uncaught_error(&mut self, _origin: &str, _error: Box<JsError>) {}
  /// Registers a pending case for a test step; its display name is the
  /// full hierarchical name ("parent > child > ...") from the name tree.
  fn report_step_register(&mut self, description: &TestStepDescription) {
    self.test_name_tree.add_node(description.clone().into());
    let test_case_name =
      self.test_name_tree.construct_full_test_name(description.id);
    let mut case = quick_junit::TestCase::new(
      test_case_name,
      quick_junit::TestCaseStatus::skipped(),
    );
    case.classname = Some(to_relative_path_or_remote_url(
      &self.cwd,
      &description.location.file_name,
    ));
    case.extra.insert(
      String::from("line"),
      description.location.line_number.to_string(),
    );
    case.extra.insert(
      String::from("col"),
      description.location.column_number.to_string(),
    );
    self.cases.insert(description.id, case);
  }
  fn report_step_wait(&mut self, _description: &TestStepDescription) {}
  /// Records the final status and duration (`elapsed` in ms) of a step.
  fn report_step_result(
    &mut self,
    description: &TestStepDescription,
    result: &TestStepResult,
    elapsed: u64,
    _tests: &IndexMap<usize, TestDescription>,
    _test_steps: &IndexMap<usize, TestStepDescription>,
  ) {
    if let Some(case) = self.cases.get_mut(&description.id) {
      case.status =
        Self::convert_step_status(result, &self.failure_format_options);
      case.set_time(Duration::from_millis(elapsed));
    }
  }
  fn report_summary(
    &mut self,
    _elapsed: &Duration,
    _tests: &IndexMap<usize, TestDescription>,
    _test_steps: &IndexMap<usize, TestStepDescription>,
  ) {
  }
  /// Marks all still-pending tests as cancelled so the report reflects
  /// the interrupted run.
  fn report_sigint(
    &mut self,
    tests_pending: &HashSet<usize>,
    tests: &IndexMap<usize, TestDescription>,
    _test_steps: &IndexMap<usize, TestStepDescription>,
  ) {
    for id in tests_pending {
      if let Some(description) = tests.get(id) {
        self.report_result(description, &TestResult::Cancelled, 0)
      }
    }
  }
  fn report_completed(&mut self) {
    // TODO(mmastrac): This reporter does not handle stdout/stderr yet, and when we do, we may need to redirect
    // pre-and-post-test output somewhere.
  }
  /// Groups cases into per-file suites and serializes the JUnit XML to
  /// stdout (`output_path == "-"`) or the configured file.
  fn flush_report(
    &mut self,
    elapsed: &Duration,
    tests: &IndexMap<usize, TestDescription>,
    test_steps: &IndexMap<usize, TestStepDescription>,
  ) -> anyhow::Result<()> {
    let mut suites: IndexMap<String, quick_junit::TestSuite> = IndexMap::new();
    for (id, case) in &self.cases {
      // A case id belongs to either a test or a test step.
      let abs_filename = match (tests.get(id), test_steps.get(id)) {
        (Some(test), _) => &test.location.file_name,
        (_, Some(step)) => &step.location.file_name,
        (None, None) => {
          unreachable!("Unknown test ID '{id}' provided");
        }
      };
      let filename = to_relative_path_or_remote_url(&self.cwd, abs_filename);
      suites
        .entry(filename.clone())
        .and_modify(|s| {
          s.add_test_case(case.clone());
        })
        .or_insert_with(|| {
          let mut suite = quick_junit::TestSuite::new(filename);
          suite.add_test_case(case.clone());
          suite
        });
    }
    let mut report = quick_junit::Report::new("deno test");
    report
      .set_time(*elapsed)
      .add_test_suites(suites.into_values());
    if self.output_path == "-" {
      report
        .serialize(std::io::stdout())
        .with_context(|| "Failed to write JUnit report to stdout")?;
    } else {
      let file = crate::util::fs::create_file(Path::new(&self.output_path))
        .context("Failed to open JUnit report file.")?;
      report.serialize(file).with_context(|| {
        format!("Failed to write JUnit report to {}", self.output_path)
      })?;
    }
    Ok(())
  }
}
/// Maps a test/step id to its tree node so full hierarchical test names
/// ("parent > child > ...") can be reconstructed.
#[derive(Debug, Default)]
struct TestNameTree(IndexMap<usize, TestNameTreeNode>);
impl TestNameTree {
  /// Creates an empty tree sized for a typical test run.
  fn new() -> Self {
    // Pre-allocate some space to avoid excessive reallocations.
    Self(IndexMap::with_capacity(256))
  }
  /// Inserts (or replaces) the node keyed by its own id.
  fn add_node(&mut self, node: TestNameTreeNode) {
    self.0.insert(node.id, node);
  }
  /// Constructs the full test name by walking from the given node up
  /// through its ancestors, then joining the names root-first.
  ///
  /// Panics if `id` (or an ancestor id recorded by a child node) is not
  /// present in the tree.
  fn construct_full_test_name(&self, id: usize) -> String {
    let mut segments = VecDeque::new();
    let mut cursor = Some(id);
    while let Some(current) = cursor {
      let Some(node) = self.0.get(&current) else {
        // The ID specified as a parent node by the child node should exist in
        // the tree, but it doesn't. In this case we give up constructing the
        // full test name.
        unreachable!("Unregistered test ID '{current}' provided");
      };
      segments.push_front(node.test_name.as_str());
      cursor = node.parent_id;
    }
    if segments.is_empty() {
      unreachable!("Unregistered test ID '{id}' provided");
    }
    Vec::from(segments).join(" > ")
  }
}
/// A node in the test-name tree: one test or test step plus a link to
/// its parent.
#[derive(Debug)]
struct TestNameTreeNode {
  /// Unique id of the test or test step.
  id: usize,
  /// Id of the parent; `None` for a top-level test.
  parent_id: Option<usize>,
  /// Display name of this test or step.
  test_name: String,
}
/// Top-level tests become root nodes (no parent).
impl From<TestDescription> for TestNameTreeNode {
  fn from(description: TestDescription) -> Self {
    Self {
      id: description.id,
      parent_id: None,
      test_name: description.name,
    }
  }
}
/// Test steps keep a link to their parent test or step.
impl From<TestStepDescription> for TestNameTreeNode {
  fn from(description: TestStepDescription) -> Self {
    Self {
      id: description.id,
      parent_id: Some(description.parent_id),
      test_name: description.name,
    }
  }
}
#[cfg(test)]
mod tests {
  use deno_core::error::JsStackFrame;
  use super::*;
  #[test]
  fn construct_full_test_name_one_node() {
    let mut tree = TestNameTree::new();
    tree.add_node(TestNameTreeNode {
      id: 0,
      parent_id: None,
      test_name: "root".to_string(),
    });
    assert_eq!(tree.construct_full_test_name(0), "root".to_string());
  }
  #[test]
  fn construct_full_test_name_two_level_hierarchy() {
    let mut tree = TestNameTree::new();
    tree.add_node(TestNameTreeNode {
      id: 0,
      parent_id: None,
      test_name: "root".to_string(),
    });
    tree.add_node(TestNameTreeNode {
      id: 1,
      parent_id: Some(0),
      test_name: "child".to_string(),
    });
    assert_eq!(tree.construct_full_test_name(0), "root".to_string());
    assert_eq!(tree.construct_full_test_name(1), "root > child".to_string());
  }
  #[test]
  fn construct_full_test_name_three_level_hierarchy() {
    let mut tree = TestNameTree::new();
    tree.add_node(TestNameTreeNode {
      id: 0,
      parent_id: None,
      test_name: "root".to_string(),
    });
    tree.add_node(TestNameTreeNode {
      id: 1,
      parent_id: Some(0),
      test_name: "child".to_string(),
    });
    tree.add_node(TestNameTreeNode {
      id: 2,
      parent_id: Some(1),
      test_name: "grandchild".to_string(),
    });
    assert_eq!(tree.construct_full_test_name(0), "root".to_string());
    assert_eq!(tree.construct_full_test_name(1), "root > child".to_string());
    assert_eq!(
      tree.construct_full_test_name(2),
      "root > child > grandchild".to_string()
    );
  }
  #[test]
  fn construct_full_test_name_one_root_two_chains() {
    //     0
    //    / \
    //   1   2
    //  / \
    // 3   4
    let mut tree = TestNameTree::new();
    tree.add_node(TestNameTreeNode {
      id: 0,
      parent_id: None,
      test_name: "root".to_string(),
    });
    tree.add_node(TestNameTreeNode {
      id: 1,
      parent_id: Some(0),
      test_name: "child 1".to_string(),
    });
    tree.add_node(TestNameTreeNode {
      id: 2,
      parent_id: Some(0),
      test_name: "child 2".to_string(),
    });
    tree.add_node(TestNameTreeNode {
      id: 3,
      parent_id: Some(1),
      test_name: "grandchild 1".to_string(),
    });
    tree.add_node(TestNameTreeNode {
      id: 4,
      parent_id: Some(1),
      test_name: "grandchild 2".to_string(),
    });
    assert_eq!(tree.construct_full_test_name(0), "root".to_string());
    assert_eq!(
      tree.construct_full_test_name(1),
      "root > child 1".to_string(),
    );
    assert_eq!(
      tree.construct_full_test_name(2),
      "root > child 2".to_string(),
    );
    assert_eq!(
      tree.construct_full_test_name(3),
      "root > child 1 > grandchild 1".to_string(),
    );
    assert_eq!(
      tree.construct_full_test_name(4),
      "root > child 1 > grandchild 2".to_string(),
    );
  }
  #[test]
  fn escapes_short_failure_message() {
    let jserror = JsError {
      exception_message: "Uncaught Error: \x1b[31mtest error\x1b[0m"
        .to_string(),
      frames: vec![JsStackFrame::from_location(
        Some("File name".to_string()),
        Some(10),
        Some(15),
      )],
      name: Some("Error".to_string()),
      message: Some("test error".to_string()),
      // Fixed: the inner quotes were unescaped (`""source ...""`), which
      // does not compile; the literal is a quoted source line.
      source_line: Some("\"source \x1b[32mline\x1b[0m\"".to_string()),
      source_line_frame_index: Some(0),
      stack: None,
      cause: None,
      aggregated: None,
      additional_properties: vec![],
    };
    let step_result =
      TestStepResult::Failed(TestFailure::JsError(Box::new(jserror)));
    let step = JunitTestReporter::convert_step_status(
      &step_result,
      &TestFailureFormatOptions {
        strip_ascii_color: true,
        hide_stacktraces: false,
        ..Default::default()
      },
    );
    if let quick_junit::TestCaseStatus::NonSuccess {
      description,
      message,
      ..
    } = step
    {
      assert!(!description.unwrap().contains("\x1b"));
      assert!(!message.unwrap().contains("\x1b"));
    } else {
      panic!("Expected NonSuccess status");
    }
  }
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/tools/test/reporters/mod.rs | cli/tools/test/reporters/mod.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use super::*;
mod common;
mod compound;
mod dot;
mod junit;
mod pretty;
mod tap;
pub use compound::CompoundTestReporter;
pub use dot::DotTestReporter;
pub use junit::JunitTestReporter;
pub use pretty::PrettyTestReporter;
pub use tap::TapTestReporter;
/// Receives test lifecycle events and renders them in a reporter-specific
/// format (implemented by the pretty, dot, JUnit, and TAP reporters).
pub trait TestReporter {
  /// A test has been registered, before execution begins.
  fn report_register(&mut self, description: &TestDescription);
  /// The plan (test count and origin) for a test file is known.
  fn report_plan(&mut self, plan: &TestPlan);
  /// A test is about to start executing.
  fn report_wait(&mut self, description: &TestDescription);
  /// A running test has been deemed slow; `elapsed` is its elapsed time
  /// (presumably milliseconds, matching the other elapsed params — confirm
  /// with callers).
  fn report_slow(&mut self, description: &TestDescription, elapsed: u64);
  /// Raw stdout/stderr bytes produced while tests run.
  fn report_output(&mut self, output: &[u8]);
  /// A test finished with `result`; `elapsed` is in milliseconds.
  fn report_result(
    &mut self,
    description: &TestDescription,
    result: &TestResult,
    elapsed: u64,
  );
  /// An error escaped the test harness for the given origin.
  fn report_uncaught_error(&mut self, origin: &str, error: Box<JsError>);
  /// A test step (subtest) has been registered.
  fn report_step_register(&mut self, description: &TestStepDescription);
  /// A test step is about to start executing.
  fn report_step_wait(&mut self, description: &TestStepDescription);
  /// A test step finished; `tests`/`test_steps` give lookup tables keyed
  /// by id for resolving related descriptions.
  fn report_step_result(
    &mut self,
    desc: &TestStepDescription,
    result: &TestStepResult,
    elapsed: u64,
    tests: &IndexMap<usize, TestDescription>,
    test_steps: &IndexMap<usize, TestStepDescription>,
  );
  /// The run finished; emit any aggregate summary.
  fn report_summary(
    &mut self,
    elapsed: &Duration,
    tests: &IndexMap<usize, TestDescription>,
    test_steps: &IndexMap<usize, TestStepDescription>,
  );
  /// The run was interrupted by SIGINT with the given tests still pending.
  fn report_sigint(
    &mut self,
    tests_pending: &HashSet<usize>,
    tests: &IndexMap<usize, TestDescription>,
    test_steps: &IndexMap<usize, TestStepDescription>,
  );
  /// All tests have completed (before `flush_report`).
  fn report_completed(&mut self);
  /// Writes out any buffered report (e.g. the JUnit XML file).
  fn flush_report(
    &mut self,
    elapsed: &Duration,
    tests: &IndexMap<usize, TestDescription>,
    test_steps: &IndexMap<usize, TestStepDescription>,
  ) -> anyhow::Result<()>;
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/tools/test/reporters/dot.rs | cli/tools/test/reporters/dot.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use super::common;
use super::fmt::to_relative_path_or_remote_url;
use super::*;
pub struct DotTestReporter {
n: usize,
width: usize,
cwd: Url,
summary: TestSummary,
failure_format_options: TestFailureFormatOptions,
}
#[allow(clippy::print_stdout)]
impl DotTestReporter {
pub fn new(
cwd: Url,
failure_format_options: TestFailureFormatOptions,
) -> DotTestReporter {
let console_width = if let Some(size) = crate::util::console::console_size()
{
size.cols as usize
} else {
0
};
let console_width = (console_width as f32 * 0.8) as usize;
DotTestReporter {
n: 0,
width: console_width,
cwd,
summary: TestSummary::new(),
failure_format_options,
}
}
fn print_status(&mut self, status: String) {
// Non-TTY console prints every result on a separate line.
if self.width == 0 {
println!("{}", status);
return;
}
if self.n != 0 && self.n.is_multiple_of(self.width) {
println!();
}
self.n += 1;
print!("{}", status);
}
fn print_test_step_result(&mut self, result: &TestStepResult) {
let status = match result {
TestStepResult::Ok => fmt_ok(),
TestStepResult::Ignored => fmt_ignored(),
TestStepResult::Failed(_failure) => fmt_failed(),
};
self.print_status(status);
}
fn print_test_result(&mut self, result: &TestResult) {
let status = match result {
TestResult::Ok => fmt_ok(),
TestResult::Ignored => fmt_ignored(),
TestResult::Failed(_failure) => fmt_failed(),
TestResult::Cancelled => fmt_cancelled(),
};
self.print_status(status);
}
}
fn fmt_ok() -> String {
colors::gray(".").to_string()
}
fn fmt_ignored() -> String {
colors::cyan(",").to_string()
}
fn fmt_failed() -> String {
colors::red_bold("!").to_string()
}
fn fmt_cancelled() -> String {
colors::gray("!").to_string()
}
#[allow(clippy::print_stdout)]
impl TestReporter for DotTestReporter {
fn report_register(&mut self, _description: &TestDescription) {}
fn report_plan(&mut self, plan: &TestPlan) {
self.summary.total += plan.total;
self.summary.filtered_out += plan.filtered_out;
}
fn report_wait(&mut self, _description: &TestDescription) {
// flush for faster feedback when line buffered
std::io::stdout().flush().ok();
}
fn report_slow(&mut self, _description: &TestDescription, _elapsed: u64) {}
fn report_output(&mut self, _output: &[u8]) {}
fn report_result(
&mut self,
description: &TestDescription,
result: &TestResult,
_elapsed: u64,
) {
match &result {
TestResult::Ok => {
self.summary.passed += 1;
}
TestResult::Ignored => {
self.summary.ignored += 1;
}
TestResult::Failed(failure) => {
self.summary.failed += 1;
self
.summary
.failures
.push((description.into(), failure.clone()));
}
TestResult::Cancelled => {
self.summary.failed += 1;
}
}
self.print_test_result(result);
}
fn report_uncaught_error(&mut self, origin: &str, error: Box<JsError>) {
self.summary.failed += 1;
self
.summary
.uncaught_errors
.push((origin.to_string(), error));
println!(
"Uncaught error from {} {}",
to_relative_path_or_remote_url(&self.cwd, origin),
colors::red("FAILED")
);
}
fn report_step_register(&mut self, _description: &TestStepDescription) {}
fn report_step_wait(&mut self, _description: &TestStepDescription) {
// flush for faster feedback when line buffered
std::io::stdout().flush().ok();
}
fn report_step_result(
&mut self,
desc: &TestStepDescription,
result: &TestStepResult,
_elapsed: u64,
tests: &IndexMap<usize, TestDescription>,
test_steps: &IndexMap<usize, TestStepDescription>,
) {
match &result {
TestStepResult::Ok => {
self.summary.passed_steps += 1;
}
TestStepResult::Ignored => {
self.summary.ignored_steps += 1;
}
TestStepResult::Failed(failure) => {
self.summary.failed_steps += 1;
self.summary.failures.push((
TestFailureDescription {
id: desc.id,
name: common::format_test_step_ancestry(desc, tests, test_steps),
origin: desc.origin.clone(),
location: desc.location.clone(),
},
failure.clone(),
))
}
}
self.print_test_step_result(result);
}
fn report_summary(
&mut self,
elapsed: &Duration,
_tests: &IndexMap<usize, TestDescription>,
_test_steps: &IndexMap<usize, TestStepDescription>,
) {
common::report_summary(
&mut std::io::stdout(),
&self.cwd,
&self.summary,
elapsed,
&self.failure_format_options,
);
println!();
}
fn report_sigint(
&mut self,
tests_pending: &HashSet<usize>,
tests: &IndexMap<usize, TestDescription>,
test_steps: &IndexMap<usize, TestStepDescription>,
) {
common::report_sigint(
&mut std::io::stdout(),
&self.cwd,
tests_pending,
tests,
test_steps,
);
}
fn report_completed(&mut self) {}
fn flush_report(
&mut self,
_elapsed: &Duration,
_tests: &IndexMap<usize, TestDescription>,
_test_steps: &IndexMap<usize, TestStepDescription>,
) -> anyhow::Result<()> {
Ok(())
}
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/tools/test/reporters/common.rs | cli/tools/test/reporters/common.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use super::fmt::format_test_error;
use super::fmt::to_relative_path_or_remote_url;
use super::*;
pub(super) fn format_test_step_ancestry(
desc: &TestStepDescription,
tests: &IndexMap<usize, TestDescription>,
test_steps: &IndexMap<usize, TestStepDescription>,
) -> String {
let root;
let mut ancestor_names = vec![];
let mut current_desc = desc;
loop {
if let Some(step_desc) = test_steps.get(¤t_desc.parent_id) {
ancestor_names.push(&step_desc.name);
current_desc = step_desc;
} else {
root = tests.get(¤t_desc.parent_id).unwrap();
break;
}
}
ancestor_names.reverse();
let mut result = String::new();
result.push_str(&root.name);
result.push_str(" ... ");
for name in ancestor_names {
result.push_str(name);
result.push_str(" ... ");
}
result.push_str(&desc.name);
result
}
pub fn format_test_for_summary(
cwd: &Url,
desc: &TestFailureDescription,
) -> String {
format!(
"{} {}",
&desc.name,
colors::gray(format!(
"=> {}:{}:{}",
to_relative_path_or_remote_url(cwd, &desc.location.file_name),
desc.location.line_number,
desc.location.column_number
))
)
}
pub fn format_test_step_for_summary(
cwd: &Url,
desc: &TestStepDescription,
tests: &IndexMap<usize, TestDescription>,
test_steps: &IndexMap<usize, TestStepDescription>,
) -> String {
let long_name = format_test_step_ancestry(desc, tests, test_steps);
format!(
"{} {}",
long_name,
colors::gray(format!(
"=> {}:{}:{}",
to_relative_path_or_remote_url(cwd, &desc.location.file_name),
desc.location.line_number,
desc.location.column_number
))
)
}
pub(super) fn report_sigint(
writer: &mut dyn std::io::Write,
cwd: &Url,
tests_pending: &HashSet<usize>,
tests: &IndexMap<usize, TestDescription>,
test_steps: &IndexMap<usize, TestStepDescription>,
) {
if tests_pending.is_empty() {
return;
}
let mut formatted_pending = BTreeSet::new();
for id in tests_pending {
if let Some(desc) = tests.get(id) {
formatted_pending.insert(format_test_for_summary(cwd, &desc.into()));
}
if let Some(desc) = test_steps.get(id) {
formatted_pending
.insert(format_test_step_for_summary(cwd, desc, tests, test_steps));
}
}
writeln!(
writer,
"\n{} The following tests were pending:\n",
colors::intense_blue("SIGINT")
)
.ok();
for entry in formatted_pending {
writeln!(writer, "{}", entry).ok();
}
writeln!(writer).ok();
}
pub(super) fn report_summary(
writer: &mut dyn std::io::Write,
cwd: &Url,
summary: &TestSummary,
elapsed: &Duration,
options: &TestFailureFormatOptions,
) {
if !summary.failures.is_empty() || !summary.uncaught_errors.is_empty() {
#[allow(clippy::type_complexity)] // Type alias doesn't look better here
let mut failures_by_origin: BTreeMap<
String,
(
Vec<(&TestFailureDescription, &TestFailure)>,
Option<&JsError>,
),
> = BTreeMap::default();
let mut failure_titles = vec![];
for (description, failure) in &summary.failures {
let (failures, _) = failures_by_origin
.entry(description.origin.clone())
.or_default();
failures.push((description, failure));
}
for (origin, js_error) in &summary.uncaught_errors {
let (_, uncaught_error) =
failures_by_origin.entry(origin.clone()).or_default();
let _ = uncaught_error.insert(js_error.as_ref());
}
// note: the trailing whitespace is intentional to get a red background
writeln!(writer, "\n{}\n", colors::white_bold_on_red(" ERRORS ")).ok();
for (origin, (failures, uncaught_error)) in failures_by_origin {
for (description, failure) in failures {
if !failure.hide_in_summary() {
let failure_title = format_test_for_summary(cwd, description);
writeln!(writer, "{}", &failure_title).ok();
writeln!(
writer,
"{}: {}",
colors::red_bold("error"),
failure.format(options)
)
.ok();
writeln!(writer).ok();
failure_titles.push(failure_title);
}
}
if let Some(js_error) = uncaught_error {
let failure_title = format!(
"{} (uncaught error)",
to_relative_path_or_remote_url(cwd, &origin)
);
writeln!(writer, "{}", &failure_title).ok();
writeln!(
writer,
"{}: {}",
colors::red_bold("error"),
format_test_error(js_error, options)
)
.ok();
writeln!(writer, "This error was not caught from a test and caused the test runner to fail on the referenced module.").ok();
writeln!(writer, "It most likely originated from a dangling promise, event/timeout handler or top-level code.").ok();
writeln!(writer).ok();
failure_titles.push(failure_title);
}
}
// note: the trailing whitespace is intentional to get a red background
writeln!(writer, "{}\n", colors::white_bold_on_red(" FAILURES ")).ok();
for failure_title in failure_titles {
writeln!(writer, "{failure_title}").ok();
}
}
let status = if summary.has_failed() {
colors::red("FAILED").to_string()
} else {
colors::green("ok").to_string()
};
let get_steps_text = |count: usize| -> String {
if count == 0 {
String::new()
} else if count == 1 {
" (1 step)".to_string()
} else {
format!(" ({count} steps)")
}
};
let mut summary_result = String::new();
write!(
summary_result,
"{} passed{} | {} failed{}",
summary.passed,
get_steps_text(summary.passed_steps),
summary.failed,
get_steps_text(summary.failed_steps),
)
.ok();
let ignored_steps = get_steps_text(summary.ignored_steps);
if summary.ignored > 0 || !ignored_steps.is_empty() {
write!(
summary_result,
" | {} ignored{}",
summary.ignored, ignored_steps
)
.ok();
}
if summary.measured > 0 {
write!(summary_result, " | {} measured", summary.measured,).ok();
}
if summary.filtered_out > 0 {
write!(summary_result, " | {} filtered out", summary.filtered_out).ok();
};
writeln!(
writer,
"\n{} | {} {}",
status,
summary_result,
colors::gray(format!("({})", display::human_elapsed(elapsed.as_millis()))),
)
.ok();
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/tools/test/reporters/compound.rs | cli/tools/test/reporters/compound.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use super::*;
pub struct CompoundTestReporter {
test_reporters: Vec<Box<dyn TestReporter>>,
}
impl CompoundTestReporter {
pub fn new(test_reporters: Vec<Box<dyn TestReporter>>) -> Self {
Self { test_reporters }
}
}
impl TestReporter for CompoundTestReporter {
fn report_register(&mut self, description: &TestDescription) {
for reporter in &mut self.test_reporters {
reporter.report_register(description);
}
}
fn report_plan(&mut self, plan: &TestPlan) {
for reporter in &mut self.test_reporters {
reporter.report_plan(plan);
}
}
fn report_wait(&mut self, description: &TestDescription) {
for reporter in &mut self.test_reporters {
reporter.report_wait(description);
}
}
fn report_slow(&mut self, description: &TestDescription, elapsed: u64) {
for reporter in &mut self.test_reporters {
reporter.report_slow(description, elapsed);
}
}
fn report_output(&mut self, output: &[u8]) {
for reporter in &mut self.test_reporters {
reporter.report_output(output);
}
}
fn report_result(
&mut self,
description: &TestDescription,
result: &TestResult,
elapsed: u64,
) {
for reporter in &mut self.test_reporters {
reporter.report_result(description, result, elapsed);
}
}
fn report_uncaught_error(&mut self, origin: &str, error: Box<JsError>) {
for reporter in &mut self.test_reporters {
reporter.report_uncaught_error(origin, error.clone());
}
}
fn report_step_register(&mut self, description: &TestStepDescription) {
for reporter in &mut self.test_reporters {
reporter.report_step_register(description)
}
}
fn report_step_wait(&mut self, description: &TestStepDescription) {
for reporter in &mut self.test_reporters {
reporter.report_step_wait(description)
}
}
fn report_step_result(
&mut self,
desc: &TestStepDescription,
result: &TestStepResult,
elapsed: u64,
tests: &IndexMap<usize, TestDescription>,
test_steps: &IndexMap<usize, TestStepDescription>,
) {
for reporter in &mut self.test_reporters {
reporter.report_step_result(desc, result, elapsed, tests, test_steps);
}
}
fn report_summary(
&mut self,
elapsed: &Duration,
tests: &IndexMap<usize, TestDescription>,
test_steps: &IndexMap<usize, TestStepDescription>,
) {
for reporter in &mut self.test_reporters {
reporter.report_summary(elapsed, tests, test_steps);
}
}
fn report_sigint(
&mut self,
tests_pending: &HashSet<usize>,
tests: &IndexMap<usize, TestDescription>,
test_steps: &IndexMap<usize, TestStepDescription>,
) {
for reporter in &mut self.test_reporters {
reporter.report_sigint(tests_pending, tests, test_steps);
}
}
fn report_completed(&mut self) {
for reporter in &mut self.test_reporters {
reporter.report_completed();
}
}
fn flush_report(
&mut self,
elapsed: &Duration,
tests: &IndexMap<usize, TestDescription>,
test_steps: &IndexMap<usize, TestStepDescription>,
) -> anyhow::Result<()> {
let mut errors = vec![];
for reporter in &mut self.test_reporters {
if let Err(err) = reporter.flush_report(elapsed, tests, test_steps) {
errors.push(err)
}
}
if errors.is_empty() {
Ok(())
} else {
anyhow::bail!(
"error in one or more wrapped reporters:\n{}",
errors
.iter()
.enumerate()
.fold(String::new(), |acc, (i, err)| {
format!("{}Error #{}: {:?}\n", acc, i + 1, err)
})
)
}
}
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/tools/test/reporters/pretty.rs | cli/tools/test/reporters/pretty.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use super::common;
use super::fmt::to_relative_path_or_remote_url;
use super::*;
pub struct PrettyTestReporter {
parallel: bool,
echo_output: bool,
in_new_line: bool,
phase: &'static str,
filter: bool,
repl: bool,
scope_test_id: Option<usize>,
cwd: Url,
did_have_user_output: bool,
started_tests: bool,
ended_tests: bool,
child_results_buffer:
HashMap<usize, IndexMap<usize, (TestStepDescription, TestStepResult, u64)>>,
summary: TestSummary,
writer: Box<dyn std::io::Write>,
failure_format_options: TestFailureFormatOptions,
}
impl PrettyTestReporter {
pub fn new(
parallel: bool,
echo_output: bool,
filter: bool,
repl: bool,
cwd: Url,
failure_format_options: TestFailureFormatOptions,
) -> PrettyTestReporter {
PrettyTestReporter {
parallel,
echo_output,
in_new_line: true,
phase: "",
filter,
repl,
scope_test_id: None,
cwd,
did_have_user_output: false,
started_tests: false,
ended_tests: false,
child_results_buffer: Default::default(),
summary: TestSummary::new(),
writer: Box::new(std::io::stdout()),
failure_format_options,
}
}
pub fn with_writer(self, writer: Box<dyn std::io::Write>) -> Self {
Self { writer, ..self }
}
fn force_report_wait(&mut self, description: &TestDescription) {
if !self.in_new_line {
writeln!(&mut self.writer).ok();
}
if self.parallel {
write!(
&mut self.writer,
"{}",
colors::gray(format!(
"{} => ",
to_relative_path_or_remote_url(&self.cwd, &description.origin)
))
)
.ok();
}
write!(&mut self.writer, "{} ...", description.name).ok();
self.in_new_line = false;
// flush for faster feedback when line buffered
std::io::stdout().flush().ok();
self.scope_test_id = Some(description.id);
}
fn force_report_step_wait(&mut self, description: &TestStepDescription) {
self.write_output_end();
if !self.in_new_line {
writeln!(&mut self.writer).ok();
}
write!(
&mut self.writer,
"{}{} ...",
" ".repeat(description.level),
description.name
)
.ok();
self.in_new_line = false;
// flush for faster feedback when line buffered
std::io::stdout().flush().ok();
self.scope_test_id = Some(description.id);
}
fn force_report_step_result(
&mut self,
description: &TestStepDescription,
result: &TestStepResult,
elapsed: u64,
) {
self.write_output_end();
if self.in_new_line || self.scope_test_id != Some(description.id) {
self.force_report_step_wait(description);
}
if !self.parallel {
let child_results = self
.child_results_buffer
.remove(&description.id)
.unwrap_or_default();
for (desc, result, elapsed) in child_results.values() {
self.force_report_step_result(desc, result, *elapsed);
}
if !child_results.is_empty() {
self.force_report_step_wait(description);
}
}
let status = match &result {
TestStepResult::Ok => colors::green("ok").to_string(),
TestStepResult::Ignored => colors::yellow("ignored").to_string(),
TestStepResult::Failed(failure) => failure.format_label(),
};
write!(&mut self.writer, " {status}").ok();
if let TestStepResult::Failed(failure) = result
&& let Some(inline_summary) = failure.format_inline_summary()
{
write!(&mut self.writer, " ({})", inline_summary).ok();
}
if !matches!(result, TestStepResult::Failed(TestFailure::Incomplete)) {
write!(
&mut self.writer,
" {}",
colors::gray(format!("({})", display::human_elapsed(elapsed.into())))
)
.ok();
}
writeln!(&mut self.writer).ok();
self.in_new_line = true;
if self.parallel {
self.scope_test_id = None;
} else {
self.scope_test_id = Some(description.parent_id);
}
self
.child_results_buffer
.entry(description.parent_id)
.or_default()
.shift_remove(&description.id);
}
fn write_output_end(&mut self) {
if self.did_have_user_output {
writeln!(
&mut self.writer,
"{}",
colors::gray(format!("----- {}output end -----", self.phase))
)
.ok();
self.in_new_line = true;
self.did_have_user_output = false;
}
}
}
impl TestReporter for PrettyTestReporter {
fn report_register(&mut self, _description: &TestDescription) {}
fn report_plan(&mut self, plan: &TestPlan) {
self.write_output_end();
self.summary.total += plan.total;
self.summary.filtered_out += plan.filtered_out;
if self.repl {
return;
}
if self.parallel || (self.filter && plan.total == 0) {
return;
}
let inflection = if plan.total == 1 { "test" } else { "tests" };
writeln!(
&mut self.writer,
"{}",
colors::gray(format!(
"running {} {} from {}",
plan.total,
inflection,
to_relative_path_or_remote_url(&self.cwd, &plan.origin)
))
)
.ok();
self.in_new_line = true;
}
fn report_wait(&mut self, description: &TestDescription) {
self.write_output_end();
if !self.parallel {
self.force_report_wait(description);
}
self.started_tests = true;
}
fn report_slow(&mut self, description: &TestDescription, elapsed: u64) {
writeln!(
&mut self.writer,
"{}",
colors::yellow_bold(format!(
"'{}' has been running for over {}",
description.name,
colors::gray(format!("({})", display::human_elapsed(elapsed.into()))),
))
)
.ok();
}
fn report_output(&mut self, output: &[u8]) {
if !self.echo_output {
return;
}
if !self.did_have_user_output {
self.did_have_user_output = true;
if !self.in_new_line {
writeln!(&mut self.writer).ok();
}
self.phase = if !self.started_tests {
"pre-test "
} else if self.ended_tests {
"post-test "
} else {
""
};
writeln!(
&mut self.writer,
"{}",
colors::gray(format!("------- {}output -------", self.phase))
)
.ok();
self.in_new_line = true;
}
// output everything to stdout in order to prevent
// stdout and stderr racing
std::io::stdout().write_all(output).ok();
}
fn report_result(
&mut self,
description: &TestDescription,
result: &TestResult,
elapsed: u64,
) {
match &result {
TestResult::Ok => {
self.summary.passed += 1;
}
TestResult::Ignored => {
self.summary.ignored += 1;
}
TestResult::Failed(failure) => {
self.summary.failed += 1;
self
.summary
.failures
.push((description.into(), failure.clone()));
}
TestResult::Cancelled => {
self.summary.failed += 1;
}
}
if self.parallel {
self.force_report_wait(description);
}
self.write_output_end();
if self.in_new_line || self.scope_test_id != Some(description.id) {
self.force_report_wait(description);
}
let status = match result {
TestResult::Ok => colors::green("ok").to_string(),
TestResult::Ignored => colors::yellow("ignored").to_string(),
TestResult::Failed(failure) => failure.format_label(),
TestResult::Cancelled => colors::gray("cancelled").to_string(),
};
write!(&mut self.writer, " {status}").ok();
if let TestResult::Failed(failure) = result
&& let Some(inline_summary) = failure.format_inline_summary()
{
write!(&mut self.writer, " ({})", inline_summary).ok();
}
writeln!(
&mut self.writer,
" {}",
colors::gray(format!("({})", display::human_elapsed(elapsed.into())))
)
.ok();
self.in_new_line = true;
self.scope_test_id = None;
}
fn report_uncaught_error(&mut self, origin: &str, error: Box<JsError>) {
self.summary.failed += 1;
self
.summary
.uncaught_errors
.push((origin.to_string(), error));
if !self.in_new_line {
writeln!(&mut self.writer).ok();
}
writeln!(
&mut self.writer,
"Uncaught error from {} {}",
to_relative_path_or_remote_url(&self.cwd, origin),
colors::red("FAILED")
)
.ok();
self.in_new_line = true;
self.did_have_user_output = false;
}
fn report_step_register(&mut self, _description: &TestStepDescription) {}
fn report_step_wait(&mut self, description: &TestStepDescription) {
if !self.parallel && self.scope_test_id == Some(description.parent_id) {
self.force_report_step_wait(description);
}
}
fn report_step_result(
&mut self,
desc: &TestStepDescription,
result: &TestStepResult,
elapsed: u64,
tests: &IndexMap<usize, TestDescription>,
test_steps: &IndexMap<usize, TestStepDescription>,
) {
match &result {
TestStepResult::Ok => {
self.summary.passed_steps += 1;
}
TestStepResult::Ignored => {
self.summary.ignored_steps += 1;
}
TestStepResult::Failed(failure) => {
self.summary.failed_steps += 1;
self.summary.failures.push((
TestFailureDescription {
id: desc.id,
name: common::format_test_step_ancestry(desc, tests, test_steps),
origin: desc.origin.clone(),
location: desc.location.clone(),
},
failure.clone(),
))
}
}
if self.parallel {
self.write_output_end();
write!(
&mut self.writer,
"{} {} ...",
colors::gray(format!(
"{} =>",
to_relative_path_or_remote_url(&self.cwd, &desc.origin)
)),
common::format_test_step_ancestry(desc, tests, test_steps)
)
.ok();
self.in_new_line = false;
self.scope_test_id = Some(desc.id);
self.force_report_step_result(desc, result, elapsed);
} else {
let sibling_results =
self.child_results_buffer.entry(desc.parent_id).or_default();
if self.scope_test_id == Some(desc.id)
|| self.scope_test_id == Some(desc.parent_id)
{
let sibling_results = std::mem::take(sibling_results);
self.force_report_step_result(desc, result, elapsed);
// Flush buffered sibling results.
for (desc, result, elapsed) in sibling_results.values() {
self.force_report_step_result(desc, result, *elapsed);
}
} else {
sibling_results
.insert(desc.id, (desc.clone(), result.clone(), elapsed));
}
}
}
fn report_summary(
&mut self,
elapsed: &Duration,
_tests: &IndexMap<usize, TestDescription>,
_test_steps: &IndexMap<usize, TestStepDescription>,
) {
self.write_output_end();
common::report_summary(
&mut self.writer,
&self.cwd,
&self.summary,
elapsed,
&self.failure_format_options,
);
if !self.repl {
writeln!(&mut self.writer).ok();
}
self.in_new_line = true;
}
fn report_sigint(
&mut self,
tests_pending: &HashSet<usize>,
tests: &IndexMap<usize, TestDescription>,
test_steps: &IndexMap<usize, TestStepDescription>,
) {
common::report_sigint(
&mut self.writer,
&self.cwd,
tests_pending,
tests,
test_steps,
);
self.in_new_line = true;
}
fn report_completed(&mut self) {
self.write_output_end();
self.ended_tests = true;
}
fn flush_report(
&mut self,
_elapsed: &Duration,
_tests: &IndexMap<usize, TestDescription>,
_test_steps: &IndexMap<usize, TestStepDescription>,
) -> anyhow::Result<()> {
self.writer.flush().ok();
Ok(())
}
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/tools/pm/cache_deps.rs | cli/tools/pm/cache_deps.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::borrow::Cow;
use std::collections::HashMap;
use std::collections::HashSet;
use std::sync::Arc;
use deno_core::error::AnyError;
use deno_core::futures::StreamExt;
use deno_core::futures::stream::FuturesUnordered;
use deno_core::url::Url;
use deno_graph::JsrPackageReqNotFoundError;
use deno_graph::packages::JsrPackageVersionInfo;
use deno_npm_installer::PackageCaching;
use deno_npm_installer::graph::NpmCachingStrategy;
use deno_semver::Version;
use deno_semver::jsr::JsrPackageReqReference;
use deno_semver::npm::NpmPackageReqReference;
use crate::factory::CliFactory;
use crate::graph_container::ModuleGraphContainer;
use crate::graph_container::ModuleGraphUpdatePermit;
use crate::graph_util::BuildGraphRequest;
use crate::graph_util::BuildGraphWithNpmOptions;
pub struct CacheTopLevelDepsOptions {
pub lockfile_only: bool,
}
pub async fn cache_top_level_deps(
// todo(dsherret): don't pass the factory into this function. Instead use ctor deps
factory: &CliFactory,
jsr_resolver: Option<Arc<crate::jsr::JsrFetchResolver>>,
options: CacheTopLevelDepsOptions,
) -> Result<(), AnyError> {
let _clear_guard = factory
.text_only_progress_bar()
.deferred_keep_initialize_alive();
let npm_installer = factory.npm_installer().await?;
npm_installer
.ensure_top_level_package_json_install()
.await?;
if let Some(lockfile) = factory.maybe_lockfile().await? {
lockfile.error_if_changed()?;
}
// cache as many entries in the import map as we can
let resolver = factory.workspace_resolver().await?;
let mut maybe_graph_error = Ok(());
if let Some(import_map) = resolver.maybe_import_map() {
let jsr_resolver = if let Some(resolver) = jsr_resolver {
resolver
} else {
Arc::new(crate::jsr::JsrFetchResolver::new(
factory.file_fetcher()?.clone(),
factory.jsr_version_resolver()?.clone(),
))
};
let mut graph_permit = factory
.main_module_graph_container()
.await?
.acquire_update_permit()
.await;
let graph = graph_permit.graph_mut();
if let Some(lockfile) = factory.maybe_lockfile().await? {
lockfile.fill_graph(graph);
}
let mut roots = Vec::new();
let mut info_futures = FuturesUnordered::new();
let mut seen_reqs = HashSet::new();
let workspace_npm_packages = resolver
.package_jsons()
.filter_map(|pkg_json| {
pkg_json
.name
.as_deref()
.and_then(|name| Some((name, pkg_json.version.as_deref()?)))
})
.collect::<HashMap<_, _>>();
let workspace_jsr_packages = resolver.jsr_packages();
for entry in import_map.imports().entries().chain(
import_map
.scopes()
.flat_map(|scope| scope.imports.entries()),
) {
let Some(specifier) = entry.value else {
continue;
};
match specifier.scheme() {
"jsr" => {
let specifier_str = specifier.as_str();
let Ok(req_ref) = JsrPackageReqReference::from_str(specifier_str)
else {
continue;
};
if workspace_jsr_packages
.iter()
.any(|pkg| pkg.matches_req(req_ref.req()))
{
// do not install a workspace jsr package
continue;
}
if let Some(sub_path) = req_ref.sub_path() {
if sub_path.ends_with('/') {
continue;
}
roots.push(specifier.clone());
continue;
}
if !seen_reqs.insert(req_ref.req().clone()) {
continue;
}
let resolved_req = graph.packages.mappings().get(req_ref.req());
let resolved_req = resolved_req.and_then(|nv| {
// the version might end up being upgraded to a newer version that's already in
// the graph (due to a reverted change), in which case our exports could end up
// being wrong. to avoid that, see if there's a newer version that matches the version
// req.
let versions =
graph.packages.versions_by_name(&req_ref.req().name)?;
let mut best = nv;
for version in versions {
if version.version > best.version
&& req_ref.req().version_req.matches(&version.version)
{
best = version;
}
}
Some(best)
});
let jsr_resolver = jsr_resolver.clone();
info_futures.push(async move {
let nv = if let Some(nv) = resolved_req {
Cow::Borrowed(nv)
} else if let Some(nv) =
jsr_resolver.req_to_nv(req_ref.req()).await?
{
Cow::Owned(nv)
} else {
return Result::<
Option<(Url, Arc<JsrPackageVersionInfo>)>,
JsrPackageReqNotFoundError,
>::Ok(None);
};
if let Some(info) = jsr_resolver.package_version_info(&nv).await {
return Ok(Some((specifier.clone(), info)));
}
Ok(None)
});
}
"npm" => {
let Ok(req_ref) =
NpmPackageReqReference::from_str(specifier.as_str())
else {
continue;
};
let version = workspace_npm_packages.get(&*req_ref.req().name);
if let Some(version) = version {
let Ok(version) = Version::parse_from_npm(version) else {
continue;
};
let version_req = &req_ref.req().version_req;
if version_req.tag().is_none() && version_req.matches(&version) {
// if version req matches the workspace package's version, use that
// (so it doesn't need to be installed)
continue;
}
}
roots.push(specifier.clone())
}
_ => {
if entry.key.ends_with('/') && specifier.as_str().ends_with('/') {
continue;
}
if specifier.scheme() == "file"
&& let Ok(path) = specifier.to_file_path()
&& !path.is_file()
{
continue;
}
roots.push(specifier.clone());
}
}
}
while let Some(info_future) = info_futures.next().await {
if let Some((specifier, info)) = info_future? {
let exports = info.exports();
for (k, _) in exports {
if let Ok(spec) = specifier.join(k) {
roots.push(spec);
}
}
}
}
drop(info_futures);
let graph_builder = factory.module_graph_builder().await?;
graph_builder
.build_graph_with_npm_resolution(
graph,
BuildGraphWithNpmOptions {
request: BuildGraphRequest::Roots(roots.clone()),
loader: None,
is_dynamic: false,
npm_caching: NpmCachingStrategy::Manual,
},
)
.await?;
maybe_graph_error =
graph_builder.graph_roots_valid(graph, &roots, true, true);
}
if options.lockfile_only {
// do a resolution install if the npm snapshot is in a
// pending state due to a config file change
npm_installer.install_resolution_if_pending().await?;
} else {
npm_installer.cache_packages(PackageCaching::All).await?;
}
maybe_graph_error?;
Ok(())
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/tools/pm/audit.rs | cli/tools/pm/audit.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::io::Write;
use std::sync::Arc;
use deno_core::anyhow::Context;
use deno_core::error::AnyError;
use deno_core::futures;
use deno_core::futures::FutureExt;
use deno_core::futures::StreamExt;
use deno_core::serde_json;
use deno_npm::resolution::NpmResolutionSnapshot;
use deno_resolver::npmrc::npm_registry_url;
use eszip::v2::Url;
use http::header::HeaderName;
use http::header::HeaderValue;
use serde::Deserialize;
use serde::Serialize;
use crate::args::AuditFlags;
use crate::args::Flags;
use crate::colors;
use crate::factory::CliFactory;
use crate::http_util;
use crate::http_util::HttpClient;
use crate::http_util::HttpClientProvider;
use crate::sys::CliSys;
/// Entry point for `deno audit`: resolves the workspace's managed npm
/// dependency snapshot, queries the npm registry audit API and, when
/// `--socket` was passed, also the socket.dev firewall API.
///
/// Returns the process exit code (0 = no vulnerabilities, 1 = found).
pub async fn audit(
  flags: Arc<Flags>,
  audit_flags: AuditFlags,
) -> Result<i32, AnyError> {
  let factory = CliFactory::from_flags(flags);
  let workspace = factory.workspace_resolver().await?;
  let npm_resolver = factory.npm_resolver().await?;
  // Auditing requires a Deno-managed npm resolution; surface a proper
  // error instead of panicking (was `.unwrap()`).
  let npm_resolver = npm_resolver
    .as_managed()
    .context("npm audit requires a managed npm resolution")?;
  let snapshot = npm_resolver.resolution().snapshot();
  let sys = CliSys::default();
  let npm_url = npm_registry_url(&sys);
  let http_provider = HttpClientProvider::new(None, None);
  let http_client = http_provider
    .get_or_create()
    .context("Failed to create HTTP client")?;
  // Read the flag before `audit_flags` is moved into the call below.
  let use_socket = audit_flags.socket;
  let r = npm::call_audits_api(
    audit_flags,
    npm_url,
    workspace,
    &snapshot,
    http_client,
  )
  .await?;
  if use_socket {
    socket_dev::call_firewall_api(
      &snapshot,
      // Was `.unwrap()`; report client-creation failures like above.
      http_provider
        .get_or_create()
        .context("Failed to create HTTP client")?,
    )
    .await?;
  }
  Ok(r)
}
mod npm {
use std::collections::HashMap;
use std::collections::HashSet;
use deno_npm::NpmPackageId;
use deno_package_json::PackageJsonDepValue;
use deno_resolver::workspace::WorkspaceResolver;
use deno_semver::package::PackageNv;
use super::*;
use crate::sys::CliSys;
#[derive(Clone, Copy, Debug, PartialEq, PartialOrd)]
enum AdvisorySeverity {
Low,
Moderate,
High,
Critical,
}
impl AdvisorySeverity {
fn parse(str_: &str) -> Option<Self> {
match str_ {
"low" => Some(Self::Low),
"moderate" => Some(Self::Moderate),
"high" => Some(Self::High),
"critical" => Some(Self::Critical),
_ => None,
}
}
}
  /// Recursively builds the `dependencies` subtree of the npm audit
  /// request body for the transitive dependencies of `package_id`.
  ///
  /// `seen` prevents duplicate work and breaks dependency cycles. A
  /// dependency is marked `dev` when its *parent* package resolves from
  /// the dev-dependency subset snapshot.
  fn get_dependency_descriptors_for_deps(
    seen: &mut HashSet<PackageNv>,
    all_dependencies_snapshot: &NpmResolutionSnapshot,
    dev_dependencies_snapshot: &NpmResolutionSnapshot,
    package_id: &NpmPackageId,
  ) -> HashMap<String, Box<DependencyDescriptor>> {
    let mut is_dev = false;
    // Prefer the dev subset snapshot: resolving there marks this
    // package's direct deps as dev dependencies.
    let resolution_package =
      match dev_dependencies_snapshot.package_from_id(package_id) {
        Some(p) => {
          is_dev = true;
          p
        }
        None => all_dependencies_snapshot
          .package_from_id(package_id)
          .unwrap(),
      };
    let mut deps_map =
      HashMap::with_capacity(resolution_package.dependencies.len());
    for dep in resolution_package.dependencies.iter() {
      // Skip name+versions we've already descended into (also breaks
      // cycles); `insert` returns false for duplicates.
      if !seen.insert(dep.1.nv.clone()) {
        continue;
      }
      let dep_deps = get_dependency_descriptors_for_deps(
        seen,
        all_dependencies_snapshot,
        dev_dependencies_snapshot,
        dep.1,
      );
      deps_map.insert(
        dep.0.to_string(),
        Box::new(DependencyDescriptor {
          version: dep.1.nv.version.to_string(),
          dev: is_dev,
          // `requires` mirrors `dependencies` with just version strings,
          // matching the npm audit request shape.
          requires: dep_deps
            .iter()
            .map(|(k, v)| (k.to_string(), v.version.to_string()))
            .collect(),
          dependencies: dep_deps,
        }),
      );
    }
    deps_map
  }
pub async fn call_audits_api_inner(
client: &HttpClient,
npm_url: Url,
body: serde_json::Value,
) -> Result<AuditResponse, AnyError> {
let url = npm_url.join("/-/npm/v1/security/audits").unwrap();
let future = client.post_json(url, &body)?.send().boxed_local();
let response = future.await?;
let json_str = http_util::body_to_string(response)
.await
.context("Failed to read response from the npm registry API")?;
let response: AuditResponse = serde_json::from_str(&json_str)
.context("Failed to deserialize response from the npm registry API")?;
Ok(response)
}
/// Partition into as few groups as possible so that no partition
/// contains two entries with the same `name`.
pub fn partition_packages<'a>(
pkgs: &'a [&NpmPackageId],
) -> Vec<Vec<&'a NpmPackageId>> {
// 1) Group by name
let mut by_name: HashMap<&str, Vec<&NpmPackageId>> = HashMap::new();
for p in pkgs {
by_name.entry(&p.nv.name[..]).or_default().push(p);
}
// 2) The minimal number of partitions is the max multiplicity per name
let k = by_name.values().map(|v| v.len()).max().unwrap_or(0);
if k == 0 {
return Vec::new();
}
// 3) Create k partitions
let mut partitions: Vec<Vec<&NpmPackageId>> = vec![Vec::new(); k];
// 4) Round-robin each name-group across the partitions
for group in by_name.values() {
for (i, item) in group.iter().enumerate() {
partitions[i].push(*item);
}
}
partitions
}
/// Merges multiple audit responses into a single consolidated response
fn merge_responses(responses: Vec<AuditResponse>) -> AuditResponse {
let mut merged_advisories = HashMap::new();
let mut merged_actions = Vec::new();
let mut total_low = 0;
let mut total_moderate = 0;
let mut total_high = 0;
let mut total_critical = 0;
for response in responses {
// Merge advisories (HashMap by advisory ID)
for (id, advisory) in response.advisories {
merged_advisories.insert(id, advisory);
}
// Merge actions
merged_actions.extend(response.actions);
// Sum up vulnerability counts
total_low += response.metadata.vulnerabilities.low;
total_moderate += response.metadata.vulnerabilities.moderate;
total_high += response.metadata.vulnerabilities.high;
total_critical += response.metadata.vulnerabilities.critical;
}
AuditResponse {
advisories: merged_advisories,
actions: merged_actions,
metadata: AuditMetadata {
vulnerabilities: AuditVulnerabilities {
low: total_low,
moderate: total_moderate,
high: total_high,
critical: total_critical,
},
},
}
}
  /// Collects dependency information from the resolution snapshot,
  /// queries the npm registry's bulk audit endpoint (once per package
  /// partition), prints a report, and returns the process exit code
  /// (0 = no vulnerabilities, 1 = vulnerabilities found).
  pub async fn call_audits_api(
    audit_flags: AuditFlags,
    npm_url: Url,
    workspace: &WorkspaceResolver<CliSys>,
    npm_resolution_snapshot: &NpmResolutionSnapshot,
    client: HttpClient,
  ) -> Result<i32, AnyError> {
    let top_level_packages = npm_resolution_snapshot
      .top_level_packages()
      .collect::<Vec<_>>();
    // In deno.json users might define two different versions of the same package - so we need
    // to partition top level packages into buckets, to check all versions used.
    let top_level_packages_partitions = partition_packages(&top_level_packages);
    // NOTE(review): these maps are shared across partition iterations
    // and never cleared, so later requests also carry (same-name
    // overwritten) entries from earlier partitions. Every version still
    // gets audited once — presumably intentional, but worth confirming.
    let mut requires = HashMap::with_capacity(top_level_packages.len());
    let mut dependencies = HashMap::with_capacity(top_level_packages.len());
    // Collect all dev dependencies, so they can be properly marked in the request body - since
    // there's no way to specify `devDependencies` in `deno.json`, this is only iterating
    // through discovered `package.json` files.
    let mut all_dev_deps = Vec::with_capacity(32);
    for pkg_json in workspace.package_jsons() {
      let deps = pkg_json.resolve_local_package_json_deps();
      for v in deps.dev_dependencies.values() {
        let Ok(PackageJsonDepValue::Req(package_req)) = v else {
          continue;
        };
        all_dev_deps.push(package_req.clone());
      }
    }
    let dev_dependencies_snapshot =
      npm_resolution_snapshot.subset(&all_dev_deps);
    let mut responses = Vec::with_capacity(top_level_packages_partitions.len());
    // And now let's construct the request body we need for the npm audits API.
    let seen = &mut HashSet::with_capacity(top_level_packages.len() * 100);
    for partition in top_level_packages_partitions {
      for package in partition {
        // A package is flagged "dev" when it resolves in the
        // dev-dependency subset snapshot.
        let is_dev =
          dev_dependencies_snapshot.package_from_id(package).is_some();
        requires
          .insert(package.nv.name.to_string(), package.nv.version.to_string());
        seen.insert(package.nv.clone());
        let package_deps = get_dependency_descriptors_for_deps(
          seen,
          npm_resolution_snapshot,
          &dev_dependencies_snapshot,
          package,
        );
        dependencies.insert(
          package.nv.name.to_string(),
          Box::new(DependencyDescriptor {
            version: package.nv.version.to_string(),
            dev: is_dev,
            requires: package_deps
              .iter()
              .map(|(k, v)| (k.to_string(), v.version.to_string()))
              .collect(),
            dependencies: package_deps,
          }),
        );
      }
      // Shape expected by the `/-/npm/v1/security/audits` endpoint.
      let body = serde_json::json!({
        "dev": false,
        "install": [],
        "metadata": {},
        "remove": [],
        "requires": requires,
        "dependencies": dependencies,
      });
      let r = call_audits_api_inner(&client, npm_url.clone(), body).await;
      let audit_response: AuditResponse = match r {
        Ok(s) => s,
        Err(err) => {
          if audit_flags.ignore_registry_errors {
            // Best effort: report the failure but exit successfully.
            log::error!("Failed to get data from the registry: {}", err);
            return Ok(0);
          } else {
            return Err(err);
          }
        }
      };
      responses.push(audit_response);
    }
    // Merge all responses into a single response
    let response = merge_responses(responses);
    let vulns = response.metadata.vulnerabilities;
    if vulns.total() == 0 {
      _ = writeln!(&mut std::io::stdout(), "No known vulnerabilities found",);
      return Ok(0);
    }
    // Stable report order: sort by "<module>@<vulnerable range>".
    let mut advisories = response.advisories.values().collect::<Vec<_>>();
    advisories.sort_by_cached_key(|adv| {
      format!("{}@{}", adv.module_name, adv.vulnerable_versions)
    });
    // NOTE(review): `unwrap()` assumes the CLI layer validated the
    // `--severity` flag value — confirm against the flag parser.
    let minimal_severity =
      AdvisorySeverity::parse(&audit_flags.severity).unwrap();
    print_report(
      vulns,
      advisories,
      response.actions,
      minimal_severity,
      audit_flags.ignore_unfixable,
    );
    Ok(1)
  }
fn print_report(
vulns: AuditVulnerabilities,
advisories: Vec<&AuditAdvisory>,
actions: Vec<AuditAction>,
minimal_severity: AdvisorySeverity,
ignore_unfixable: bool,
) {
let stdout = &mut std::io::stdout();
for adv in advisories {
let Some(severity) = AdvisorySeverity::parse(&adv.severity) else {
continue;
};
if severity < minimal_severity {
continue;
}
let actions = adv.find_actions(&actions);
if actions.is_empty() && ignore_unfixable {
continue;
}
_ = writeln!(stdout, "╭ {}", colors::bold(adv.title.to_string()));
_ = writeln!(
stdout,
"│ {} {}",
colors::gray("Severity:"),
match severity {
AdvisorySeverity::Low => colors::bold("low"),
AdvisorySeverity::Moderate => colors::yellow("moderate"),
AdvisorySeverity::High => colors::red("high"),
AdvisorySeverity::Critical => colors::red("critical"),
}
);
_ = writeln!(
stdout,
"│ {} {}",
colors::gray("Package:"),
adv.module_name
);
_ = writeln!(
stdout,
"│ {} {}",
colors::gray("Vulnerable:"),
adv.vulnerable_versions
);
_ = writeln!(
stdout,
"│ {} {}",
colors::gray("Patched:"),
adv.patched_versions
);
if let Some(finding) = adv.findings.first()
&& let Some(path) = finding.paths.first()
{
_ = writeln!(stdout, "│ {} {}", colors::gray("Path:"), path);
}
if actions.is_empty() {
_ = writeln!(stdout, "╰ {} {}", colors::gray("Info:"), adv.url);
} else {
_ = writeln!(stdout, "│ {} {}", colors::gray("Info:"), adv.url);
}
if actions.len() == 1 {
_ =
writeln!(stdout, "╰ {} {}", colors::gray("Actions:"), actions[0]);
} else if actions.len() > 1 {
_ =
writeln!(stdout, "│ {} {}", colors::gray("Actions:"), actions[0]);
for action in &actions[0..actions.len() - 2] {
_ = writeln!(stdout, "│ {}", action);
}
_ = writeln!(stdout, "╰ {}", actions[actions.len() - 1]);
}
_ = writeln!(stdout);
}
_ = writeln!(
stdout,
"Found {} vulnerabilities",
colors::red(vulns.total()),
);
_ = writeln!(
stdout,
"Severity: {} {}, {} {}, {} {}, {} {}",
colors::bold(vulns.low),
colors::bold("low"),
colors::yellow(vulns.moderate),
colors::yellow("moderate"),
colors::red(vulns.high),
colors::red("high"),
colors::red(vulns.critical),
colors::red("critical"),
);
}
  /// One node of the `dependencies` tree in the npm audit request body.
  #[derive(Debug, Serialize)]
  #[serde(rename_all = "camelCase")]
  struct DependencyDescriptor {
    // Resolved version of this dependency.
    version: String,
    // Whether the package came from a dev dependency entry.
    dev: bool,
    // Direct dependency name -> version string.
    requires: HashMap<String, String>,
    // Full descriptors of the direct dependencies.
    dependencies: HashMap<String, Box<DependencyDescriptor>>,
  }
  /// A `resolves` entry of an [`AuditAction`], linking the action to an
  /// advisory id (and, optionally, a dependency path).
  #[derive(Debug, Deserialize)]
  pub struct AuditActionResolve {
    pub id: i32,
    pub path: Option<String>,
    // TODO(bartlomieju): currently not used, commented out so it's not flagged by clippy
    // pub dev: bool,
    // pub optional: bool,
    // pub bundled: bool,
  }
  /// A remediation suggested by the registry for one or more advisories.
  #[derive(Debug, Deserialize)]
  pub struct AuditAction {
    #[serde(rename = "isMajor", default)]
    pub is_major: bool,
    pub action: String,
    pub resolves: Vec<AuditActionResolve>,
    pub module: Option<String>,
    pub target: Option<String>,
  }
  /// Where an advisory was found in the dependency tree.
  #[derive(Debug, Deserialize)]
  pub struct AdvisoryFinding {
    // TODO(bartlomieju): currently not used, commented out so it's not flagged by clippy
    // pub version: String,
    pub paths: Vec<String>,
  }
  /// A single vulnerability advisory returned by the npm registry.
  #[derive(Debug, Deserialize)]
  pub struct AuditAdvisory {
    pub id: i32,
    pub title: String,
    pub findings: Vec<AdvisoryFinding>,
    // TODO(bartlomieju): currently not used, commented out so it's not flagged by clippy
    // pub cves: Vec<String>,
    // pub cwe: Vec<String>,
    pub severity: String,
    pub url: String,
    pub module_name: String,
    pub vulnerable_versions: String,
    pub patched_versions: String,
  }
impl AuditAdvisory {
fn find_actions(&self, actions: &[AuditAction]) -> Vec<String> {
let mut acts = Vec::new();
for action in actions {
if !action.resolves.iter().any(|r| r.id == self.id) {
continue;
}
let module = action
.module
.as_deref()
.map(str::to_owned)
.or_else(|| {
// Fallback to infer from dependency path
action.resolves.first().and_then(|r| {
r.path
.as_deref()
.and_then(|p| p.split('>').next_back())
.map(|s| s.trim().to_string())
})
})
.unwrap_or_else(|| "<unknown>".to_string());
let target = action
.target
.as_deref()
.map(|t| format!("@{}", t))
.unwrap_or_default();
let major = if action.is_major {
" (major upgrade)"
} else {
""
};
acts.push(format!("{} {}{}{}", action.action, module, target, major));
}
acts
}
}
  /// Vulnerability counts per severity level.
  #[derive(Debug, Deserialize)]
  pub struct AuditVulnerabilities {
    pub low: i32,
    pub moderate: i32,
    pub high: i32,
    pub critical: i32,
  }
  impl AuditVulnerabilities {
    /// Total number of vulnerabilities across all severities.
    fn total(&self) -> i32 {
      self.low + self.moderate + self.high + self.critical
    }
  }
  /// Metadata section of an audit response.
  #[derive(Debug, Deserialize)]
  #[serde(rename_all = "camelCase")]
  pub struct AuditMetadata {
    pub vulnerabilities: AuditVulnerabilities,
    // TODO(bartlomieju): currently not used, commented out so it's not flagged by clippy
    // pub dependencies: i32,
    // pub dev_dependencies: i32,
    // pub optional_dependencies: i32,
    // pub total_dependencies: i32,
  }
  /// Top-level response of the npm registry bulk audit endpoint.
  #[derive(Debug, Deserialize)]
  pub struct AuditResponse {
    pub actions: Vec<AuditAction>,
    pub advisories: HashMap<i32, AuditAdvisory>,
    pub metadata: AuditMetadata,
  }
}
mod socket_dev {
#![allow(dead_code)]
use super::*;
pub async fn call_firewall_api(
npm_resolution_snapshot: &NpmResolutionSnapshot,
client: HttpClient,
) -> Result<(), AnyError> {
let purls = npm_resolution_snapshot
.all_packages_for_every_system()
.map(|package| {
format!("pkg:npm/{}@{}", package.id.nv.name, package.id.nv.version)
})
.collect::<Vec<_>>();
let api_key = std::env::var("SOCKET_API_KEY").ok();
let mut purl_responses = if let Some(api_key) = api_key {
call_authenticated_api(&client, &purls, &api_key).await?
} else {
call_unauthenticated_api(&client, &purls).await?
};
purl_responses.sort_by_cached_key(|r| r.name.to_string());
print_firewall_report(&purl_responses);
Ok(())
}
  /// Calls the authenticated socket.dev API with all purls in a single
  /// request; the reply is newline-delimited JSON, one object per purl.
  async fn call_authenticated_api(
    client: &HttpClient,
    purls: &[String],
    api_key: &str,
  ) -> Result<Vec<FirewallResponse>, AnyError> {
    // The endpoint can be overridden via env var (e.g. for testing).
    let socket_dev_url =
      std::env::var("SOCKET_DEV_URL").ok().unwrap_or_else(|| {
        "https://api.socket.dev/v0/purl?actions=error,warn".to_string()
      });
    // NOTE(review): panics if SOCKET_DEV_URL is set to an invalid URL.
    let url = Url::parse(&socket_dev_url).unwrap();
    let body = serde_json::json!({
      "components": purls.iter().map(|purl| {
        serde_json::json!({ "purl": purl })
      }).collect::<Vec<_>>()
    });
    let auth_value = HeaderValue::from_str(&format!("Bearer {}", api_key))
      .context("Failed to create Authorization header")?;
    let request = client
      .post_json(url, &body)?
      .header(HeaderName::from_static("authorization"), auth_value);
    let response = request.send().boxed_local().await?;
    let text = http_util::body_to_string(response).await?;
    // Response is nJSON
    let responses = text
      .lines()
      .filter(|line| !line.trim().is_empty())
      .map(|line| {
        serde_json::from_str::<FirewallResponse>(line)
          .context("Failed to parse Socket.dev response")
      })
      .collect::<Result<Vec<_>, _>>()?;
    Ok(responses)
  }
  /// Calls the unauthenticated socket.dev firewall API with one request
  /// per purl (at most 20 in flight). Individual request or parse
  /// failures are logged and skipped so one bad package doesn't abort
  /// the whole report.
  async fn call_unauthenticated_api(
    client: &HttpClient,
    purls: &[String],
  ) -> Result<Vec<FirewallResponse>, AnyError> {
    // The endpoint can be overridden via env var (e.g. for testing).
    let socket_dev_url = std::env::var("SOCKET_DEV_URL")
      .ok()
      .unwrap_or_else(|| "https://firewall-api.socket.dev/".to_string());
    let futures = purls
      .iter()
      .map(|purl| {
        // Purls contain `@` and `/`, so percent-encode them for use as
        // a single path segment.
        let url = Url::parse(&format!(
          "{}purl/{}",
          socket_dev_url,
          percent_encoding::utf8_percent_encode(
            purl,
            percent_encoding::NON_ALPHANUMERIC
          )
        ))
        .unwrap();
        client.download_text(url).boxed_local()
      })
      .collect::<Vec<_>>();
    // Run the downloads with bounded concurrency.
    let purl_results = futures::stream::iter(futures)
      .buffer_unordered(20)
      .collect::<Vec<_>>()
      .await;
    let responses = purl_results
      .into_iter()
      .filter_map(|result| match result {
        Ok(a) => Some(a),
        Err(err) => {
          log::error!("Failed to get PURL result {:?}", err);
          None
        }
      })
      .filter_map(|json_response| {
        match serde_json::from_str::<FirewallResponse>(&json_response) {
          Ok(response) => Some(response),
          Err(err) => {
            log::error!("Failed deserializing socket.dev response {:?}", err);
            None
          }
        }
      })
      .collect::<Vec<_>>();
    Ok(responses)
  }
  /// Prints a per-package report of socket.dev scores and alerts to
  /// stdout, followed by severity totals. Packages without alerts are
  /// omitted; if no package has alerts nothing is printed at all.
  fn print_firewall_report(responses: &[FirewallResponse]) {
    let stdout = &mut std::io::stdout();
    let responses_with_alerts = responses
      .iter()
      .filter(|r| !r.alerts.is_empty())
      .collect::<Vec<_>>();
    if responses_with_alerts.is_empty() {
      return;
    }
    _ = writeln!(stdout);
    _ = writeln!(stdout, "{}", colors::bold("Socket.dev firewall report"));
    _ = writeln!(stdout);
    // Count total alerts by severity
    let mut total_critical = 0;
    let mut total_high = 0;
    let mut total_medium = 0;
    let mut total_low = 0;
    let mut packages_with_issues = 0;
    for response in responses_with_alerts {
      packages_with_issues += 1;
      _ = writeln!(stdout, "╭ pkg:npm/{}@{}", response.name, response.version);
      if let Some(score) = &response.score {
        _ = writeln!(
          stdout,
          "│ {:<20} {:>3}",
          colors::gray("Supply Chain Risk:"),
          format_score(score.supply_chain)
        );
        _ = writeln!(
          stdout,
          "│ {:<20} {:>3}",
          colors::gray("Maintenance:"),
          format_score(score.maintenance)
        );
        _ = writeln!(
          stdout,
          "│ {:<20} {:>3}",
          colors::gray("Quality:"),
          format_score(score.quality)
        );
        _ = writeln!(
          stdout,
          "│ {:<20} {:>3}",
          colors::gray("Vulnerabilities:"),
          format_score(score.vulnerability)
        );
        _ = writeln!(
          stdout,
          "│ {:<20} {:>3}",
          colors::gray("License:"),
          format_score(score.license)
        );
      }
      // critical and high are counted as one for display.
      let mut critical_count = 0;
      let mut medium_count = 0;
      let mut low_count = 0;
      for alert in &response.alerts {
        match alert.severity.as_str() {
          "critical" => {
            total_critical += 1;
            critical_count += 1;
          }
          "high" => {
            total_high += 1;
            critical_count += 1;
          }
          "medium" => {
            total_medium += 1;
            medium_count += 1;
          }
          "low" => {
            total_low += 1;
            low_count += 1;
          }
          _ => {}
        }
      }
      // NOTE(review): always true here — `responses_with_alerts` was
      // filtered to non-empty alert lists above, so the `else` branch
      // below is unreachable.
      if !response.alerts.is_empty() {
        let alerts_str = response
          .alerts
          .iter()
          .map(|alert| {
            let severity_bracket = match alert.severity.as_str() {
              "critical" => colors::red("critical").to_string(),
              "high" => colors::red("high").to_string(),
              "medium" => colors::yellow("medium").to_string(),
              "low" => "low".to_string(),
              _ => alert.severity.clone(),
            };
            format!("[{}] {}", severity_bracket, alert.r#type)
          })
          .collect::<Vec<_>>()
          .join(", ");
        // Label shows critical(+high)/medium/low counts.
        let label = format!(
          "Alerts ({}/{}/{}):",
          critical_count, medium_count, low_count
        );
        _ = writeln!(stdout, "╰ {:<20} {}", colors::gray(&label), alerts_str);
      } else {
        _ = writeln!(stdout, "╰");
      }
      _ = writeln!(stdout);
    }
    let total_alerts = total_critical + total_high + total_medium + total_low;
    // NOTE(review): unreachable — `packages_with_issues` is always > 0
    // at this point (we returned early when no package had alerts).
    if total_alerts == 0 && packages_with_issues == 0 {
      _ = writeln!(stdout, "No security alerts found from Socket.dev");
      return;
    }
    if total_alerts > 0 {
      _ = writeln!(
        stdout,
        "Found {} alerts across {} packages",
        colors::red(total_alerts),
        colors::bold(packages_with_issues)
      );
      _ = writeln!(
        stdout,
        "Severity: {} {}, {} {}, {} {}, {} {}",
        colors::bold(total_low),
        colors::bold("low"),
        colors::yellow(total_medium),
        colors::yellow("medium"),
        colors::red(total_high),
        colors::red("high"),
        colors::red(total_critical),
        colors::red("critical"),
      );
    }
  }
fn format_score(score: f64) -> String {
let percentage = (score * 100.0) as i32;
let colored = if percentage >= 80 {
colors::green(percentage)
} else if percentage >= 60 {
colors::yellow(percentage)
} else {
colors::red(percentage)
};
format!("{}", colored)
}
  /// Per-category socket.dev scores. Presumably fractions in
  /// 0.0..=1.0 — `format_score` renders them as percentages; confirm
  /// against the socket.dev API docs.
  #[derive(Debug, Deserialize)]
  #[serde(rename_all = "camelCase")]
  pub struct FirewallScore {
    pub license: f64,
    pub maintenance: f64,
    pub overall: f64,
    pub quality: f64,
    pub supply_chain: f64,
    pub vulnerability: f64,
  }
  /// A single alert reported for a package.
  #[derive(Debug, Deserialize)]
  #[serde(rename_all = "camelCase")]
  pub struct FirewallAlert {
    pub r#type: String,
    pub action: String,
    pub severity: String,
    pub category: String,
  }
  /// Per-package entry of a socket.dev response.
  #[derive(Debug, Deserialize)]
  #[serde(rename_all = "camelCase")]
  pub struct FirewallResponse {
    pub id: String,
    pub name: String,
    pub version: String,
    pub score: Option<FirewallScore>,
    // Missing in the payload means "no alerts".
    #[serde(default)]
    pub alerts: Vec<FirewallAlert>,
  }
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/tools/pm/approve_scripts.rs | cli/tools/pm/approve_scripts.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::collections::HashSet;
use std::fmt::Write as _;
use std::sync::Arc;
use console_static_text::TextItem;
use deno_config::deno_json::AllowScriptsConfig;
use deno_config::deno_json::AllowScriptsValueConfig;
use deno_core::anyhow;
use deno_core::anyhow::Context;
use deno_core::anyhow::bail;
use deno_core::error::AnyError;
use deno_path_util::url_to_file_path;
use deno_semver::jsr::JsrDepPackageReq;
use deno_semver::jsr::JsrDepPackageReqParseError;
use deno_semver::package::PackageKind;
use deno_semver::package::PackageNv;
use deno_semver::package::PackageReq;
use deno_terminal::colors;
use jsonc_parser::json;
use super::CacheTopLevelDepsOptions;
use crate::args::ApproveScriptsFlags;
use crate::args::Flags;
use crate::factory::CliFactory;
use crate::npm::CliNpmResolver;
use crate::tools::pm::ConfigKind;
use crate::tools::pm::ConfigUpdater;
use crate::tools::pm::create_deno_json;
use crate::tools::pm::interactive_picker;
/// An npm package with lifecycle scripts that is neither allowed nor
/// denied yet and therefore needs a user decision.
struct ScriptCandidate {
  // Exact name@version requirement to write into `allowScripts`.
  req: PackageReq,
  // Display string, e.g. `npm:foo@1.2.3`.
  specifier: String,
  // Sorted lifecycle script names the package declares.
  scripts: Vec<String>,
}
/// Entry point for approving npm lifecycle scripts.
///
/// Discovers packages with lifecycle scripts that aren't covered by the
/// current allow/deny lists, lets the user pick interactively (or takes
/// packages directly from the CLI flags), writes the result into the
/// root deno.json's `allowScripts` config, and re-runs install so the
/// newly approved scripts execute.
pub async fn approve_scripts(
  flags: Arc<Flags>,
  approve_flags: ApproveScriptsFlags,
) -> Result<(), AnyError> {
  let mut factory = CliFactory::from_flags(flags.clone());
  let mut options = factory.cli_options()?;
  // Create a deno.json when none exists so there's a place to write to.
  if options.start_dir.member_or_root_deno_json().is_none() {
    factory = create_deno_json(&flags, options)?;
    options = factory.cli_options()?;
  }
  let deno_json = options.workspace().root_deno_json().ok_or_else(|| {
    anyhow::anyhow!("A deno.json file could not be found or created")
  })?;
  let deno_json_path = url_to_file_path(&deno_json.specifier)?;
  let mut config_updater =
    ConfigUpdater::new(ConfigKind::DenoJson, deno_json_path.clone())?;
  let allow_scripts_config = deno_json.to_allow_scripts_config()?;
  let (mut allow_list, mut deny_list) = match allow_scripts_config.allow {
    // `allow: true` already covers everything — nothing to approve.
    AllowScriptsValueConfig::All => {
      log::info!(
        "Lifecycle scripts are already allowed for all npm packages in the workspace.",
      );
      return Ok(());
    }
    AllowScriptsValueConfig::Limited(list) => (list, allow_scripts_config.deny),
  };
  let mut existing_allowed: HashSet<PackageReq> =
    allow_list.iter().map(|req| req.req.clone()).collect();
  let deny_reqs: Vec<PackageReq> =
    deny_list.iter().map(|req| req.req.clone()).collect();
  // Packages given on the CLI skip the interactive picker.
  let (approvals, denials) = if !approve_flags.packages.is_empty() {
    (
      parse_user_packages(&approve_flags.packages, &mut existing_allowed)?,
      Vec::new(),
    )
  } else {
    let npm_resolver = factory.npm_resolver().await?;
    let candidates = find_script_candidates(
      npm_resolver,
      &flags.subcommand.npm_system_info(),
      &allow_list,
      &deny_reqs,
    )?;
    if candidates.is_empty() {
      log::info!("No npm packages with lifecycle scripts need approval.");
      return Ok(());
    }
    let chosen = pick_candidates(&candidates, &mut existing_allowed)?;
    (chosen.approved, chosen.denied)
  };
  if approvals.is_empty() && denials.is_empty() {
    log::info!("No new packages to approve.");
    return Ok(());
  }
  for req in &approvals {
    allow_list.push(JsrDepPackageReq::npm(req.clone()));
  }
  for req in &denials {
    deny_list.push(JsrDepPackageReq::npm(req.clone()));
  }
  // A new approval overrides an earlier denial of the same npm req.
  if !approvals.is_empty() {
    deny_list.retain(|entry| {
      !(entry.kind == PackageKind::Npm && approvals.contains(&entry.req))
    });
  }
  // Keep both lists sorted and deduplicated before writing them back.
  allow_list.sort_by_key(|a| a.to_string());
  allow_list.dedup_by(|a, b| a.req == b.req && a.kind == b.kind);
  deny_list.sort_by_key(|a| a.to_string());
  deny_list.dedup_by(|a, b| a.req == b.req && a.kind == b.kind);
  let updated_allow_scripts = AllowScriptsConfig {
    allow: AllowScriptsValueConfig::Limited(allow_list),
    deny: deny_list,
  };
  let allow_scripts_value = allow_scripts_to_value(&updated_allow_scripts);
  config_updater.set_allow_scripts_value(allow_scripts_value);
  config_updater.commit()?;
  for req in denials {
    log::info!(
      "{} {}{}",
      colors::yellow("Denied"),
      colors::gray("npm:"),
      req
    )
  }
  for req in approvals.iter() {
    log::info!(
      "{} {}{}",
      colors::green("Approved"),
      colors::gray("npm:"),
      req
    );
  }
  // Re-run install so the newly approved lifecycle scripts actually run.
  super::npm_install_after_modification(
    flags,
    None,
    CacheTopLevelDepsOptions {
      lockfile_only: approve_flags.lockfile_only,
    },
  )
  .await?;
  for req in approvals {
    log::info!(
      "{} {}{}",
      colors::cyan("Ran build script"),
      colors::gray("npm:"),
      req
    );
  }
  Ok(())
}
/// Parses user-supplied package strings into npm package requirements,
/// returning only those not already present in `existing_allowed`
/// (which is updated in place with every parsed req).
fn parse_user_packages(
  packages: &[String],
  existing_allowed: &mut HashSet<PackageReq>,
) -> Result<Vec<PackageReq>, AnyError> {
  let mut new_reqs = Vec::new();
  for text in packages {
    let req = parse_npm_package_req(text)
      .with_context(|| format!("Failed to parse package: {}", text))?;
    // Only report reqs that weren't already approved.
    let is_new = existing_allowed.insert(req.clone());
    if is_new {
      new_reqs.push(req);
    }
  }
  Ok(new_reqs)
}
/// Scans the npm resolution snapshot for packages that declare
/// lifecycle scripts and aren't covered by the current allow or deny
/// lists, returning them sorted by specifier.
///
/// Errors when there is no managed npm resolution yet.
fn find_script_candidates(
  npm_resolver: &CliNpmResolver,
  system_info: &deno_npm::NpmSystemInfo,
  allow_list: &[JsrDepPackageReq],
  deny_list: &[PackageReq],
) -> Result<Vec<ScriptCandidate>, AnyError> {
  let managed_resolver = npm_resolver.as_managed().with_context(|| {
    "Lifecycle script approval requires an npm resolution. Run `deno install` first to create one."
  })?;
  let snapshot = managed_resolver.resolution().snapshot();
  let mut candidates = Vec::new();
  let mut seen = HashSet::<PackageNv>::new();
  for package in snapshot.all_system_packages(system_info) {
    if !package.has_scripts {
      continue;
    }
    // The same name+version can show up more than once; ask only once.
    if !seen.insert(package.id.nv.clone()) {
      continue;
    }
    // Already explicitly allowed or denied — nothing to ask about.
    if allow_list
      .iter()
      .any(|req| package_req_matches_nv(&req.req, &package.id.nv))
    {
      continue;
    }
    if deny_list
      .iter()
      .any(|req| package_req_matches_nv(req, &package.id.nv))
    {
      continue;
    }
    let specifier =
      format!("npm:{}@{}", package.id.nv.name, package.id.nv.version);
    let req = PackageReq::from_str(&format!(
      "{}@{}",
      package.id.nv.name, package.id.nv.version
    ))?;
    // Collect the declared script names, sorted for stable display.
    let mut scripts = package
      .extra
      .as_ref()
      .map(|extra| {
        let mut names = extra
          .scripts
          .keys()
          .map(|k| k.to_string())
          .collect::<Vec<_>>();
        names.sort();
        names
      })
      .unwrap_or_default();
    scripts.dedup();
    candidates.push(ScriptCandidate {
      req,
      specifier,
      scripts,
    });
  }
  candidates.sort_by(|a, b| a.specifier.cmp(&b.specifier));
  Ok(candidates)
}
/// Outcome of the interactive picker: which package reqs the user
/// approved and which were (implicitly) denied.
#[derive(Default, Debug)]
struct ChosenCandidates {
  approved: Vec<PackageReq>,
  denied: Vec<PackageReq>,
}
/// Presents the interactive picker for `candidates` and splits them
/// into approved (selected) and denied (unselected) requirements.
///
/// Returns empty lists when the user cancels. Reqs already present in
/// `existing_allowed` are not reported again, but the set is updated
/// with every newly approved req.
fn pick_candidates(
  candidates: &[ScriptCandidate],
  existing_allowed: &mut HashSet<PackageReq>,
) -> Result<ChosenCandidates, AnyError> {
  // Nothing to pick from. (Previously this mapped over the — empty —
  // candidate list to build `denied`, which was dead, confusing code.)
  if candidates.is_empty() {
    return Ok(ChosenCandidates::default());
  }
  let selected = interactive_picker::select_items(
    "Select which packages to approve lifecycle scripts for (<space> to select, ↑/↓/j/k to navigate, a to select all, i to invert selection, enter to accept, <Ctrl-c> to cancel)",
    candidates,
    HashSet::new(),
    |_idx, is_selected, is_checked, candidate| {
      render_candidate(candidate, is_selected, is_checked)
    },
  )?;
  // `None` means the user cancelled; approve and deny nothing.
  let Some(selected) = selected else {
    return Ok(ChosenCandidates::default());
  };
  let mut approvals = Vec::with_capacity(selected.len());
  let mut denials = Vec::with_capacity(candidates.len() - selected.len());
  for (idx, candidate) in candidates.iter().enumerate() {
    if selected.contains(&idx) {
      // Only record reqs that weren't already allowed.
      if existing_allowed.insert(candidate.req.clone()) {
        approvals.push(candidate.req.clone());
      }
    } else {
      denials.push(candidate.req.clone());
    }
  }
  Ok(ChosenCandidates {
    approved: approvals,
    denied: denials,
  })
}
/// Converts the allow-scripts config back into the JSON(C) value shape
/// written to deno.json: a bare `true`/array when there are no
/// denials, otherwise an `{ "allow": ..., "deny": ... }` object.
fn allow_scripts_to_value(
  config: &AllowScriptsConfig,
) -> jsonc_parser::cst::CstInputValue {
  let deny = config
    .deny
    .iter()
    .map(|r| r.to_string())
    .collect::<Vec<String>>();
  match &config.allow {
    AllowScriptsValueConfig::All if deny.is_empty() => json!(true),
    AllowScriptsValueConfig::All => json!({ "allow": true, "deny": deny }),
    AllowScriptsValueConfig::Limited(reqs) => {
      let allow = reqs.iter().map(|r| r.to_string()).collect::<Vec<String>>();
      if deny.is_empty() {
        json!(allow)
      } else {
        json!({ "allow": allow, "deny": deny })
      }
    }
  }
}
/// Parses a user-supplied package string into an npm `PackageReq`.
///
/// Accepts `npm:<name>[@<version-req>]` or a bare
/// `<name>[@<version-req>]` (retried with an `npm:` prefix). Rejects
/// `jsr:` packages and dist-tag version requirements.
fn parse_npm_package_req(text: &str) -> Result<PackageReq, AnyError> {
  let req = match JsrDepPackageReq::from_str_loose(text) {
    Ok(JsrDepPackageReq {
      kind: PackageKind::Jsr,
      ..
    }) => {
      bail!("Only npm packages are supported: {}", text);
    }
    Ok(
      req @ JsrDepPackageReq {
        kind: PackageKind::Npm,
        ..
      },
    ) => req,
    // A bare specifier like `foo@1.0.0` has no scheme; retry as npm.
    Err(JsrDepPackageReqParseError::NotExpectedScheme(_))
      if !text.contains(':') =>
    {
      return parse_npm_package_req(&format!("npm:{text}"));
    }
    Err(e) => return Err(e.into()),
  };
  // Tags (e.g. `latest`) can't be pinned in `allowScripts`.
  if req.req.version_req.tag().is_some() {
    bail!("Tags are not supported in the allowScripts field: {}", text);
  }
  Ok(req.req)
}
/// True when `nv` has the same package name as `req` and its version
/// satisfies `req`'s version requirement.
fn package_req_matches_nv(req: &PackageReq, nv: &PackageNv) -> bool {
  nv.name == req.name && req.version_req.matches(&nv.version)
}
/// Renders a single picker row: cursor marker, checkbox, the package
/// specifier, and (dimmed) the lifecycle script names it declares.
fn render_candidate(
  candidate: &ScriptCandidate,
  is_selected: bool,
  is_checked: bool,
) -> Result<TextItem<'static>, AnyError> {
  let cursor = if is_selected {
    colors::intense_blue("❯").to_string()
  } else {
    " ".to_string()
  };
  let checkbox = if is_checked { "●" } else { "○" };
  let mut line = format!("{} {} {}", cursor, checkbox, candidate.specifier);
  if !candidate.scripts.is_empty() {
    write!(
      &mut line,
      " {}",
      colors::gray(format!("scripts: {}", candidate.scripts.join(", ")))
    )?;
  }
  // Wrapped continuation lines hang under the checkbox column.
  Ok(TextItem::with_hanging_indent_owned(line, 2))
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/tools/pm/deps.rs | cli/tools/pm/deps.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::borrow::Cow;
use std::collections::HashMap;
use std::path::PathBuf;
use std::sync::Arc;
use deno_ast::ModuleSpecifier;
use deno_config::deno_json::ConfigFile;
use deno_config::deno_json::ConfigFileRc;
use deno_config::workspace::Workspace;
use deno_config::workspace::WorkspaceDirectory;
use deno_core::anyhow::Context;
use deno_core::anyhow::bail;
use deno_core::error::AnyError;
use deno_core::futures::FutureExt;
use deno_core::futures::StreamExt;
use deno_core::futures::future::try_join;
use deno_core::futures::stream::FuturesOrdered;
use deno_core::futures::stream::FuturesUnordered;
use deno_core::serde_json;
use deno_core::url::Url;
use deno_graph::JsrPackageReqNotFoundError;
use deno_graph::packages::JsrPackageVersionInfo;
use deno_npm::resolution::NpmVersionResolver;
use deno_package_json::PackageJsonDepsMap;
use deno_package_json::PackageJsonRc;
use deno_runtime::deno_permissions::PermissionsContainer;
use deno_semver::StackString;
use deno_semver::Version;
use deno_semver::VersionReq;
use deno_semver::jsr::JsrPackageReqReference;
use deno_semver::npm::NpmPackageReqReference;
use deno_semver::package::PackageNv;
use deno_semver::package::PackageReq;
use deno_semver::package::PackageReqReference;
use import_map::ImportMap;
use import_map::ImportMapWithDiagnostics;
use import_map::SpecifierMapEntry;
use tokio::sync::Semaphore;
use super::ConfigUpdater;
use crate::args::CliLockfile;
use crate::graph_container::MainModuleGraphContainer;
use crate::graph_container::ModuleGraphContainer;
use crate::graph_container::ModuleGraphUpdatePermit;
use crate::jsr::JsrFetchResolver;
use crate::module_loader::ModuleLoadPreparer;
use crate::npm::CliNpmInstaller;
use crate::npm::CliNpmResolver;
use crate::npm::NpmFetchResolver;
use crate::util::progress_bar::ProgressBar;
use crate::util::sync::AtomicFlag;
/// How a deno.json's import map is stored: inline in the config file
/// itself, or outlined into a separate file at the given path.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum ImportMapKind {
  Inline,
  Outline(PathBuf),
}
/// Where a dependency entry lives: in a deno.json (at `KeyPath` within
/// its import map) or in a package.json.
#[derive(Clone)]
pub enum DepLocation {
  DenoJson(ConfigFileRc, KeyPath, ImportMapKind),
  PackageJson(PackageJsonRc, KeyPath),
}
impl DepLocation {
  pub fn is_deno_json(&self) -> bool {
    matches!(self, DepLocation::DenoJson(..))
  }
  /// The file that actually contains the entry — for an outlined
  /// import map that's the import map file, not the deno.json.
  pub fn file_path(&self) -> Cow<'_, std::path::Path> {
    match self {
      DepLocation::DenoJson(arc, _, kind) => match kind {
        ImportMapKind::Inline => {
          Cow::Owned(arc.specifier.to_file_path().unwrap())
        }
        ImportMapKind::Outline(path) => Cow::Borrowed(path.as_path()),
      },
      DepLocation::PackageJson(arc, _) => Cow::Borrowed(arc.path.as_ref()),
    }
  }
  fn config_kind(&self) -> super::ConfigKind {
    match self {
      DepLocation::DenoJson(_, _, _) => super::ConfigKind::DenoJson,
      DepLocation::PackageJson(_, _) => super::ConfigKind::PackageJson,
    }
  }
}
/// Wrapper providing compact `Debug` output for the foreign Rc config
/// types (printing only the identifying field, not the full contents).
struct DebugAdapter<T>(T);
impl std::fmt::Debug for DebugAdapter<&ConfigFileRc> {
  fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
    f.debug_struct("ConfigFile")
      .field("specifier", &self.0.specifier)
      .finish()
  }
}
impl std::fmt::Debug for DebugAdapter<&PackageJsonRc> {
  fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
    f.debug_struct("PackageJson")
      .field("path", &self.0.path)
      .finish()
  }
}
impl std::fmt::Debug for DepLocation {
  fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
    match self {
      DepLocation::DenoJson(arc, key_path, kind) => {
        let mut debug = f.debug_tuple("DenoJson");
        debug
          .field(&DebugAdapter(arc))
          .field(key_path)
          .field(kind)
          .finish()
      }
      DepLocation::PackageJson(arc, key_path) => {
        let mut debug = f.debug_tuple("PackageJson");
        debug.field(&DebugAdapter(arc)).field(key_path).finish()
      }
    }
  }
}
/// The registry a dependency is fetched from.
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub enum DepKind {
  Jsr,
  Npm,
}
impl DepKind {
pub fn scheme(&self) -> &'static str {
match self {
DepKind::Npm => "npm",
DepKind::Jsr => "jsr",
}
}
}
/// One segment of a JSON key path into a config file.
#[derive(Clone, Debug)]
pub enum KeyPart {
  /// The deno.json `imports` object.
  Imports,
  /// The deno.json `scopes` object.
  Scopes,
  /// The package.json `dependencies` object.
  Dependencies,
  /// The package.json `devDependencies` object.
  DevDependencies,
  /// An arbitrary key (e.g. a package alias).
  String(StackString),
}
impl From<PackageJsonDepKind> for KeyPart {
fn from(value: PackageJsonDepKind) -> Self {
match value {
PackageJsonDepKind::Normal => Self::Dependencies,
PackageJsonDepKind::Dev => Self::DevDependencies,
}
}
}
impl KeyPart {
  /// The literal JSON key this segment addresses in a config file.
  pub fn as_str(&self) -> &str {
    match self {
      KeyPart::Imports => "imports",
      KeyPart::Scopes => "scopes",
      KeyPart::Dependencies => "dependencies",
      KeyPart::DevDependencies => "devDependencies",
      KeyPart::String(s) => s.as_str(),
    }
  }
}
/// An ordered path of JSON keys addressing a value inside a config file
/// (e.g. `imports` → `"preact/"`).
#[derive(Clone, Debug)]
pub struct KeyPath {
  pub parts: Vec<KeyPart>,
}
impl KeyPath {
  /// Builds a key path from an ordered sequence of segments.
  fn from_parts(parts: impl IntoIterator<Item = KeyPart>) -> Self {
    let parts: Vec<KeyPart> = parts.into_iter().collect();
    Self { parts }
  }

  /// The final segment of the path, if the path is non-empty.
  fn last(&self) -> Option<&KeyPart> {
    self.parts.last()
  }

  /// Appends a segment to the end of the path.
  fn push(&mut self, part: KeyPart) {
    self.parts.push(part);
  }
}
/// A single dependency discovered in a workspace config file.
#[derive(Clone, Debug)]
pub struct Dep {
  /// The parsed package requirement (name + version constraint).
  pub req: PackageReq,
  /// Which registry the requirement targets.
  pub kind: DepKind,
  /// Where in which config file the dep was declared.
  pub location: DepLocation,
  #[allow(dead_code)]
  pub id: DepId,
  /// Alias the dep is mapped under, when it differs from the package name.
  #[allow(dead_code)]
  pub alias: Option<String>,
}
impl Dep {
  /// The alias the dependency is declared under, falling back to the
  /// package name when no alias was used.
  pub fn alias_or_name(&self) -> &str {
    match self.alias.as_deref() {
      Some(alias) => alias,
      None => &self.req.name,
    }
  }
}
/// Iterates every entry of an import map together with the key path that
/// addresses it: first the top-level `imports`, then each scope's imports
/// under `scopes.<scope>`.
fn import_map_entries(
  import_map: &ImportMap,
) -> impl Iterator<Item = (KeyPath, SpecifierMapEntry<'_>)> {
  import_map
    .imports()
    .entries()
    .map(|entry| {
      (
        KeyPath::from_parts([
          KeyPart::Imports,
          KeyPart::String(entry.raw_key.into()),
        ]),
        entry,
      )
    })
    .chain(import_map.scopes().flat_map(|scope| {
      // base path shared by every entry in this scope; cloned per entry
      // because the inner closure outlives this iteration
      let path = KeyPath::from_parts([
        KeyPart::Scopes,
        KeyPart::String(scope.raw_key.into()),
      ]);
      scope.imports.entries().map(move |entry| {
        let mut full_path = path.clone();
        full_path.push(KeyPart::String(entry.raw_key.into()));
        (full_path, entry)
      })
    }))
}
/// Assembles a JSON object holding the deno.json's inline `imports` and
/// `scopes` fields (omitting whichever is absent), suitable for parsing as
/// an import map.
fn to_import_map_value_from_imports(
  deno_json: &ConfigFile,
) -> serde_json::Value {
  let mut object = serde_json::Map::with_capacity(2);
  let fields = [
    ("imports", &deno_json.json.imports),
    ("scopes", &deno_json.json.scopes),
  ];
  for (key, field) in fields {
    if let Some(contents) = field {
      object.insert(key.to_string(), contents.clone());
    }
  }
  serde_json::Value::Object(object)
}
/// Loads the import map associated with a deno.json, if it has one.
///
/// Prefers inline `imports`/`scopes` fields; otherwise follows the
/// `importMap` field to an external file. Returns `Ok(None)` when the
/// deno.json declares neither. Errors on I/O/parse failures of the
/// external file or on import-map parse failure.
fn deno_json_import_map(
  deno_json: &ConfigFile,
) -> Result<Option<(ImportMapWithDiagnostics, ImportMapKind)>, AnyError> {
  let (value, kind) = if deno_json.json.imports.is_some()
    || deno_json.json.scopes.is_some()
  {
    (
      to_import_map_value_from_imports(deno_json),
      ImportMapKind::Inline,
    )
  } else {
    match deno_json.to_import_map_path()? {
      Some(path) => {
        let err_context = || {
          format!(
            "loading import map at '{}' (from \"importMap\" field in '{}')",
            path.display(),
            deno_json.specifier
          )
        };
        let text = std::fs::read_to_string(&path).with_context(err_context)?;
        let value = serde_json::from_str(&text).with_context(err_context)?;
        (value, ImportMapKind::Outline(path))
      }
      None => return Ok(None),
    }
  };
  // the import map is parsed relative to the deno.json's specifier even
  // when outlined, matching how it was located
  import_map::parse_from_value(deno_json.specifier.clone(), value)
    .map_err(Into::into)
    .map(|import_map| Some((import_map, kind)))
}
/// Which package.json section a dependency came from.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum PackageJsonDepKind {
  /// `dependencies`
  Normal,
  /// `devDependencies`
  Dev,
}
/// Collects the npm:/jsr: dependencies declared in a deno.json's import map
/// into `deps`, skipping anything rejected by `filter`. Parse failures are
/// logged and skipped rather than propagated.
fn add_deps_from_deno_json(
  deno_json: &Arc<ConfigFile>,
  mut filter: impl DepFilter,
  deps: &mut Vec<Dep>,
) {
  let (import_map, import_map_kind) = match deno_json_import_map(deno_json) {
    Ok(Some((import_map, import_map_kind))) => (import_map, import_map_kind),
    Ok(None) => return,
    Err(e) => {
      log::warn!("failed to parse imports from {}: {e}", &deno_json.specifier);
      return;
    }
  };
  for (key_path, entry) in import_map_entries(&import_map.import_map) {
    let Some(value) = entry.value else { continue };
    // only registry specifiers are dependencies; relative/https entries
    // are ignored
    let kind = match value.scheme() {
      "npm" => DepKind::Npm,
      "jsr" => DepKind::Jsr,
      _ => continue,
    };
    let req = match parse_req_reference(value.as_str(), kind) {
      Ok(req) => req.req,
      Err(err) => {
        log::warn!("failed to parse package req \"{}\": {err}", value.as_str());
        continue;
      }
    };
    // the import-map key is the alias; a trailing `/` marks a directory
    // mapping and is not part of the alias itself
    let alias: &str = key_path.last().unwrap().as_str().trim_end_matches('/');
    let alias = (alias != req.name).then(|| alias.to_string());
    if !filter.should_include(alias.as_deref(), &req, kind) {
      continue;
    }
    // ids are assigned by position in the output vec
    let id = DepId(deps.len());
    deps.push(Dep {
      location: DepLocation::DenoJson(
        deno_json.clone(),
        key_path,
        import_map_kind.clone(),
      ),
      kind,
      req,
      id,
      alias,
    });
  }
}
/// Collects the npm dependencies declared in a package.json's
/// `dependencies` and `devDependencies` into `deps`, skipping anything
/// rejected by `filter`. File/JSR/workspace entries are ignored; malformed
/// entries are logged and skipped.
fn add_deps_from_package_json(
  package_json: &PackageJsonRc,
  filter: impl DepFilter,
  deps: &mut Vec<Dep>,
) {
  let package_json_deps = package_json.resolve_local_package_json_deps();
  // shared worker for both dependency sections
  fn iterate(
    package_json: &PackageJsonRc,
    mut filter: impl DepFilter,
    package_dep_kind: PackageJsonDepKind,
    package_json_deps: &PackageJsonDepsMap,
    deps: &mut Vec<Dep>,
  ) {
    for (k, v) in package_json_deps {
      let v = match v {
        Ok(v) => v,
        Err(e) => {
          log::warn!("bad package json dep value: {e}");
          continue;
        }
      };
      match v {
        deno_package_json::PackageJsonDepValue::File(_)
        | deno_package_json::PackageJsonDepValue::JsrReq(_) => {
          // ignore
        }
        deno_package_json::PackageJsonDepValue::Req(req) => {
          // the map key is the alias; only record it when it differs from
          // the actual package name
          let alias = k.as_str();
          let alias = (alias != req.name).then(|| alias.to_string());
          if !filter.should_include(alias.as_deref(), req, DepKind::Npm) {
            continue;
          }
          // ids are assigned by position in the output vec
          let id = DepId(deps.len());
          deps.push(Dep {
            id,
            kind: DepKind::Npm,
            location: DepLocation::PackageJson(
              package_json.clone(),
              KeyPath::from_parts([
                package_dep_kind.into(),
                KeyPart::String(k.clone()),
              ]),
            ),
            req: req.clone(),
            alias,
          })
        }
        deno_package_json::PackageJsonDepValue::Workspace(_) => continue,
      }
    }
  }
  iterate(
    package_json,
    filter,
    PackageJsonDepKind::Normal,
    &package_json_deps.dependencies,
    deps,
  );
  iterate(
    package_json,
    filter,
    PackageJsonDepKind::Dev,
    &package_json_deps.dev_dependencies,
    deps,
  );
}
/// Gathers dependencies from every deno.json and package.json in the
/// workspace, in that order, applying `dep_filter` to each candidate.
fn deps_from_workspace(
  workspace: &Arc<Workspace>,
  dep_filter: impl DepFilter,
) -> Result<Vec<Dep>, AnyError> {
  // preallocate generously; workspaces commonly have many deps
  let mut deps = Vec::with_capacity(256);
  workspace.deno_jsons().for_each(|deno_json| {
    add_deps_from_deno_json(deno_json, dep_filter, &mut deps)
  });
  workspace.package_jsons().for_each(|package_json| {
    add_deps_from_package_json(package_json, dep_filter, &mut deps)
  });
  Ok(deps)
}
/// Stable handle for a dependency: its index into `DepManager::deps`.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub struct DepId(usize);
/// A pending edit to a dependency, queued until `commit_changes` runs.
#[derive(Debug, Clone)]
pub enum Change {
  /// Change the dep's version requirement to the given one.
  Update(DepId, VersionReq),
}
/// Predicate deciding whether a discovered dependency should be tracked.
/// `Copy` so the same filter can be reused across config files.
pub trait DepFilter: Copy {
  /// Returns `true` to keep the dependency, `false` to skip it.
  /// `alias` is only present when it differs from the package name.
  fn should_include(
    &mut self,
    alias: Option<&str>,
    package_req: &PackageReq,
    dep_kind: DepKind,
  ) -> bool;
}
/// Blanket impl so any matching `Copy` closure can be used as a filter.
impl<T> DepFilter for T
where
  T: FnMut(Option<&str>, &PackageReq, DepKind) -> bool + Copy,
{
  // note: the previous version declared a redundant explicit `<'a>`
  // lifetime here, inconsistent with the trait's elided signature
  fn should_include(
    &mut self,
    alias: Option<&str>,
    package_req: &PackageReq,
    dep_kind: DepKind,
  ) -> bool {
    (*self)(alias, package_req, dep_kind)
  }
}
/// Registry lookup results for a dependency.
#[derive(Clone, Debug)]
pub struct PackageLatestVersion {
  /// Newest version satisfying the declared version requirement, if any.
  pub semver_compatible: Option<PackageNv>,
  /// Newest usable version overall (may be outside the requirement).
  pub latest: Option<PackageNv>,
}
/// Discovers, resolves, and edits the dependencies declared across a
/// workspace's deno.json and package.json files (driving `deno outdated`
/// and similar commands).
pub struct DepManager {
  deps: Vec<Dep>,
  // parallel to `deps`: the version each dep currently resolves to, if any
  resolved_versions: Vec<Option<PackageNv>>,
  // parallel to `deps`: latest/semver-compatible versions from the registries
  latest_versions: Vec<PackageLatestVersion>,
  // edits queued by `update_dep`, flushed to disk by `commit_changes`
  pending_changes: Vec<Change>,
  // set once `run_dependency_resolution` has completed, to avoid rework
  dependencies_resolved: AtomicFlag,
  module_load_preparer: Arc<ModuleLoadPreparer>,
  // TODO(nathanwhit): probably shouldn't be pub
  pub(crate) jsr_fetch_resolver: Arc<JsrFetchResolver>,
  pub(crate) npm_fetch_resolver: Arc<NpmFetchResolver>,
  npm_resolver: CliNpmResolver,
  npm_version_resolver: Arc<NpmVersionResolver>,
  npm_installer: Arc<CliNpmInstaller>,
  permissions_container: PermissionsContainer,
  progress_bar: ProgressBar,
  main_module_graph_container: Arc<MainModuleGraphContainer>,
  lockfile: Option<Arc<CliLockfile>>,
}
/// Bundle of services needed to construct a [`DepManager`]; mirrors its
/// service fields one-to-one.
pub struct DepManagerArgs {
  pub module_load_preparer: Arc<ModuleLoadPreparer>,
  pub jsr_fetch_resolver: Arc<JsrFetchResolver>,
  pub npm_fetch_resolver: Arc<NpmFetchResolver>,
  pub npm_installer: Arc<CliNpmInstaller>,
  pub npm_resolver: CliNpmResolver,
  pub npm_version_resolver: Arc<NpmVersionResolver>,
  pub permissions_container: PermissionsContainer,
  pub progress_bar: ProgressBar,
  pub main_module_graph_container: Arc<MainModuleGraphContainer>,
  pub lockfile: Option<Arc<CliLockfile>>,
}
impl DepManager {
  /// Recreates the manager with fresh services after the config files were
  /// modified on disk, carrying over already-fetched latest-version info so
  /// it is not re-downloaded.
  pub fn reloaded_after_modification(self, args: DepManagerArgs) -> Self {
    let mut new = Self::with_deps_args(self.deps, args);
    new.latest_versions = self.latest_versions;
    new
  }
  /// Builds a manager for a known dependency list; resolution state starts
  /// empty and is filled in lazily by the `resolve_*` methods.
  fn with_deps_args(deps: Vec<Dep>, args: DepManagerArgs) -> Self {
    let DepManagerArgs {
      module_load_preparer,
      jsr_fetch_resolver,
      npm_fetch_resolver,
      npm_installer,
      npm_resolver,
      npm_version_resolver,
      progress_bar,
      permissions_container,
      main_module_graph_container,
      lockfile,
    } = args;
    Self {
      deps,
      resolved_versions: Vec::new(),
      latest_versions: Vec::new(),
      jsr_fetch_resolver,
      dependencies_resolved: AtomicFlag::lowered(),
      module_load_preparer,
      npm_fetch_resolver,
      npm_installer,
      npm_resolver,
      npm_version_resolver,
      progress_bar,
      permissions_container,
      main_module_graph_container,
      lockfile,
      pending_changes: Vec::new(),
    }
  }
  /// Collects deps from a single workspace directory's own deno.json and
  /// package.json. A deno.json inherited from an ancestor directory is
  /// deliberately skipped; remote deno.jsons are rejected.
  pub fn from_workspace_dir(
    workspace_dir: &Arc<WorkspaceDirectory>,
    dep_filter: impl DepFilter,
    args: DepManagerArgs,
  ) -> Result<Self, AnyError> {
    let mut deps = Vec::with_capacity(256);
    if let Some(deno_json) = workspace_dir.member_deno_json() {
      if deno_json.specifier.scheme() != "file" {
        bail!("remote deno.json files are not supported");
      }
      let path = deno_json.specifier.to_file_path().unwrap();
      // only the deno.json living directly in this directory counts
      if path.parent().unwrap() == workspace_dir.dir_path() {
        add_deps_from_deno_json(deno_json, dep_filter, &mut deps);
      }
    }
    if let Some(package_json) = workspace_dir.member_pkg_json() {
      add_deps_from_package_json(package_json, dep_filter, &mut deps);
    }
    Ok(Self::with_deps_args(deps, args))
  }
  /// Collects deps from every config file in the whole workspace.
  pub fn from_workspace(
    workspace: &Arc<Workspace>,
    dep_filter: impl DepFilter,
    args: DepManagerArgs,
  ) -> Result<Self, AnyError> {
    let deps = deps_from_workspace(workspace, dep_filter)?;
    Ok(Self::with_deps_args(deps, args))
  }
  /// Ensures every tracked dep is resolvable in the module graph / npm
  /// snapshot, fetching registry info and preparing a module load when
  /// needed. Idempotent: guarded by `dependencies_resolved`.
  async fn run_dependency_resolution(&self) -> Result<(), AnyError> {
    if self.dependencies_resolved.is_raised() {
      return Ok(());
    }
    let _clear_guard = self.progress_bar.deferred_keep_initialize_alive();
    let mut graph_permit = self
      .main_module_graph_container
      .acquire_update_permit()
      .await;
    let graph = graph_permit.graph_mut();
    // populate the information from the lockfile
    if let Some(lockfile) = &self.lockfile {
      lockfile.fill_graph(graph);
    }
    let npm_resolver = self.npm_resolver.as_managed().unwrap();
    // fast path: everything already resolves, nothing to fetch
    if self.deps.iter().all(|dep| match dep.kind {
      DepKind::Npm => npm_resolver
        .resolution()
        .resolve_pkg_id_from_pkg_req(&dep.req)
        .is_ok(),
      DepKind::Jsr => graph.packages.mappings().contains_key(&dep.req),
    }) {
      self.dependencies_resolved.raise();
      graph_permit.commit();
      return Ok(());
    }
    self
      .npm_installer
      .ensure_top_level_package_json_install()
      .await?;
    let mut roots = Vec::new();
    let mut info_futures = FuturesUnordered::new();
    // build graph roots for unresolved deno.json deps; jsr deps need a
    // registry round-trip to learn their exports
    for dep in &self.deps {
      if dep.location.is_deno_json() {
        match dep.kind {
          DepKind::Npm => roots.push(
            ModuleSpecifier::parse(&format!("npm:/{}/", dep.req)).unwrap(),
          ),
          DepKind::Jsr => {
            // prefer the highest already-known version that satisfies the
            // requirement over the graph's current mapping
            let resolved_nv = graph.packages.mappings().get(&dep.req);
            let resolved_nv = resolved_nv
              .and_then(|nv| {
                let versions =
                  graph.packages.versions_by_name(&dep.req.name)?;
                let mut best = nv;
                for version in versions {
                  if version.version > best.version
                    && dep.req.version_req.matches(&version.version)
                  {
                    best = version;
                  }
                }
                Some(best)
              })
              .cloned();
            info_futures.push(async {
              let nv = if let Some(nv) = resolved_nv {
                nv
              } else if let Some(nv) =
                self.jsr_fetch_resolver.req_to_nv(&dep.req).await?
              {
                nv
              } else {
                // requirement matched nothing on the registry
                return Result::<
                  Option<(Url, Arc<JsrPackageVersionInfo>)>,
                  JsrPackageReqNotFoundError,
                >::Ok(None);
              };
              if let Some(info) =
                self.jsr_fetch_resolver.package_version_info(&nv).await
              {
                let specifier =
                  ModuleSpecifier::parse(&format!("jsr:/{}/", &dep.req))
                    .unwrap();
                return Ok(Some((specifier, info)));
              }
              Ok(None)
            });
          }
        }
      }
    }
    // each jsr package contributes one root per export
    while let Some(info_future) = info_futures.next().await {
      if let Some((specifier, info)) = info_future? {
        let exports = info.exports();
        for (k, _) in exports {
          if let Ok(spec) = specifier.join(k) {
            roots.push(spec);
          }
        }
      }
    }
    self
      .module_load_preparer
      .prepare_module_load(
        graph,
        &roots,
        crate::module_loader::PrepareModuleLoadOptions {
          is_dynamic: false,
          lib: deno_config::workspace::TsTypeLib::DenoWindow,
          permissions: self.permissions_container.clone(),
          ext_overwrite: None,
          allow_unknown_media_types: true,
          skip_graph_roots_validation: false,
        },
      )
      .await?;
    self.dependencies_resolved.raise();
    graph_permit.commit();
    Ok(())
  }
  /// The currently-resolved version for a dep, once
  /// `resolve_current_versions` has run. Panics on an out-of-range id.
  pub fn resolved_version(&self, id: DepId) -> Option<&PackageNv> {
    self.resolved_versions[id.0].as_ref()
  }
  /// Fills `resolved_versions` (parallel to `deps`) from the npm snapshot
  /// and the graph's jsr mappings.
  pub async fn resolve_current_versions(&mut self) -> Result<(), AnyError> {
    self.run_dependency_resolution().await?;
    let graph = self.main_module_graph_container.graph();
    let mut resolved = Vec::with_capacity(self.deps.len());
    let snapshot = self
      .npm_resolver
      .as_managed()
      .unwrap()
      .resolution()
      .snapshot();
    let resolved_npm = snapshot.package_reqs();
    let resolved_jsr = graph.packages.mappings();
    for dep in &self.deps {
      match dep.kind {
        DepKind::Npm => {
          let resolved_version = resolved_npm.get(&dep.req).cloned();
          resolved.push(resolved_version);
        }
        DepKind::Jsr => {
          let resolved_version = resolved_jsr.get(&dep.req).cloned();
          resolved.push(resolved_version)
        }
      }
    }
    self.resolved_versions = resolved;
    Ok(())
  }
  /// Queries the registries for each dep's semver-compatible and overall
  /// latest versions, preserving `deps` order. Results are cached: if a
  /// full set was computed before, it is returned as-is. Requests are
  /// bounded to 32 in flight per registry.
  async fn load_latest_versions(
    &self,
  ) -> Result<Vec<PackageLatestVersion>, AnyError> {
    if self.latest_versions.len() == self.deps.len() {
      return Ok(self.latest_versions.clone());
    }
    let mut latest_versions = Vec::with_capacity(self.deps.len());
    let npm_sema = Semaphore::new(32);
    let jsr_sema = Semaphore::new(32);
    let mut futs = FuturesOrdered::new();
    for dep in &self.deps {
      match dep.kind {
        DepKind::Npm => futs.push_back(
          async {
            let semver_req = &dep.req;
            let _permit = npm_sema.acquire().await;
            let mut semver_compatible = self
              .npm_fetch_resolver
              .req_to_nv(semver_req)
              .await
              .ok()
              .flatten();
            let info =
              self.npm_fetch_resolver.package_info(&semver_req.name).await;
            let latest = info
              .and_then(|info| {
                let version_resolver =
                  self.npm_version_resolver.get_for_package(&info);
                // start from the registry's "latest" dist-tag
                let latest_tag = info.dist_tags.get("latest")?;
                let can_use_latest = version_resolver
                  .version_req_satisfies_and_matches_newest_dependency_date(
                    &semver_req.version_req,
                    latest_tag,
                  )
                  .ok()?;
                if can_use_latest {
                  // the tag both satisfies the req and passes the date
                  // constraint, so it doubles as the semver-compatible pick
                  semver_compatible = Some(PackageNv {
                    name: semver_req.name.clone(),
                    version: latest_tag.clone(),
                  });
                  return Some(latest_tag.clone());
                }
                let lower_bound = &semver_compatible.as_ref()?.version;
                let latest_matches_newest_dep_date =
                  version_resolver.matches_newest_dependency_date(latest_tag);
                if latest_matches_newest_dep_date && latest_tag >= lower_bound {
                  Some(latest_tag.clone())
                } else {
                  // fall back to scanning non-deprecated versions
                  latest_version(
                    if latest_matches_newest_dep_date {
                      Some(latest_tag)
                    } else {
                      None
                    },
                    version_resolver.applicable_version_infos().filter_map(
                      |version_info| {
                        if version_info.deprecated.is_none() {
                          Some(&version_info.version)
                        } else {
                          None
                        }
                      },
                    ),
                  )
                }
              })
              .map(|version| PackageNv {
                name: semver_req.name.clone(),
                version,
              });
            PackageLatestVersion {
              latest,
              semver_compatible,
            }
          }
          .boxed_local(),
        ),
        DepKind::Jsr => futs.push_back(
          async {
            let semver_req = &dep.req;
            let _permit = jsr_sema.acquire().await;
            let semver_compatible = self
              .jsr_fetch_resolver
              .req_to_nv(semver_req)
              .await
              .ok()
              .flatten();
            let info =
              self.jsr_fetch_resolver.package_info(&semver_req.name).await;
            let latest = info
              .and_then(|info| {
                let version_resolver = self
                  .jsr_fetch_resolver
                  .version_resolver_for_package(&semver_req.name, &info);
                let lower_bound = &semver_compatible.as_ref()?.version;
                // best non-yanked version at or above the compatible one
                latest_version(
                  Some(lower_bound),
                  info.versions.iter().filter_map(|(version, version_info)| {
                    if !version_info.yanked
                      && version_resolver
                        .matches_newest_dependency_date(version_info)
                    {
                      Some(version)
                    } else {
                      None
                    }
                  }),
                )
              })
              .map(|version| PackageNv {
                name: semver_req.name.clone(),
                version,
              });
            PackageLatestVersion {
              latest,
              semver_compatible,
            }
          }
          .boxed_local(),
        ),
      }
    }
    // FuturesOrdered preserves `deps` ordering for the parallel vec
    while let Some(nv) = futs.next().await {
      latest_versions.push(nv);
    }
    Ok(latest_versions)
  }
  /// Resolves current and latest versions for all deps (registry fetches
  /// and graph resolution run concurrently).
  pub async fn resolve_versions(&mut self) -> Result<(), AnyError> {
    let (_, latest_versions) = try_join(
      self.run_dependency_resolution(),
      self.load_latest_versions(),
    )
    .await?;
    self.latest_versions = latest_versions;
    self.resolve_current_versions().await?;
    Ok(())
  }
  /// Pairs each dep id with its resolved and latest versions; call after
  /// `resolve_versions`.
  pub fn deps_with_resolved_latest_versions(
    &self,
  ) -> impl IntoIterator<Item = (DepId, Option<PackageNv>, PackageLatestVersion)> + '_
  {
    self
      .resolved_versions
      .iter()
      .zip(self.latest_versions.iter())
      .enumerate()
      .map(|(i, (resolved, latest))| {
        (DepId(i), resolved.clone(), latest.clone())
      })
  }
  /// Looks up a dep by id. Panics on an out-of-range id.
  pub fn get_dep(&self, id: DepId) -> &Dep {
    &self.deps[id.0]
  }
  /// Queues a version-requirement change; nothing is written to disk until
  /// `commit_changes` is called.
  pub fn update_dep(&mut self, dep_id: DepId, new_version_req: VersionReq) {
    self
      .pending_changes
      .push(Change::Update(dep_id, new_version_req));
  }
  /// Applies all queued changes to the in-memory deps and writes the
  /// affected config files back to disk. Entries whose config-file property
  /// can no longer be found are warned about and skipped.
  pub fn commit_changes(&mut self) -> Result<(), AnyError> {
    let changes = std::mem::take(&mut self.pending_changes);
    // one updater per config file, shared across changes to that file
    let mut config_updaters = HashMap::new();
    for change in changes {
      match change {
        Change::Update(dep_id, version_req) => {
          // TODO: move most of this to ConfigUpdater
          let dep = &mut self.deps[dep_id.0];
          dep.req.version_req = version_req.clone();
          match &dep.location {
            DepLocation::DenoJson(arc, key_path, _) => {
              let updater =
                get_or_create_updater(&mut config_updaters, &dep.location)?;
              let Some(property) = updater.get_property_for_mutation(key_path)
              else {
                log::warn!(
                  "failed to find property at path {key_path:?} for file {}",
                  arc.specifier
                );
                continue;
              };
              let Some(string_value) = cst_string_literal(&property) else {
                continue;
              };
              let mut req_reference = match dep.kind {
                DepKind::Npm => NpmPackageReqReference::from_str(&string_value)
                  .unwrap()
                  .into_inner(),
                DepKind::Jsr => JsrPackageReqReference::from_str(&string_value)
                  .unwrap()
                  .into_inner(),
              };
              req_reference.req.version_req = version_req;
              let mut new_value =
                format!("{}:{}", dep.kind.scheme(), req_reference);
              if string_value.ends_with('/') && !new_value.ends_with('/') {
                // the display impl for PackageReqReference maps `/` to the root
                // subpath, but for the import map the trailing `/` is significant
                new_value.push('/');
              }
              if string_value
                .trim_start_matches(format!("{}:", dep.kind.scheme()).as_str())
                .starts_with('/')
              {
                // this is gross
                new_value = new_value.replace(':', ":/");
              }
              property
                .set_value(jsonc_parser::cst::CstInputValue::String(new_value));
            }
            DepLocation::PackageJson(arc, key_path) => {
              let updater =
                get_or_create_updater(&mut config_updaters, &dep.location)?;
              let Some(property) = updater.get_property_for_mutation(key_path)
              else {
                log::warn!(
                  "failed to find property at path {key_path:?} for file {}",
                  arc.path.display()
                );
                continue;
              };
              let Some(string_value) = cst_string_literal(&property) else {
                continue;
              };
              let new_value = if string_value.starts_with("npm:") {
                // aliased
                let rest = string_value.trim_start_matches("npm:");
                let mut parts = rest.split('@');
                let first = parts.next().unwrap();
                if first.is_empty() {
                  // scoped package: "npm:@scope/name@..." splits with an
                  // empty first segment
                  let scope_and_name = parts.next().unwrap();
                  format!("npm:@{scope_and_name}@{version_req}")
                } else {
                  format!("npm:{first}@{version_req}")
                }
              } else if string_value.contains(":") {
                bail!(
                  "Unexpected package json dependency string: \"{string_value}\" in {}",
                  arc.path.display()
                );
              } else {
                version_req.to_string()
              };
              property
                .set_value(jsonc_parser::cst::CstInputValue::String(new_value));
            }
          }
        }
      }
    }
    // flush every touched config file to disk
    for (_, updater) in config_updaters {
      updater.commit()?;
    }
    Ok(())
  }
}
fn get_or_create_updater<'a>(
config_updaters: &'a mut HashMap<std::path::PathBuf, ConfigUpdater>,
location: &DepLocation,
) -> Result<&'a mut ConfigUpdater, AnyError> {
match config_updaters.entry(location.file_path().into_owned()) {
std::collections::hash_map::Entry::Occupied(occupied_entry) => {
Ok(occupied_entry.into_mut())
}
std::collections::hash_map::Entry::Vacant(vacant_entry) => {
let updater = ConfigUpdater::new(
location.config_kind(),
location.file_path().into_owned(),
)?;
Ok(vacant_entry.insert(updater))
}
}
}
/// Extracts the decoded string value of a JSONC object property, logging a
/// warning and returning `None` for anything malformed (missing value,
/// non-string value, or an undecodable literal).
fn cst_string_literal(
  property: &jsonc_parser::cst::CstObjectProp,
) -> Option<String> {
  // resolves the old TODO about `property.value().unwrap()`: a property
  // without a value is treated like the other malformed cases below
  // instead of panicking
  let Some(value) = property.value() else {
    log::warn!("malformed entry: property has no value");
    return None;
  };
  let Some(string) = value.as_string_lit() else {
    log::warn!("malformed entry");
    return None;
  };
  let Ok(string_value) = string.decoded_value() else {
    log::warn!("malformed string: {string:?}");
    return None;
  };
  Some(string_value)
}
/// Parses a registry specifier string (e.g. `npm:foo@1` / `jsr:@a/b@2`)
/// into a package requirement reference using the parser for `kind`.
fn parse_req_reference(
  input: &str,
  kind: DepKind,
) -> Result<
  PackageReqReference,
  deno_semver::package::PackageReqReferenceParseError,
> {
  let req_reference = match kind {
    DepKind::Npm => NpmPackageReqReference::from_str(input)?.into_inner(),
    DepKind::Jsr => JsrPackageReqReference::from_str(input)?.into_inner(),
  };
  Ok(req_reference)
}
/// Returns the greatest version among `start` (if any) and `versions`,
/// cloned; `None` when both are empty.
fn latest_version<'a>(
  start: Option<&Version>,
  versions: impl IntoIterator<Item = &'a Version>,
) -> Option<Version> {
  versions
    .into_iter()
    .fold(start, |best, candidate| match best {
      // keep the incumbent unless the candidate is strictly greater
      Some(current) if current >= candidate => Some(current),
      _ => Some(candidate),
    })
    .cloned()
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/tools/pm/mod.rs | cli/tools/pm/mod.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::path::Path;
use std::path::PathBuf;
use std::sync::Arc;
use deno_cache_dir::GlobalOrLocalHttpCache;
use deno_cache_dir::file_fetcher::CacheSetting;
use deno_core::anyhow::Context;
use deno_core::anyhow::bail;
use deno_core::error::AnyError;
use deno_core::futures::FutureExt;
use deno_core::futures::StreamExt;
use deno_path_util::url_to_file_path;
use deno_semver::StackString;
use deno_semver::Version;
use deno_semver::jsr::JsrPackageReqReference;
use deno_semver::npm::NpmPackageReqReference;
use deno_semver::package::PackageName;
use deno_semver::package::PackageNv;
use deno_semver::package::PackageReq;
use deps::KeyPath;
use jsonc_parser::cst::CstObject;
use jsonc_parser::cst::CstObjectProp;
use jsonc_parser::cst::CstRootNode;
use jsonc_parser::json;
use crate::args::AddFlags;
use crate::args::CliOptions;
use crate::args::Flags;
use crate::args::RemoveFlags;
use crate::factory::CliFactory;
use crate::file_fetcher::CreateCliFileFetcherOptions;
use crate::file_fetcher::create_cli_file_fetcher;
use crate::jsr::JsrFetchResolver;
use crate::npm::NpmFetchResolver;
mod approve_scripts;
mod audit;
mod cache_deps;
pub(crate) mod deps;
pub(crate) mod interactive_picker;
mod outdated;
pub use approve_scripts::approve_scripts;
pub use audit::audit;
pub use cache_deps::CacheTopLevelDepsOptions;
pub use cache_deps::cache_top_level_deps;
pub use outdated::outdated;
/// Which flavor of config file a `ConfigUpdater` is editing; determines
/// where dependency entries live within the JSON.
#[derive(Debug, Copy, Clone, Hash)]
enum ConfigKind {
  DenoJson,
  PackageJson,
}
/// Format-preserving editor for a deno.json or package.json: mutates the
/// JSONC concrete syntax tree in memory and writes it back on `commit`.
struct ConfigUpdater {
  kind: ConfigKind,
  // full CST of the file, preserving comments and formatting
  cst: CstRootNode,
  // the file's root object (created if the file was empty)
  root_object: CstObject,
  path: PathBuf,
  // whether anything was (potentially) mutated; `commit` is a no-op otherwise
  modified: bool,
}
impl ConfigUpdater {
  /// Reads and parses the config file at `config_file_path` into an
  /// editable CST. Errors on read or parse failure.
  fn new(
    kind: ConfigKind,
    config_file_path: PathBuf,
  ) -> Result<Self, AnyError> {
    let config_file_contents = std::fs::read_to_string(&config_file_path)
      .with_context(|| {
        format!("Reading config file '{}'", config_file_path.display())
      })?;
    let cst = CstRootNode::parse(&config_file_contents, &Default::default())
      .with_context(|| {
        format!("Parsing config file '{}'", config_file_path.display())
      })?;
    let root_object = cst.object_value_or_set();
    Ok(Self {
      kind,
      cst,
      root_object,
      path: config_file_path,
      modified: false,
    })
  }
  /// User-facing representation of the file's location (URL when the path
  /// converts cleanly, otherwise the raw path).
  fn display_path(&self) -> String {
    deno_path_util::url_from_file_path(&self.path)
      .map(|u| u.to_string())
      .unwrap_or_else(|_| self.path.display().to_string())
  }
  /// The file's root JSON object.
  fn obj(&self) -> &CstObject {
    &self.root_object
  }
  /// Serializes the (possibly mutated) CST back to text.
  fn contents(&self) -> String {
    self.cst.to_string()
  }
  /// Walks `key_path` through nested objects and returns the final
  /// property for in-place mutation, or `None` if any segment is missing.
  ///
  /// NOTE(review): this sets `modified` before the lookup succeeds, so a
  /// failed lookup still causes `commit` to rewrite the (unchanged) file —
  /// presumably harmless, but worth confirming.
  fn get_property_for_mutation(
    &mut self,
    key_path: &KeyPath,
  ) -> Option<CstObjectProp> {
    let mut current_node = self.root_object.clone();
    self.modified = true;
    for (i, part) in key_path.parts.iter().enumerate() {
      let s = part.as_str();
      if i < key_path.parts.len().saturating_sub(1) {
        let object = current_node.object_value(s)?;
        current_node = object;
      } else {
        // last part
        return current_node.get(s);
      }
    }
    None
  }
  /// Adds (or overwrites) a dependency entry for `selected`, keeping the
  /// relevant object alphabetically sorted. For package.json, `dev`
  /// selects `devDependencies`, and any entry for the same alias in the
  /// *other* section is removed first.
  fn add(&mut self, selected: SelectedPackage, dev: bool) {
    // index at which `searching_name` keeps the object alphabetized
    fn insert_index(object: &CstObject, searching_name: &str) -> usize {
      object
        .properties()
        .into_iter()
        .take_while(|prop| {
          let prop_name =
            prop.name().and_then(|name| name.decoded_value().ok());
          match prop_name {
            Some(current_name) => {
              searching_name.cmp(&current_name) == std::cmp::Ordering::Greater
            }
            None => true,
          }
        })
        .count()
    }
    match self.kind {
      ConfigKind::DenoJson => {
        let imports = self.root_object.object_value_or_set("imports");
        let value =
          format!("{}@{}", selected.package_name, selected.version_req);
        match imports.get(&selected.import_name) {
          Some(prop) => {
            prop.set_value(json!(value));
          }
          _ => {
            let index = insert_index(&imports, &selected.import_name);
            imports.insert(index, &selected.import_name, json!(value));
          }
        }
      }
      ConfigKind::PackageJson => {
        let deps_prop = self.root_object.get("dependencies");
        let dev_deps_prop = self.root_object.get("devDependencies");
        // locate (or create) the target section, positioning a newly
        // created section adjacent to the existing one
        let dependencies = if dev {
          self
            .root_object
            .object_value("devDependencies")
            .unwrap_or_else(|| {
              let index = deps_prop
                .as_ref()
                .map(|p| p.property_index() + 1)
                .unwrap_or_else(|| self.root_object.properties().len());
              self
                .root_object
                .insert(index, "devDependencies", json!({}))
                .object_value_or_set()
            })
        } else {
          self
            .root_object
            .object_value("dependencies")
            .unwrap_or_else(|| {
              let index = dev_deps_prop
                .as_ref()
                .map(|p| p.property_index())
                .unwrap_or_else(|| self.root_object.properties().len());
              self
                .root_object
                .insert(index, "dependencies", json!({}))
                .object_value_or_set()
            })
        };
        let other_dependencies = if dev {
          deps_prop.and_then(|p| p.value().and_then(|v| v.as_object()))
        } else {
          dev_deps_prop.and_then(|p| p.value().and_then(|v| v.as_object()))
        };
        let (alias, value) = package_json_dependency_entry(selected);
        // a package should live in exactly one of the two sections
        if let Some(other) = other_dependencies
          && let Some(prop) = other.get(&alias)
        {
          remove_prop_and_maybe_parent_prop(prop);
        }
        match dependencies.get(&alias) {
          Some(prop) => {
            prop.set_value(json!(value));
          }
          _ => {
            let index = insert_index(&dependencies, &alias);
            dependencies.insert(index, &alias, json!(value));
          }
        }
      }
    }
    self.modified = true;
  }
  /// Removes `package` from the file's dependency entries; returns whether
  /// anything was removed.
  fn remove(&mut self, package: &str) -> bool {
    let removed = match self.kind {
      ConfigKind::DenoJson => {
        match self
          .root_object
          .object_value("imports")
          .and_then(|i| i.get(package))
        {
          Some(prop) => {
            remove_prop_and_maybe_parent_prop(prop);
            true
          }
          _ => false,
        }
      }
      ConfigKind::PackageJson => {
        // the package may appear in either (or both) sections
        let deps = [
          self
            .root_object
            .object_value("dependencies")
            .and_then(|deps| deps.get(package)),
          self
            .root_object
            .object_value("devDependencies")
            .and_then(|deps| deps.get(package)),
        ];
        let removed = deps.iter().any(|d| d.is_some());
        for dep in deps.into_iter().flatten() {
          remove_prop_and_maybe_parent_prop(dep);
        }
        removed
      }
    };
    if removed {
      self.modified = true;
    }
    removed
  }
  /// Sets (or appends) the top-level `allowScripts` value.
  fn set_allow_scripts_value(
    &mut self,
    value: jsonc_parser::cst::CstInputValue,
  ) {
    if let Some(prop) = self.root_object.get("allowScripts") {
      prop.set_value(value);
    } else {
      let index = self.root_object.properties().len();
      self.root_object.insert(index, "allowScripts", value);
    }
    self.modified = true;
  }
  /// Writes the file back to disk; a no-op when nothing was modified.
  fn commit(&self) -> Result<(), AnyError> {
    if !self.modified {
      return Ok(());
    }
    let new_text = self.contents();
    std::fs::write(&self.path, new_text).with_context(|| {
      format!("failed writing to '{}'", self.path.display())
    })?;
    Ok(())
  }
}
/// Removes a property from its containing object; if that leaves the
/// containing object empty, the containing object's own property is removed
/// from the root as well (e.g. dropping a now-empty `dependencies`).
///
/// NOTE(review): the unwraps assume `prop` is nested exactly
/// object-in-property-in-root-object, which holds for how callers obtain it.
fn remove_prop_and_maybe_parent_prop(prop: CstObjectProp) {
  let parent = prop.parent().unwrap().as_object().unwrap();
  prop.remove();
  if parent.properties().is_empty() {
    let parent_property = parent.parent().unwrap();
    let root_object = parent_property.parent().unwrap().as_object().unwrap();
    // remove the property
    parent_property.remove();
    root_object.ensure_multiline();
  }
}
/// Creates an empty `deno.json` in the initial working directory and
/// returns a fresh `CliFactory` so the new file is picked up by config
/// discovery. Errors if the file cannot be written.
fn create_deno_json(
  flags: &Arc<Flags>,
  options: &CliOptions,
) -> Result<CliFactory, AnyError> {
  std::fs::write(options.initial_cwd().join("deno.json"), "{}\n")
    .context("Failed to create deno.json file")?;
  log::info!("Created deno.json configuration file.");
  // rebuild the factory so it re-discovers config with the new file present
  let factory = CliFactory::from_flags(flags.clone());
  Ok(factory)
}
/// Converts a selected package into the `(key, value)` pair to write into a
/// package.json dependencies object.
///
/// npm packages become either a bare version (unaliased) or an
/// `npm:<name>@<req>` alias value; jsr packages are expressed through npm's
/// `@jsr/` scope convention; anything else passes through unchanged.
fn package_json_dependency_entry(
  selected: SelectedPackage,
) -> (String, String) {
  if let Some(npm_package) = selected.package_name.strip_prefix("npm:") {
    if selected.import_name == npm_package {
      // not aliased: plain "name": "req"
      return (npm_package.to_string(), selected.version_req);
    }
    return (
      selected.import_name.into_string(),
      format!("npm:{}@{}", npm_package, selected.version_req),
    );
  }
  if let Some(jsr_package) = selected.package_name.strip_prefix("jsr:") {
    // jsr packages are addressed on npm as @jsr/<scope>__<name>
    let bare = jsr_package.strip_prefix('@').unwrap_or(jsr_package);
    let scope_replaced = bare.replace('/', "__");
    let version_req =
      format!("npm:@jsr/{scope_replaced}@{}", selected.version_req);
    return (selected.import_name.into_string(), version_req);
  }
  (selected.package_name, selected.version_req)
}
/// The name of the subcommand invoking the `add` operation.
///
/// Used only to render accurate user-facing messages (`deno add …` vs
/// `deno install …`) via the `Display` impl.
// note: the doc comment previously sat *after* `#[derive]`; docs before
// attributes is the conventional (rustfmt/idiomatic) ordering
#[derive(Clone, Copy)]
pub enum AddCommandName {
  /// `deno add`
  Add,
  /// `deno install`
  Install,
}
impl std::fmt::Display for AddCommandName {
  /// Renders the subcommand exactly as typed on the command line.
  fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
    let name = match self {
      AddCommandName::Add => "add",
      AddCommandName::Install => "install",
    };
    f.write_str(name)
  }
}
/// Loads updaters for the start directory's package.json and deno.json.
///
/// When no deno.json exists but one is needed (there is no package.json, or
/// jsr specifiers are being added), an empty deno.json is created and the
/// factory rebuilt. `has_jsr_specifiers` is only consulted in that
/// ambiguous case, hence `FnOnce`.
///
/// Postcondition: at least one of the two returned updaters is `Some`.
fn load_configs(
  flags: &Arc<Flags>,
  has_jsr_specifiers: impl FnOnce() -> bool,
) -> Result<(CliFactory, Option<ConfigUpdater>, Option<ConfigUpdater>), AnyError>
{
  let cli_factory = CliFactory::from_flags(flags.clone());
  let options = cli_factory.cli_options()?;
  let start_dir = &options.start_dir;
  let npm_config = match start_dir.member_pkg_json() {
    Some(pkg_json) => Some(ConfigUpdater::new(
      ConfigKind::PackageJson,
      pkg_json.path.clone(),
    )?),
    None => None,
  };
  let deno_config = match start_dir.member_deno_json() {
    Some(deno_json) => Some(ConfigUpdater::new(
      ConfigKind::DenoJson,
      url_to_file_path(&deno_json.specifier)?,
    )?),
    None => None,
  };
  let (cli_factory, deno_config) = match deno_config {
    Some(config) => (cli_factory, Some(config)),
    // package.json alone suffices when nothing jsr-flavored is involved
    None if npm_config.is_some() && !has_jsr_specifiers() => {
      (cli_factory, None)
    }
    _ => {
      // no usable config: create a deno.json and re-discover
      let factory = create_deno_json(flags, options)?;
      let options = factory.cli_options()?.clone();
      let deno_json = options
        .start_dir
        .member_or_root_deno_json()
        .expect("Just created deno.json");
      (
        factory,
        Some(ConfigUpdater::new(
          ConfigKind::DenoJson,
          url_to_file_path(&deno_json.specifier)?,
        )?),
      )
    }
  };
  assert!(deno_config.is_some() || npm_config.is_some());
  Ok((cli_factory, npm_config, deno_config))
}
/// Number of path components separating `a` from `b` (via their relative
/// diff); `usize::MAX` when no relative path exists, so incomparable paths
/// always lose a "closest path" comparison.
fn path_distance(a: &Path, b: &Path) -> usize {
  pathdiff::diff_paths(a, b)
    .map(|diff| diff.components().count())
    .unwrap_or(usize::MAX)
}
/// Implements `deno add`: resolves each requested package against the jsr
/// and/or npm registries, writes the selected versions into deno.json
/// and/or package.json, then re-runs installation so the lockfile (and
/// optionally node_modules) reflect the change.
pub async fn add(
  flags: Arc<Flags>,
  add_flags: AddFlags,
  cmd_name: AddCommandName,
) -> Result<(), AnyError> {
  // The closure reports whether any requested package targets jsr —
  // presumably used by `load_configs` when deciding which config file to
  // create if none exists (see `load_configs`).
  let (cli_factory, mut npm_config, mut deno_config) =
    load_configs(&flags, || {
      add_flags.packages.iter().any(|s| s.starts_with("jsr:"))
    })?;
  // An external "importMap" file cannot be edited by this command.
  if let Some(deno) = &deno_config
    && deno.obj().get("importMap").is_some()
  {
    bail!(
      concat!(
        "`deno {}` is not supported when configuration file contains an \"importMap\" field. ",
        "Inline the import map into the Deno configuration file.\n",
        " at {}",
      ),
      cmd_name,
      deno.display_path(),
    );
  }
  let start_dir = cli_factory.cli_options()?.start_dir.dir_path();
  // only prefer to add npm deps to `package.json` if there isn't a closer deno.json.
  // example: if deno.json is in the CWD and package.json is in the parent, we should add
  // npm deps to deno.json, since it's closer
  let prefer_npm_config = match (npm_config.as_ref(), deno_config.as_ref()) {
    (Some(npm), Some(deno)) => {
      let npm_distance = path_distance(&npm.path, &start_dir);
      let deno_distance = path_distance(&deno.path, &start_dir);
      npm_distance <= deno_distance
    }
    (Some(_), None) => true,
    (None, _) => false,
  };
  let http_client = cli_factory.http_client_provider();
  let deps_http_cache = cli_factory.global_http_cache()?;
  // Fetcher for registry metadata; `ReloadAll` forces fresh version
  // listings rather than cached ones.
  let deps_file_fetcher = create_cli_file_fetcher(
    Default::default(),
    GlobalOrLocalHttpCache::Global(deps_http_cache.clone()),
    http_client.clone(),
    cli_factory.memory_files().clone(),
    cli_factory.sys(),
    CreateCliFileFetcherOptions {
      allow_remote: true,
      cache_setting: CacheSetting::ReloadAll,
      download_log_level: log::Level::Trace,
      progress_bar: None,
    },
  );
  let npmrc = cli_factory.npmrc()?;
  let deps_file_fetcher = Arc::new(deps_file_fetcher);
  let jsr_resolver = Arc::new(JsrFetchResolver::new(
    deps_file_fetcher.clone(),
    cli_factory.jsr_version_resolver()?.clone(),
  ));
  let npm_resolver = Arc::new(NpmFetchResolver::new(
    deps_file_fetcher,
    npmrc.clone(),
    cli_factory.npm_version_resolver()?.clone(),
  ));
  let mut selected_packages = Vec::with_capacity(add_flags.packages.len());
  let mut package_reqs = Vec::with_capacity(add_flags.packages.len());
  // Parse every CLI argument. `Err(package_req)` means the text parsed but
  // had no jsr:/npm: prefix; probe both registries so the error can
  // suggest the right prefix.
  for entry_text in add_flags.packages.iter() {
    let req = AddRmPackageReq::parse(
      entry_text,
      add_flags.default_registry.map(|r| r.into()),
    )
    .with_context(|| format!("Failed to parse package: {}", entry_text))?;
    match req {
      Ok(add_req) => package_reqs.push(add_req),
      Err(package_req) => {
        if jsr_resolver
          .req_to_nv(&package_req)
          .await
          .ok()
          .flatten()
          .is_some()
        {
          bail!(
            "{entry_text} is missing a prefix. Did you mean `{}`?",
            crate::colors::yellow(format!("deno {cmd_name} jsr:{package_req}"))
          )
        } else if npm_resolver
          .req_to_nv(&package_req)
          .await
          .ok()
          .flatten()
          .is_some()
        {
          bail!(
            "{entry_text} is missing a prefix. Did you mean `{}`?",
            crate::colors::yellow(format!("deno {cmd_name} npm:{package_req}"))
          )
        } else {
          bail!(
            "{} was not found in either jsr or npm.",
            crate::colors::red(entry_text)
          );
        }
      }
    }
  }
  // Resolve all requested packages concurrently, at most 10 in flight.
  let package_futures = package_reqs
    .into_iter()
    .map({
      let jsr_resolver = jsr_resolver.clone();
      move |package_req| {
        find_package_and_select_version_for_req(
          jsr_resolver.clone(),
          npm_resolver.clone(),
          package_req,
        )
        .boxed_local()
      }
    })
    .collect::<Vec<_>>();
  let stream_of_futures = deno_core::futures::stream::iter(package_futures);
  let mut buffered = stream_of_futures.buffered(10);
  while let Some(package_and_version_result) = buffered.next().await {
    let package_and_version = package_and_version_result?;
    match package_and_version {
      // Any not-found package aborts the whole command with the most
      // helpful hint available.
      PackageAndVersion::NotFound {
        package: package_name,
        help,
        package_req,
      } => match help {
        Some(NotFoundHelp::NpmPackage) => {
          bail!(
            "{} was not found, but a matching npm package exists. Did you mean `{}`?",
            crate::colors::red(package_name),
            crate::colors::yellow(format!("deno {cmd_name} npm:{package_req}"))
          );
        }
        Some(NotFoundHelp::JsrPackage) => {
          bail!(
            "{} was not found, but a matching jsr package exists. Did you mean `{}`?",
            crate::colors::red(package_name),
            crate::colors::yellow(format!("deno {cmd_name} jsr:{package_req}"))
          )
        }
        Some(NotFoundHelp::PreReleaseVersion(version)) => {
          bail!(
            "{} has only pre-release versions available. Try specifying a version: `{}`",
            crate::colors::red(&package_name),
            crate::colors::yellow(format!(
              "deno {cmd_name} {package_name}@^{version}"
            ))
          )
        }
        None => bail!("{} was not found.", crate::colors::red(package_name)),
      },
      PackageAndVersion::Selected(selected) => {
        selected_packages.push(selected);
      }
    }
  }
  let dev = add_flags.dev;
  // Write each selected package into whichever config file wins:
  // npm packages go to package.json only when it is at least as close as
  // deno.json (see `prefer_npm_config` above).
  for selected_package in selected_packages {
    log::info!(
      "Add {}{}{}",
      crate::colors::green(&selected_package.package_name),
      crate::colors::gray("@"),
      selected_package.selected_version
    );
    if selected_package.package_name.starts_with("npm:") && prefer_npm_config {
      if let Some(npm) = &mut npm_config {
        npm.add(selected_package, dev);
      } else {
        deno_config.as_mut().unwrap().add(selected_package, dev);
      }
    } else if let Some(deno) = &mut deno_config {
      deno.add(selected_package, dev);
    } else {
      npm_config.as_mut().unwrap().add(selected_package, dev);
    }
  }
  // Persist both config files, then reinstall to update the lockfile.
  if let Some(npm) = npm_config {
    npm.commit()?;
  }
  if let Some(deno) = deno_config {
    deno.commit()?;
  }
  npm_install_after_modification(
    flags,
    Some(jsr_resolver),
    CacheTopLevelDepsOptions {
      lockfile_only: add_flags.lockfile_only,
    },
  )
  .await?;
  Ok(())
}
/// A package that resolved successfully and will be written to a config
/// file by `add`.
struct SelectedPackage {
  // Key written to "imports"/"dependencies" (the alias, or the package name).
  import_name: StackString,
  // Prefixed name, e.g. "npm:preact" or "jsr:@std/path".
  package_name: String,
  // Version requirement written to the config, e.g. "^1.2.3".
  version_req: String,
  // The concrete version the requirement resolved to (for logging).
  selected_version: StackString,
}
/// Extra hint attached to a "package not found" error.
enum NotFoundHelp {
  /// A package with the same name exists on npm.
  NpmPackage,
  /// A package with the same name exists on jsr.
  JsrPackage,
  /// Only pre-release versions were found; suggests pinning this version.
  PreReleaseVersion(Version),
}
/// Outcome of resolving a single requested package.
enum PackageAndVersion {
  NotFound {
    /// Prefixed package name, e.g. "jsr:foo".
    package: String,
    package_req: PackageReq,
    /// Optional hint about where the package might actually live.
    help: Option<NotFoundHelp>,
  },
  Selected(SelectedPackage),
}
/// Returns the greatest version yielded by `versions`, or `None` when the
/// iterator is empty.
fn best_version<'a>(
  versions: impl Iterator<Item = &'a Version>,
) -> Option<&'a Version> {
  // `Version` is `Ord`, so the previous hand-rolled scan was just
  // `Iterator::max`. Equal versions compare equal, so which of several
  // tied references is returned does not affect callers (they clone the
  // value).
  versions.max()
}
/// Abstraction over a package registry (jsr or npm) used when resolving
/// `deno add` arguments.
trait PackageInfoProvider {
  /// Scheme shown for this registry's packages ("jsr" or "npm").
  const SPECIFIER_PREFIX: &str;
  /// The help to return if a package is found by this provider
  const HELP: NotFoundHelp;
  /// Resolves a version requirement to a concrete name+version, if a
  /// matching version exists on this registry.
  async fn req_to_nv(
    &self,
    req: &PackageReq,
  ) -> Result<Option<PackageNv>, AnyError>;
  /// The highest available version of `name` (registry-specific filtering
  /// applies), or `None` when the package info is unavailable.
  async fn latest_version(&self, name: &PackageName) -> Option<Version>;
}
impl PackageInfoProvider for Arc<JsrFetchResolver> {
  const HELP: NotFoundHelp = NotFoundHelp::JsrPackage;
  const SPECIFIER_PREFIX: &str = "jsr";
  // Delegates to the inherent `JsrFetchResolver::req_to_nv`.
  async fn req_to_nv(
    &self,
    req: &PackageReq,
  ) -> Result<Option<PackageNv>, AnyError> {
    Ok((**self).req_to_nv(req).await?)
  }
  async fn latest_version(&self, name: &PackageName) -> Option<Version> {
    let info = self.package_info(name).await?;
    // Skip yanked versions when picking the newest release.
    best_version(
      info
        .versions
        .iter()
        .filter(|(_, version_info)| !version_info.yanked)
        .map(|(version, _)| version),
    )
    .cloned()
  }
}
impl PackageInfoProvider for Arc<NpmFetchResolver> {
  const HELP: NotFoundHelp = NotFoundHelp::NpmPackage;
  const SPECIFIER_PREFIX: &str = "npm";
  // Delegates to the inherent `NpmFetchResolver::req_to_nv`.
  async fn req_to_nv(
    &self,
    req: &PackageReq,
  ) -> Result<Option<PackageNv>, AnyError> {
    (**self).req_to_nv(req).await
  }
  async fn latest_version(&self, name: &PackageName) -> Option<Version> {
    let info = self.package_info(name).await?;
    // NOTE(review): `applicable_version_infos` presumably pre-filters to
    // versions this resolver considers usable — confirm against
    // `NpmFetchResolver`.
    best_version(self.applicable_version_infos(&info).map(|vi| &vi.version))
      .cloned()
  }
}
/// Resolves one parsed `deno add` request against its registry, producing
/// either a selected package+version or a `NotFound` result carrying a
/// hint (package exists on the other registry, or only pre-releases exist).
async fn find_package_and_select_version_for_req(
  jsr_resolver: Arc<JsrFetchResolver>,
  npm_resolver: Arc<NpmFetchResolver>,
  add_package_req: AddRmPackageReq,
) -> Result<PackageAndVersion, AnyError> {
  // `main_resolver` is the registry the request names; the fallback is
  // only probed to improve the not-found error message.
  async fn select<T: PackageInfoProvider, S: PackageInfoProvider>(
    main_resolver: T,
    fallback_resolver: S,
    add_package_req: AddRmPackageReq,
  ) -> Result<PackageAndVersion, AnyError> {
    let req = match &add_package_req.value {
      AddRmPackageReqValue::Jsr(req) => req,
      AddRmPackageReqValue::Npm(req) => req,
    };
    let prefixed_name = format!("{}:{}", T::SPECIFIER_PREFIX, req.name);
    let help_if_found_in_fallback = S::HELP;
    let nv = match main_resolver.req_to_nv(req).await {
      Ok(Some(nv)) => nv,
      Ok(None) => {
        if fallback_resolver
          .req_to_nv(req)
          .await
          .ok()
          .flatten()
          .is_some()
        {
          // it's in the other registry
          return Ok(PackageAndVersion::NotFound {
            package: prefixed_name,
            help: Some(help_if_found_in_fallback),
            package_req: req.clone(),
          });
        }
        return Ok(PackageAndVersion::NotFound {
          package: prefixed_name,
          help: None,
          package_req: req.clone(),
        });
      }
      Err(err) => {
        // A bare "*" requirement that fails to resolve but whose package
        // has some latest version presumably only has pre-releases —
        // surface that as a hint instead of the raw error.
        if req.version_req.version_text() == "*"
          && let Some(pre_release_version) =
            main_resolver.latest_version(&req.name).await
        {
          return Ok(PackageAndVersion::NotFound {
            package: prefixed_name,
            package_req: req.clone(),
            help: Some(NotFoundHelp::PreReleaseVersion(
              pre_release_version.clone(),
            )),
          });
        }
        return Err(err);
      }
    };
    // Preserve the operator the user asked for: keep `~`, pin exactly when
    // an exact version was given, otherwise default to `^`.
    let range_symbol = if req.version_req.version_text().starts_with('~') {
      "~"
    } else if req.version_req.version_text() == nv.version.to_string() {
      ""
    } else {
      "^"
    };
    Ok(PackageAndVersion::Selected(SelectedPackage {
      import_name: add_package_req.alias,
      package_name: prefixed_name,
      version_req: format!("{}{}", range_symbol, &nv.version),
      selected_version: nv.version.to_custom_string::<StackString>(),
    }))
  }
  match &add_package_req.value {
    AddRmPackageReqValue::Jsr(_) => {
      select(jsr_resolver, npm_resolver, add_package_req).await
    }
    AddRmPackageReqValue::Npm(_) => {
      select(npm_resolver, jsr_resolver, add_package_req).await
    }
  }
}
/// Registry-tagged package requirement parsed from a CLI argument.
#[derive(Debug, PartialEq, Eq)]
enum AddRmPackageReqValue {
  Jsr(PackageReq),
  Npm(PackageReq),
}
/// A parsed `deno add`/`deno remove` argument: the config-file key
/// (`alias`) plus the registry-tagged requirement it maps to.
#[derive(Debug, PartialEq, Eq)]
pub struct AddRmPackageReq {
  alias: StackString,
  value: AddRmPackageReqValue,
}
/// Which registry an unprefixed package argument should target.
#[derive(Debug, Clone, Copy)]
pub enum Prefix {
  Jsr,
  Npm,
}
impl From<crate::args::DefaultRegistry> for Prefix {
fn from(registry: crate::args::DefaultRegistry) -> Self {
match registry {
crate::args::DefaultRegistry::Npm => Prefix::Npm,
crate::args::DefaultRegistry::Jsr => Prefix::Jsr,
}
}
}
impl AddRmPackageReq {
  /// Parses a `deno add`/`deno remove` argument.
  ///
  /// Returns:
  /// - `Ok(Ok(req))` when a registry could be determined (from a
  ///   `jsr:`/`npm:` prefix or `default_prefix`),
  /// - `Ok(Err(package_req))` when the text parsed but no registry is
  ///   known — the caller decides how to handle that,
  /// - `Err(..)` when the text is not a valid package requirement.
  pub fn parse(
    entry_text: &str,
    default_prefix: Option<Prefix>,
  ) -> Result<Result<Self, PackageReq>, AnyError> {
    // Splits off a leading `jsr:`/`npm:` scheme if present.
    fn parse_prefix(text: &str) -> (Option<Prefix>, &str) {
      if let Some(text) = text.strip_prefix("jsr:") {
        (Some(Prefix::Jsr), text)
      } else if let Some(text) = text.strip_prefix("npm:") {
        (Some(Prefix::Npm), text)
      } else {
        (None, text)
      }
    }
    // parse the following:
    // - alias@npm:<package_name>
    // - other_alias@npm:<package_name>
    // - @alias/other@jsr:<package_name>
    fn parse_alias(entry_text: &str) -> Option<(&str, &str)> {
      for prefix in ["npm:", "jsr:"] {
        let Some(location) = entry_text.find(prefix) else {
          continue;
        };
        // Only treat the text before the scheme as an alias when it ends
        // with `@` (i.e. `alias@npm:...`).
        let prefix = &entry_text[..location];
        if let Some(alias) = prefix.strip_suffix('@') {
          return Some((alias, &entry_text[location..]));
        }
      }
      None
    }
    let (maybe_prefix, entry_text) = parse_prefix(entry_text);
    let maybe_prefix = maybe_prefix.or(default_prefix);
    let (prefix, maybe_alias, entry_text) = match maybe_prefix {
      Some(prefix) => (prefix, None, entry_text),
      None => match parse_alias(entry_text) {
        Some((alias, text)) => {
          let (maybe_prefix, entry_text) = parse_prefix(text);
          let maybe_prefix = maybe_prefix.or(default_prefix);
          if maybe_prefix.is_none() {
            return Ok(Err(PackageReq::from_str(entry_text)?));
          }
          (
            maybe_prefix.unwrap(),
            Some(StackString::from(alias)),
            entry_text,
          )
        }
        // No prefix and no alias: return the bare requirement.
        None => return Ok(Err(PackageReq::from_str(entry_text)?)),
      },
    };
    match prefix {
      Prefix::Jsr => {
        let req_ref =
          JsrPackageReqReference::from_str(&format!("jsr:{}", entry_text))?;
        let package_req = req_ref.into_inner().req;
        Ok(Ok(AddRmPackageReq {
          // Default the alias to the package name itself.
          alias: maybe_alias.unwrap_or_else(|| package_req.name.clone()),
          value: AddRmPackageReqValue::Jsr(package_req),
        }))
      }
      Prefix::Npm => {
        let req_ref =
          NpmPackageReqReference::from_str(&format!("npm:{}", entry_text))?;
        let package_req = req_ref.into_inner().req;
        Ok(Ok(AddRmPackageReq {
          alias: maybe_alias.unwrap_or_else(|| package_req.name.clone()),
          value: AddRmPackageReqValue::Npm(package_req),
        }))
      }
    }
  }
}
/// Implements `deno remove`: deletes the named packages from deno.json
/// and/or package.json and, when anything was actually removed, commits
/// the config changes and re-runs installation.
pub async fn remove(
  flags: Arc<Flags>,
  remove_flags: RemoveFlags,
) -> Result<(), AnyError> {
  let (_, npm_config, deno_config) = load_configs(&flags, || false)?;
  let mut configs = [npm_config, deno_config];
  let mut removed_packages = vec![];
  for package in &remove_flags.packages {
    let req = AddRmPackageReq::parse(package, None)
      .with_context(|| format!("Failed to parse package: {}", package))?;
    let mut parsed_pkg_name = None;
    // Try every config file that exists; remember the first name that was
    // actually removed so it can be reported.
    for config in configs.iter_mut().flatten() {
      match &req {
        Ok(rm_pkg) => {
          if config.remove(&rm_pkg.alias) && parsed_pkg_name.is_none() {
            parsed_pkg_name = Some(rm_pkg.alias.clone());
          }
        }
        Err(pkg) => {
          // An alias or a package name without registry/version
          // constraints. Try to remove the package anyway.
          if config.remove(&pkg.name) && parsed_pkg_name.is_none() {
            parsed_pkg_name = Some(pkg.name.clone());
          }
        }
      }
    }
    if let Some(pkg) = parsed_pkg_name {
      removed_packages.push(pkg);
    }
  }
  if removed_packages.is_empty() {
    log::info!("No packages were removed");
  } else {
    for package in &removed_packages {
      log::info!("Removed {}", crate::colors::green(package));
    }
    // Only write config files and reinstall when something changed.
    for config in configs.into_iter().flatten() {
      config.commit()?;
    }
    npm_install_after_modification(
      flags,
      None,
      CacheTopLevelDepsOptions {
        lockfile_only: remove_flags.lockfile_only,
      },
    )
    .await?;
  }
  Ok(())
}
/// Recreates the CLI factory so it picks up just-modified config files,
/// re-runs top-level dependency installation, optionally prints an install
/// report, and writes the lockfile if it changed. Returns the fresh
/// factory for callers that need it (e.g. `outdated`'s update flow).
async fn npm_install_after_modification(
  flags: Arc<Flags>,
  // explicitly provided to prevent redownloading
  jsr_resolver: Option<Arc<crate::jsr::JsrFetchResolver>>,
  cache_options: CacheTopLevelDepsOptions,
) -> Result<CliFactory, AnyError> {
  // clear the previously cached package.json from memory before reloading it
  node_resolver::PackageJsonThreadLocalCache::clear();
  // make a new CliFactory to pick up the updated config file
  let cli_factory = CliFactory::from_flags(flags);
  // surface any errors in the package.json
  let start = std::time::Instant::now();
  let npm_installer = cli_factory.npm_installer().await?;
  npm_installer.ensure_no_pkg_json_dep_errors()?;
  // npm install
  cache_deps::cache_top_level_deps(&cli_factory, jsr_resolver, cache_options)
    .await?;
  if let Some(install_reporter) = cli_factory.install_reporter()? {
    let workspace = cli_factory.workspace_resolver().await?;
    let npm_resolver = cli_factory.npm_resolver().await?;
    super::installer::print_install_report(
      &cli_factory.sys(),
      start.elapsed(),
      install_reporter,
      workspace,
      npm_resolver,
    );
  }
  if let Some(lockfile) = cli_factory.maybe_lockfile().await? {
    lockfile.write_if_changed()?;
  }
  Ok(cli_factory)
}
#[cfg(test)]
mod test {
  use super::*;
  // Shorthand for the expected parse of a jsr package argument.
  fn jsr_pkg_req(alias: &str, req: &str) -> AddRmPackageReq {
    AddRmPackageReq {
      alias: alias.into(),
      value: AddRmPackageReqValue::Jsr(PackageReq::from_str(req).unwrap()),
    }
  }
  // Shorthand for the expected parse of an npm package argument.
  fn npm_pkg_req(alias: &str, req: &str) -> AddRmPackageReq {
    AddRmPackageReq {
      alias: alias.into(),
      value: AddRmPackageReqValue::Npm(PackageReq::from_str(req).unwrap()),
    }
  }
  #[test]
  fn test_parse_add_package_req() {
    // ((input, default registry), expected parsed request)
    let cases = [
      (("jsr:foo", None), jsr_pkg_req("foo", "foo")),
      (("alias@jsr:foo", None), jsr_pkg_req("alias", "foo")),
      (
        ("@alias/pkg@npm:foo", None),
        npm_pkg_req("@alias/pkg", "foo@*"),
      ),
      (
        ("@alias/pkg@jsr:foo", None),
        jsr_pkg_req("@alias/pkg", "foo"),
      ),
      (
        ("alias@jsr:foo@^1.5.0", None),
        jsr_pkg_req("alias", "foo@^1.5.0"),
      ),
      (("foo", Some(Prefix::Npm)), npm_pkg_req("foo", "foo@*")),
      (("foo", Some(Prefix::Jsr)), jsr_pkg_req("foo", "foo")),
      (("npm:foo", Some(Prefix::Npm)), npm_pkg_req("foo", "foo@*")),
      (("jsr:foo", Some(Prefix::Jsr)), jsr_pkg_req("foo", "foo")),
      // An explicit prefix always beats the default registry.
      (("npm:foo", Some(Prefix::Jsr)), npm_pkg_req("foo", "foo@*")),
      (("jsr:foo", Some(Prefix::Npm)), jsr_pkg_req("foo", "foo")),
    ];
    for ((input, maybe_prefix), expected) in cases {
      let s = format!("on input: {input}, maybe_prefix: {maybe_prefix:?}");
      assert_eq!(
        AddRmPackageReq::parse(input, maybe_prefix)
          .expect(&s)
          .expect(&s),
        expected,
        "{s}",
      );
    }
    // A tagged requirement with no registry prefix parses, but is returned
    // as the "no registry determined" case.
    assert_eq!(
      AddRmPackageReq::parse("@scope/pkg@tag", None)
        .unwrap()
        .unwrap_err()
        .to_string(),
      "@scope/pkg@tag",
    );
  }
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/tools/pm/interactive_picker.rs | cli/tools/pm/interactive_picker.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::collections::HashSet;
use std::io;
use console_static_text::TextItem;
use crossterm::ExecutableCommand;
use crossterm::cursor;
use crossterm::event::KeyCode;
use crossterm::event::KeyEvent;
use crossterm::event::KeyEventKind;
use crossterm::event::KeyModifiers;
use crossterm::terminal;
use deno_core::anyhow;
use deno_terminal::colors;
use unicode_width::UnicodeWidthStr;
use crate::util::console::HideCursorGuard;
use crate::util::console::RawMode;
use crate::util::console::new_console_static_text;
/// Renders an interactive multi-select list on stderr.
///
/// `render_item(idx, is_highlighted, is_checked, item)` produces each row.
/// Returns `Ok(Some(indices))` with the checked indices when the user
/// confirms with Enter, `Ok(None)` when they cancel with ctrl-c, and an
/// empty set immediately when `items` is empty. Keys: up/`k`, down/`j`,
/// space toggles, `a` toggles all, `i` inverts, Enter confirms.
pub fn select_items<T, TRender>(
  instructions_line: &str,
  items: &[T],
  initial_checked: HashSet<usize>,
  mut render_item: TRender,
) -> anyhow::Result<Option<HashSet<usize>>>
where
  TRender: FnMut(usize, bool, bool, &T) -> anyhow::Result<TextItem<'static>>,
{
  if items.is_empty() {
    return Ok(Some(HashSet::new()));
  }
  let mut stderr = io::stderr();
  let raw_mode = RawMode::enable()?;
  let mut static_text = new_console_static_text();
  static_text.keep_cursor_zero_column(true);
  // Scroll the terminal so the full list (plus instructions) fits below
  // the current cursor row before the first draw.
  // NOTE(review): the unchecked `rows - start_row` u16 subtraction assumes
  // the cursor row is always within the reported terminal height — confirm
  // crossterm guarantees this.
  let (_, start_row) = cursor::position().unwrap_or_default();
  let (_, rows) = terminal::size()?;
  if rows - start_row < (items.len() + 2) as u16 {
    let pad = ((items.len() + 2) as u16) - (rows - start_row);
    stderr.execute(terminal::ScrollUp(pad.min(rows)))?;
    stderr.execute(cursor::MoveUp(pad.min(rows)))?;
  }
  let mut currently_selected = 0;
  let mut checked = initial_checked;
  let hide_cursor_guard = HideCursorGuard::hide()?;
  let instructions_width = format!("? {}", instructions_line).width();
  let mut do_it = false;
  let mut scroll_offset = 0;
  // Event loop: redraw the (possibly windowed) list, then handle one key.
  loop {
    let mut rendered_items = Vec::with_capacity(items.len() + 1);
    rendered_items.push(TextItem::new_owned(format!(
      "{} {}",
      colors::intense_blue("?"),
      instructions_line
    )));
    for (idx, item) in items.iter().enumerate() {
      rendered_items.push(render_item(
        idx,
        idx == currently_selected,
        checked.contains(&idx),
        item,
      )?);
    }
    let size = static_text.console_size();
    // Number of terminal rows the (wrapped) instructions line occupies.
    let first_line_rows = size
      .cols
      .map(|cols| (instructions_width / cols as usize) + 1)
      .unwrap_or(1);
    // If everything doesn't fit, window the rendered items so the
    // highlighted row stays visible; `adj` accounts for the instructions
    // line, which is only shown when scrolled to the top.
    if let Some(rows) = size.rows
      && rendered_items.len() + first_line_rows >= rows as usize
    {
      let adj = if scroll_offset == 0 {
        first_line_rows.saturating_sub(1)
      } else {
        0
      };
      if currently_selected < scroll_offset {
        scroll_offset = currently_selected;
      } else if currently_selected + 1
        >= scroll_offset + (rows as usize).saturating_sub(adj)
      {
        scroll_offset =
          (currently_selected + 1).saturating_sub(rows as usize) + 1;
      }
      // Recompute after the offset may have changed.
      let adj = if scroll_offset == 0 {
        first_line_rows.saturating_sub(1)
      } else {
        0
      };
      let mut new_items = Vec::with_capacity(rows as usize);
      scroll_offset = scroll_offset.clamp(0, rendered_items.len() - 1);
      new_items.extend(
        rendered_items.drain(
          scroll_offset
            ..(scroll_offset + (rows as usize).saturating_sub(adj))
              .min(rendered_items.len()),
        ),
      );
      rendered_items = new_items;
    }
    static_text.eprint_items(rendered_items.iter());
    let event = crossterm::event::read()?;
    #[allow(clippy::single_match)]
    match event {
      crossterm::event::Event::Key(KeyEvent {
        kind: KeyEventKind::Press,
        code,
        modifiers,
        ..
      }) => match (code, modifiers) {
        // ctrl-c cancels.
        (KeyCode::Char('c'), KeyModifiers::CONTROL) => break,
        // Move highlight up (wraps to the bottom).
        (KeyCode::Up | KeyCode::Char('k'), KeyModifiers::NONE) => {
          currently_selected = if currently_selected == 0 {
            items.len() - 1
          } else {
            currently_selected - 1
          };
        }
        // Move highlight down (wraps to the top).
        (KeyCode::Down | KeyCode::Char('j'), KeyModifiers::NONE) => {
          currently_selected = (currently_selected + 1) % items.len();
        }
        // Space toggles the highlighted item.
        (KeyCode::Char(' '), _) => {
          if !checked.insert(currently_selected) {
            checked.remove(&currently_selected);
          }
        }
        // 'a' checks everything, or clears when everything is checked.
        (KeyCode::Char('a'), _) => {
          if (0..items.len()).all(|idx| checked.contains(&idx)) {
            checked.clear();
          } else {
            checked.extend(0..items.len());
          }
        }
        // 'i' inverts the selection.
        (KeyCode::Char('i'), _) => {
          for idx in 0..items.len() {
            if checked.contains(&idx) {
              checked.remove(&idx);
            } else {
              checked.insert(idx);
            }
          }
        }
        // Enter confirms.
        (KeyCode::Enter, _) => {
          do_it = true;
          break;
        }
        _ => {}
      },
      _ => {}
    }
  }
  // Clean up terminal state before returning.
  static_text.eprint_clear();
  hide_cursor_guard.show()?;
  raw_mode.disable()?;
  if do_it { Ok(Some(checked)) } else { Ok(None) }
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/tools/pm/outdated/mod.rs | cli/tools/pm/outdated/mod.rs | // Copyright 2018-2025 the Deno authors. MIT license.
mod interactive;
use std::collections::HashSet;
use std::sync::Arc;
use deno_cache_dir::GlobalOrLocalHttpCache;
use deno_cache_dir::file_fetcher::CacheSetting;
use deno_core::anyhow::bail;
use deno_core::error::AnyError;
use deno_semver::StackString;
use deno_semver::VersionReq;
use deno_semver::package::PackageNv;
use deno_semver::package::PackageReq;
use deno_terminal::colors;
use super::CacheTopLevelDepsOptions;
use super::deps::Dep;
use super::deps::DepId;
use super::deps::DepKind;
use super::deps::DepManager;
use super::deps::DepManagerArgs;
use super::deps::PackageLatestVersion;
use crate::args::Flags;
use crate::args::OutdatedFlags;
use crate::factory::CliFactory;
use crate::file_fetcher::CreateCliFileFetcherOptions;
use crate::file_fetcher::create_cli_file_fetcher;
use crate::jsr::JsrFetchResolver;
use crate::npm::NpmFetchResolver;
/// One row of the `deno outdated` table.
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord)]
struct OutdatedPackage {
  // Field order matters: the derived `Ord` drives the table's sort order
  // (see the `outdated.sort()` call in `print_outdated`).
  kind: DepKind,
  latest: String,
  semver_compatible: String,
  current: String,
  name: StackString,
}
/// Prints the outdated-package rows as a box-drawn table on stdout.
#[allow(clippy::print_stdout)]
fn print_outdated_table(packages: &[OutdatedPackage]) {
  const HEADINGS: &[&str] = &["Package", "Current", "Update", "Latest"];
  // Widest cell per column (0 when there are no rows).
  let longest_package = packages
    .iter()
    .map(|p| p.kind.scheme().len() + 1 + p.name.len())
    .max()
    .unwrap_or(0);
  let longest_current =
    packages.iter().map(|p| p.current.len()).max().unwrap_or(0);
  let longest_update = packages
    .iter()
    .map(|p| p.semver_compatible.len())
    .max()
    .unwrap_or(0);
  let longest_latest =
    packages.iter().map(|p| p.latest.len()).max().unwrap_or(0);
  // Each column fits its heading and widest cell, plus one space of
  // padding on both sides.
  let package_column_width = longest_package.max(HEADINGS[0].len()) + 2;
  let current_column_width = longest_current.max(HEADINGS[1].len()) + 2;
  let update_column_width = longest_update.max(HEADINGS[2].len()) + 2;
  let latest_column_width = longest_latest.max(HEADINGS[3].len()) + 2;
  let package_rule = "─".repeat(package_column_width);
  let current_rule = "─".repeat(current_column_width);
  let update_rule = "─".repeat(update_column_width);
  let latest_rule = "─".repeat(latest_column_width);
  println!("┌{package_rule}┬{current_rule}┬{update_rule}┬{latest_rule}┐");
  // Headings carry color codes, which break `{:<width}` alignment, so pad
  // them by hand.
  println!(
    "│ {}{} │ {}{} │ {}{} │ {}{} │",
    colors::intense_blue(HEADINGS[0]),
    " ".repeat(package_column_width - 2 - HEADINGS[0].len()),
    colors::intense_blue(HEADINGS[1]),
    " ".repeat(current_column_width - 2 - HEADINGS[1].len()),
    colors::intense_blue(HEADINGS[2]),
    " ".repeat(update_column_width - 2 - HEADINGS[2].len()),
    colors::intense_blue(HEADINGS[3]),
    " ".repeat(latest_column_width - 2 - HEADINGS[3].len())
  );
  for package in packages {
    println!("├{package_rule}┼{current_rule}┼{update_rule}┼{latest_rule}┤");
    println!(
      "│ {:<pkg_w$} │ {:<cur_w$} │ {:<upd_w$} │ {:<lat_w$} │",
      format!("{}:{}", package.kind.scheme(), package.name),
      package.current,
      package.semver_compatible,
      package.latest,
      pkg_w = package_column_width - 2,
      cur_w = current_column_width - 2,
      upd_w = update_column_width - 2,
      lat_w = latest_column_width - 2,
    );
  }
  println!("└{package_rule}┴{current_rule}┴{update_rule}┴{latest_rule}┘");
}
/// Prints the "run `deno update`" follow-up hint below the outdated table.
/// The wording depends on whether only compatible updates were shown.
fn print_suggestion(compatible: bool) {
  log::info!("");
  let (cmd, txt) = match compatible {
    true => ("", "compatible"),
    false => (" --latest", "available"),
  };
  log::info!(
    "{}",
    color_print::cformat!(
      "<p(245)>Run</> <u>deno update{}</> <p(245)>to update to the latest {} versions,</>\n<p(245)>or</> <u>deno outdated --help</> <p(245)>for more information.</>",
      cmd,
      txt,
    )
  );
}
/// Collects the deduplicated set of outdated dependencies and, when any
/// exist, prints the table plus the follow-up suggestion. With `compatible`
/// only semver-compatible updates count as "outdated".
fn print_outdated(
  deps: &mut DepManager,
  compatible: bool,
) -> Result<(), AnyError> {
  let mut rows = Vec::new();
  // Dedupe by (kind, name, resolved version) so the same dependency coming
  // from multiple places is listed once.
  let mut listed = std::collections::BTreeSet::new();
  for (dep_id, resolved, latest_versions) in
    deps.deps_with_resolved_latest_versions()
  {
    let dep = deps.get_dep(dep_id);
    let Some(resolved) = resolved else { continue };
    let preferred = if compatible {
      latest_versions.semver_compatible.as_ref()
    } else {
      latest_versions.latest.as_ref()
    };
    let Some(latest) = preferred else { continue };
    let is_outdated = latest > &resolved;
    if is_outdated
      && listed.insert((
        dep.kind,
        dep.req.name.clone(),
        resolved.version.clone(),
      ))
    {
      rows.push(OutdatedPackage {
        kind: dep.kind,
        name: dep.req.name.clone(),
        current: resolved.version.to_string(),
        latest: latest_versions
          .latest
          .map(|l| l.version.to_string())
          .unwrap_or_default(),
        semver_compatible: latest_versions
          .semver_compatible
          .map(|l| l.version.to_string())
          .unwrap_or_default(),
      })
    }
  }
  if !rows.is_empty() {
    rows.sort();
    print_outdated_table(&rows);
    print_suggestion(compatible);
  }
  Ok(())
}
/// Entry point for `deno outdated`: either lists outdated dependencies or
/// (for the update kind) rewrites version requirements and reinstalls.
pub async fn outdated(
  flags: Arc<Flags>,
  update_flags: OutdatedFlags,
) -> Result<(), AnyError> {
  let factory = CliFactory::from_flags(flags.clone());
  let cli_options = factory.cli_options()?;
  let workspace = cli_options.workspace();
  let http_client = factory.http_client_provider();
  let deps_http_cache = factory.global_http_cache()?;
  // Registry metadata fetcher; respects HTTP cache headers (unlike `add`,
  // which always reloads).
  let file_fetcher = create_cli_file_fetcher(
    Default::default(),
    GlobalOrLocalHttpCache::Global(deps_http_cache.clone()),
    http_client.clone(),
    factory.memory_files().clone(),
    factory.sys(),
    CreateCliFileFetcherOptions {
      allow_remote: true,
      cache_setting: CacheSetting::RespectHeaders,
      download_log_level: log::Level::Trace,
      progress_bar: None,
    },
  );
  let file_fetcher = Arc::new(file_fetcher);
  let npm_fetch_resolver = Arc::new(NpmFetchResolver::new(
    file_fetcher.clone(),
    factory.npmrc()?.clone(),
    factory.npm_version_resolver()?.clone(),
  ));
  let jsr_fetch_resolver = Arc::new(JsrFetchResolver::new(
    file_fetcher.clone(),
    factory.jsr_version_resolver()?.clone(),
  ));
  if !cli_options.start_dir.has_deno_or_pkg_json() {
    bail!(
      "No deno.json or package.json in \"{}\".",
      cli_options.initial_cwd().display(),
    );
  }
  let args = dep_manager_args(
    &factory,
    npm_fetch_resolver.clone(),
    jsr_fetch_resolver.clone(),
  )
  .await?;
  let filter_set = filter::FilterSet::from_filter_strings(
    update_flags.filters.iter().map(|s| s.as_str()),
  )?;
  // With no filters everything matches; otherwise match on the alias when
  // the dependency has one, else on the package name.
  let filter_fn = |alias: Option<&str>, req: &PackageReq, _: DepKind| {
    if filter_set.is_empty() {
      return true;
    }
    let name = alias.unwrap_or(&req.name);
    filter_set.matches(name)
  };
  // `--recursive` considers every workspace member, not just the start dir.
  let mut deps = if update_flags.recursive {
    super::deps::DepManager::from_workspace(workspace, filter_fn, args)?
  } else {
    super::deps::DepManager::from_workspace_dir(
      &cli_options.start_dir,
      filter_fn,
      args,
    )?
  };
  deps.resolve_versions().await?;
  match update_flags.kind {
    crate::args::OutdatedKind::Update {
      latest,
      interactive,
      lockfile_only,
    } => {
      update(
        deps,
        latest,
        &filter_set,
        interactive,
        flags,
        CacheTopLevelDepsOptions { lockfile_only },
      )
      .await?;
    }
    crate::args::OutdatedKind::PrintOutdated { compatible } => {
      print_outdated(&mut deps, compatible)?;
    }
  }
  Ok(())
}
/// Outcome of deciding whether a dependency's requirement should change.
enum ChosenVersionReq {
  /// Rewrite the requirement to this value.
  Some(VersionReq),
  /// Leave the requirement alone. `latest_available` reports whether a
  /// newer (incompatible) version exists that `--latest` would pick up.
  None { latest_available: bool },
}
/// Decides whether `dep`'s version requirement should change and, if so,
/// to what. An explicit version in a matching filter (e.g. `foo@^2`) wins;
/// otherwise the requirement is bumped to the latest (or newest
/// semver-compatible) version while preserving the original operator
/// (`~`, `^`, or an exact pin).
fn choose_new_version_req(
  dep: &Dep,
  resolved: Option<&PackageNv>,
  latest_versions: &PackageLatestVersion,
  update_to_latest: bool,
  filter_set: &filter::FilterSet,
) -> ChosenVersionReq {
  let explicit_version_req = filter_set
    .matching_filter(dep.alias.as_deref().unwrap_or(&dep.req.name))
    .version_spec()
    .cloned();
  if let Some(version_req) = explicit_version_req {
    if let Some(resolved) = resolved {
      // todo(nathanwhit): handle tag
      // Already satisfied by the resolved version: nothing to do.
      if version_req.tag().is_none() && version_req.matches(&resolved.version) {
        return ChosenVersionReq::None {
          latest_available: false,
        };
      }
    }
    ChosenVersionReq::Some(version_req)
  } else {
    // Without a resolved version or a preferred target there is nothing to
    // update to.
    let Some(resolved) = resolved else {
      return ChosenVersionReq::None {
        latest_available: false,
      };
    };
    let Some(preferred) = (if update_to_latest {
      latest_versions.latest.as_ref()
    } else {
      latest_versions.semver_compatible.as_ref()
    }) else {
      return ChosenVersionReq::None {
        latest_available: false,
      };
    };
    // Detect the original operator to preserve it
    let version_req_str = dep.req.version_req.to_string();
    let operator = if version_req_str.starts_with('~') {
      "~"
    } else if version_req_str.starts_with('^') {
      "^"
    } else {
      // Check if it's an exact version (no operator)
      // NOTE(review): indexes the first range unconditionally — assumes a
      // non-empty range set whenever `range()` is `Some`; confirm in
      // deno_semver.
      let exact = if let Some(range) = dep.req.version_req.range() {
        range.0[0].start == range.0[0].end
      } else {
        false
      };
      if exact { "" } else { "^" }
    };
    let candidate_version_req = VersionReq::parse_from_specifier(
      format!("{}{}", operator, preferred.version).as_str(),
    )
    .unwrap();
    // No-op update: nothing newer for this mode and the requirement text
    // would not change. Report whether `--latest` would still find more.
    if preferred.version <= resolved.version
      && candidate_version_req == dep.req.version_req
    {
      return ChosenVersionReq::None {
        latest_available: !update_to_latest
          && latest_versions
            .latest
            .as_ref()
            .is_some_and(|nv| nv.version > resolved.version),
      };
    }
    ChosenVersionReq::Some(candidate_version_req)
  }
}
/// A dependency whose version requirement will be (or was) rewritten.
struct ToUpdate {
  dep_id: DepId,
  // Prefixed name, e.g. "npm:chalk" (built from the dep kind's scheme).
  package_name: String,
  // Currently resolved version, when resolution succeeded.
  current_version: Option<PackageNv>,
  current_version_req: VersionReq,
  new_version_req: VersionReq,
}
/// Applies `deno outdated --update`: computes new version requirements,
/// optionally lets the user pick interactively, writes the config changes,
/// reinstalls, and reports what changed (or why nothing did).
async fn update(
  mut deps: DepManager,
  update_to_latest: bool,
  filter_set: &filter::FilterSet,
  interactive: bool,
  flags: Arc<Flags>,
  cache_options: CacheTopLevelDepsOptions,
) -> Result<(), AnyError> {
  let mut to_update = Vec::new();
  let mut can_update_to_latest = false;
  // Phase 1: decide which dependencies get a new version requirement.
  for (dep_id, resolved, latest_versions) in deps
    .deps_with_resolved_latest_versions()
    .into_iter()
    .collect::<Vec<_>>()
  {
    let dep = deps.get_dep(dep_id);
    let new_version_req = choose_new_version_req(
      dep,
      resolved.as_ref(),
      &latest_versions,
      update_to_latest,
      filter_set,
    );
    let new_version_req = match new_version_req {
      ChosenVersionReq::Some(version_req) => version_req,
      ChosenVersionReq::None { latest_available } => {
        // Remember whether `--latest` would have found something, for the
        // final "all up to date" message.
        can_update_to_latest = can_update_to_latest || latest_available;
        continue;
      }
    };
    to_update.push(ToUpdate {
      dep_id,
      package_name: format!("{}:{}", dep.kind.scheme(), dep.req.name),
      current_version: resolved.clone(),
      current_version_req: dep.req.version_req.clone(),
      new_version_req: new_version_req.clone(),
    });
  }
  // Phase 2: optional interactive selection of which updates to apply.
  if interactive && !to_update.is_empty() {
    let selected = interactive::select_interactive(
      to_update
        .iter()
        .map(|to_update: &ToUpdate| {
          let dep = deps.get_dep(to_update.dep_id);
          interactive::PackageInfo {
            id: to_update.dep_id,
            current_version: to_update
              .current_version
              .as_ref()
              .map(|nv| nv.version.clone()),
            name: dep.alias_or_name().into(),
            kind: dep.kind,
            new_version: to_update.new_version_req.clone(),
          }
        })
        .collect(),
    )?;
    if let Some(selected) = selected {
      to_update.retain(|to_update| selected.contains(&to_update.dep_id));
    } else {
      // `None` means the picker was cancelled.
      log::info!("Cancelled, not updating");
      return Ok(());
    }
  }
  if !to_update.is_empty() {
    // Phase 3: write the new requirements and reinstall.
    for pkg in &to_update {
      deps.update_dep(pkg.dep_id, pkg.new_version_req.clone());
    }
    deps.commit_changes()?;
    let factory = super::npm_install_after_modification(
      flags.clone(),
      Some(deps.jsr_fetch_resolver.clone()),
      cache_options,
    )
    .await?;
    // Phase 4: re-resolve against the updated config and report.
    let mut updated_to_versions = HashSet::new();
    let args = dep_manager_args(
      &factory,
      deps.npm_fetch_resolver.clone(),
      deps.jsr_fetch_resolver.clone(),
    )
    .await?;
    let mut deps = deps.reloaded_after_modification(args);
    deps.resolve_current_versions().await?;
    for pkg in &to_update {
      if deps.resolved_version(pkg.dep_id).is_some() {
        updated_to_versions.insert((
          pkg.package_name.clone(),
          pkg.current_version_req.clone(),
          pkg.new_version_req.clone(),
        ));
      } else {
        log::warn!(
          "Failed to resolve version for new version requirement: {} -> {}",
          pkg.package_name,
          pkg.new_version_req
        );
      }
    }
    log::info!(
      "Updated {} dependenc{}:",
      updated_to_versions.len(),
      if updated_to_versions.len() == 1 {
        "y"
      } else {
        "ies"
      }
    );
    // Align the report columns on the longest name/requirement strings.
    let mut updated_to_versions =
      updated_to_versions.into_iter().collect::<Vec<_>>();
    updated_to_versions.sort_by(|(k, _, _), (k2, _, _)| k.cmp(k2));
    let max_name = updated_to_versions
      .iter()
      .map(|(name, _, _)| name.len())
      .max()
      .unwrap_or(0);
    let max_old = updated_to_versions
      .iter()
      .map(|(_, maybe_current, _)| maybe_current.to_string().len())
      .max()
      .unwrap_or(0);
    let max_new = updated_to_versions
      .iter()
      .map(|(_, _, new_version)| new_version.to_string().len())
      .max()
      .unwrap_or(0);
    for (package_name, current_version_req, new_version_req) in
      updated_to_versions
    {
      let current_version = current_version_req.to_string();
      // The slice dims the scheme prefix; assumes it is exactly 4 chars
      // ("npm:"/"jsr:"), matching how `package_name` was built above.
      log::info!(
        " - {}{}{} {}{} -> {}{}",
        colors::gray(package_name[0..4].to_string()),
        &package_name[4..],
        " ".repeat(max_name - package_name.len()),
        " ".repeat(max_old - current_version.len()),
        colors::gray(&current_version),
        " ".repeat(max_new - new_version_req.to_string().len()),
        colors::green(&new_version_req),
      );
    }
  } else {
    let maybe_matching = if filter_set.is_empty() {
      ""
    } else {
      "matching "
    };
    if !update_to_latest && can_update_to_latest {
      let note = deno_terminal::colors::intense_blue("note");
      log::info!(
        "All {maybe_matching}dependencies are at newest compatible versions.\n{note}: newer, incompatible versions are available.\n Run with `--latest` to update",
      );
    } else {
      log::info!("All {maybe_matching}dependencies are up to date.");
    }
  }
  Ok(())
}
/// Assembles the services a `DepManager` needs from the CLI factory.
async fn dep_manager_args(
  factory: &CliFactory,
  npm_fetch_resolver: Arc<NpmFetchResolver>,
  jsr_fetch_resolver: Arc<JsrFetchResolver>,
) -> Result<DepManagerArgs, AnyError> {
  Ok(DepManagerArgs {
    module_load_preparer: factory.module_load_preparer().await?.clone(),
    jsr_fetch_resolver,
    npm_fetch_resolver,
    npm_resolver: factory.npm_resolver().await?.clone(),
    npm_installer: factory.npm_installer().await?.clone(),
    npm_version_resolver: factory.npm_version_resolver()?.clone(),
    progress_bar: factory.text_only_progress_bar().clone(),
    permissions_container: factory.root_permissions_container()?.clone(),
    main_module_graph_container: factory
      .main_module_graph_container()
      .await?
      .clone(),
    lockfile: factory.maybe_lockfile().await?.cloned(),
  })
}
mod filter {
  //! Parsing and evaluation of package filters for `deno outdated`
  //! (e.g. `chalk`, `@std/*@^1`, `!foo*`).
  use deno_core::anyhow::Context;
  use deno_core::anyhow::anyhow;
  use deno_core::error::AnyError;
  use deno_semver::VersionReq;
  /// Whether a filter selects packages (`Include`) or removes them from
  /// the selection (`Exclude`, written with a leading `!`).
  enum FilterKind {
    Exclude,
    Include,
  }
  /// A single parsed filter: a glob-style name pattern plus an optional
  /// version requirement (the part after the trailing `@`).
  pub struct Filter {
    kind: FilterKind,
    regex: regex::Regex,
    version_spec: Option<VersionReq>,
  }
  /// Compiles a `*` glob pattern into an anchored regex; every character
  /// other than `*` is matched literally.
  fn pattern_to_regex(pattern: &str) -> Result<regex::Regex, AnyError> {
    let escaped = regex::escape(pattern);
    let unescaped_star = escaped.replace(r"\*", ".*");
    Ok(regex::Regex::new(&format!("^{}$", unescaped_star))?)
  }
  impl Filter {
    /// The version requirement attached to this filter, if any.
    pub fn version_spec(&self) -> Option<&VersionReq> {
      self.version_spec.as_ref()
    }
    /// Parses a filter string. A leading `!` marks an exclude filter.
    /// For scoped names (`@scope/name@req`) only an `@` *after* the scope
    /// prefix separates the version requirement from the name pattern.
    pub fn from_str(input: &str) -> Result<Self, AnyError> {
      let (kind, first_idx) = if input.starts_with('!') {
        (FilterKind::Exclude, 1)
      } else {
        (FilterKind::Include, 0)
      };
      let s = &input[first_idx..];
      let (pattern, version_spec) =
        if let Some(scope_name) = s.strip_prefix('@') {
          // Scoped package: the first `@` belongs to the name, so look for
          // a second `@` that introduces the version requirement.
          if let Some(idx) = scope_name.find('@') {
            let (pattern, version_spec) = s.split_at(idx + 1);
            (
              pattern,
              Some(
                VersionReq::parse_from_specifier(
                  version_spec.trim_start_matches('@'),
                )
                .with_context(|| format!("Invalid filter \"{input}\""))?,
              ),
            )
          } else {
            (s, None)
          }
        } else {
          // Unscoped: a plain `name@req` split on the first `@`.
          let mut parts = s.split('@');
          let Some(pattern) = parts.next() else {
            return Err(anyhow!("Invalid filter \"{input}\""));
          };
          (
            pattern,
            parts
              .next()
              .map(VersionReq::parse_from_specifier)
              .transpose()
              .with_context(|| format!("Invalid filter \"{input}\""))?,
          )
        };
      Ok(Filter {
        kind,
        regex: pattern_to_regex(pattern)
          .with_context(|| format!("Invalid filter \"{input}\""))?,
        version_spec,
      })
    }
    /// Whether this filter's name pattern matches `name`.
    pub fn matches(&self, name: &str) -> bool {
      self.regex.is_match(name)
    }
  }
  /// The full set of filters given on the command line.
  pub struct FilterSet {
    filters: Vec<Filter>,
    // Cached so `matching_filter` can decide the default outcome when
    // nothing matched (an exclude-only set includes by default).
    has_exclude: bool,
    has_include: bool,
  }
  impl FilterSet {
    /// Parses every filter string; fails on the first invalid filter.
    pub fn from_filter_strings<'a>(
      filter_strings: impl IntoIterator<Item = &'a str>,
    ) -> Result<Self, AnyError> {
      let filters = filter_strings
        .into_iter()
        .map(Filter::from_str)
        .collect::<Result<Vec<_>, _>>()?;
      let has_exclude = filters
        .iter()
        .any(|f| matches!(f.kind, FilterKind::Exclude));
      let has_include = filters
        .iter()
        .any(|f| matches!(f.kind, FilterKind::Include));
      Ok(FilterSet {
        filters,
        has_exclude,
        has_include,
      })
    }
    pub fn is_empty(&self) -> bool {
      self.filters.is_empty()
    }
    /// Whether `name` ends up included after applying all filters.
    pub fn matches(&self, name: &str) -> bool {
      self.matching_filter(name).is_included()
    }
    /// Resolves `name` against the set. Precedence: any matching exclude
    /// filter wins immediately; otherwise the *first* matching include
    /// filter; otherwise the default is "included" only when the set
    /// consists solely of exclude filters.
    pub fn matching_filter(&self, name: &str) -> MatchResult<'_> {
      if self.filters.is_empty() {
        // No filters at all: everything is included.
        return MatchResult::Included;
      }
      let mut matched = None;
      for filter in &self.filters {
        match filter.kind {
          FilterKind::Include => {
            // Only remember the first matching include filter.
            if matched.is_none() && filter.matches(name) {
              matched = Some(filter);
            }
          }
          FilterKind::Exclude => {
            if filter.matches(name) {
              return MatchResult::Excluded;
            }
          }
        }
      }
      if let Some(filter) = matched {
        MatchResult::Matches(filter)
      } else if self.has_exclude && !self.has_include {
        // Exclusions only: anything not excluded stays included.
        MatchResult::Included
      } else {
        MatchResult::Excluded
      }
    }
  }
  /// Outcome of matching one package name against a `FilterSet`.
  pub enum MatchResult<'a> {
    /// Matched a specific include filter (which may carry a version req).
    Matches(&'a Filter),
    Included,
    Excluded,
  }
  impl MatchResult<'_> {
    /// Version requirement of the matched include filter, if any.
    pub fn version_spec(&self) -> Option<&VersionReq> {
      match self {
        MatchResult::Matches(filter) => filter.version_spec(),
        _ => None,
      }
    }
    pub fn is_included(&self) -> bool {
      matches!(self, MatchResult::Included | MatchResult::Matches(_))
    }
  }
  #[cfg(test)]
  mod test {
    // Convenience: parse `filters` and check whether `name` is included.
    fn matches_filters<'a, 'b>(
      filters: impl IntoIterator<Item = &'a str>,
      name: &str,
    ) -> bool {
      let filters = super::FilterSet::from_filter_strings(filters).unwrap();
      filters.matches(name)
    }
    fn version_spec(s: &str) -> deno_semver::VersionReq {
      deno_semver::VersionReq::parse_from_specifier(s).unwrap()
    }
    #[test]
    fn basic_glob() {
      assert!(matches_filters(["foo*"], "foo"));
      assert!(matches_filters(["foo*"], "foobar"));
      assert!(!matches_filters(["foo*"], "barfoo"));
      assert!(matches_filters(["*foo"], "foo"));
      assert!(matches_filters(["*foo"], "barfoo"));
      assert!(!matches_filters(["*foo"], "foobar"));
      assert!(matches_filters(["@scope/foo*"], "@scope/foobar"));
    }
    #[test]
    fn basic_glob_with_version() {
      assert!(matches_filters(["foo*@1"], "foo",));
      assert!(matches_filters(["foo*@1"], "foobar",));
      assert!(matches_filters(["foo*@1"], "foo-bar",));
      assert!(!matches_filters(["foo*@1"], "barfoo",));
      assert!(matches_filters(["@scope/*@1"], "@scope/foo"));
    }
    #[test]
    fn glob_exclude() {
      assert!(!matches_filters(["!foo*"], "foo"));
      assert!(!matches_filters(["!foo*"], "foobar"));
      assert!(matches_filters(["!foo*"], "barfoo"));
      assert!(!matches_filters(["!*foo"], "foo"));
      assert!(!matches_filters(["!*foo"], "barfoo"));
      assert!(matches_filters(["!*foo"], "foobar"));
      assert!(!matches_filters(["!@scope/foo*"], "@scope/foobar"));
    }
    #[test]
    fn multiple_globs() {
      assert!(matches_filters(["foo*", "bar*"], "foo"));
      assert!(matches_filters(["foo*", "bar*"], "bar"));
      assert!(!matches_filters(["foo*", "bar*"], "baz"));
      assert!(matches_filters(["foo*", "!bar*"], "foo"));
      assert!(!matches_filters(["foo*", "!bar*"], "bar"));
      assert!(matches_filters(["foo*", "!bar*"], "foobar"));
      assert!(!matches_filters(["foo*", "!*bar"], "foobar"));
      assert!(!matches_filters(["foo*", "!*bar"], "baz"));
      let filters =
        super::FilterSet::from_filter_strings(["foo*@1", "bar*@2"]).unwrap();
      assert_eq!(
        filters.matching_filter("foo").version_spec().cloned(),
        Some(version_spec("1"))
      );
      assert_eq!(
        filters.matching_filter("bar").version_spec().cloned(),
        Some(version_spec("2"))
      );
    }
  }
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/tools/pm/outdated/interactive.rs | cli/tools/pm/outdated/interactive.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::collections::HashMap;
use std::collections::HashSet;
use std::fmt::Write as _;
use console_static_text::TextItem;
use deno_core::anyhow;
use deno_semver::Version;
use deno_semver::VersionReq;
use deno_terminal::colors;
use crate::tools::pm::deps::DepId;
use crate::tools::pm::deps::DepKind;
use crate::tools::pm::interactive_picker;
/// One outdated dependency row as fed into the interactive picker.
#[derive(Debug)]
pub struct PackageInfo {
  pub id: DepId,
  // None when no currently-resolved version is known for the dependency.
  pub current_version: Option<Version>,
  // Requirement the dependency would be updated to.
  pub new_version: VersionReq,
  pub name: String,
  pub kind: DepKind,
}
/// A display-ready picker row. One row may stand for several `DepId`s
/// when the same (name, current, new) triple appears in multiple places.
#[derive(Debug)]
struct FormattedPackageInfo {
  dep_ids: Vec<DepId>,
  current_version_string: Option<String>,
  // New version rendered with changed components colorized.
  new_version_highlighted: String,
  // Name prefixed with its colorized scheme (e.g. `npm:`).
  formatted_name: String,
  // Display width of `formatted_name` excluding ANSI escape codes.
  formatted_name_len: usize,
  name: String,
}
/// Prepared picker state: deduplicated rows plus the column widths used
/// to align the name and version columns.
#[derive(Debug)]
struct State {
  packages: Vec<FormattedPackageInfo>,
  // Width of the widest formatted name.
  name_width: usize,
  // Width of the widest current-version string.
  current_width: usize,
}
impl From<PackageInfo> for FormattedPackageInfo {
  /// Pre-renders the display strings for a single package row.
  fn from(package: PackageInfo) -> Self {
    let scheme = package.kind.scheme();
    // Drop a leading caret so `^1.2.3` displays as `1.2.3`.
    let target_text =
      package.new_version.version_text().trim_start_matches('^');
    // Colorize the changed components only when both sides parse as
    // concrete versions; otherwise show the raw requirement text.
    let new_version_highlighted = match (
      &package.current_version,
      Version::parse_standard(target_text),
    ) {
      (Some(current), Ok(target)) => highlight_new_version(current, &target),
      _ => target_text.to_string(),
    };
    let formatted_name = format!(
      "{}{}",
      colors::gray(format!("{}:", scheme)),
      package.name
    );
    // scheme + ':' + name, without the ANSI escapes added above.
    let formatted_name_len = scheme.len() + 1 + package.name.len();
    FormattedPackageInfo {
      dep_ids: vec![package.id],
      current_version_string: package
        .current_version
        .as_ref()
        .map(|v| v.to_string()),
      new_version_highlighted,
      formatted_name,
      formatted_name_len,
      name: package.name,
    }
  }
}
impl State {
  /// Builds the picker state: deduplicates identical
  /// (name, current version, new version) rows — merging their dep ids —
  /// then sorts by name and precomputes the alignment column widths.
  fn new(packages: Vec<PackageInfo>) -> anyhow::Result<Self> {
    let mut deduped_packages: HashMap<
      (String, Option<Version>, VersionReq),
      FormattedPackageInfo,
    > = HashMap::with_capacity(packages.len());
    for package in packages {
      match deduped_packages.entry((
        package.name.clone(),
        package.current_version.clone(),
        package.new_version.clone(),
      )) {
        // Same dependency referenced from multiple places: keep a single
        // row and collect all of its ids.
        std::collections::hash_map::Entry::Occupied(mut occupied_entry) => {
          occupied_entry.get_mut().dep_ids.push(package.id)
        }
        std::collections::hash_map::Entry::Vacant(vacant_entry) => {
          vacant_entry.insert(FormattedPackageInfo::from(package));
        }
      }
    }
    let mut packages: Vec<_> = deduped_packages.into_values().collect();
    packages.sort_by(|a, b| a.name.cmp(&b.name));
    // Widest rendered name; used to pad the name column.
    let name_width = packages
      .iter()
      .map(|p| p.formatted_name_len)
      .max()
      .unwrap_or_default();
    // Widest current-version string; used to pad the version column.
    let current_width = packages
      .iter()
      .map(|p| {
        p.current_version_string
          .as_ref()
          .map(|s| s.len())
          .unwrap_or_default()
      })
      .max()
      .unwrap_or_default();
    Ok(Self {
      packages,
      name_width,
      current_width,
    })
  }
  // Help line shown above the interactive picker.
  fn instructions_line() -> &'static str {
    "Select which packages to update (<space> to select, ↑/↓/j/k to navigate, a to select all, i to invert selection, enter to accept, <Ctrl-c> to cancel)"
  }
}
/// The most significant version component in which two versions differ.
enum VersionDifference {
  Major,
  Minor,
  Patch,
  Prerelease,
}
/// Returns the most significant component in which `a` and `b` differ,
/// falling back to `Prerelease` when the numeric triples are equal.
fn version_diff(a: &Version, b: &Version) -> VersionDifference {
  if a.major != b.major {
    return VersionDifference::Major;
  }
  if a.minor != b.minor {
    return VersionDifference::Minor;
  }
  if a.patch != b.patch {
    return VersionDifference::Patch;
  }
  VersionDifference::Prerelease
}
/// Renders `new` with the components that changed relative to `current`
/// colorized: red for a major bump, yellow for minor, green for patch,
/// red for a prerelease-only change.
fn highlight_new_version(current: &Version, new: &Version) -> String {
  let diff = version_diff(current, new);
  // Render the pre-release suffix. Per SemVer, pre-release identifiers are
  // dot-separated (e.g. `1.0.0-alpha.1`); previously the identifiers were
  // concatenated without separators, rendering `-alpha1`.
  let new_pre = if new.pre.is_empty() {
    String::new()
  } else {
    let mut s = String::new();
    s.push('-');
    let mut first = true;
    for p in &new.pre {
      if !first {
        s.push('.');
      }
      first = false;
      s.push_str(p);
    }
    s
  };
  match diff {
    VersionDifference::Major => format!(
      "{}.{}.{}{}",
      colors::red_bold(new.major),
      colors::red_bold(new.minor),
      colors::red_bold(new.patch),
      colors::red_bold(new_pre)
    ),
    VersionDifference::Minor => format!(
      "{}.{}.{}{}",
      new.major,
      colors::yellow_bold(new.minor),
      colors::yellow_bold(new.patch),
      colors::yellow_bold(new_pre)
    ),
    VersionDifference::Patch => format!(
      "{}.{}.{}{}",
      new.major,
      new.minor,
      colors::green_bold(new.patch),
      colors::green_bold(new_pre)
    ),
    VersionDifference::Prerelease => format!(
      "{}.{}.{}{}",
      new.major,
      new.minor,
      new.patch,
      colors::red_bold(new_pre)
    ),
  }
}
/// Renders one picker row: cursor marker, checkbox, padded name, then
/// `current -> new` with the version columns aligned.
fn render_package(
  package: &FormattedPackageInfo,
  name_width: usize,
  current_width: usize,
  is_selected: bool,
  is_checked: bool,
) -> anyhow::Result<TextItem<'static>> {
  let cursor = if is_selected {
    colors::intense_blue("❯").to_string()
  } else {
    " ".to_string()
  };
  let checkbox = if is_checked { "●" } else { "○" };
  // Pad to the widest name plus a two-space gap so columns line up.
  let name_pad = " ".repeat(name_width + 2 - package.formatted_name_len);
  let mut line = String::new();
  write!(&mut line, "{} {} ", cursor, checkbox)?;
  write!(
    &mut line,
    "{}{} {:<current_width$} -> {}",
    package.formatted_name,
    name_pad,
    package
      .current_version_string
      .as_deref()
      .unwrap_or_default(),
    &package.new_version_highlighted,
  )?;
  // Hanging indent keeps wrapped continuation lines aligned.
  Ok(TextItem::with_hanging_indent_owned(line, 1))
}
/// Shows the interactive "outdated packages" picker and returns the dep
/// ids of the selected rows, or `None` if the user cancelled.
pub fn select_interactive(
  packages: Vec<PackageInfo>,
) -> anyhow::Result<Option<HashSet<DepId>>> {
  let state = State::new(packages)?;
  let name_width = state.name_width;
  let current_width = state.current_width;
  let packages = state.packages;
  let selected = interactive_picker::select_items(
    State::instructions_line(),
    &packages,
    HashSet::new(),
    |_idx, is_selected, is_checked, package| {
      render_package(
        package,
        name_width,
        current_width,
        is_selected,
        is_checked,
      )
    },
  )?;
  // A selected row may represent several occurrences of the same
  // dependency, so flatten all of its dep ids into the result set.
  Ok(selected.map(|indices| {
    indices
      .into_iter()
      .flat_map(|idx| &packages[idx].dep_ids)
      .copied()
      .collect()
  }))
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/tools/bundle/html.rs | cli/tools/bundle/html.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::borrow::Cow;
use std::cell::Cell;
use std::collections::HashMap;
use std::path::Path;
use std::path::PathBuf;
use capacity_builder::StringBuilder;
use deno_core::anyhow;
use deno_core::error::AnyError;
use lol_html::element;
use lol_html::html_content::ContentType as LolContentType;
use crate::tools::bundle::OutputFile;
/// A `<script>` element discovered in (or injected into) an HTML
/// entrypoint document.
#[derive(Debug, Clone)]
pub struct Script {
  // `src` attribute as written in the HTML, if present.
  pub src: Option<String>,
  pub is_async: bool,
  // true for `type="module"` scripts.
  pub is_module: bool,
  // Filesystem path the `src` resolved to; filled in during parsing.
  pub resolved_path: Option<PathBuf>,
}
/// An HTML attribute: a name plus an optional value (a `None` value
/// renders as a bare boolean attribute).
struct Attr<'a> {
  name: Cow<'static, str>,
  value: Option<Cow<'a, str>>,
}
impl<'a> Attr<'a> {
  /// Creates an attribute; pass `value: None` for a bare boolean
  /// attribute such as `async` or `crossorigin`.
  fn new(
    name: impl Into<Cow<'static, str>>,
    value: Option<Cow<'a, str>>,
  ) -> Self {
    Self {
      name: name.into(),
      value,
    }
  }
  /// Appends either `name` or `name="value"` to `out`. The value is not
  /// escaped here; callers are expected to pass safe values.
  fn write_out<'s>(&'s self, out: &mut StringBuilder<'s>)
  where
    'a: 's,
  {
    out.append(&self.name);
    let Some(value) = &self.value else {
      return;
    };
    out.append("=\"");
    out.append(value);
    out.append('"');
  }
}
/// Writes a space-separated attribute list (with one leading space) to
/// `out`; writes nothing for an empty list.
fn write_attr_list<'a, 's>(attrs: &'s [Attr<'a>], out: &mut StringBuilder<'s>)
where
  'a: 's,
{
  // Separate the last attribute so no trailing space is emitted.
  let Some((last, rest)) = attrs.split_last() else {
    return;
  };
  out.append(' ');
  for attr in rest {
    attr.write_out(out);
    out.append(' ');
  }
  last.write_out(out);
}
impl Script {
  /// Serializes this script as an HTML `<script …></script>` element.
  /// A `crossorigin` attribute is always appended.
  pub fn to_element_string(&self) -> String {
    let mut attrs = Vec::new();
    attrs.extend(
      self
        .src
        .as_deref()
        .map(|src| Attr::new("src", Some(Cow::Borrowed(src)))),
    );
    attrs.extend(self.is_async.then(|| Attr::new("async", None)));
    attrs
      .extend(self.is_module.then(|| Attr::new("type", Some("module".into()))));
    attrs.push(Attr::new("crossorigin", None));
    StringBuilder::build(|out| {
      out.append("<script");
      write_attr_list(&attrs, out);
      out.append("></script>");
    })
    .unwrap()
  }
}
/// An `OutputSink` that discards all rewriter output; used when lol_html
/// is run only to visit elements, not to produce a document.
struct NoOutput;
impl lol_html::OutputSink for NoOutput {
  fn handle_chunk(&mut self, _: &[u8]) {}
}
/// Scans `doc` for bundleable `<script src=…>` elements (classic scripts
/// and `type="module"` scripts), skipping any marked with a
/// `deno-ignore` or `vite-ignore` attribute.
fn collect_scripts(doc: &str) -> Result<Vec<Script>, AnyError> {
  let mut scripts = Vec::new();
  let mut rewriter = lol_html::HtmlRewriter::new(
    lol_html::Settings {
      element_content_handlers: vec![element!("script[src]", |el| {
        let is_ignored =
          el.has_attribute("deno-ignore") || el.has_attribute("vite-ignore");
        if is_ignored {
          return Ok(());
        }
        let typ = el.get_attribute("type");
        // Only untyped (classic) and module scripts are collected;
        // anything else (e.g. `type="application/json"`) is left alone.
        let (Some("module") | None) = typ.as_deref() else {
          return Ok(());
        };
        // Safe: the `script[src]` selector guarantees `src` exists.
        let src = el.get_attribute("src").unwrap();
        let is_async = el.has_attribute("async");
        let is_module = matches!(typ.as_deref(), Some("module"));
        scripts.push(Script {
          src: Some(src),
          is_async,
          is_module,
          resolved_path: None,
        });
        Ok(())
      })],
      ..lol_html::Settings::new()
    },
    // Only metadata is harvested here; the rewritten output is discarded.
    NoOutput,
  );
  rewriter.write(doc.as_bytes())?;
  rewriter.end()?;
  Ok(scripts)
}
/// An HTML file used as a bundle entrypoint, together with the synthetic
/// JS module generated from its scripts.
#[derive(Debug, Clone)]
pub struct HtmlEntrypoint {
  pub path: PathBuf,
  pub canonical_path: PathBuf,
  // Scripts discovered in the document, in source order.
  pub scripts: Vec<Script>,
  // Generated module source: one `import "<url>";` line per script.
  pub temp_module: String,
  // Original HTML contents.
  pub contents: String,
  // Filesystem-friendly name derived from the HTML path.
  pub entry_name: String,
  // Path under which `temp_module` is presented to the bundler.
  pub virtual_module_path: PathBuf,
}
/// Marker embedded in synthetic entry-module file names so the generated
/// outputs can later be recognized and renamed after the HTML entry.
const VIRTUAL_ENTRY_SUFFIX: &str = ".deno-bundle-html.entry";
/// Creates a filesystem-friendly entry name for `path`, relative to
/// `cwd`: the extension is dropped and path separators / drive colons are
/// replaced with underscores. Falls back to `"entry"` for an empty stem.
fn sanitize_entry_name(cwd: &Path, path: &Path) -> String {
  let relative =
    pathdiff::diff_paths(path, cwd).unwrap_or_else(|| path.to_path_buf());
  let stem = relative
    .with_extension("")
    .to_string_lossy()
    .replace(['\\', '/', ':'], "_");
  Some(stem)
    .filter(|s| !s.is_empty())
    .unwrap_or_else(|| "entry".to_string())
}
/// Parses an HTML entrypoint: collects its scripts, resolves each `src`
/// against the HTML file's directory and synthesizes a JS module that
/// imports every script so the bundler sees a single entrypoint.
fn parse_html_entrypoint(
  cwd: &Path,
  path: &Path,
  canonical_path: PathBuf,
  contents: String,
) -> anyhow::Result<HtmlEntrypoint> {
  let mut scripts = collect_scripts(&contents)?;
  let mut temp_module = String::new();
  for script in &mut scripts {
    let Some(raw_src) = &script.src else {
      continue;
    };
    // A leading `/` is treated as relative to the HTML file's directory.
    let trimmed = raw_src.trim_start_matches('/');
    let resolved = path.parent().unwrap_or(Path::new("")).join(trimmed);
    let url = deno_path_util::url_from_file_path(&resolved)?;
    temp_module.push_str(&format!("import \"{}\";\n", url));
    script.resolved_path = Some(resolved);
  }
  let entry_name = sanitize_entry_name(cwd, path);
  // The virtual module lives next to the HTML file under a recognizable
  // suffix so its outputs can be found later.
  let virtual_module_path = path
    .parent()
    .unwrap_or(Path::new(""))
    .join(format!("{}{}.js", entry_name, VIRTUAL_ENTRY_SUFFIX));
  Ok(HtmlEntrypoint {
    path: path.to_path_buf(),
    canonical_path,
    scripts,
    temp_module,
    contents,
    entry_name,
    virtual_module_path,
  })
}
/// Reads an HTML entrypoint from disk and parses it into an
/// `HtmlEntrypoint` (scripts, synthetic module, etc.).
pub fn load_html_entrypoint(
  cwd: &Path,
  path: &Path,
) -> anyhow::Result<HtmlEntrypoint> {
  let contents = std::fs::read_to_string(path)?;
  let canonical_path = crate::util::fs::canonicalize_path(path)?;
  parse_html_entrypoint(cwd, path, canonical_path, contents)
}
/// An output file recognized as coming from a synthetic HTML entry
/// module, parsed out of its `<entry>-<hash>.<ext>` file name.
#[derive(Debug, Clone)]
pub struct ParsedOutput {
  path: PathBuf,
  // Position of the file in the surrounding `output_files` vec.
  index: usize,
  // Content-hash segment extracted from the file name.
  hash: String,
}
/// Wraps the bundler's output files together with an index of the
/// synthetic HTML entry outputs, keyed by `<entry-name>.<ext>`.
#[derive(Debug)]
pub struct HtmlOutputFiles<'a, 'f> {
  output_files: &'f mut Vec<OutputFile<'a>>,
  index: HashMap<String, ParsedOutput>,
}
impl<'a, 'f> HtmlOutputFiles<'a, 'f> {
  /// Indexes `output_files`, picking out file names shaped like
  /// `<entry>.deno-bundle-html.entry-<hash>.<ext>` and keying them by
  /// that name with the `-<hash>` part removed.
  pub fn new(output_files: &'f mut Vec<OutputFile<'a>>) -> Self {
    // Capture 1 = entry name (incl. the marker suffix), 2 = hash,
    // 3 = extension (incl. the leading dot).
    let re =
      lazy_regex::regex!(r"(^.+\.deno-bundle-html.entry)-([^.]+)(\..+)$");
    let mut index = std::collections::HashMap::new();
    for (i, f) in output_files.iter().enumerate() {
      if let Some(name) = f.path.file_name().map(|s| s.to_string_lossy()) {
        let Some(captures) = re.captures(&name) else {
          continue;
        };
        let mut entry_name = captures.get(1).unwrap().as_str().to_string();
        let ext = captures.get(3).unwrap().as_str();
        entry_name.push_str(ext);
        index.insert(
          entry_name,
          ParsedOutput {
            path: f.path.clone(),
            index: i,
            hash: captures.get(2).unwrap().as_str().to_string(),
          },
        );
      }
    }
    Self {
      output_files,
      index,
    }
  }
  /// Looks up the indexed output `name`, lets `f` compute a replacement
  /// path, and applies it to both the index entry and the real output
  /// file. Returns the new path, or `None` if `name` was not indexed.
  pub fn get_and_update_path(
    &mut self,
    name: &str,
    f: impl FnOnce(PathBuf, &ParsedOutput) -> PathBuf,
  ) -> Option<PathBuf> {
    let parsed_output = self.index.get_mut(name)?;
    let new_path = f(parsed_output.path.clone(), parsed_output);
    parsed_output.path = new_path.clone();
    self.output_files[parsed_output.index].path = new_path.clone();
    Some(new_path)
  }
}
impl HtmlEntrypoint {
  // File stem of the original HTML file; used to name the outputs.
  fn original_entry_name(&self) -> String {
    self.path.file_stem().unwrap().to_string_lossy().to_string()
  }
  /// Patches this HTML entry against the bundler's outputs: renames the
  /// synthetic entry JS/CSS outputs after the HTML file, injects
  /// `<script>`/`<link>` tags pointing at them, strips the original
  /// script tags, and appends the patched HTML as a new output file.
  pub fn patch_html_with_response<'a>(
    self,
    _cwd: &Path,
    outdir: &Path,
    html_output_files: &mut HtmlOutputFiles<'a, '_>,
  ) -> anyhow::Result<()> {
    let original_entry_name = self.original_entry_name();
    if self.scripts.is_empty() {
      // Nothing was bundled for this document; copy it through unchanged.
      let html_out_path =
        // TODO(nathanwhit): not really correct
        { outdir.join(format!("{}.html", &original_entry_name)) };
      html_output_files.output_files.push(OutputFile {
        path: html_out_path,
        contents: Cow::Owned(self.contents.into_bytes()),
        hash: None,
      });
      return Ok(());
    }
    let entry_name = format!("{}{}", self.entry_name, VIRTUAL_ENTRY_SUFFIX);
    let js_entry_name = format!("{}.js", entry_name);
    // JS output path with the content hash stripped; used below to derive
    // the HTML output path.
    let mut js_out_no_hash = None;
    let js_out = html_output_files
      .get_and_update_path(&js_entry_name, |p, f| {
        let p = p.to_string_lossy();
        js_out_no_hash = Some(
          p.replace(entry_name.as_str(), &original_entry_name)
            .replace(&format!("-{}", f.hash), "")
            .into(),
        );
        // Rename the synthetic entry output after the HTML file.
        p.replace(entry_name.as_str(), &original_entry_name).into()
      })
      .ok_or_else(|| {
        anyhow::anyhow!(
          "failed to locate output for HTML entry '{}'; {js_entry_name}",
          self.entry_name
        )
      })?;
    let html_out_path = js_out_no_hash
      .unwrap_or_else(|| js_out.clone())
      .with_extension("html");
    // The CSS output only exists when the bundled scripts imported CSS.
    let css_entry_name = format!("{}.css", entry_name);
    let css_out =
      html_output_files.get_and_update_path(&css_entry_name, |p, _| {
        p.to_string_lossy()
          .replace(entry_name.as_str(), &original_entry_name)
          .into()
      });
    // `src` for the injected script tag: relative to the HTML output and
    // always using forward slashes.
    let script_src = {
      let base = html_out_path.parent().unwrap_or(outdir);
      let mut rel = pathdiff::diff_paths(&js_out, base)
        .unwrap_or_else(|| js_out.clone())
        .to_string_lossy()
        .into_owned();
      if std::path::MAIN_SEPARATOR != '/' {
        rel = rel.replace('\\', "/");
      }
      rel
    };
    // The single injected tag is async / a module if any original was.
    let any_async = self.scripts.iter().any(|s| s.is_async);
    let any_module = self.scripts.iter().any(|s| s.is_module);
    let to_inject = Script {
      src: Some(
        if !script_src.starts_with(".") && !script_src.starts_with("/") {
          format!("./{}", script_src)
        } else {
          script_src
        },
      ),
      is_async: any_async,
      is_module: any_module,
      resolved_path: None,
    };
    let css_href = css_out.as_ref().map(|p| {
      let base = html_out_path.parent().unwrap_or(outdir);
      let mut rel = pathdiff::diff_paths(p, base)
        .unwrap_or_else(|| p.clone())
        .to_string_lossy()
        .into_owned();
      if std::path::MAIN_SEPARATOR != '/' {
        rel = rel.replace('\\', "/");
      }
      if !rel.starts_with(".") && !rel.starts_with("/") {
        rel = format!("./{}", rel);
      }
      rel
    });
    let patched = inject_scripts_and_css(
      &self.contents,
      to_inject,
      &self.scripts,
      css_href,
    )?;
    html_output_files.output_files.push(OutputFile {
      path: html_out_path,
      contents: Cow::Owned(patched.into_bytes()),
      hash: None,
    });
    Ok(())
  }
}
/// Renders a `<link …>` element from the given attribute list.
fn make_link_str(attrs: &[Attr]) -> String {
  let rendered = StringBuilder::build(|out| {
    out.append("<link");
    write_attr_list(attrs, out);
    out.append(">");
  });
  rendered.unwrap()
}
/// Renders a `<link rel="stylesheet" crossorigin href="…">` element
/// pointing at `path`.
fn stylesheet_str(path: &str) -> String {
  let attrs = [
    Attr::new("rel", Some("stylesheet".into())),
    Attr::new("crossorigin", None),
    Attr::new("href", Some(Cow::Borrowed(path))),
  ];
  make_link_str(&attrs)
}
/// Rewrites `input`: removes the original `<script src>` tags listed in
/// `to_remove`, then appends the bundled script tag (and, optionally, a
/// stylesheet link) to `<head>` — or to a synthesized `<head>` appended
/// at the end when the document has none.
fn inject_scripts_and_css(
  input: &str,
  to_inject: Script,
  to_remove: &[Script],
  css_to_inject_path: Option<String>,
) -> anyhow::Result<String> {
  // Guards against double injection (multiple <head> elements, or the
  // end-of-document fallback firing after a <head> was handled).
  let did_inject = Cell::new(false);
  let rewritten = lol_html::rewrite_str(
    input,
    lol_html::Settings {
      element_content_handlers: vec![
        element!("head", |el| {
          let already_done = did_inject.replace(true);
          if already_done {
            return Ok(());
          }
          el.append(&to_inject.to_element_string(), LolContentType::Html);
          if let Some(css_to_inject_path) = &css_to_inject_path {
            let link = stylesheet_str(css_to_inject_path);
            el.append(&link, LolContentType::Html);
          }
          Ok(())
        }),
        element!("script[src]", |el| {
          // Safe: the `script[src]` selector guarantees `src` exists.
          let src = el.get_attribute("src").unwrap();
          // Drop the original tags whose scripts were bundled.
          if to_remove
            .iter()
            .any(|script| script.src.as_deref() == Some(src.as_str()))
          {
            el.remove();
          }
          Ok(())
        }),
      ],
      document_content_handlers: vec![lol_html::end!(|end| {
        // Fallback: the document had no <head> element at all.
        if !did_inject.replace(true) {
          let script = to_inject.to_element_string();
          let link = css_to_inject_path
            .as_ref()
            .map(|p| stylesheet_str(p))
            .unwrap_or_default();
          end.append(
            &format!("<head>{script}{link}</head>"),
            LolContentType::Html,
          );
        }
        Ok(())
      })],
      ..lol_html::Settings::new()
    },
  )?;
  Ok(rewritten)
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/tools/bundle/externals.rs | cli/tools/bundle/externals.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::borrow::Cow;
use std::collections::HashSet;
use std::path::Path;
/// A glob-free matcher: a literal prefix plus a literal suffix, derived
/// from a pattern containing at most one `*` wildcard.
#[derive(Debug)]
struct Pattern {
  prefix: String,
  suffix: String,
}
impl Pattern {
  /// Splits `pattern` at the `*` located at byte offset `wildcard_index`:
  /// everything before it becomes the prefix, everything after it the
  /// suffix.
  fn new_with_wildcard(pattern: &str, wildcard_index: usize) -> Self {
    let (prefix, rest) = pattern.split_at(wildcard_index);
    Self {
      prefix: prefix.to_string(),
      // Skip the `*` itself (one byte).
      suffix: rest[1..].to_string(),
    }
  }
  /// A prefix-only pattern: matches anything starting with `pattern`
  /// (the empty suffix matches every path ending).
  fn new_prefix(pattern: String) -> Self {
    Self {
      prefix: pattern,
      suffix: String::new(),
    }
  }
}
/// A collection of wildcard patterns plus a set of exact strings.
#[derive(Debug)]
struct Patterns {
  patterns: Vec<Pattern>,
  exact: HashSet<String>,
}
impl Patterns {
  /// True when `path` is listed exactly or satisfies any
  /// prefix/suffix pattern.
  fn is_match(&self, path: &str) -> bool {
    self.exact.contains(path)
      || self
        .patterns
        .iter()
        .any(|p| path.starts_with(&p.prefix) && path.ends_with(&p.suffix))
  }
}
/// Matches `--external` specifiers both before module resolution (as
/// written on the command line) and after (as absolute paths).
#[derive(Debug)]
pub struct ExternalsMatcher {
  pre_resolve: Patterns,
  post_resolve: Patterns,
}
/// True when `path` looks like a bare package specifier (e.g. `chalk`,
/// `@std/fs`) rather than an absolute or relative filesystem path.
fn is_package_path(path: &str) -> bool {
  let looks_like_fs_path = matches!(path, "." | "..")
    || path.starts_with('/')
    || path.starts_with("./")
    || path.starts_with("../");
  !looks_like_fs_path
}
/// Returns `path` as an absolute, normalized string; relative paths are
/// resolved against `cwd`.
fn to_absolute_path(path: &str, cwd: &Path) -> String {
  if path.starts_with('/') {
    return path.to_string();
  }
  let joined = cwd.join(path);
  deno_path_util::normalize_path(Cow::Owned(joined))
    .to_string_lossy()
    .into_owned()
}
impl ExternalsMatcher {
  /// A set of patterns indicating files to mark as external.
  ///
  /// For instance given, `--external="*.node" --external="*.wasm"`, the matcher will match
  /// any path that ends with `.node` or `.wasm`.
  pub fn new(externals: &[String], cwd: &Path) -> Self {
    let mut pre_resolve = Patterns {
      patterns: vec![],
      exact: HashSet::new(),
    };
    let mut post_resolve = Patterns {
      patterns: vec![],
      exact: HashSet::new(),
    };
    for external in externals {
      let wildcard = external.find("*");
      if let Some(wildcard_index) = wildcard {
        // Only a single `*` per pattern is supported.
        if external[wildcard_index + 1..].contains('*') {
          log::error!("Externals must not contain multiple wildcards");
          continue;
        }
        pre_resolve
          .patterns
          .push(Pattern::new_with_wildcard(external, wildcard_index));
        // Path-like patterns are additionally matched after resolution
        // against the absolute, normalized form.
        if !is_package_path(external) {
          let normalized = to_absolute_path(external, cwd);
          if let Some(index) = normalized.find('*') {
            post_resolve
              .patterns
              .push(Pattern::new_with_wildcard(&normalized, index));
          }
        }
      } else {
        pre_resolve.exact.insert(external.to_string());
        if is_package_path(external) {
          // A bare package name also externalizes all of its subpaths
          // (e.g. `chalk` covers `chalk/foo`).
          pre_resolve
            .patterns
            .push(Pattern::new_prefix([external, "/"].join("")));
        } else {
          let normalized = to_absolute_path(external, cwd);
          post_resolve.exact.insert(normalized);
        }
      }
    }
    Self {
      pre_resolve,
      post_resolve,
    }
  }
  /// Matches a specifier as written, before module resolution.
  pub fn is_pre_resolve_match(&self, path: &str) -> bool {
    self.pre_resolve.is_match(path)
  }
  /// Matches an absolute path after module resolution.
  pub fn is_post_resolve_match(&self, path: &str) -> bool {
    self.post_resolve.is_match(path)
  }
}
#[cfg(test)]
mod tests {
  #![allow(clippy::print_stderr)]
  use std::path::Path;
  use super::ExternalsMatcher;
  /// Paths expected to match in each phase.
  struct Matches {
    pre_resolve: Vec<String>,
    post_resolve: Vec<String>,
  }
  /// Builds a matcher from `patterns` and checks that every entry in
  /// `matches` matches its phase and nothing in `no_match` matches
  /// either phase. Logs the first failing path.
  fn matches_all<'a, S: AsRef<str>>(
    patterns: impl IntoIterator<Item = S>,
    matches: Matches,
    no_match: impl IntoIterator<Item = &'a str>,
  ) -> bool {
    let patterns = patterns
      .into_iter()
      .map(|p| p.as_ref().to_string())
      .collect::<Vec<_>>();
    let cwd = std::env::current_dir().unwrap();
    let matcher = ExternalsMatcher::new(&patterns, &cwd);
    for path in matches.pre_resolve {
      if !matcher.is_pre_resolve_match(&path) {
        eprintln!("failed to match pre resolve: {}", path);
        return false;
      }
    }
    for path in matches.post_resolve {
      if !matcher.is_post_resolve_match(&path) {
        eprintln!("failed to match post resolve: {}", path);
        return false;
      }
    }
    for path in no_match {
      if matcher.is_pre_resolve_match(path) {
        eprintln!("matched pre resolve when it should not: {}", path);
        return false;
      }
      if matcher.is_post_resolve_match(path) {
        eprintln!("matched post resolve when it should not: {}", path);
        return false;
      }
    }
    true
  }
  // Shorthand: collect string-likes into owned Strings.
  fn s<S: AsRef<str>>(s: impl IntoIterator<Item = S>) -> Vec<String> {
    s.into_iter().map(|p| p.as_ref().to_string()).collect()
  }
  // Shorthand: lossily stringify a path.
  fn path_str(path: impl AsRef<Path>) -> String {
    path.as_ref().to_string_lossy().into_owned()
  }
  #[test]
  fn matches_package_path() {
    assert!(matches_all(
      ["chalk"],
      Matches {
        pre_resolve: s(["chalk", "chalk/foo"]),
        post_resolve: vec![],
      },
      ["other/chalk", "./chalk/foo.ts", "./chalk"]
    ));
    assert!(matches_all(
      ["@std/fs"],
      Matches {
        pre_resolve: s(["@std/fs", "@std/fs/foo"]),
        post_resolve: vec![],
      },
      ["other/@std/fs", "./@std/fs/foo.ts", "./@std/fs"]
    ));
  }
  #[test]
  fn matches_path() {
    assert!(matches_all(
      ["/node_modules/fo"],
      Matches {
        pre_resolve: s(["/node_modules/fo"]),
        post_resolve: s(["/node_modules/fo"]),
      },
      ["/node_modules/foo"]
    ));
    let cwd = std::env::current_dir().unwrap();
    assert!(matches_all(
      ["./foo"],
      Matches {
        pre_resolve: s(["./foo"]),
        post_resolve: s([path_str(cwd.join("foo"))]),
      },
      ["other/foo", "./foo.ts", "./foo/bar", "thing/./foo"]
    ));
  }
  #[test]
  fn matches_wildcard() {
    assert!(matches_all(
      ["*.node"],
      Matches {
        pre_resolve: s(["foo.node", "foo/bar.node"]),
        post_resolve: vec![],
      },
      ["foo.ts", "./foo.node.ts", "./foo/bar.node.ts"]
    ));
    assert!(matches_all(
      ["@std/*"],
      Matches {
        pre_resolve: s(["@std/fs", "@std/fs/foo"]),
        post_resolve: vec![],
      },
      ["other/@std/fs", "./@std/fs/foo.ts", "./@std/fs"]
    ));
    let cwd = std::env::current_dir().unwrap();
    assert!(matches_all(
      ["./foo/*"],
      Matches {
        pre_resolve: s(["./foo/bar", "./foo/baz"]),
        post_resolve: vec![
          path_str(cwd.join("foo").join("bar")),
          path_str(cwd.join("foo").join("baz")),
        ],
      },
      ["other/foo/bar", "./bar/foo", "./bar/./foo/bar"]
    ));
  }
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/tools/bundle/transform.rs | cli/tools/bundle/transform.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use deno_ast::swc;
use deno_ast::swc::ast::Bool;
use deno_ast::swc::ecma_visit::VisitMut;
use deno_ast::swc::ecma_visit::VisitMutWith;
/// AST transform that rewrites `import.meta.main` to `false` in every
/// bundled module except the bundle entrypoint.
pub struct BundleImportMetaMainTransform {
  // Whether the module being transformed is the bundle entrypoint.
  is_entrypoint: bool,
}
impl BundleImportMetaMainTransform {
  pub fn new(is_entrypoint: bool) -> Self {
    Self { is_entrypoint }
  }
}
impl VisitMut for BundleImportMetaMainTransform {
  /// In the entrypoint, `import.meta.main` keeps its runtime meaning;
  /// in every other module it is replaced with the literal `false`.
  fn visit_mut_expr(&mut self, node: &mut swc::ast::Expr) {
    if let swc::ast::Expr::Member(member) = node
      && let swc::ast::Expr::MetaProp(meta_prop) = &*member.obj
      && meta_prop.kind == swc::ast::MetaPropKind::ImportMeta
      && member.prop.is_ident_with("main")
    {
      if !self.is_entrypoint {
        // Preserve the original span for source maps.
        let span = member.span;
        *node =
          swc::ast::Expr::Lit(swc::ast::Lit::Bool(Bool { span, value: false }));
      }
      // Either way there is nothing further to visit inside this node.
      return;
    }
    node.visit_mut_children_with(self);
  }
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/tools/bundle/esbuild.rs | cli/tools/bundle/esbuild.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::path::PathBuf;
use std::sync::Arc;
use deno_core::anyhow;
use deno_core::anyhow::Context;
use deno_core::error::AnyError;
use deno_npm::npm_rc::ResolvedNpmRc;
use deno_npm::registry::NpmRegistryApi;
use deno_npm_cache::TarballCache;
use deno_resolver::workspace::WorkspaceNpmLinkPackagesRc;
use deno_semver::package::PackageNv;
use crate::cache::DenoDir;
use crate::npm::CliNpmCache;
use crate::npm::CliNpmCacheHttpClient;
use crate::npm::CliNpmRegistryInfoProvider;
use crate::sys::CliSys;
pub const ESBUILD_VERSION: &str = "0.25.5";
/// Maps the current (arch, OS) pair to the matching `@esbuild/<target>`
/// package suffix. Panics on platforms esbuild does not ship binaries for.
fn esbuild_platform() -> &'static str {
  let arch = std::env::consts::ARCH;
  let os = std::env::consts::OS;
  match (arch, os) {
    ("x86_64", "linux") => "linux-x64",
    ("aarch64", "linux") => "linux-arm64",
    ("x86_64", "macos" | "apple") => "darwin-x64",
    ("aarch64", "macos" | "apple") => "darwin-arm64",
    ("x86_64", "windows") => "win32-x64",
    ("aarch64", "windows") => "win32-arm64",
    ("x86_64", "android") => "android-x64",
    ("aarch64", "android") => "android-arm64",
    _ => panic!("Unsupported platform: {arch} {os}"),
  }
}
/// Ensures the esbuild executable for the current platform exists under
/// the Deno dir's download folder, downloading the pinned
/// `@esbuild/<target>` npm package on first use. Returns the path to the
/// executable.
///
/// Fix: the "could not get fetch esbuild binary" error message contained
/// a stray "get".
pub async fn ensure_esbuild(
  deno_dir: &DenoDir,
  npmrc: &ResolvedNpmRc,
  api: &Arc<CliNpmRegistryInfoProvider>,
  workspace_link_packages: &WorkspaceNpmLinkPackagesRc,
  tarball_cache: &Arc<TarballCache<CliNpmCacheHttpClient, CliSys>>,
  npm_cache: &CliNpmCache,
) -> Result<PathBuf, AnyError> {
  let target = esbuild_platform();
  let mut esbuild_path = deno_dir
    .dl_folder_path()
    .join(format!("esbuild-{}", ESBUILD_VERSION))
    .join(format!("esbuild-{}", target));
  if cfg!(windows) {
    esbuild_path.set_extension("exe");
  }
  // Fast path: already downloaded for this version/target.
  if esbuild_path.exists() {
    return Ok(esbuild_path);
  }
  let pkg_name = format!("@esbuild/{}", target);
  let nv =
    PackageNv::from_str(&format!("{}@{}", pkg_name, ESBUILD_VERSION)).unwrap();
  let mut info = api.package_info(&pkg_name).await?;
  // Registry info may be cached and stale; force a reload once if the
  // pinned version is missing from it.
  let version_info = match info.version_info(&nv, &workspace_link_packages.0) {
    Ok(version_info) => version_info,
    Err(_) => {
      api.mark_force_reload();
      info = api.package_info(&pkg_name).await?;
      info.version_info(&nv, &workspace_link_packages.0)?
    }
  };
  if let Some(dist) = &version_info.dist {
    let registry_url = npmrc.get_registry_url(&nv.name);
    let package_folder =
      npm_cache.package_folder_for_nv_and_url(&nv, registry_url);
    let existed = package_folder.exists();
    if !existed {
      tarball_cache
        .ensure_package(&nv, dist)
        .await
        .with_context(|| {
          format!(
            "failed to download esbuild package tarball {} from {}",
            nv, dist.tarball
          )
        })?;
    }
    // Location of the executable inside the extracted npm package.
    let path = if cfg!(windows) {
      package_folder.join("esbuild.exe")
    } else {
      package_folder.join("bin").join("esbuild")
    };
    std::fs::create_dir_all(esbuild_path.parent().unwrap()).with_context(
      || {
        format!(
          "failed to create directory {}",
          esbuild_path.parent().unwrap().display()
        )
      },
    )?;
    std::fs::copy(&path, &esbuild_path).with_context(|| {
      format!(
        "failed to copy esbuild binary from {} to {}",
        path.display(),
        esbuild_path.display()
      )
    })?;
    // If the package was downloaded only for this copy, remove it again
    // best-effort (the binary has already been copied out).
    if !existed {
      let _ = std::fs::remove_dir_all(&package_folder).inspect_err(|e| {
        log::warn!(
          "failed to remove directory {}: {}",
          package_folder.display(),
          e
        );
      });
    }
    Ok(esbuild_path)
  } else {
    anyhow::bail!(
      "could not fetch esbuild binary; download it manually and copy it to {}",
      esbuild_path.display()
    );
  }
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/tools/bundle/mod.rs | cli/tools/bundle/mod.rs | // Copyright 2018-2025 the Deno authors. MIT license.
mod esbuild;
mod externals;
mod html;
mod provider;
mod transform;
use std::borrow::Cow;
use std::cell::RefCell;
use std::ops::Deref;
use std::path::Path;
use std::path::PathBuf;
use std::rc::Rc;
use std::sync::Arc;
use std::sync::LazyLock;
use std::time::Duration;
use deno_ast::EmitOptions;
use deno_ast::MediaType;
use deno_ast::ModuleKind;
use deno_ast::ModuleSpecifier;
use deno_bundle_runtime::BundleFormat;
use deno_bundle_runtime::BundlePlatform;
use deno_bundle_runtime::PackageHandling;
use deno_bundle_runtime::SourceMapType;
use deno_config::workspace::TsTypeLib;
use deno_core::error::AnyError;
use deno_core::futures::FutureExt as _;
use deno_core::parking_lot::Mutex;
use deno_core::parking_lot::RwLock;
use deno_core::serde_json;
use deno_core::url::Url;
use deno_error::JsError;
use deno_graph::ModuleErrorKind;
use deno_graph::Position;
use deno_path_util::resolve_url_or_path;
use deno_resolver::cache::ParsedSourceCache;
use deno_resolver::graph::ResolveWithGraphError;
use deno_resolver::graph::ResolveWithGraphOptions;
use deno_resolver::loader::LoadCodeSourceError;
use deno_resolver::loader::LoadCodeSourceErrorKind;
use deno_resolver::loader::LoadPreparedModuleErrorKind;
use deno_resolver::loader::LoadedModuleOrAsset;
use deno_resolver::loader::LoadedModuleSource;
use deno_resolver::loader::RequestedModuleType;
use deno_resolver::npm::managed::ResolvePkgFolderFromDenoModuleError;
use deno_runtime::deno_permissions::PermissionsContainer;
use deno_semver::npm::NpmPackageReqReference;
use esbuild_client::EsbuildFlagsBuilder;
use esbuild_client::EsbuildService;
use esbuild_client::protocol;
use esbuild_client::protocol::BuildResponse;
use indexmap::IndexMap;
use indexmap::IndexSet;
use node_resolver::NodeResolutionKind;
use node_resolver::ResolutionMode;
use node_resolver::errors::PackageNotFoundError;
use node_resolver::errors::PackageSubpathResolveError;
pub use provider::CliBundleProvider;
use sys_traits::EnvCurrentDir;
use crate::args::BundleFlags;
use crate::args::Flags;
use crate::factory::CliFactory;
use crate::file_fetcher::CliFileFetcher;
use crate::graph_container::MainModuleGraphContainer;
use crate::graph_container::ModuleGraphContainer;
use crate::graph_container::ModuleGraphUpdatePermit;
use crate::module_loader::CliDenoResolverModuleLoader;
use crate::module_loader::CliEmitter;
use crate::module_loader::ModuleLoadPreparer;
use crate::module_loader::PrepareModuleLoadOptions;
use crate::node::CliNodeResolver;
use crate::npm::CliNpmResolver;
use crate::resolver::CliCjsTracker;
use crate::resolver::CliResolver;
use crate::sys::CliSys;
use crate::tools::bundle::externals::ExternalsMatcher;
use crate::util::file_watcher::WatcherRestartMode;
/// Whether the `__require`-shim replacement hack is enabled (see
/// `replace_require_shim`). Note the inverted semantics despite the name:
/// this is `true` — hack *applied* — when the `NO_DENO_BUNDLE_HACK` env var
/// is NOT set; setting the variable opts out of the hack.
static DISABLE_HACK: LazyLock<bool> =
  LazyLock::new(|| std::env::var("NO_DENO_BUNDLE_HACK").is_err());
/// Resolves the bundle entrypoints and prepares them for loading, producing
/// the `BundlerInput` handed to esbuild.
///
/// Entrypoints are partitioned by extension into HTML pages and script
/// entrypoints:
/// - No HTML: roots are resolved, module loads prepared, and returned as
///   plain `Entrypoints`.
/// - Any HTML: requires `--outdir` (and rejects `--output`); each page is
///   parsed into a synthetic JS entry module that is registered as a
///   virtual module on `plugin_handler`.
///
/// Side effects: populates `plugin_handler.resolved_roots` with the fully
/// resolved graph roots, and sets `plugin_handler.virtual_modules` in the
/// HTML case.
pub async fn prepare_inputs(
  resolver: &CliResolver,
  sys: CliSys,
  npm_resolver: &CliNpmResolver,
  node_resolver: &CliNodeResolver,
  init_cwd: &Path,
  bundle_flags: &BundleFlags,
  plugin_handler: &mut DenoPluginHandler,
) -> Result<BundlerInput, AnyError> {
  let resolved_entrypoints =
    resolve_entrypoints(resolver, init_cwd, &bundle_flags.entrypoints)?;
  // Partition into HTML and non-HTML entrypoints
  let mut html_paths = Vec::new();
  let mut script_entry_urls = Vec::new();
  for url in &resolved_entrypoints {
    if url.as_str().to_lowercase().ends_with(".html") {
      html_paths.push(url.to_file_path().unwrap());
    } else {
      script_entry_urls.push(url.clone());
    }
  }
  if html_paths.is_empty() {
    plugin_handler
      .prepare_module_load(&resolved_entrypoints)
      .await?;
    let roots =
      resolve_roots(resolved_entrypoints, sys, npm_resolver, node_resolver);
    // Prepare again for the resolved roots (which may differ from the raw
    // entrypoints after root resolution).
    plugin_handler.prepare_module_load(&roots).await?;
    // Record the fully-resolved graph roots for the plugin's later use.
    let graph = plugin_handler.module_graph_container.graph();
    let mut fully_resolved_roots = IndexSet::with_capacity(graph.roots.len());
    for root in &graph.roots {
      fully_resolved_roots.insert(graph.resolve(root).clone());
    }
    *plugin_handler.resolved_roots.write() = Arc::new(fully_resolved_roots);
    Ok(BundlerInput::Entrypoints(
      roots.into_iter().map(|e| ("".into(), e.into())).collect(),
    ))
  } else {
    // require an outdir when any HTML is present
    if bundle_flags.output_dir.is_none() {
      return Err(deno_core::anyhow::anyhow!(
        "--outdir is required when bundling HTML entrypoints",
      ));
    }
    if bundle_flags.output_path.is_some() {
      return Err(deno_core::anyhow::anyhow!(
        "--output is not supported with HTML entrypoints; use --outdir",
      ));
    }
    // Prepare HTML pages and temp entry modules
    let mut html_pages = Vec::new();
    let mut to_cache_urls = Vec::new();
    let mut entries: Vec<(String, String)> = Vec::new();
    let virtual_modules = Arc::new(VirtualModules::new());
    for html_path in &html_paths {
      let entry = html::load_html_entrypoint(init_cwd, html_path)?;
      // The synthetic entry module is served from memory via the virtual
      // module registry rather than written to disk.
      let virtual_module_path =
        deno_path_util::url_from_file_path(&entry.virtual_module_path)?;
      let virtual_module_path = virtual_module_path.to_string();
      virtual_modules.insert(
        virtual_module_path.clone(),
        VirtualModule::new(
          entry.temp_module.as_bytes().to_vec(),
          esbuild_client::BuiltinLoader::Js,
        ),
      );
      for script in &entry.scripts {
        if let Some(path) = &script.resolved_path {
          let url = deno_path_util::url_from_file_path(path)?;
          to_cache_urls.push(url);
        }
      }
      entries.push(("".into(), virtual_module_path));
      html_pages.push(entry);
    }
    plugin_handler.virtual_modules = Some(virtual_modules);
    // Prepare non-HTML entries too
    // NOTE(review): prepare errors are ignored (`let _`) in this branch,
    // unlike the `?` used in the non-HTML branch — presumably best-effort
    // so esbuild can surface the failures itself; confirm intentional.
    let _ = plugin_handler.prepare_module_load(&script_entry_urls).await;
    let roots =
      resolve_roots(script_entry_urls, sys, npm_resolver, node_resolver);
    let _ = plugin_handler.prepare_module_load(&roots).await;
    for url in roots {
      entries.push(("".into(), url.into()));
    }
    // Pre-cache modules referenced by HTML pages
    let _ = plugin_handler.prepare_module_load(&to_cache_urls).await;
    let graph = plugin_handler.module_graph_container.graph();
    let mut fully_resolved_roots = IndexSet::with_capacity(graph.roots.len());
    for root in &graph.roots {
      fully_resolved_roots.insert(graph.resolve(root).clone());
    }
    *plugin_handler.resolved_roots.write() = Arc::new(fully_resolved_roots);
    Ok(BundlerInput::EntrypointsWithHtml {
      entries,
      html_pages,
    })
  }
}
/// Constructs an `EsbuildBundler` ready to build: spins up the CLI factory
/// services, ensures the esbuild binary is available, prepares the bundle
/// inputs, and spawns the esbuild service process.
///
/// Forces `sloppy_imports` on for the duration of bundling.
pub async fn bundle_init(
  mut flags: Arc<Flags>,
  bundle_flags: &BundleFlags,
) -> Result<EsbuildBundler, AnyError> {
  {
    let flags_mut = Arc::make_mut(&mut flags);
    flags_mut.unstable_config.sloppy_imports = true;
  }
  let factory = CliFactory::from_flags(flags.clone());
  // Downloads the esbuild binary on first use (see `ensure_esbuild_downloaded`).
  let esbuild_path = ensure_esbuild_downloaded(&factory).await?;
  let resolver = factory.resolver().await?.clone();
  let module_load_preparer = factory.module_load_preparer().await?.clone();
  let root_permissions = factory.root_permissions_container()?;
  let npm_resolver = factory.npm_resolver().await?;
  let node_resolver = factory.node_resolver().await?;
  let cli_options = factory.cli_options()?;
  let module_loader = factory.resolver_factory()?.module_loader()?;
  let sys = factory.sys();
  let init_cwd = cli_options.initial_cwd().to_path_buf();
  let module_graph_container =
    factory.main_module_graph_container().await?.clone();
  // Channel used by the plugin's `on_end` hook to deliver watch-mode
  // rebuild results back to the bundler.
  let (on_end_tx, on_end_rx) = tokio::sync::mpsc::channel(10);
  #[allow(clippy::arc_with_non_send_sync)]
  let mut plugin_handler = Arc::new(DenoPluginHandler {
    file_fetcher: factory.file_fetcher()?.clone(),
    resolver: resolver.clone(),
    module_load_preparer,
    resolved_roots: Arc::new(RwLock::new(Arc::new(IndexSet::new()))),
    module_graph_container,
    permissions: root_permissions.clone(),
    module_loader: module_loader.clone(),
    externals_matcher: if bundle_flags.external.is_empty() {
      None
    } else {
      Some(ExternalsMatcher::new(&bundle_flags.external, &init_cwd))
    },
    on_end_tx,
    parsed_source_cache: factory.parsed_source_cache()?.clone(),
    cjs_tracker: factory.cjs_tracker()?.clone(),
    emitter: factory.emitter()?.clone(),
    deferred_resolve_errors: Default::default(),
    virtual_modules: None,
  });
  // `get_mut` is safe here: the Arc was just created and not yet shared.
  let input = prepare_inputs(
    &resolver,
    sys,
    npm_resolver,
    node_resolver,
    &init_cwd,
    bundle_flags,
    Arc::get_mut(&mut plugin_handler).unwrap(),
  )
  .await?;
  let esbuild = EsbuildService::new(
    esbuild_path,
    esbuild::ESBUILD_VERSION,
    plugin_handler.clone(),
    Default::default(),
  )
  .await
  .unwrap();
  let client = esbuild.client().clone();
  // Keep the esbuild process supervised in the background; log when it exits.
  tokio::spawn(async move {
    let res = esbuild.wait_for_exit().await;
    log::warn!("esbuild exited: {:?}", res);
  });
  let esbuild_flags = configure_esbuild_flags(
    bundle_flags,
    matches!(input, BundlerInput::EntrypointsWithHtml { .. }),
  );
  let bundler = EsbuildBundler::new(
    client,
    plugin_handler.clone(),
    match bundle_flags.watch {
      true => BundlingMode::Watch,
      false => BundlingMode::OneShot,
    },
    on_end_rx,
    init_cwd.clone(),
    esbuild_flags,
    input.clone(),
  );
  Ok(bundler)
}
/// Entry point for `deno bundle`: performs the initial build, reports
/// errors/warnings, writes/prints results, and hands off to `bundle_watch`
/// when `--watch` was passed.
pub async fn bundle(
  mut flags: Arc<Flags>,
  bundle_flags: BundleFlags,
) -> Result<(), AnyError> {
  {
    // Sloppy imports are also forced in `bundle_init`; this copy of the
    // flags is the one later handed to the watcher.
    let flags_mut = Arc::make_mut(&mut flags);
    flags_mut.unstable_config.sloppy_imports = true;
  }
  let bundler = bundle_init(flags.clone(), &bundle_flags).await?;
  let init_cwd = bundler.cwd.clone();
  let start = std::time::Instant::now();
  let response = bundler.build().await?;
  let end = std::time::Instant::now();
  let duration = end.duration_since(start);
  if bundle_flags.watch {
    // Report the initial build's diagnostics, then enter the watch loop.
    if !response.errors.is_empty() || !response.warnings.is_empty() {
      handle_esbuild_errors_and_warnings(
        &response,
        &init_cwd,
        &bundler.plugin_handler.take_deferred_resolve_errors(),
      );
      if !response.errors.is_empty() {
        deno_core::anyhow::bail!("bundling failed");
      }
    }
    return bundle_watch(
      flags,
      bundler,
      bundle_flags.minify,
      bundle_flags.platform,
      bundle_flags.output_dir.as_ref().map(Path::new),
    )
    .await;
  }
  handle_esbuild_errors_and_warnings(
    &response,
    &init_cwd,
    &bundler.plugin_handler.take_deferred_resolve_errors(),
  );
  if response.errors.is_empty() {
    let metafile = metafile_from_response(&response)?;
    let output_infos = process_result(
      &response,
      &init_cwd,
      should_replace_require_shim(bundle_flags.platform),
      bundle_flags.minify,
      bundler.input.clone(),
      bundle_flags.output_dir.as_ref().map(Path::new),
    )?;
    // Only print the summary when output is actually written to disk.
    if bundle_flags.output_dir.is_some() || bundle_flags.output_path.is_some() {
      print_finished_message(&metafile, &output_infos, duration)?;
    }
  }
  if !response.errors.is_empty() {
    deno_core::anyhow::bail!("bundling failed");
  }
  Ok(())
}
/// Parses the JSON metafile out of an esbuild build response, erroring when
/// the response did not carry one.
fn metafile_from_response(
  response: &BuildResponse,
) -> Result<esbuild_client::Metafile, AnyError> {
  let raw = response.metafile.as_deref().ok_or_else(|| {
    deno_core::anyhow::anyhow!("expected a metafile to be present")
  })?;
  let metafile = serde_json::from_str::<esbuild_client::Metafile>(raw)?;
  Ok(metafile)
}
/// Runs the bundler under the file watcher, rebuilding whenever watched
/// inputs change.
///
/// The initial watch set is derived from the entrypoints; HTML page paths
/// are always kept in the set. After each successful rebuild the watch set
/// is replaced with the inputs reported by the build metafile.
async fn bundle_watch(
  flags: Arc<Flags>,
  bundler: EsbuildBundler,
  minified: bool,
  platform: BundlePlatform,
  output_dir: Option<&Path>,
) -> Result<(), AnyError> {
  let (initial_roots, always_watch) = match &bundler.input {
    BundlerInput::Entrypoints(entries) => (
      entries
        .iter()
        .filter_map(|(_, root)| {
          let url = Url::parse(root).ok()?;
          deno_path_util::url_to_file_path(&url).ok()
        })
        .collect::<Vec<_>>(),
      vec![],
    ),
    BundlerInput::EntrypointsWithHtml {
      entries,
      html_pages,
    } => {
      let mut roots = entries
        .iter()
        .filter_map(|(_, root)| {
          let url = Url::parse(root).ok()?;
          deno_path_util::url_to_file_path(&url).ok()
        })
        .collect::<Vec<_>>();
      // HTML pages must always stay watched even after the watch set is
      // re-derived from the metafile inputs.
      let always = html_pages
        .iter()
        .map(|p| p.path.clone())
        .collect::<Vec<_>>();
      roots.extend(always.iter().cloned());
      (roots, always)
    }
  };
  let always_watch = Rc::new(always_watch);
  let current_roots = Rc::new(RefCell::new(initial_roots.clone()));
  let bundler = Rc::new(tokio::sync::Mutex::new(bundler));
  let mut print_config =
    crate::util::file_watcher::PrintConfig::new_with_banner(
      "Watcher", "Bundle", true,
    );
  print_config.print_finished = false;
  crate::util::file_watcher::watch_recv(
    flags,
    print_config,
    WatcherRestartMode::Automatic,
    move |_flags, watcher_communicator, changed_paths| {
      watcher_communicator.show_path_changed(changed_paths.clone());
      let bundler = Rc::clone(&bundler);
      let current_roots = current_roots.clone();
      let always_watch = always_watch.clone();
      Ok(async move {
        let mut bundler = bundler.lock().await;
        let start = std::time::Instant::now();
        if let Some(changed_paths) = changed_paths {
          // Refresh state (including HTML virtual modules) for changed files.
          bundler.reload_specifiers(&changed_paths).await?;
        }
        let input = bundler.input.clone();
        let response = bundler.rebuild().await?;
        handle_esbuild_errors_and_warnings(
          &response,
          &bundler.cwd,
          &bundler.plugin_handler.take_deferred_resolve_errors(),
        );
        if response.errors.is_empty() {
          let metafile = metafile_from_response(&response)?;
          let output_infos = process_result(
            &response,
            &bundler.cwd,
            should_replace_require_shim(platform),
            minified,
            input,
            output_dir,
          )?;
          print_finished_message(&metafile, &output_infos, start.elapsed())?;
          // Re-derive the watch set from the latest build's inputs.
          let mut new_watched = get_input_paths_for_watch(&response);
          new_watched.extend(always_watch.iter().cloned());
          *current_roots.borrow_mut() = new_watched.clone();
          let _ = watcher_communicator.watch_paths(new_watched);
        } else {
          // Failed build: keep watching the last-known-good input set.
          let _ =
            watcher_communicator.watch_paths(current_roots.borrow().clone());
        }
        Ok(())
      })
    },
  )
  .boxed_local()
  .await?;
  Ok(())
}
/// Whether the bundled output should have esbuild's dynamic `__require`
/// shim rewritten to use `node:module`'s `createRequire` (see
/// `replace_require_shim`). Only applies when targeting the Deno platform,
/// and can be disabled via the `NO_DENO_BUNDLE_HACK` env var.
pub fn should_replace_require_shim(platform: BundlePlatform) -> bool {
  *DISABLE_HACK && matches!(platform, BundlePlatform::Deno)
}
/// Extracts the input file paths recorded in a build response's metafile;
/// these become the watched paths in watch mode.
///
/// Panics if the response has no metafile or it fails to parse — watch mode
/// always requests one.
fn get_input_paths_for_watch(response: &BuildResponse) -> Vec<PathBuf> {
  let raw = response
    .metafile
    .as_deref()
    .expect("metafile is required for watch mode");
  let metafile: esbuild_client::Metafile = serde_json::from_str(raw).unwrap();
  metafile.inputs.keys().map(PathBuf::from).collect()
}
/// How the bundler runs: a single build, or a persistent esbuild "context"
/// that can be rebuilt repeatedly (watch mode).
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum BundlingMode {
  /// Build once and exit.
  OneShot,
  /// Keep the esbuild context alive and rebuild on demand.
  Watch,
}
/// The set of entry modules handed to esbuild.
#[derive(Debug, Clone)]
pub enum BundlerInput {
  /// Script entrypoints as `(name, specifier)` pairs. The first element is
  /// always `""` today — presumably an output-name slot; confirm before
  /// relying on it.
  Entrypoints(Vec<(String, String)>),
  /// Script entrypoints plus HTML pages; each page contributes a synthetic
  /// virtual entry module to `entries`.
  EntrypointsWithHtml {
    entries: Vec<(String, String)>,
    html_pages: Vec<html::HtmlEntrypoint>,
  },
}
/// Raw command-line-style flags forwarded verbatim to esbuild.
pub type EsbuildFlags = Vec<String>;
/// Drives esbuild builds over the protocol client together with the Deno
/// plugin that performs resolution and loading.
pub struct EsbuildBundler {
  // Connection to the spawned esbuild service process.
  client: esbuild_client::ProtocolClient,
  // The Deno-side resolver/loader plugin shared with the esbuild service.
  plugin_handler: Arc<DenoPluginHandler>,
  // Receives build results from the plugin's `on_end` hook (watch mode).
  on_end_rx: tokio::sync::mpsc::Receiver<esbuild_client::OnEndArgs>,
  // One-shot vs. watch ("context") mode.
  mode: BundlingMode,
  // Working directory builds are resolved against.
  cwd: PathBuf,
  // esbuild CLI-style flags used for every (re)build.
  flags: EsbuildFlags,
  // The prepared entrypoints (and HTML pages, when present).
  input: BundlerInput,
}
impl EsbuildBundler {
  /// Wires up an already-connected esbuild client with the plugin handler
  /// and the inputs/flags to use for every (re)build.
  pub fn new(
    client: esbuild_client::ProtocolClient,
    plugin_handler: Arc<DenoPluginHandler>,
    mode: BundlingMode,
    on_end_rx: tokio::sync::mpsc::Receiver<esbuild_client::OnEndArgs>,
    cwd: PathBuf,
    flags: EsbuildFlags,
    input: BundlerInput,
  ) -> EsbuildBundler {
    EsbuildBundler {
      client,
      plugin_handler,
      on_end_rx,
      mode,
      cwd,
      flags,
      input,
    }
  }

  // When doing a watch build, we're actually enabling the
  // "context" mode of esbuild. That leaves esbuild running and
  // waits for a rebuild to be triggered. The initial build request
  // doesn't actually do anything, it's just registering the args/flags
  // we're going to use for all of the rebuilds.
  /// Assembles the esbuild `BuildRequest`, registering the Deno plugin's
  /// resolve/load hooks (and, in watch mode, the `on_end` hook).
  fn make_build_request(&self) -> protocol::BuildRequest {
    let entries = match &self.input {
      BundlerInput::Entrypoints(entries) => entries.clone(),
      BundlerInput::EntrypointsWithHtml { entries, .. } => entries.clone(),
    };
    protocol::BuildRequest {
      entries,
      key: 0,
      flags: self.flags.clone(),
      // Outputs are returned in-memory; writing to disk happens on our side.
      write: false,
      stdin_contents: None.into(),
      stdin_resolve_dir: None.into(),
      abs_working_dir: self.cwd.to_string_lossy().into_owned(),
      context: matches!(self.mode, BundlingMode::Watch),
      mangle_cache: None,
      node_paths: vec![],
      plugins: Some(vec![protocol::BuildPlugin {
        name: "deno".into(),
        on_start: false,
        on_end: matches!(self.mode, BundlingMode::Watch),
        // `.*` filters: the Deno plugin handles every specifier.
        on_resolve: (vec![protocol::OnResolveSetupOptions {
          id: 0,
          filter: ".*".into(),
          namespace: "".into(),
        }]),
        on_load: vec![protocol::OnLoadSetupOptions {
          id: 0,
          filter: ".*".into(),
          namespace: "".into(),
        }],
      }]),
    }
  }

  /// Performs a single build, converting esbuild-level failures into
  /// `AnyError`. Panics if the esbuild transport itself fails (`unwrap`).
  async fn build(&self) -> Result<BuildResponse, AnyError> {
    let response: BuildResponse = self
      .client
      .send_build_request(self.make_build_request())
      .await
      .unwrap()
      .map_err(|e| message_to_error(&e, &self.cwd))?;
    Ok(response)
  }

  /// Triggers a rebuild of the registered context and waits for the result
  /// delivered through the plugin's `on_end` channel.
  ///
  /// Panics when called in one-shot mode, or if the transport/channel fails.
  async fn rebuild(&mut self) -> Result<BuildResponse, AnyError> {
    match self.mode {
      BundlingMode::OneShot => {
        panic!("rebuild not supported for one-shot mode")
      }
      BundlingMode::Watch => {
        log::trace!("sending rebuild request");
        // The rebuild response itself is not the build result; the real
        // result arrives via `on_end_rx` below.
        let _response = self
          .client
          .send_rebuild_request(0)
          .await
          .unwrap()
          .map_err(|e| message_to_error(&e, &self.cwd))?;
        let response = self.on_end_rx.recv().await.unwrap();
        Ok(response.into())
      }
    }
  }

  /// Refreshes state for the given changed files: re-parses any affected
  /// HTML entrypoints, then delegates to the plugin handler's
  /// `reload_specifiers`.
  async fn reload_specifiers(
    &mut self,
    changed_paths: &[PathBuf],
  ) -> Result<(), AnyError> {
    self.reload_html_entrypoints(changed_paths)?;
    self.plugin_handler.reload_specifiers(changed_paths).await?;
    Ok(())
  }

  /// Re-parses any HTML pages among `changed_paths` and swaps in their
  /// regenerated virtual entry modules. No-op for non-HTML inputs.
  fn reload_html_entrypoints(
    &mut self,
    changed_paths: &[PathBuf],
  ) -> Result<(), AnyError> {
    let BundlerInput::EntrypointsWithHtml { html_pages, .. } = &mut self.input
    else {
      return Ok(());
    };
    if changed_paths.is_empty() {
      return Ok(());
    }
    for page in html_pages.iter_mut() {
      // Match either the page path as given or its canonicalized form.
      if !changed_paths
        .iter()
        .any(|changed| changed == &page.path || changed == &page.canonical_path)
      {
        continue;
      }
      let updated = html::load_html_entrypoint(&self.cwd, &page.path)?;
      let virtual_module_url =
        deno_path_util::url_from_file_path(&updated.virtual_module_path)?
          .to_string();
      self.plugin_handler.update_virtual_module(
        &virtual_module_url,
        VirtualModule::new(
          updated.temp_module.as_bytes().to_vec(),
          esbuild_client::BuiltinLoader::Js,
        ),
      );
      *page = updated;
    }
    Ok(())
  }
}
fn message_to_error(
message: &esbuild_client::protocol::Message,
current_dir: &Path,
) -> AnyError {
deno_core::anyhow::anyhow!("{}", format_message(message, current_dir))
}
// TODO(nathanwhit): MASSIVE HACK
// See tests::specs::bundle::requires_node_builtin for why this is needed.
// Without this hack, that test would fail with "Dynamic require of "util" is not supported"
fn replace_require_shim(contents: &str, minified: bool) -> String {
if minified {
let re = lazy_regex::regex!(
r#"var (\w+)\s*=\((\w+)\s*=>typeof require<"u"\?require:typeof Proxy<"u"\?new Proxy\((\w+)\,\{get:\(\w+,\w+\)=>\(typeof require<"u"\?require:\w+\)\[l\]\}\):(\w+)\)\(function\(\w+\)\{if\(typeof require<"u"\)return require\.apply\(this\,arguments\);throw Error\('Dynamic require of "'\+\w+\+'" is not supported'\)\}\);"#
);
re.replace(contents, |c: ®ex::Captures<'_>| {
let var_name = c.get(1).unwrap().as_str();
format!("import{{createRequire as __deno_internal_createRequire}} from \"node:module\";var {var_name}=__deno_internal_createRequire(import.meta.url);")
}).into_owned()
} else {
let re = lazy_regex::regex!(
r#"var __require = (/\* @__PURE__ \*/)?\s*\(\(\w+\) => typeof require !== "undefined" \? require : typeof Proxy !== "undefined" \? new Proxy\(\w+, \{\s* get: \(\w+, \w+\) => \(typeof require !== "undefined" \? require : \w+\)\[\w+\]\s*\}\) : \w+\)\(function\(\w+\) \{\s* if \(typeof require !== "undefined"\) return require\.apply\(this, arguments\);\s* throw Error\('Dynamic require of "' \+ \w+ \+ '" is not supported'\);\s*\}\);"#
);
re.replace_all(
contents,
r#"import { createRequire as __deno_internal_createRequire } from "node:module";
var __require = __deno_internal_createRequire(import.meta.url);
"#,
)
.into_owned()
}
}
/// Renders an esbuild source location as a colorized `url:line:column`
/// string, resolving the file against `current_dir` when possible and
/// falling back to the raw file name otherwise.
fn format_location(
  location: &esbuild_client::protocol::Location,
  current_dir: &Path,
) -> String {
  let file = match deno_path_util::resolve_url_or_path(
    location.file.as_str(),
    current_dir,
  ) {
    Ok(url) => deno_terminal::colors::cyan(url.into()),
    Err(_) => deno_terminal::colors::cyan(location.file.clone()),
  };
  format!(
    "{}:{}:{}",
    file,
    deno_terminal::colors::yellow(location.line),
    deno_terminal::colors::yellow(location.column)
  )
}
/// Renders an esbuild note as `note: <text>`, plus an indented location
/// line when the note carries one.
fn format_note(
  note: &esbuild_client::protocol::Note,
  current_dir: &Path,
) -> String {
  let location_suffix = match &note.location {
    Some(location) => {
      format!("\n {}", format_location(location, current_dir))
    }
    None => String::new(),
  };
  format!(
    "{}: {}{}",
    deno_terminal::colors::magenta("note"),
    note.text,
    location_suffix
  )
}
// not very efficient, but it's only for error messages
/// Prefixes every line of `s` with `indent`. Trailing newlines are not
/// preserved (this follows `str::lines` semantics).
fn add_indent(s: &str, indent: &str) -> String {
  s.lines()
    .map(|line| format!("{indent}{line}"))
    .collect::<Vec<_>>()
    .join("\n")
}
/// Formats an esbuild diagnostic for display: the message text, an optional
/// `[id]` tag, an ` at <location>` suffix (skipped when the text already
/// contains " at "), and any attached notes, indented beneath.
// NOTE(review): the `[id] ` fragment (note the trailing space) is appended
// *after* the message text; the trailing space suggests it may have been
// intended as a prefix instead — confirm the intended ordering.
fn format_message(
  message: &esbuild_client::protocol::Message,
  current_dir: &Path,
) -> String {
  format!(
    "{}{}{}{}",
    message.text,
    if message.id.is_empty() {
      String::new()
    } else {
      format!("[{}] ", message.id)
    },
    if let Some(location) = &message.location {
      // Avoid a duplicate location when the text already embeds one.
      if !message.text.contains(" at ") {
        format!("\n at {}", format_location(location, current_dir))
      } else {
        String::new()
      }
    } else {
      String::new()
    },
    if !message.notes.is_empty() {
      let mut s = String::new();
      for note in &message.notes {
        s.push('\n');
        s.push_str(&add_indent(&format_note(note, current_dir), " "));
      }
      s
    } else {
      String::new()
    }
  )
}
/// Errors produced by the bundler's resolve/load machinery; mostly
/// transparent wrappers over the underlying resolver/loader error types.
#[derive(Debug, thiserror::Error, JsError)]
#[class(generic)]
enum BundleError {
  #[error(transparent)]
  Resolver(#[from] deno_resolver::graph::ResolveWithGraphError),
  #[error(transparent)]
  Url(#[from] deno_core::url::ParseError),
  #[error(transparent)]
  ResolveNpmPkg(#[from] ResolvePkgFolderFromDenoModuleError),
  #[error(transparent)]
  SubpathResolve(#[from] PackageSubpathResolveError),
  #[error(transparent)]
  PathToUrlError(#[from] deno_path_util::PathToUrlError),
  #[error(transparent)]
  UrlToPathError(#[from] deno_path_util::UrlToFilePathError),
  #[error(transparent)]
  Io(#[from] std::io::Error),
  #[error(transparent)]
  ResolveUrlOrPathError(#[from] deno_path_util::ResolveUrlOrPathError),
  #[error(transparent)]
  PrepareModuleLoad(#[from] crate::module_loader::PrepareModuleLoadError),
  #[error(transparent)]
  ResolveReqWithSubPath(#[from] deno_resolver::npm::ResolveReqWithSubPathError),
  #[error(transparent)]
  PackageReqReferenceParse(
    #[from] deno_semver::package::PackageReqReferenceParseError,
  ),
  // Not constructed anywhere visible in this file; kept (with allow) —
  // presumably for a code path elsewhere or future use.
  #[allow(dead_code)]
  #[error("Http cache error")]
  HttpCache,
}
/// Maps the `type` key of an import-attributes map (`with { type: "..." }`)
/// to the corresponding `RequestedModuleType`. Absent key means no specific
/// type was requested.
fn requested_type_from_map(
  map: &IndexMap<String, String>,
) -> RequestedModuleType<'_> {
  match map.get("type").map(String::as_str) {
    Some("json") => RequestedModuleType::Json,
    Some("bytes") => RequestedModuleType::Bytes,
    Some("text") => RequestedModuleType::Text,
    Some(other) => RequestedModuleType::Other(other),
    None => RequestedModuleType::None,
  }
}
/// An in-memory module (e.g. the synthetic entry generated for an HTML
/// page) served to esbuild instead of a file on disk.
#[derive(Clone)]
pub struct VirtualModule {
  // Raw module source bytes.
  contents: Vec<u8>,
  // The esbuild loader that should interpret `contents`.
  loader: esbuild_client::BuiltinLoader,
}
impl VirtualModule {
  /// Creates a virtual module from raw source bytes and the esbuild loader
  /// that should process them.
  pub fn new(contents: Vec<u8>, loader: esbuild_client::BuiltinLoader) -> Self {
    Self { contents, loader }
  }
}
/// Thread-safe registry of virtual modules, keyed by their file-URL string,
/// shared between the bundler and the esbuild plugin hooks.
pub struct VirtualModules {
  modules: RwLock<IndexMap<String, VirtualModule>>,
}
impl VirtualModules {
  /// Creates an empty registry.
  pub fn new() -> Self {
    Self {
      modules: RwLock::new(IndexMap::new()),
    }
  }

  /// Registers (or replaces) the module stored under `path`.
  pub fn insert(&self, path: String, contents: VirtualModule) {
    let mut modules = self.modules.write();
    modules.insert(path, contents);
  }

  /// Returns a clone of the module stored under `path`, if any.
  pub fn get(&self, path: &str) -> Option<VirtualModule> {
    let modules = self.modules.read();
    modules.get(path).cloned()
  }

  /// Whether any module is registered under `path`.
  pub fn contains(&self, path: &str) -> bool {
    let modules = self.modules.read();
    modules.contains_key(path)
  }
}
/// A resolution error recorded during the build instead of being reported
/// immediately; surfaced later via `take_deferred_resolve_errors`.
pub struct DeferredResolveError {
  // Presumably the specifier whose resolution failed — the recording site
  // is elsewhere; confirm there.
  path: String,
  error: ResolveWithGraphError,
}
/// The esbuild plugin implementation that delegates module resolution and
/// loading to Deno's resolver/loader machinery.
pub struct DenoPluginHandler {
  file_fetcher: Arc<CliFileFetcher>,
  resolver: Arc<CliResolver>,
  module_load_preparer: Arc<ModuleLoadPreparer>,
  // Fully-resolved module graph roots, populated by `prepare_inputs`.
  resolved_roots: Arc<RwLock<Arc<IndexSet<ModuleSpecifier>>>>,
  module_graph_container: Arc<MainModuleGraphContainer>,
  permissions: PermissionsContainer,
  module_loader: Arc<CliDenoResolverModuleLoader>,
  // Set only when `--external` patterns were given (see `bundle_init`).
  externals_matcher: Option<ExternalsMatcher>,
  // Forwards esbuild `on_end` results to the bundler (watch mode).
  on_end_tx: tokio::sync::mpsc::Sender<esbuild_client::OnEndArgs>,
  // Resolve errors recorded during the build and reported afterwards.
  deferred_resolve_errors: Arc<Mutex<Vec<DeferredResolveError>>>,
  // Present only when bundling HTML entrypoints (see `prepare_inputs`).
  virtual_modules: Option<Arc<VirtualModules>>,
  parsed_source_cache: Arc<ParsedSourceCache>,
  cjs_tracker: Arc<CliCjsTracker>,
  emitter: Arc<CliEmitter>,
}
impl DenoPluginHandler {
  /// Drains and returns every resolve error deferred during the build,
  /// leaving the stored list empty.
  fn take_deferred_resolve_errors(&self) -> Vec<DeferredResolveError> {
    let mut errors = self.deferred_resolve_errors.lock();
    std::mem::take(&mut *errors)
  }

  /// Replaces the virtual module registered under `path`, when virtual
  /// modules are in use (HTML bundling); otherwise a no-op.
  fn update_virtual_module(&self, path: &str, module: VirtualModule) {
    let Some(virtual_modules) = &self.virtual_modules else {
      return;
    };
    virtual_modules.insert(path.to_string(), module);
  }
}
// TODO(bartlomieju): in Rust 1.90 some structs started getting flagged as not used
#[allow(dead_code)]
/// Mirror of esbuild's import kinds in the kebab-case shape used for plugin
/// JSON payloads.
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
#[serde(rename_all = "kebab-case")]
enum PluginImportKind {
  EntryPoint,
  ImportStatement,
  RequireCall,
  DynamicImport,
  RequireResolve,
  ImportRule,
  ComposesFrom,
  UrlToken,
}
impl From<protocol::ImportKind> for PluginImportKind {
  // One-to-one mapping from the esbuild protocol enum.
  fn from(kind: protocol::ImportKind) -> Self {
    match kind {
      protocol::ImportKind::EntryPoint => PluginImportKind::EntryPoint,
      protocol::ImportKind::ImportStatement => {
        PluginImportKind::ImportStatement
      }
      protocol::ImportKind::RequireCall => PluginImportKind::RequireCall,
      protocol::ImportKind::DynamicImport => PluginImportKind::DynamicImport,
      protocol::ImportKind::RequireResolve => PluginImportKind::RequireResolve,
      protocol::ImportKind::ImportRule => PluginImportKind::ImportRule,
      protocol::ImportKind::ComposesFrom => PluginImportKind::ComposesFrom,
      protocol::ImportKind::UrlToken => PluginImportKind::UrlToken,
    }
  }
}
// TODO(bartlomieju): in Rust 1.90 some structs started getting flagged as not used
#[allow(dead_code)]
/// JSON payload shape for esbuild `onResolve` plugin callbacks.
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
#[serde(rename_all = "camelCase")]
struct PluginOnResolveArgs {
  path: String,
  importer: Option<String>,
  kind: PluginImportKind,
  namespace: Option<String>,
  resolve_dir: Option<String>,
  // Import attributes (`with { ... }`).
  with: IndexMap<String, String>,
}
// TODO(bartlomieju): in Rust 1.90 some structs started getting flagged as not used
#[allow(dead_code)]
/// JSON payload shape for esbuild `onLoad` plugin callbacks.
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
#[serde(rename_all = "camelCase")]
struct PluginOnLoadArgs {
  path: String,
  namespace: String,
  suffix: String,
  // Import attributes (`with { ... }`).
  with: IndexMap<String, String>,
}
#[async_trait::async_trait(?Send)]
impl esbuild_client::PluginHandler for DenoPluginHandler {
async fn on_resolve(
&self,
args: esbuild_client::OnResolveArgs,
) -> Result<Option<esbuild_client::OnResolveResult>, AnyError> {
log::debug!("{}: {args:?}", deno_terminal::colors::cyan("on_resolve"));
if let Some(virtual_modules) = &self.virtual_modules
&& virtual_modules.contains(&args.path)
{
return Ok(Some(esbuild_client::OnResolveResult {
path: Some(args.path),
plugin_name: Some("deno".to_string()),
namespace: Some("deno".to_string()),
..Default::default()
}));
}
if let Some(matcher) = &self.externals_matcher
&& matcher.is_pre_resolve_match(&args.path)
{
return Ok(Some(esbuild_client::OnResolveResult {
external: Some(true),
path: Some(args.path),
plugin_name: Some("deno".to_string()),
plugin_data: None,
..Default::default()
}));
}
let result = self.bundle_resolve(
&args.path,
args.importer.as_deref(),
args.resolve_dir.as_deref(),
args.kind,
args.with,
);
let result = match result {
Ok(r) => r,
Err(e) => {
return Ok(Some(esbuild_client::OnResolveResult {
errors: Some(vec![esbuild_client::protocol::PartialMessage {
id: "deno_error".into(),
plugin_name: "deno".into(),
text: e.to_string(),
..Default::default()
}]),
..Default::default()
}));
}
};
Ok(result.map(|r| {
// TODO(nathanwhit): remap the resolved path to be relative
// to the output file. It will be tricky to figure out which
// output file this import will end up in. We may have to use the metafile and rewrite at the end
let is_external = r.starts_with("node:")
|| r.starts_with("bun:")
|| self
.externals_matcher
.as_ref()
.map(|matcher| matcher.is_post_resolve_match(&r))
.unwrap_or(false);
esbuild_client::OnResolveResult {
namespace: if r.starts_with("jsr:")
|| r.starts_with("https:")
|| r.starts_with("http:")
|| r.starts_with("data:")
{
Some("deno".into())
} else {
None
},
external: Some(is_external),
path: Some(r),
plugin_name: Some("deno".to_string()),
plugin_data: None,
..Default::default()
}
}))
}
async fn on_load(
&self,
args: esbuild_client::OnLoadArgs,
) -> Result<Option<esbuild_client::OnLoadResult>, AnyError> {
log::debug!("{}: {args:?}", deno_terminal::colors::cyan("on_load"));
if let Some(virtual_modules) = &self.virtual_modules
&& let Some(module) = virtual_modules.get(&args.path)
{
let contents = module.contents.clone();
let loader = module.loader;
return Ok(Some(esbuild_client::OnLoadResult {
contents: Some(contents),
loader: Some(loader),
..Default::default()
}));
}
let result = self
.bundle_load(&args.path, &requested_type_from_map(&args.with))
.await;
let result = match result {
Ok(r) => r,
Err(e) => {
if e.is_unsupported_media_type() {
return Ok(None);
}
return Ok(Some(esbuild_client::OnLoadResult {
errors: Some(vec![esbuild_client::protocol::PartialMessage {
plugin_name: "deno".into(),
text: e.to_string(),
..Default::default()
}]),
plugin_name: Some("deno".to_string()),
..Default::default()
}));
}
};
log::trace!(
"{}: {:?}",
deno_terminal::colors::magenta("on_load"),
result.as_ref().map(|(code, loader)| format!(
"{}: {:?}",
String::from_utf8_lossy(code),
loader
))
);
if let Some((code, loader)) = result {
Ok(Some(esbuild_client::OnLoadResult {
contents: Some(code),
loader: Some(loader),
..Default::default()
}))
} else {
Ok(None)
}
}
async fn on_start(
&self,
_args: esbuild_client::OnStartArgs,
) -> Result<Option<esbuild_client::OnStartResult>, AnyError> {
Ok(None)
}
async fn on_end(
&self,
_args: esbuild_client::OnEndArgs,
) -> Result<Option<esbuild_client::OnEndResult>, AnyError> {
self.on_end_tx.send(_args).await?;
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | true |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/tools/bundle/provider.rs | cli/tools/bundle/provider.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::path::Path;
use std::sync::Arc;
use deno_bundle_runtime as rt_bundle;
use deno_bundle_runtime::BundleOptions as RtBundleOptions;
use deno_bundle_runtime::BundleProvider;
use deno_core::error::AnyError;
use crate::args::DenoSubcommand;
use crate::args::Flags;
/// CLI-backed implementation of the runtime's `BundleProvider`, letting
/// in-process callers run the bundler using the current CLI flags.
pub struct CliBundleProvider {
  // Base CLI flags; cloned and adjusted per bundle request.
  flags: Arc<Flags>,
}
impl CliBundleProvider {
  /// Creates a provider that bundles using the given CLI flags as a base.
  pub fn new(flags: Arc<Flags>) -> Self {
    Self { flags }
  }
}
/// Maps runtime bundle options onto the CLI's `BundleFlags`. `watch` is
/// always `false` here — watch mode is only driven by the CLI subcommand.
impl From<RtBundleOptions> for crate::args::BundleFlags {
  fn from(value: RtBundleOptions) -> Self {
    Self {
      entrypoints: value.entrypoints,
      output_path: value.output_path,
      output_dir: value.output_dir,
      external: value.external,
      format: value.format,
      minify: value.minify,
      code_splitting: value.code_splitting,
      platform: value.platform,
      watch: false,
      sourcemap: value.sourcemap,
      inline_imports: value.inline_imports,
      packages: value.packages,
    }
  }
}
/// Converts an esbuild protocol note into the runtime-facing `Note` type.
fn convert_note(note: esbuild_client::protocol::Note) -> rt_bundle::Note {
  let location = note.location.map(convert_location);
  rt_bundle::Note {
    text: note.text,
    location,
  }
}
/// Converts an esbuild protocol location into the runtime-facing
/// `Location` type. The optional target fields are always populated, since
/// the protocol type carries concrete values for them.
fn convert_location(
  location: esbuild_client::protocol::Location,
) -> rt_bundle::Location {
  let namespace = Some(location.namespace);
  let length = Some(location.length);
  let suggestion = Some(location.suggestion);
  rt_bundle::Location {
    file: location.file,
    namespace,
    line: location.line,
    column: location.column,
    length,
    suggestion,
  }
}
/// Converts an esbuild protocol message (error/warning) into the
/// runtime-facing `Message` type, converting its location and notes too.
fn convert_message(
  message: esbuild_client::protocol::Message,
) -> rt_bundle::Message {
  let notes: Vec<_> = message.notes.into_iter().map(convert_note).collect();
  let location = message.location.map(convert_location);
  rt_bundle::Message {
    text: message.text,
    location,
    notes,
  }
}
/// Converts an esbuild protocol output file into the runtime-facing type;
/// contents are always present on the protocol side, hence `Some(...)`.
fn convert_build_output_file(
  file: esbuild_client::protocol::BuildOutputFile,
) -> rt_bundle::BuildOutputFile {
  let contents = Some(file.contents);
  rt_bundle::BuildOutputFile {
    path: file.path,
    contents,
    hash: file.hash,
  }
}
/// Converts a full esbuild protocol build response (errors, warnings, and
/// any in-memory output files) into the runtime-facing `BuildResponse`.
pub fn convert_build_response(
  response: esbuild_client::protocol::BuildResponse,
) -> rt_bundle::BuildResponse {
  let errors: Vec<_> =
    response.errors.into_iter().map(convert_message).collect();
  let warnings: Vec<_> =
    response.warnings.into_iter().map(convert_message).collect();
  let output_files = response
    .output_files
    .map(|files| files.into_iter().map(convert_build_output_file).collect());
  rt_bundle::BuildResponse {
    errors,
    warnings,
    output_files,
  }
}
/// Hashes file contents with XxHash64 (seed 0) and encodes the 8-byte
/// little-endian digest as unpadded standard base64.
fn hash_contents(contents: &[u8]) -> String {
  use base64::prelude::*;
  let digest = twox_hash::XxHash64::oneshot(0, contents).to_le_bytes();
  base64::engine::general_purpose::STANDARD_NO_PAD.encode(digest)
}
/// Post-processes esbuild's in-memory output files when the caller asked
/// for results to be returned rather than written to disk.
///
/// Re-derives final output paths via `collect_output_files`, applies the
/// same content rewrites as the disk-writing path
/// (`maybe_process_contents`, e.g. the `__require` shim replacement), and
/// replaces `response.output_files` with the processed files, re-hashing
/// each one. No-op when the response has no output files.
fn process_output_files(
  bundle_flags: &crate::args::BundleFlags,
  response: &mut esbuild_client::protocol::BuildResponse,
  cwd: &Path,
  input: super::BundlerInput,
) -> Result<(), AnyError> {
  if let Some(files) = std::mem::take(&mut response.output_files) {
    let output_files = super::collect_output_files(
      Some(&*files),
      cwd,
      input,
      bundle_flags.output_dir.as_ref().map(Path::new),
    )?;
    let mut new_files = Vec::new();
    for output_file in output_files {
      // Apply the same transforms the disk-writing path performs.
      let processed_contents = crate::tools::bundle::maybe_process_contents(
        &output_file,
        crate::tools::bundle::should_replace_require_shim(
          bundle_flags.platform,
        ),
        bundle_flags.minify,
      )?;
      // Fall back to the original bytes when no processing was needed.
      let contents = processed_contents
        .contents
        .unwrap_or_else(|| output_file.contents.into_owned());
      new_files.push(esbuild_client::protocol::BuildOutputFile {
        path: output_file.path.to_string_lossy().into_owned(),
        hash: hash_contents(&contents),
        contents,
      });
    }
    response.output_files = Some(new_files);
  }
  Ok(())
}
#[async_trait::async_trait]
impl BundleProvider for CliBundleProvider {
  /// Runs a bundle build on a dedicated thread with its own current-thread
  /// tokio runtime, delivering the converted build response back over a
  /// oneshot channel. Type checking is always disabled for these
  /// runtime-initiated builds.
  async fn bundle(
    &self,
    options: RtBundleOptions,
  ) -> Result<rt_bundle::BuildResponse, AnyError> {
    let mut flags_clone = (*self.flags).clone();
    flags_clone.type_check_mode = crate::args::TypeCheckMode::None;
    // Only write to disk when the caller asked to write AND gave a target.
    let write_output = options.write
      && (options.output_dir.is_some() || options.output_path.is_some());
    let bundle_flags: crate::args::BundleFlags = options.into();
    flags_clone.subcommand = DenoSubcommand::Bundle(bundle_flags.clone());
    let (tx, rx) = tokio::sync::oneshot::channel();
    std::thread::spawn(move || {
      deno_runtime::tokio_util::create_and_run_current_thread(async move {
        let flags = Arc::new(flags_clone);
        let bundler = match super::bundle_init(flags, &bundle_flags).await {
          Ok(bundler) => bundler,
          Err(e) => {
            log::trace!("bundle_init error: {e:?}");
            // The receiver may already be gone; ignore send failures.
            let _ = tx.send(Err(e));
            return Ok(());
          }
        };
        log::trace!("bundler.build");
        let mut result = match bundler.build().await {
          Ok(result) => result,
          Err(e) => {
            log::trace!("bundler.build error: {e:?}");
            let _ = tx.send(Err(e));
            return Ok(());
          }
        };
        log::trace!("process_result");
        if write_output {
          // Write processed files to disk and drop them from the response.
          super::process_result(
            &result,
            &bundler.cwd,
            crate::tools::bundle::should_replace_require_shim(
              bundle_flags.platform,
            ),
            bundle_flags.minify,
            bundler.input,
            bundle_flags.output_dir.as_ref().map(Path::new),
          )?;
          result.output_files = None;
        } else {
          // Keep the files in memory, post-processed, for the caller.
          process_output_files(
            &bundle_flags,
            &mut result,
            &bundler.cwd,
            bundler.input,
          )?;
        }
        log::trace!("convert_build_response");
        let result = convert_build_response(result);
        log::trace!("send result");
        let _ = tx.send(Ok(result));
        Ok::<_, AnyError>(())
      })
    });
    log::trace!("rx.await");
    let response = rx.await??;
    log::trace!("response: {:?}", response);
    Ok(response)
  }
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/tools/run/hmr.rs | cli/tools/run/hmr.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::collections::HashMap;
use std::path::PathBuf;
use std::sync::Arc;
use std::sync::atomic::AtomicI32;
use deno_core::LocalInspectorSession;
use deno_core::error::CoreError;
use deno_core::parking_lot::Mutex;
use deno_core::serde_json::json;
use deno_core::serde_json::{self};
use deno_core::url::Url;
use deno_error::JsErrorBox;
use deno_terminal::colors;
use tokio::select;
use tokio::sync::mpsc;
use tokio::sync::mpsc::UnboundedReceiver;
use tokio::sync::mpsc::UnboundedSender;
use tokio::sync::oneshot;
use crate::cdp;
use crate::module_loader::CliEmitter;
use crate::util::file_watcher::WatcherCommunicator;
use crate::util::file_watcher::WatcherRestartMode;
static NEXT_MSG_ID: AtomicI32 = AtomicI32::new(0);
/// Hands out process-wide unique, monotonically increasing inspector
/// message ids.
fn next_id() -> i32 {
  use std::sync::atomic::Ordering;
  // `Relaxed` suffices: callers only need uniqueness, not any ordering
  // with respect to other memory operations.
  NEXT_MSG_ID.fetch_add(1, Ordering::Relaxed)
}
/// Renders a human-readable explanation of a `Debugger.setScriptSource`
/// response status for the watcher log output.
fn explain(response: &cdp::SetScriptSourceResponse) -> String {
  match response.status {
    cdp::Status::Ok => "OK".to_string(),
    cdp::Status::CompileError => match &response.exception_details {
      Some(details) => {
        let (message, description) = details.get_message_and_description();
        // V8 reports the literal string "undefined" when there is no
        // description to show.
        let suffix = if description == "undefined" {
          "".to_string()
        } else {
          format!(" - {}", description)
        };
        format!("compile error: {}{}", message, suffix)
      }
      None => "compile error: No exception details available".to_string(),
    },
    cdp::Status::BlockedByActiveGenerator => {
      "blocked by active generator".to_string()
    }
    cdp::Status::BlockedByActiveFunction => {
      "blocked by active function".to_string()
    }
    cdp::Status::BlockedByTopLevelEsModuleChange => {
      "blocked by top-level ES module change".to_string()
    }
  }
}
/// Whether a failed `Debugger.setScriptSource` is worth retrying: only
/// transient blockers (an active function or generator frame) can succeed
/// on a later attempt; compile errors and top-level module changes cannot.
fn should_retry(status: &cdp::Status) -> bool {
  matches!(
    status,
    cdp::Status::BlockedByActiveGenerator
      | cdp::Status::BlockedByActiveFunction
  )
}
/// Tracks an inspector response keyed by message id: either the response
/// arrived before anyone asked for it (`Ready`), or a caller is parked on a
/// oneshot channel waiting for it (`WaitingFor`).
#[derive(Debug)]
enum InspectorMessageState {
  Ready(serde_json::Value),
  WaitingFor(oneshot::Sender<serde_json::Value>),
}
/// Mutable state shared between the inspector callback and `HmrRunner`.
#[derive(Debug)]
pub struct HmrRunnerInner {
  watcher_communicator: Arc<WatcherCommunicator>,
  // Canonicalized file URL -> V8 script id (from `Debugger.scriptParsed`).
  script_ids: HashMap<String, String>,
  // In-flight inspector request/response bookkeeping.
  messages: HashMap<i32, InspectorMessageState>,
  emitter: Arc<CliEmitter>,
  // Uncaught JS exceptions are forwarded through this channel.
  exception_tx: UnboundedSender<JsErrorBox>,
  // Taken exactly once, by `HmrRunner::run`.
  exception_rx: Option<UnboundedReceiver<JsErrorBox>>,
}
/// Cheaply clonable handle to [`HmrRunnerInner`].
#[derive(Clone, Debug)]
pub struct HmrRunnerState(Arc<Mutex<HmrRunnerInner>>);
impl HmrRunnerState {
  pub fn new(
    emitter: Arc<CliEmitter>,
    watcher_communicator: Arc<WatcherCommunicator>,
  ) -> Self {
    let (exception_tx, exception_rx) = mpsc::unbounded_channel();
    Self(Arc::new(Mutex::new(HmrRunnerInner {
      emitter,
      watcher_communicator,
      script_ids: HashMap::new(),
      messages: HashMap::new(),
      exception_tx,
      // Taken exactly once, by `HmrRunner::run`.
      exception_rx: Some(exception_rx),
    })))
  }
  /// V8 inspector callback. Notifications are dispatched to
  /// `handle_notification`; responses are paired with the request id a
  /// caller is (or will later be) waiting on in `wait_for_response`.
  pub fn callback(&self, msg: deno_core::InspectorMsg) {
    let deno_core::InspectorMsgKind::Message(msg_id) = msg.kind else {
      let notification = serde_json::from_str(&msg.content).unwrap();
      self.handle_notification(notification);
      return;
    };
    let message: serde_json::Value =
      serde_json::from_str(&msg.content).unwrap();
    let mut state = self.0.lock();
    // Nobody waiting yet: stash the response for a later lookup.
    let Some(message_state) = state.messages.remove(&msg_id) else {
      state
        .messages
        .insert(msg_id, InspectorMessageState::Ready(message));
      return;
    };
    let InspectorMessageState::WaitingFor(sender) = message_state else {
      return;
    };
    // The waiter may have been dropped; ignore send failures.
    let _ = sender.send(message);
  }
  /// Handles inspector notifications: forwards uncaught exceptions to the
  /// run loop and records V8 script ids for parsed `file://` modules so
  /// they can later be hot-replaced.
  fn handle_notification(&self, notification: cdp::Notification) {
    if notification.method == "Runtime.exceptionThrown" {
      let exception_thrown =
        serde_json::from_value::<cdp::ExceptionThrown>(notification.params)
          .unwrap();
      // .map_err(JsErrorBox::from_err)?;
      let (message, description) = exception_thrown
        .exception_details
        .get_message_and_description();
      let _ = self
        .0
        .lock()
        .exception_tx
        .send(JsErrorBox::generic(format!("{} {}", message, description)));
    } else if notification.method == "Debugger.scriptParsed" {
      let params =
        serde_json::from_value::<cdp::ScriptParsed>(notification.params)
          .unwrap();
      // .map_err(JsErrorBox::from_err)?;
      if params.url.starts_with("file://") {
        let file_url = Url::parse(&params.url).unwrap();
        let file_path = file_url.to_file_path().unwrap();
        // Index by the canonicalized file URL; lookups in the run loop use
        // URLs derived from watcher paths.
        if let Ok(canonicalized_file_path) = file_path.canonicalize() {
          let canonicalized_file_url =
            Url::from_file_path(canonicalized_file_path).unwrap();
          self
            .0
            .lock()
            .script_ids
            .insert(canonicalized_file_url.into(), params.script_id);
        }
      }
    }
  }
}
/// This structure is responsible for providing Hot Module Replacement
/// functionality.
///
/// It communicates with V8 inspector over a local session and waits for
/// notifications about changed files from the `FileWatcher`.
///
/// Upon receiving such notification, the runner decides if the changed
/// path should be handled the `FileWatcher` itself (as if we were running
/// in `--watch` mode), or if the path is eligible to be hot replaced in the
/// current program.
///
/// Even if the runner decides that a path will be hot-replaced, the V8 isolate
/// can refuse to perform hot replacement, eg. a top-level variable/function
/// of an ES module cannot be hot-replaced. In such situation the runner will
/// force a full restart of a program by notifying the `FileWatcher`.
pub struct HmrRunner {
  // Local V8 inspector session used to post CDP messages.
  session: LocalInspectorSession,
  // State shared with the inspector callback (script ids, responses,
  // exception channel, watcher handle).
  state: HmrRunnerState,
}
impl HmrRunner {
  pub fn new(state: HmrRunnerState, session: LocalInspectorSession) -> Self {
    Self { session, state }
  }
  /// Enables the `Debugger` and `Runtime` inspector domains so that we
  /// start receiving `Debugger.scriptParsed` and `Runtime.exceptionThrown`
  /// notifications.
  pub fn start(&mut self) {
    self
      .session
      .post_message::<()>(next_id(), "Debugger.enable", None);
    self
      .session
      .post_message::<()>(next_id(), "Runtime.enable", None);
  }
  fn watcher(&self) -> Arc<WatcherCommunicator> {
    self.state.0.lock().watcher_communicator.clone()
  }
  pub fn stop(&mut self) {
    self
      .watcher()
      .change_restart_mode(WatcherRestartMode::Automatic);
  }
  /// Main HMR loop: waits for either an uncaught JS exception (which ends
  /// the loop with an error) or changed paths from the file watcher.
  /// Eligible modules are hot-swapped via `Debugger.setScriptSource`;
  /// anything that cannot be hot-swapped forces a full restart through the
  /// watcher.
  pub async fn run(&mut self) -> Result<(), CoreError> {
    self
      .watcher()
      .change_restart_mode(WatcherRestartMode::Manual);
    let watcher = self.watcher();
    let mut exception_rx = self.state.0.lock().exception_rx.take().unwrap();
    loop {
      select! {
        biased;
        maybe_error = exception_rx.recv() => {
          if let Some(err) = maybe_error {
            break Err(err.into());
          }
        },
        changed_paths = watcher.watch_for_changed_paths() => {
          let changed_paths = changed_paths.map_err(JsErrorBox::from_err)?;
          let Some(changed_paths) = changed_paths else {
            let _ = self.watcher().force_restart();
            continue;
          };
          // Fix: `and_then(to_str)` instead of `to_str().unwrap()` so a
          // non-UTF-8 extension no longer panics; such paths simply don't
          // match and fall through to the restart path below.
          let filtered_paths: Vec<PathBuf> = changed_paths.into_iter().filter(|p| {
            p.extension()
              .and_then(|ext| ext.to_str())
              .is_some_and(|ext| matches!(ext, "js" | "ts" | "jsx" | "tsx"))
          }).collect();
          // If after filtering there are no paths it means it's either a file
          // we can't HMR or an external file that was passed explicitly to
          // `--watch-hmr=<file>` path.
          if filtered_paths.is_empty() {
            let _ = self.watcher().force_restart();
            continue;
          }
          for path in filtered_paths {
            let Some(path_str) = path.to_str() else {
              let _ = self.watcher().force_restart();
              continue;
            };
            let Ok(module_url) = Url::from_file_path(path_str) else {
              let _ = self.watcher().force_restart();
              continue;
            };
            // Only scripts V8 has already parsed can be hot-replaced.
            let Some(id) = self.state.0.lock().script_ids.get(module_url.as_str()).cloned() else {
              let _ = self.watcher().force_restart();
              continue;
            };
            let source_code = tokio::fs::read_to_string(deno_path_util::url_to_file_path(&module_url).unwrap()).await?;
            let source_code = self.state.0.lock().emitter.emit_for_hmr(
              &module_url,
              source_code,
            )?;
            let mut tries = 1;
            loop {
              let msg_id = self.set_script_source(&id, source_code.as_str());
              let value = self.wait_for_response(msg_id).await;
              let result: cdp::SetScriptSourceResponse = serde_json::from_value(value).map_err(|e| {
                JsErrorBox::from_err(e)
              })?;
              if matches!(result.status, cdp::Status::Ok) {
                self.dispatch_hmr_event(module_url.as_str());
                self.watcher().print(format!("Replaced changed module {}", module_url.as_str()));
                break;
              }
              self.watcher().print(format!("Failed to reload module {}: {}.", module_url, colors::gray(&explain(&result))));
              // Transient blockers (active function/generator) get a couple
              // of retries before giving up and restarting.
              if should_retry(&result.status) && tries <= 2 {
                tries += 1;
                tokio::time::sleep(std::time::Duration::from_millis(100)).await;
                continue;
              }
              let _ = self.watcher().force_restart();
              break;
            }
          }
        }
      }
    }
  }
  /// Resolves the inspector response for `msg_id`: immediately if it
  /// already arrived, otherwise by parking a oneshot sender for the
  /// inspector callback to complete. Returns the `"result"` payload.
  async fn wait_for_response(&self, msg_id: i32) -> serde_json::Value {
    if let Some(message_state) = self.state.0.lock().messages.remove(&msg_id) {
      let InspectorMessageState::Ready(mut value) = message_state else {
        unreachable!();
      };
      return value["result"].take();
    }
    let (tx, rx) = oneshot::channel();
    self
      .state
      .0
      .lock()
      .messages
      .insert(msg_id, InspectorMessageState::WaitingFor(tx));
    let mut value = rx.await.unwrap();
    value["result"].take()
  }
  /// Sends `Debugger.setScriptSource` for the given script and returns the
  /// message id to wait on.
  fn set_script_source(&mut self, script_id: &str, source: &str) -> i32 {
    let msg_id = next_id();
    self.session.post_message(
      msg_id,
      "Debugger.setScriptSource",
      Some(json!({
        "scriptId": script_id,
        "scriptSource": source,
        "allowTopFrameEditing": true,
      })),
    );
    msg_id
  }
  /// Dispatches a `CustomEvent("hmr")` in the main realm so user code can
  /// react to a hot replacement.
  fn dispatch_hmr_event(&mut self, script_id: &str) {
    let expr = format!(
      "dispatchEvent(new CustomEvent(\"hmr\", {{ detail: {{ path: \"{}\" }} }}));",
      script_id
    );
    self.session.post_message(
      next_id(),
      "Runtime.evaluate",
      Some(json!({
        "expression": expr,
        "contextId": Some(1),
      })),
    );
  }
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/tools/run/mod.rs | cli/tools/run/mod.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::io::Read;
use std::path::PathBuf;
use std::sync::Arc;
use deno_cache_dir::file_fetcher::File;
use deno_config::deno_json::NodeModulesDirMode;
use deno_core::anyhow::Context;
use deno_core::error::AnyError;
use deno_core::futures::FutureExt;
use deno_lib::standalone::binary::SerializedWorkspaceResolverImportMap;
use deno_lib::worker::LibWorkerFactoryRoots;
use deno_npm_installer::PackageCaching;
use deno_npm_installer::graph::NpmCachingStrategy;
use deno_path_util::resolve_url_or_path;
use deno_runtime::WorkerExecutionMode;
use eszip::EszipV2;
use jsonc_parser::ParseOptions;
use crate::args::EvalFlags;
use crate::args::Flags;
use crate::args::RunFlags;
use crate::args::WatchFlagsWithPaths;
use crate::factory::CliFactory;
use crate::util;
use crate::util::file_watcher::WatcherRestartMode;
use crate::util::watch_env_tracker::WatchEnvTracker;
pub mod hmr;
/// Warns when permission flags appear after the script name on the command
/// line: those are forwarded to the script as argv instead of being applied
/// by Deno itself.
pub fn check_permission_before_script(flags: &Flags) {
  let misplaced_permissions =
    !flags.has_permission() && flags.has_permission_in_argv();
  if !misplaced_permissions {
    return;
  }
  log::warn!(
    "{}",
    crate::colors::yellow(
      r#"Permission flags have likely been incorrectly set after the script argument.
To grant permissions, set them before the script argument. For example:
deno run --allow-read=. main.js"#
    )
  );
}
/// Sets the npm user-agent environment variable (once per process) so that
/// spawned npm-aware tooling sees a Deno user agent.
pub fn set_npm_user_agent() {
  static ONCE: std::sync::Once = std::sync::Once::new();
  ONCE.call_once(|| {
    // NOTE(review): mutating the process environment is assumed safe here —
    // presumably this runs early, before threads read the env; confirm.
    #[allow(clippy::undocumented_unsafe_blocks)]
    unsafe {
      std::env::set_var(
        crate::npm::NPM_CONFIG_USER_AGENT_ENV_VAR,
        crate::npm::get_npm_config_user_agent(),
      )
    };
  });
}
/// Runs the main module (optionally under `--watch`/HMR) and returns the
/// process exit code. Boot and uncaught-exception failures are reported to
/// telemetry before propagating.
pub async fn run_script(
  mode: WorkerExecutionMode,
  flags: Arc<Flags>,
  watch: Option<WatchFlagsWithPaths>,
  unconfigured_runtime: Option<deno_runtime::UnconfiguredRuntime>,
  roots: LibWorkerFactoryRoots,
) -> Result<i32, AnyError> {
  check_permission_before_script(&flags);
  if let Some(watch_flags) = watch {
    return run_with_watch(mode, flags, watch_flags).boxed_local().await;
  }
  // TODO(bartlomieju): actually I think it will also fail if there's an import
  // map specified and bare specifier is used on the command line
  let factory = CliFactory::from_flags(flags);
  let cli_options = factory.cli_options()?;
  let deno_dir = factory.deno_dir()?;
  let http_client = factory.http_client_provider();
  let workspace_resolver = factory.workspace_resolver().await?;
  let node_resolver = factory.node_resolver().await?;
  // Run a background task that checks for available upgrades or output
  // if an earlier run of this background task found a new version of Deno.
  #[cfg(feature = "upgrade")]
  super::upgrade::check_for_upgrades(
    http_client.clone(),
    deno_dir.upgrade_check_file_path(),
  );
  let main_module = cli_options.resolve_main_module_with_resolver(Some(
    &crate::args::WorkspaceMainModuleResolver::new(
      workspace_resolver.clone(),
      node_resolver.clone(),
    ),
  ))?;
  let preload_modules = cli_options.preload_modules()?;
  let require_modules = cli_options.require_modules()?;
  if main_module.scheme() == "npm" {
    set_npm_user_agent();
  }
  maybe_npm_install(&factory).await?;
  let worker_factory = factory
    .create_cli_main_worker_factory_with_roots(roots)
    .await?;
  let mut worker = worker_factory
    .create_main_worker_with_unconfigured_runtime(
      mode,
      main_module.clone(),
      preload_modules,
      require_modules,
      unconfigured_runtime,
    )
    .await
    .inspect_err(|e| deno_telemetry::report_event("boot_failure", e))?;
  let exit_code = worker
    .run()
    .await
    .inspect_err(|e| deno_telemetry::report_event("uncaught_exception", e))?;
  Ok(exit_code)
}
/// Reads the entire program source from stdin, registers it as an
/// in-memory main module and runs it, returning the exit code.
pub async fn run_from_stdin(
  flags: Arc<Flags>,
  unconfigured_runtime: Option<deno_runtime::UnconfiguredRuntime>,
  roots: LibWorkerFactoryRoots,
) -> Result<i32, AnyError> {
  let factory = CliFactory::from_flags(flags);
  let cli_options = factory.cli_options()?;
  let main_module = cli_options.resolve_main_module()?;
  let preload_modules = cli_options.preload_modules()?;
  let require_modules = cli_options.require_modules()?;
  maybe_npm_install(&factory).await?;
  let file_fetcher = factory.file_fetcher()?;
  let worker_factory = factory
    .create_cli_main_worker_factory_with_roots(roots)
    .await?;
  let mut source = Vec::new();
  std::io::stdin().read_to_end(&mut source)?;
  // Save a fake file into file fetcher cache
  // to allow module access by TS compiler
  file_fetcher.insert_memory_files(File {
    url: main_module.clone(),
    mtime: None,
    maybe_headers: None,
    source: source.into(),
    loaded_from: deno_cache_dir::file_fetcher::LoadedFrom::Local,
  });
  let mut worker = worker_factory
    .create_main_worker_with_unconfigured_runtime(
      WorkerExecutionMode::Run,
      main_module.clone(),
      preload_modules,
      require_modules,
      unconfigured_runtime,
    )
    .await?;
  let exit_code = worker.run().await?;
  Ok(exit_code)
}
// TODO(bartlomieju): this function is not handling `exit_code` set by the runtime
// code properly.
/// Runs the main module under the file watcher, restarting (or, with
/// `--watch-hmr`, hot-replacing) on changes. Always resolves with exit
/// code 0 — see the TODO above.
async fn run_with_watch(
  mode: WorkerExecutionMode,
  flags: Arc<Flags>,
  watch_flags: WatchFlagsWithPaths,
) -> Result<i32, AnyError> {
  util::file_watcher::watch_recv(
    flags,
    util::file_watcher::PrintConfig::new_with_banner(
      if watch_flags.hmr { "HMR" } else { "Watcher" },
      "Process",
      !watch_flags.no_clear_screen,
    ),
    WatcherRestartMode::Automatic,
    move |flags, watcher_communicator, changed_paths| {
      watcher_communicator.show_path_changed(changed_paths.clone());
      // Re-load env files on every restart so edits to them take effect.
      let env_file_paths: Option<Vec<std::path::PathBuf>> = flags
        .env_file
        .as_ref()
        .map(|files| files.iter().map(PathBuf::from).collect());
      WatchEnvTracker::snapshot().load_env_variables_from_env_files(
        env_file_paths.as_ref(),
        flags.log_level,
      );
      Ok(async move {
        let factory = CliFactory::from_flags_for_watcher(
          flags,
          watcher_communicator.clone(),
        );
        let cli_options = factory.cli_options()?;
        let main_module = cli_options.resolve_main_module()?;
        let preload_modules = cli_options.preload_modules()?;
        let require_modules = cli_options.require_modules()?;
        if main_module.scheme() == "npm" {
          set_npm_user_agent();
        }
        maybe_npm_install(&factory).await?;
        let _ = watcher_communicator.watch_paths(cli_options.watch_paths());
        let mut worker = factory
          .create_cli_main_worker_factory()
          .await?
          .create_main_worker(
            mode,
            main_module.clone(),
            preload_modules,
            require_modules,
          )
          .await?;
        if watch_flags.hmr {
          worker.run().await?;
        } else {
          worker.run_for_watcher().await?;
        }
        Ok(())
      })
    },
  )
  .boxed_local()
  .await?;
  Ok(0)
}
/// Executes `deno eval`: wraps the code in `console.log(...)` when
/// `--print` is set, registers it as an in-memory main module and runs it.
pub async fn eval_command(
  flags: Arc<Flags>,
  eval_flags: EvalFlags,
) -> Result<i32, AnyError> {
  let factory = CliFactory::from_flags(flags);
  let cli_options = factory.cli_options()?;
  let file_fetcher = factory.file_fetcher()?;
  let main_module = cli_options.resolve_main_module()?;
  let preload_modules = cli_options.preload_modules()?;
  let require_modules = cli_options.require_modules()?;
  maybe_npm_install(&factory).await?;
  // Create a dummy source file.
  let source_code = if eval_flags.print {
    format!("console.log({})", eval_flags.code)
  } else {
    eval_flags.code
  };
  // Save a fake file into file fetcher cache
  // to allow module access by TS compiler.
  file_fetcher.insert_memory_files(File {
    url: main_module.clone(),
    mtime: None,
    maybe_headers: None,
    source: source_code.into_bytes().into(),
    loaded_from: deno_cache_dir::file_fetcher::LoadedFrom::Local,
  });
  let worker_factory = factory.create_cli_main_worker_factory().await?;
  let mut worker = worker_factory
    .create_main_worker(
      WorkerExecutionMode::Eval,
      main_module.clone(),
      preload_modules,
      require_modules,
    )
    .await?;
  let exit_code = worker.run().await?;
  Ok(exit_code)
}
/// Ensures an "npm install" has been done when the user explicitly opted
/// into a managed node_modules directory (`nodeModulesDir: "auto"`); with
/// the eager caching strategy all packages are cached up front.
pub async fn maybe_npm_install(factory: &CliFactory) -> Result<(), AnyError> {
  let cli_options = factory.cli_options()?;
  // Nothing to do unless a managed node_modules dir was explicitly chosen.
  if cli_options.specified_node_modules_dir()? != Some(NodeModulesDirMode::Auto)
  {
    return Ok(());
  }
  let Some(npm_installer) = factory.npm_installer_if_managed().await? else {
    return Ok(());
  };
  // Keep the progress bar alive for the duration of the install.
  let _clear_guard = factory
    .text_only_progress_bar()
    .deferred_keep_initialize_alive();
  let already_done = npm_installer
    .ensure_top_level_package_json_install()
    .await?;
  if !already_done
    && matches!(
      cli_options.default_npm_caching_strategy(),
      NpmCachingStrategy::Eager
    )
  {
    npm_installer.cache_packages(PackageCaching::All).await?;
  }
  Ok(())
}
/// Runs an eszip bundle. The `script` flag is encoded as
/// `entrypoint#path1,path2,...`; only the entrypoint is resolved and run
/// here.
pub async fn run_eszip(
  flags: Arc<Flags>,
  run_flags: RunFlags,
  unconfigured_runtime: Option<deno_runtime::UnconfiguredRuntime>,
  roots: LibWorkerFactoryRoots,
) -> Result<i32, AnyError> {
  // TODO(bartlomieju): actually I think it will also fail if there's an import
  // map specified and bare specifier is used on the command line
  let factory = CliFactory::from_flags(flags.clone());
  let cli_options = factory.cli_options()?;
  // entrypoint#path1,path2,...
  let (entrypoint, _files) = run_flags
    .script
    .split_once("#")
    .with_context(|| "eszip: invalid script string")?;
  let mode = WorkerExecutionMode::Run;
  let main_module = resolve_url_or_path(entrypoint, cli_options.initial_cwd())?;
  let preload_modules = cli_options.preload_modules()?;
  let require_modules = cli_options.require_modules()?;
  let worker_factory = factory
    .create_cli_main_worker_factory_with_roots(roots)
    .await?;
  let mut worker = worker_factory
    .create_main_worker_with_unconfigured_runtime(
      mode,
      main_module.clone(),
      preload_modules,
      require_modules,
      unconfigured_runtime,
    )
    .await?;
  let exit_code = worker.run().await?;
  Ok(exit_code)
}
#[allow(unused)]
/// Finds and parses an import map embedded in one of the given eszips,
/// returning it in serialized workspace-resolver form. Later eszips take
/// precedence, hence the reverse search.
async fn load_import_map(
  eszips: &[EszipV2],
  specifier: &str,
) -> Result<SerializedWorkspaceResolverImportMap, AnyError> {
  let maybe_module = eszips
    .iter()
    .rev()
    .find_map(|eszip| eszip.get_import_map(specifier));
  let Some(module) = maybe_module else {
    return Err(AnyError::msg(format!("import map not found '{specifier}'")));
  };
  let base_url = deno_core::url::Url::parse(specifier).map_err(|err| {
    AnyError::msg(format!(
      "import map specifier '{specifier}' is not a valid url: {err}"
    ))
  })?;
  let bytes = module
    .source()
    .await
    // Fix: this message was previously a plain string literal, so the
    // `{specifier}` placeholder was emitted verbatim instead of being
    // interpolated.
    .ok_or_else(|| {
      AnyError::msg(format!("import map not found '{specifier}'"))
    })?;
  let text = String::from_utf8_lossy(&bytes);
  let json_value =
    jsonc_parser::parse_to_serde_value(&text, &ParseOptions::default())
      .map_err(|err| {
        AnyError::msg(format!("import map failed to parse: {err}"))
      })?
      .ok_or_else(|| AnyError::msg("import map is not valid JSON"))?;
  let import_map = import_map::parse_from_value(base_url, json_value)?;
  Ok(SerializedWorkspaceResolverImportMap {
    specifier: specifier.to_string(),
    json: import_map.import_map.to_json(),
  })
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/tools/init/mod.rs | cli/tools/init/mod.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::ffi::OsString;
use std::io::IsTerminal;
use std::io::Write;
use std::path::Path;
use chrono::NaiveDate;
use color_print::cformat;
use color_print::cstr;
use deno_config::deno_json::NodeModulesDirMode;
use deno_core::anyhow::Context;
use deno_core::error::AnyError;
use deno_core::futures::FutureExt;
use deno_core::serde_json::json;
use deno_lib::args::UnstableConfig;
use deno_npm_installer::PackagesAllowedScripts;
use deno_runtime::WorkerExecutionMode;
use log::info;
use crate::args::DenoSubcommand;
use crate::args::Flags;
use crate::args::InitFlags;
use crate::args::InternalFlags;
use crate::args::PermissionFlags;
use crate::args::RunFlags;
use crate::colors;
use crate::util::fs::FsCleaner;
use crate::util::progress_bar::ProgressBar;
/// Scaffolds a new Deno project in the current directory (or `--dir`).
///
/// `--npm <package>` delegates to the package's npm `create-*` initializer;
/// otherwise one of four templates is written: `--empty` (minimal script),
/// `--serve` (HTTP server plus tests), `--lib` (publishable library plus
/// test), or the default (small program plus test). Existing files are
/// never overwritten (see `create_file`). Finishes by printing
/// getting-started commands for the chosen template.
pub async fn init_project(init_flags: InitFlags) -> Result<i32, AnyError> {
  if let Some(package) = &init_flags.package {
    // `deno init --npm <pkg>`: hand off to the npm initializer flow.
    return init_npm(InitNpmOptions {
      name: package,
      args: init_flags.package_args,
      yes: init_flags.yes,
    })
    .boxed_local()
    .await;
  }
  let cwd =
    std::env::current_dir().context("Can't read current working directory.")?;
  let dir = if let Some(dir) = &init_flags.dir {
    let dir = cwd.join(dir);
    std::fs::create_dir_all(&dir)?;
    dir
  } else {
    cwd
  };
  if init_flags.empty {
    // --empty: bare main.ts plus a dev task.
    create_file(
      &dir,
      "main.ts",
      r#"console.log('Hello world!');
"#,
    )?;
    create_json_file(
      &dir,
      "deno.json",
      &json!({
        "tasks": {
          "dev": "deno run --watch main.ts"
        }
      }),
    )?;
  } else if init_flags.serve {
    // --serve: HTTP server entrypoint, tests and a static asset.
    create_file(
      &dir,
      "main.ts",
      r#"import { serveDir } from "@std/http";
const userPagePattern = new URLPattern({ pathname: "/users/:id" });
const staticPathPattern = new URLPattern({ pathname: "/static/*" });
export default {
  fetch(req) {
    const url = new URL(req.url);
    if (url.pathname === "/") {
      return new Response("Home page");
    }
    const userPageMatch = userPagePattern.exec(url);
    if (userPageMatch) {
      return new Response(userPageMatch.pathname.groups.id);
    }
    if (staticPathPattern.test(url)) {
      return serveDir(req);
    }
    return new Response("Not found", { status: 404 });
  },
} satisfies Deno.ServeDefaultExport;
"#,
    )?;
    create_file(
      &dir,
      "main_test.ts",
      r#"import { assertEquals } from "@std/assert";
import server from "./main.ts";
Deno.test(async function serverFetch() {
  const req = new Request("https://deno.land");
  const res = await server.fetch(req);
  assertEquals(await res.text(), "Home page");
});
Deno.test(async function serverFetchNotFound() {
  const req = new Request("https://deno.land/404");
  const res = await server.fetch(req);
  assertEquals(res.status, 404);
});
Deno.test(async function serverFetchUsers() {
  const req = new Request("https://deno.land/users/123");
  const res = await server.fetch(req);
  assertEquals(await res.text(), "123");
});
Deno.test(async function serverFetchStatic() {
  const req = new Request("https://deno.land/static/hello.js");
  const res = await server.fetch(req);
  assertEquals(await res.text(), 'console.log("Hello, world!");\n');
  assertEquals(res.headers.get("content-type"), "text/javascript; charset=UTF-8");
});
"#,
    )?;
    let static_dir = dir.join("static");
    std::fs::create_dir_all(&static_dir)?;
    create_file(
      &static_dir,
      "hello.js",
      r#"console.log("Hello, world!");
"#,
    )?;
    create_json_file(
      &dir,
      "deno.json",
      &json!({
        "tasks": {
          "dev": "deno serve --watch -R main.ts",
        },
        "imports": {
          "@std/assert": "jsr:@std/assert@1",
          "@std/http": "jsr:@std/http@1",
        }
      }),
    )?;
  } else if init_flags.lib {
    // Extract the directory name to use as the project name
    // NOTE(review): panics if the directory name is not valid UTF-8.
    let project_name = dir
      .file_name()
      .unwrap_or_else(|| dir.as_os_str())
      .to_str()
      .unwrap();
    create_file(
      &dir,
      "mod.ts",
      r#"export function add(a: number, b: number): number {
  return a + b;
}
"#,
    )?;
    create_file(
      &dir,
      "mod_test.ts",
      r#"import { assertEquals } from "@std/assert";
import { add } from "./mod.ts";
Deno.test(function addTest() {
  assertEquals(add(2, 3), 5);
});
"#,
    )?;
    create_json_file(
      &dir,
      "deno.json",
      &json!({
        "name": project_name,
        "version": "0.1.0",
        "exports": "./mod.ts",
        "tasks": {
          "dev": "deno test --watch"
        },
        "license": "MIT",
        "imports": {
          "@std/assert": "jsr:@std/assert@1"
        },
      }),
    )?;
  } else {
    // Default template: small program plus a test.
    create_file(
      &dir,
      "main.ts",
      r#"export function add(a: number, b: number): number {
  return a + b;
}
// Learn more at https://docs.deno.com/runtime/manual/examples/module_metadata#concepts
if (import.meta.main) {
  console.log("Add 2 + 3 =", add(2, 3));
}
"#,
    )?;
    create_file(
      &dir,
      "main_test.ts",
      r#"import { assertEquals } from "@std/assert";
import { add } from "./main.ts";
Deno.test(function addTest() {
  assertEquals(add(2, 3), 5);
});
"#,
    )?;
    create_json_file(
      &dir,
      "deno.json",
      &json!({
        "tasks": {
          "dev": "deno run --watch main.ts"
        },
        "imports": {
          "@std/assert": "jsr:@std/assert@1"
        }
      }),
    )?;
  }
  // Print template-specific getting-started instructions.
  info!("✅ {}", colors::green("Project initialized"));
  info!("");
  info!("{}", colors::gray("Run these commands to get started"));
  info!("");
  if let Some(dir) = init_flags.dir {
    info!("  cd {}", dir);
    info!("");
  }
  if init_flags.empty {
    info!("  {}", colors::gray("# Run the program"));
    info!("  deno run main.ts");
    info!("");
    info!(
      "  {}",
      colors::gray("# Run the program and watch for file changes")
    );
    info!("  deno task dev");
  } else if init_flags.serve {
    info!("  {}", colors::gray("# Run the server"));
    info!("  deno serve -R main.ts");
    info!("");
    info!(
      "  {}",
      colors::gray("# Run the server and watch for file changes")
    );
    info!("  deno task dev");
    info!("");
    info!("  {}", colors::gray("# Run the tests"));
    info!("  deno test -R");
  } else if init_flags.lib {
    info!("  {}", colors::gray("# Run the tests"));
    info!("  deno test");
    info!("");
    info!(
      "  {}",
      colors::gray("# Run the tests and watch for file changes")
    );
    info!("  deno task dev");
    info!("");
    info!("  {}", colors::gray("# Publish to JSR (dry run)"));
    info!("  deno publish --dry-run");
  } else {
    info!("  {}", colors::gray("# Run the program"));
    info!("  deno run main.ts");
    info!("");
    info!(
      "  {}",
      colors::gray("# Run the program and watch for file changes")
    );
    info!("  deno task dev");
    info!("");
    info!("  {}", colors::gray("# Run the tests"));
    info!("  deno test");
  }
  Ok(0)
}
/// Maps an npm package name to its `create-` initializer specifier,
/// mirroring how `npm init <name>` resolves packages:
///
/// - `foo`        -> `npm:create-foo`
/// - `foo@tag`    -> `npm:create-foo@tag`
/// - `@scope`     -> `npm:@scope/create`
/// - `@scope@tag` -> `npm:@scope/create@tag`
/// - `@scope/foo` -> `npm:@scope/create-foo`
fn npm_name_to_create_package(name: &str) -> String {
  let Some(rest) = name.strip_prefix('@') else {
    // Unscoped: prefix the name with `create-` (an empty name degenerates
    // to the bare `npm:/create` specifier).
    return if name.is_empty() {
      "npm:/create".to_string()
    } else {
      format!("npm:create-{}", name)
    };
  };
  // Scoped: splice the `create` segment in at the first `/` (package name)
  // or `@` (version/tag), whichever comes first.
  match rest.find(['/', '@']).map(|i| rest.split_at(i)) {
    Some((scope, tail)) if tail.starts_with('/') => {
      format!("npm:@{}/create-{}", scope, &tail[1..])
    }
    Some((scope, tail)) => {
      format!("npm:@{}/create@{}", scope, &tail[1..])
    }
    None => format!("npm:@{}/create", rest),
  }
}
/// Arguments for running an npm `create-*` initializer package.
struct InitNpmOptions<'a> {
  // Package name as given on the command line (e.g. `vite`, `@scope/pkg`).
  name: &'a str,
  // Extra arguments forwarded to the initializer script.
  args: Vec<String>,
  // Skip the interactive trust prompt (`--yes`).
  yes: bool,
}
/// Runs an npm `create-*` initializer (e.g. `deno init --npm vite`).
///
/// Unless `--yes` was passed, prompts for confirmation first, since the
/// package runs with all permissions; on a non-interactive stdin it refuses
/// and prints manual instructions instead. The initializer executes with a
/// throw-away node_modules directory under the system temp dir.
async fn init_npm(options: InitNpmOptions<'_>) -> Result<i32, AnyError> {
  let script_name = npm_name_to_create_package(options.name);
  // Printed when the run is declined; yields a non-zero exit code.
  fn print_manual_usage(script_name: &str, args: &[String]) -> i32 {
    log::info!(
      "{}",
      cformat!(
        "You can initialize project manually by running <u>deno run {}</> and applying desired permissions.",
        std::iter::once(script_name)
          .chain(args.iter().map(|a| a.as_ref()))
          .collect::<Vec<_>>()
          .join(" ")
      )
    );
    1
  }
  if !options.yes {
    if std::io::stdin().is_terminal() {
      log::info!(
        cstr!(
          "⚠️ Do you fully trust <y>{}</> package? Deno will invoke code from it with all permissions. Do you want to continue? <p(245)>[y/n]</>"
        ),
        script_name
      );
      // Loop until a line is successfully read; anything but "y" declines.
      loop {
        let _ = std::io::stdout().write(b"> ")?;
        std::io::stdout().flush()?;
        let mut answer = String::new();
        if std::io::stdin().read_line(&mut answer).is_ok() {
          let answer = answer.trim().to_ascii_lowercase();
          if answer != "y" {
            return Ok(print_manual_usage(&script_name, &options.args));
          } else {
            break;
          }
        }
      }
    } else {
      // Non-interactive stdin: never run with all permissions implicitly.
      return Ok(print_manual_usage(&script_name, &options.args));
    }
  }
  let temp_node_modules_parent_tempdir = create_temp_node_modules_parent_dir()
    .context("Failed creating temp directory for node_modules folder.")?;
  // Canonicalize when possible so the path has no symlinks or Windows UNC
  // prefix; fall back to the raw temp path otherwise.
  let temp_node_modules_parent_dir = temp_node_modules_parent_tempdir
    .path()
    .canonicalize()
    .ok()
    .map(deno_path_util::strip_unc_prefix)
    .unwrap_or_else(|| temp_node_modules_parent_tempdir.path().to_path_buf());
  let temp_node_modules_dir = temp_node_modules_parent_dir.join("node_modules");
  log::debug!(
    "Creating node_modules directory at: {}",
    temp_node_modules_dir.display()
  );
  // Run the create-* package via the regular `deno run` machinery with all
  // permissions, npm scripts allowed, and the throw-away node_modules dir.
  let new_flags = Flags {
    permissions: PermissionFlags {
      allow_all: true,
      ..Default::default()
    },
    allow_scripts: PackagesAllowedScripts::All,
    argv: options.args,
    node_modules_dir: Some(NodeModulesDirMode::Auto),
    subcommand: DenoSubcommand::Run(RunFlags {
      script: script_name,
      ..Default::default()
    }),
    reload: true,
    internal: InternalFlags {
      lockfile_skip_write: true,
      root_node_modules_dir_override: Some(temp_node_modules_dir),
      ..Default::default()
    },
    unstable_config: UnstableConfig {
      bare_node_builtins: true,
      sloppy_imports: true,
      detect_cjs: true,
      ..Default::default()
    },
    ..Default::default()
  };
  let result = crate::tools::run::run_script(
    WorkerExecutionMode::Run,
    new_flags.into(),
    None,
    None,
    Default::default(),
  )
  .await;
  drop(temp_node_modules_parent_tempdir); // explicit drop for clarity
  result
}
/// Creates a node_modules directory in a folder with the following format:
///
/// <tmp-dir>/deno_init_nm/<date>/<random-value>
///
/// Old folders are automatically deleted by this function.
fn create_temp_node_modules_parent_dir() -> Result<tempfile::TempDir, AnyError>
{
  let root_temp_folder = std::env::temp_dir().join("deno_init_nm");
  let today = chrono::Utc::now().date_naive();
  // remove any old/stale temp dirs
  if let Err(err) =
    attempt_temp_dir_garbage_collection(&root_temp_folder, today)
  {
    // Best-effort cleanup; failure is not fatal.
    log::debug!("Failed init temp folder garbage collection: {:#?}", err);
  }
  let day_folder = root_temp_folder.join(folder_name_for_date(today));
  std::fs::create_dir_all(&day_folder)
    .with_context(|| format!("Failed creating '{}'", day_folder.display()))?;
  let temp_node_modules_parent_dir = tempfile::TempDir::new_in(&day_folder)?;
  // write a package.json to make this be considered a "node" project to deno
  let package_json_path =
    temp_node_modules_parent_dir.path().join("package.json");
  std::fs::write(&package_json_path, "{}").with_context(|| {
    format!("Failed creating '{}'", package_json_path.display())
  })?;
  Ok(temp_node_modules_parent_dir)
}
/// Deletes stale date folders under `root_temp_folder`, keeping only the
/// folders for yesterday, today and tomorrow (relative to `utc_now`, so a
/// concurrent process in another timezone/day isn't disturbed).
fn attempt_temp_dir_garbage_collection(
  root_temp_folder: &Path,
  utc_now: NaiveDate,
) -> Result<(), AnyError> {
  let one_day = chrono::Days::new(1);
  // folder names to keep: current date +- 1 day
  let keep_names = [
    folder_name_for_date(utc_now.checked_sub_days(one_day).unwrap_or(utc_now)),
    folder_name_for_date(utc_now),
    folder_name_for_date(utc_now.checked_add_days(one_day).unwrap_or(utc_now)),
  ];
  let progress_bar =
    ProgressBar::new(crate::util::progress_bar::ProgressBarStyle::TextOnly);
  let update_guard = progress_bar.deferred_update_with_prompt(
    crate::util::progress_bar::ProgressMessagePrompt::Cleaning,
    "old temp node_modules folders...",
  );
  let mut cleaner = FsCleaner::new(Some(update_guard));
  for entry in std::fs::read_dir(root_temp_folder)? {
    let Ok(entry) = entry else {
      continue;
    };
    let name = entry.file_name();
    if keep_names.iter().any(|keep| *keep == name) {
      continue;
    }
    // best-effort: a failed removal is logged, not propagated
    if let Err(err) = cleaner.rm_rf(&entry.path()) {
      log::debug!("Failed cleaning '{}': {:#?}", name.display(), err);
    }
  }
  Ok(())
}
/// Formats a date as the `YYYY-MM-DD` folder name used under the temp root.
fn folder_name_for_date(date: chrono::NaiveDate) -> OsString {
  date.format("%Y-%m-%d").to_string().into()
}
/// Serializes `value` as pretty-printed JSON with a trailing newline and
/// writes it to `filename` inside `dir` via `create_file`.
fn create_json_file(
  dir: &Path,
  filename: &str,
  value: &deno_core::serde_json::Value,
) -> Result<(), AnyError> {
  let text = deno_core::serde_json::to_string_pretty(value)? + "\n";
  create_file(dir, filename, &text)
}
/// Creates `filename` inside `dir` with the given `content`.
///
/// Never destructive: if the file already exists it is left untouched and an
/// informational message is logged instead.
fn create_file(
  dir: &Path,
  filename: &str,
  content: &str,
) -> Result<(), AnyError> {
  let path = dir.join(filename);
  if path.exists() {
    // fix: the messages previously printed the literal text "(unknown)"
    // instead of interpolating the file name
    info!(
      "ℹ️ {}",
      colors::gray(format!("Skipped creating {filename} as it already exists"))
    );
    Ok(())
  } else {
    // create_new guards against the file appearing between the exists()
    // check above and this open
    let mut file = std::fs::OpenOptions::new()
      .write(true)
      .create_new(true)
      .open(path)
      .with_context(|| format!("Failed to create {filename} file"))?;
    file.write_all(content.as_bytes())?;
    Ok(())
  }
}
#[cfg(test)]
mod test {
  use test_util::TempDir;

  use super::attempt_temp_dir_garbage_collection;
  use crate::tools::init::npm_name_to_create_package;

  #[test]
  fn npm_name_to_create_package_test() {
    // See https://docs.npmjs.com/cli/v8/commands/npm-init#description
    let cases = [
      ("foo", "npm:create-foo"),
      ("foo@1.0.0", "npm:create-foo@1.0.0"),
      ("@foo", "npm:@foo/create"),
      ("@foo@1.0.0", "npm:@foo/create@1.0.0"),
      ("@foo/bar", "npm:@foo/create-bar"),
      ("@foo/bar@1.0.0", "npm:@foo/create-bar@1.0.0"),
    ];
    for (input, expected) in cases {
      assert_eq!(npm_name_to_create_package(input), expected.to_string());
    }
  }

  #[test]
  fn test_attempt_temp_dir_garbage_collection() {
    let temp_dir = TempDir::new();
    let reference_date = chrono::NaiveDate::from_ymd_opt(2020, 5, 13).unwrap();
    // a mix of out-of-window and in-window date folders
    for dir in [
      "0000-00-00",
      "2020-05-01/sub_dir/sub",
      "2020-05-02/sub_dir",
      "2020-05-11",
      "2020-05-12",
      "2020-05-13",
      "2020-05-14",
      "2020-05-15",
    ] {
      temp_dir.path().join(dir).create_dir_all();
    }
    // ensure removal also works on non-empty nested folders
    temp_dir
      .path()
      .join("2020-05-01/sub_dir/sub/test.txt")
      .write("");
    attempt_temp_dir_garbage_collection(
      temp_dir.path().as_path(),
      reference_date,
    )
    .unwrap();
    let mut entries = std::fs::read_dir(temp_dir.path())
      .unwrap()
      .map(|e| e.unwrap().file_name().into_string().unwrap())
      .collect::<Vec<_>>();
    entries.sort();
    // should only have the current day +- 1
    assert_eq!(
      entries,
      vec![
        "2020-05-12".to_string(),
        "2020-05-13".to_string(),
        "2020-05-14".to_string()
      ]
    );
  }
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/tools/installer/mod.rs | cli/tools/installer/mod.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::collections::HashSet;
use std::env;
use std::fs;
use std::fs::File;
use std::io;
use std::io::Write;
#[cfg(not(windows))]
use std::os::unix::fs::PermissionsExt;
use std::path::Path;
use std::path::PathBuf;
use std::sync::Arc;
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering;
use dashmap::DashSet;
use deno_cache_dir::file_fetcher::CacheSetting;
use deno_core::anyhow::Context;
use deno_core::anyhow::anyhow;
use deno_core::anyhow::bail;
use deno_core::error::AnyError;
use deno_core::parking_lot::Mutex;
use deno_core::url::Url;
use deno_lib::args::CaData;
use deno_npm::NpmPackageId;
use deno_npm_installer::lifecycle_scripts::LifecycleScriptsWarning;
use deno_path_util::resolve_url_or_path;
use deno_resolver::workspace::WorkspaceResolver;
use deno_semver::npm::NpmPackageReqReference;
use log::Level;
use once_cell::sync::Lazy;
use regex::Regex;
use regex::RegexBuilder;
pub use self::bin_name_resolver::BinNameResolver;
use crate::args::AddFlags;
use crate::args::ConfigFlag;
use crate::args::Flags;
use crate::args::InstallEntrypointsFlags;
use crate::args::InstallFlags;
use crate::args::InstallFlagsGlobal;
use crate::args::InstallFlagsLocal;
use crate::args::InstallTopLevelFlags;
use crate::args::TypeCheckMode;
use crate::args::UninstallFlags;
use crate::args::UninstallKind;
use crate::args::resolve_no_prompt;
use crate::factory::CliFactory;
use crate::file_fetcher::CreateCliFileFetcherOptions;
use crate::file_fetcher::create_cli_file_fetcher;
use crate::graph_container::CollectSpecifiersOptions;
use crate::graph_container::ModuleGraphContainer;
use crate::jsr::JsrFetchResolver;
use crate::npm::CliNpmResolver;
use crate::npm::NpmFetchResolver;
use crate::sys::CliSys;
use crate::util::display;
use crate::util::fs::canonicalize_path_maybe_not_exists;
mod bin_name_resolver;
/// A simple thread-safe monotonically increasing counter.
#[derive(Debug, Default)]
pub struct Count {
  // relaxed atomics suffice; this is only used for reporting statistics
  value: AtomicUsize,
}
impl Count {
  /// Increments the counter by one.
  pub fn inc(&self) {
    self.value.fetch_add(1, Ordering::Relaxed);
  }

  /// Returns the current count.
  pub fn get(&self) -> usize {
    self.value.load(Ordering::Relaxed)
  }
}
/// Thread-safe counters and sets recording what this install run resolved,
/// downloaded, reused and set up, per registry (jsr vs npm).
#[derive(Default)]
pub struct InstallStats {
  // jsr requirements resolved to "name@version" strings
  pub resolved_jsr: DashSet<String>,
  // jsr packages fetched from the registry this run
  pub downloaded_jsr: DashSet<String>,
  // jsr packages served from the local cache
  pub reused_jsr: DashSet<String>,
  // npm requirements resolved to concrete versions
  pub resolved_npm: DashSet<String>,
  // npm tarballs downloaded this run
  pub downloaded_npm: DashSet<String>,
  // npm packages set up in node_modules
  // NOTE(review): "intialized" is a typo of "initialized"; the field is
  // public and used throughout this file, so it is documented rather than
  // renamed here
  pub intialized_npm: DashSet<String>,
  // npm tarballs reused from the cache (count only, names not tracked)
  pub reused_npm: Count,
}
impl std::fmt::Debug for InstallStats {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("InstallStats")
.field(
"resolved_jsr",
&self
.resolved_jsr
.iter()
.map(|s| s.as_str().to_string())
.collect::<Vec<_>>(),
)
.field(
"downloaded_jsr",
&self
.downloaded_jsr
.iter()
.map(|s| s.as_str().to_string())
.collect::<Vec<_>>(),
)
.field("resolved_npm", &self.resolved_npm.len())
.field("resolved_jsr_count", &self.resolved_jsr.len())
.field("downloaded_npm", &self.downloaded_npm.len())
.field("downloaded_jsr_count", &self.downloaded_jsr.len())
.field(
"intialized_npm",
&self
.intialized_npm
.iter()
.map(|s| s.as_str().to_string())
.collect::<Vec<_>>(),
)
.field("intialized_npm_count", &self.intialized_npm.len())
.field("reused_npm", &self.reused_npm.get())
.finish()
}
}
/// Receives progress callbacks from the install subsystems (module graph
/// loading, npm resolution, tarball cache, lifecycle scripts) and aggregates
/// them into `InstallStats` plus deferred warning/deprecation messages.
#[derive(Debug)]
pub struct InstallReporter {
  stats: Arc<InstallStats>,
  // drained at the end of a run via take_scripts_warnings()
  scripts_warnings: Arc<Mutex<Vec<LifecycleScriptsWarning>>>,
  // drained at the end of a run via take_deprecation_message()
  deprecation_messages: Arc<Mutex<Vec<String>>>,
}
impl InstallReporter {
pub fn new() -> Self {
Self {
stats: Arc::new(InstallStats::default()),
scripts_warnings: Arc::new(Mutex::new(Vec::new())),
deprecation_messages: Arc::new(Mutex::new(Vec::new())),
}
}
pub fn take_scripts_warnings(&self) -> Vec<LifecycleScriptsWarning> {
std::mem::take(&mut *self.scripts_warnings.lock())
}
pub fn take_deprecation_message(&self) -> Vec<String> {
std::mem::take(&mut *self.deprecation_messages.lock())
}
}
impl deno_npm_installer::InstallProgressReporter for InstallReporter {
  // start-of-setup events are not tracked, only completion
  fn initializing(&self, _nv: &deno_semver::package::PackageNv) {}

  /// Records an npm package that finished being set up.
  fn initialized(&self, nv: &deno_semver::package::PackageNv) {
    self.stats.intialized_npm.insert(nv.to_string());
  }

  // blocking-status messages are ignored by this reporter
  fn blocking(&self, _message: &str) {}

  /// Defers a lifecycle-scripts warning to be printed at the end of the run.
  fn scripts_not_run_warning(
    &self,
    warning: deno_npm_installer::lifecycle_scripts::LifecycleScriptsWarning,
  ) {
    self.scripts_warnings.lock().push(warning);
  }

  /// Defers an npm deprecation message to be printed at the end of the run.
  fn deprecated_message(&self, message: String) {
    self.deprecation_messages.lock().push(message);
  }
}
/// Extracts a `scope/name@version` string from a jsr.io artifact URL.
///
/// Returns `None` for non-http(s) schemes, non-jsr hosts, URLs with fewer
/// than three path segments, and registry metadata (`*.json`) URLs.
fn package_nv_from_url(url: &Url) -> Option<String> {
  let is_http_scheme = matches!(url.scheme(), "http" | "https");
  // note: substring match, so any host containing "jsr.io" qualifies
  let is_jsr_host = url.host_str().is_some_and(|h| h.contains("jsr.io"));
  if !is_http_scheme || !is_jsr_host {
    return None;
  }
  let mut segments = url.path_segments()?;
  let (scope, name, version) =
    (segments.next()?, segments.next()?, segments.next()?);
  if version.ends_with(".json") {
    // don't include meta.json urls
    return None;
  }
  Some(format!("{scope}/{name}@{version}"))
}
impl deno_graph::source::Reporter for InstallReporter {
  /// Records a jsr requirement being resolved to a concrete name+version.
  fn on_resolve(
    &self,
    _req: &deno_semver::package::PackageReq,
    package_nv: &deno_semver::package::PackageNv,
  ) {
    self.stats.resolved_jsr.insert(package_nv.to_string());
  }
}
impl deno_npm::resolution::Reporter for InstallReporter {
  /// Records an npm requirement that was resolved (keyed by the requirement
  /// string, not the resolved version).
  fn on_resolved(
    &self,
    package_req: &deno_semver::package::PackageReq,
    _nv: &deno_semver::package::PackageNv,
  ) {
    self.stats.resolved_npm.insert(package_req.to_string());
  }
}
impl deno_npm_cache::TarballCacheReporter for InstallReporter {
  // download starts are not tracked, only completion
  fn download_started(&self, _nv: &deno_semver::package::PackageNv) {}

  /// Records a completed npm tarball download.
  fn downloaded(&self, nv: &deno_semver::package::PackageNv) {
    self.stats.downloaded_npm.insert(nv.to_string());
  }

  /// Counts an npm tarball that was served from the local cache.
  fn reused_cache(&self, _nv: &deno_semver::package::PackageNv) {
    self.stats.reused_npm.inc();
  }
}
impl deno_resolver::file_fetcher::GraphLoaderReporter for InstallReporter {
  /// Tracks jsr.io package file loads as either cache hits or downloads.
  /// Local files and plain http/https specifiers are ignored.
  fn on_load(
    &self,
    specifier: &Url,
    loaded_from: deno_cache_dir::file_fetcher::LoadedFrom,
  ) {
    let Some(nv) = package_nv_from_url(specifier) else {
      // it's a local file or http/https specifier
      return;
    };
    use deno_cache_dir::file_fetcher::LoadedFrom;
    match loaded_from {
      LoadedFrom::Cache => {
        self.stats.reused_jsr.insert(nv);
      }
      LoadedFrom::Remote => {
        self.stats.downloaded_jsr.insert(nv);
      }
      _ => {}
    }
  }
}
// Valid executable shim names: a letter or digit, followed by word
// characters or dashes (matched case-insensitively).
static EXEC_NAME_RE: Lazy<Regex> = Lazy::new(|| {
  RegexBuilder::new(r"^[a-z0-9][\w-]*$")
    .case_insensitive(true)
    .build()
    .expect("invalid regex")
});
/// Validates an executable shim name against `EXEC_NAME_RE`, returning an
/// error naming the offending input on mismatch.
fn validate_name(exec_name: &str) -> Result<(), AnyError> {
  if !EXEC_NAME_RE.is_match(exec_name) {
    return Err(anyhow!("Invalid executable name: {exec_name}"));
  }
  Ok(())
}
#[cfg(windows)]
/// On Windows, 2 files are generated.
/// One compatible with cmd & powershell with a .cmd extension
/// A second compatible with git bash / MINGW64
/// Generate batch script to satisfy that.
fn generate_executable_file(shim_data: &ShimData) -> Result<(), AnyError> {
  // each argument is double-quoted for cmd.exe
  let args: Vec<String> =
    shim_data.args.iter().map(|c| format!("\"{c}\"")).collect();
  // '%' is special in batch files, so literal percents are doubled to '%%'
  let template = format!(
    "% generated by deno install %\n@deno {} %*\n",
    args
      .iter()
      .map(|arg| arg.replace('%', "%%"))
      .collect::<Vec<_>>()
      .join(" ")
  );
  let mut file = File::create(&shim_data.file_path)?;
  file.write_all(template.as_bytes())?;

  // write file for bash
  // create filepath without extensions
  let template = format!(
    r#"#!/bin/sh
# generated by deno install
deno {} "$@"
"#,
    args.join(" "),
  );
  let mut file = File::create(shim_data.file_path.with_extension(""))?;
  file.write_all(template.as_bytes())?;
  Ok(())
}
#[cfg(not(windows))]
/// Writes a `#!/bin/sh` shim that `exec`s `deno` with the baked-in arguments
/// and marks the file executable (0o755).
fn generate_executable_file(shim_data: &ShimData) -> Result<(), AnyError> {
  use shell_escape::escape;
  // shell-escape each argument so paths/flags containing spaces or shell
  // metacharacters survive
  let args: Vec<String> = shim_data
    .args
    .iter()
    .map(|c| escape(c.into()).into_owned())
    .collect();
  let template = format!(
    r#"#!/bin/sh
# generated by deno install
exec deno {} "$@"
"#,
    args.join(" "),
  );
  let mut file = File::create(&shim_data.file_path)?;
  file.write_all(template.as_bytes())?;
  // make the shim executable
  // (fix: the binding was named `_metadata`, whose underscore prefix wrongly
  // signaled it was unused)
  let metadata = fs::metadata(&shim_data.file_path)?;
  let mut permissions = metadata.permissions();
  permissions.set_mode(0o755);
  fs::set_permissions(&shim_data.file_path, permissions)?;
  Ok(())
}
/// Resolves the directory that installed shims go into: the `--root` flag
/// (resolved relative to `cwd`) or the default install root, with a `bin`
/// component appended when not already present.
fn get_installer_bin_dir(
  cwd: &Path,
  root_flag: Option<&str>,
) -> Result<PathBuf, AnyError> {
  let root = match root_flag {
    Some(root) => canonicalize_path_maybe_not_exists(&cwd.join(root))?,
    None => get_installer_root()?,
  };
  let bin_dir = if root.ends_with("bin") {
    root
  } else {
    root.join("bin")
  };
  Ok(bin_dir)
}
/// Determines the default install root: `$DENO_INSTALL_ROOT` when set and
/// non-empty, otherwise `$HOME/.deno` (`%USERPROFILE%\.deno` on Windows).
fn get_installer_root() -> Result<PathBuf, AnyError> {
  match env::var_os("DENO_INSTALL_ROOT") {
    Some(env_dir) if !env_dir.is_empty() => {
      let env_dir = PathBuf::from(env_dir);
      canonicalize_path_maybe_not_exists(&env_dir).with_context(|| {
        format!(
          "Canonicalizing DENO_INSTALL_ROOT ('{}').",
          env_dir.display()
        )
      })
    }
    _ => {
      // Note: on Windows, the $HOME environment variable may be set by users
      // or by third party software, but it is non-standard and should not be
      // relied upon.
      let home_env_var = if cfg!(windows) { "USERPROFILE" } else { "HOME" };
      let home = env::var_os(home_env_var).ok_or_else(|| {
        io::Error::new(
          io::ErrorKind::NotFound,
          format!("${home_env_var} is not defined"),
        )
      })?;
      Ok(PathBuf::from(home).join(".deno"))
    }
  }
}
/// Removes a globally installed executable shim and its auxiliary files, or
/// delegates to the package-management `remove` for local uninstalls.
pub async fn uninstall(
  flags: Arc<Flags>,
  uninstall_flags: UninstallFlags,
) -> Result<(), AnyError> {
  let uninstall_flags = match uninstall_flags.kind {
    UninstallKind::Local(remove_flags) => {
      return super::pm::remove(flags, remove_flags).await;
    }
    UninstallKind::Global(flags) => flags,
  };
  let cwd = std::env::current_dir().context("Unable to get CWD")?;
  let installation_dir =
    get_installer_bin_dir(&cwd, uninstall_flags.root.as_deref())?;

  // ensure directory exists
  if let Ok(metadata) = fs::metadata(&installation_dir)
    && !metadata.is_dir()
  {
    return Err(anyhow!("Installation path is not a directory"));
  }

  let file_path = installation_dir.join(&uninstall_flags.name);
  let mut removed = remove_file_if_exists(&file_path)?;
  if cfg!(windows) {
    // on Windows a .cmd shim exists alongside the extensionless one
    removed |= remove_file_if_exists(&file_path.with_extension("cmd"))?;
  }
  if !removed {
    return Err(anyhow!(
      "No installation found for {}",
      uninstall_flags.name
    ));
  }

  // There might be some extra files to delete
  // Note: tsconfig.json is legacy. We renamed it to deno.json.
  // Remove cleaning it up after January 2024
  for ext in ["tsconfig.json", "deno.json", "lock.json"] {
    remove_file_if_exists(&file_path.with_extension(ext))?;
  }
  log::info!("✅ Successfully uninstalled {}", uninstall_flags.name);
  Ok(())
}
fn remove_file_if_exists(file_path: &Path) -> Result<bool, AnyError> {
if !file_path.exists() {
return Ok(false);
}
fs::remove_file(file_path)
.with_context(|| format!("Failed removing: {}", file_path.display()))?;
log::info!("deleted {}", file_path.display());
Ok(true)
}
/// Caches, type-checks and pre-emits the given entrypoint modules, then
/// prints the install report.
pub(crate) async fn install_from_entrypoints(
  flags: Arc<Flags>,
  entrypoints_flags: InstallEntrypointsFlags,
) -> Result<(), AnyError> {
  let started = std::time::Instant::now();
  let factory = CliFactory::from_flags(flags.clone());
  let emitter = factory.emitter()?;
  let main_graph_container = factory.main_module_graph_container().await?;
  let specifiers = main_graph_container.collect_specifiers(
    &entrypoints_flags.entrypoints,
    CollectSpecifiersOptions {
      include_ignored_specified: true,
    },
  )?;
  main_graph_container
    .check_specifiers(
      &specifiers,
      crate::graph_container::CheckSpecifiersOptions {
        ext_overwrite: None,
        allow_unknown_media_types: true,
      },
    )
    .await?;
  // pre-emit so later runs don't need to transpile the modules again
  emitter
    .cache_module_emits(&main_graph_container.graph())
    .await?;
  print_install_report(
    &factory.sys(),
    started.elapsed(),
    // NOTE(review): unwrap assumes the factory always provides an install
    // reporter in this code path -- TODO confirm
    &factory.install_reporter()?.unwrap().clone(),
    factory.workspace_resolver().await?,
    factory.npm_resolver().await?,
  );
  Ok(())
}
/// Dispatches the local (project) flavors of `deno install`.
async fn install_local(
  flags: Arc<Flags>,
  install_flags: InstallFlagsLocal,
) -> Result<(), AnyError> {
  match install_flags {
    // `deno install <pkg>` -- add package(s) to the project
    InstallFlagsLocal::Add(add_flags) => {
      super::pm::add(flags, add_flags, super::pm::AddCommandName::Install).await
    }
    // cache the given entrypoint files
    InstallFlagsLocal::Entrypoints(entrypoints) => {
      install_from_entrypoints(flags, entrypoints).await
    }
    // bare `deno install` -- install the workspace's declared dependencies
    InstallFlagsLocal::TopLevel(top_level_flags) => {
      install_top_level(flags, top_level_flags).await
    }
  }
}
/// Top-level npm packages installed in this run, split by whether the
/// workspace's package.json files declare them as dev dependencies.
#[derive(Debug, Default)]
struct CategorizedInstalledDeps {
  normal_deps: Vec<NpmPackageId>,
  dev_deps: Vec<NpmPackageId>,
}
/// Buckets the npm packages that were actually installed during this run
/// into normal vs dev dependencies, based on how the workspace package.json
/// files declare them. Packages declared in neither section (e.g. installed
/// directly from the command line) are treated as normal dependencies.
fn categorize_installed_npm_deps(
  npm_resolver: &CliNpmResolver,
  workspace: &WorkspaceResolver<CliSys>,
  install_reporter: &InstallReporter,
) -> CategorizedInstalledDeps {
  // only a managed resolver has a resolution snapshot to inspect
  let Some(managed_resolver) = npm_resolver.as_managed() else {
    return CategorizedInstalledDeps::default();
  };
  // compute the summary info
  let snapshot = managed_resolver.resolution().snapshot();
  let top_level_packages = snapshot.top_level_packages();
  // collect the declared names per section across all workspace
  // package.json files (this was previously two copy-pasted loops; it is
  // now one loop parameterized over the target set)
  let mut normal_deps = HashSet::new();
  let mut dev_deps = HashSet::new();
  for package_json in workspace.package_jsons() {
    let deps = package_json.resolve_local_package_json_deps();
    for (dep_entries, target_set) in [
      (&deps.dependencies, &mut normal_deps),
      (&deps.dev_dependencies, &mut dev_deps),
    ] {
      for (_k, v) in dep_entries.iter() {
        // skip entries that failed to parse
        let Ok(s) = v else {
          continue;
        };
        match s {
          deno_package_json::PackageJsonDepValue::File(_) => {
            // TODO(nathanwhit)
            // TODO(bartlomieju)
          }
          deno_package_json::PackageJsonDepValue::Req(package_req) => {
            target_set.insert(package_req.name.to_string());
          }
          deno_package_json::PackageJsonDepValue::Workspace(
            _package_json_dep_workspace_req,
          ) => {
            // ignore workspace deps
          }
          deno_package_json::PackageJsonDepValue::JsrReq(_package_req) => {
            // ignore jsr deps
          }
        }
      }
    }
  }
  let mut installed_normal_deps = Vec::new();
  let mut installed_dev_deps = Vec::new();
  // with a node_modules dir, "installed" means set up in it; otherwise it
  // means the tarball was downloaded into the global cache
  let npm_installed_set = if npm_resolver.root_node_modules_path().is_some() {
    &install_reporter.stats.intialized_npm
  } else {
    &install_reporter.stats.downloaded_npm
  };
  for pkg in top_level_packages {
    if !npm_installed_set.contains(&pkg.nv.to_string()) {
      continue;
    }
    if normal_deps.contains(&pkg.nv.name.to_string()) {
      installed_normal_deps.push(pkg);
    } else if dev_deps.contains(&pkg.nv.name.to_string()) {
      installed_dev_deps.push(pkg);
    } else {
      // not declared in any package.json -- report as a normal dep
      installed_normal_deps.push(pkg);
    }
  }
  installed_normal_deps.sort_by(|a, b| a.nv.name.cmp(&b.nv.name));
  installed_dev_deps.sort_by(|a, b| a.nv.name.cmp(&b.nv.name));
  CategorizedInstalledDeps {
    normal_deps: installed_normal_deps.into_iter().cloned().collect(),
    dev_deps: installed_dev_deps.into_iter().cloned().collect(),
  }
}
/// Prints the end-of-install summary: package totals with timing, reuse and
/// per-registry download counts, the categorized dependency lists, and any
/// deferred lifecycle-script warnings and npm deprecation messages.
pub fn print_install_report(
  sys: &dyn sys_traits::boxed::FsOpenBoxed,
  elapsed: std::time::Duration,
  install_reporter: &InstallReporter,
  workspace: &WorkspaceResolver<CliSys>,
  npm_resolver: &CliNpmResolver,
) {
  // formats the duration; see display::human_elapsed_with_ms_limit for the
  // 3s millisecond cutoff behavior
  fn human_elapsed(elapsed: u128) -> String {
    display::human_elapsed_with_ms_limit(elapsed, 3_000)
  }

  let rep = install_reporter;
  // the count section is only printed when something was actually installed
  if !rep.stats.intialized_npm.is_empty()
    || !rep.stats.downloaded_jsr.is_empty()
  {
    let total_installed =
      rep.stats.intialized_npm.len() + rep.stats.downloaded_jsr.len();
    log::info!(
      "{} {} {} {} {}",
      deno_terminal::colors::gray("Installed"),
      total_installed,
      deno_terminal::colors::gray(format!(
        "package{}",
        if total_installed > 1 { "s" } else { "" },
      )),
      deno_terminal::colors::gray("in"),
      human_elapsed(elapsed.as_millis())
    );
    let total_reused = rep.stats.reused_npm.get() + rep.stats.reused_jsr.len();
    log::info!(
      "{} {} {}",
      deno_terminal::colors::gray("Reused"),
      total_reused,
      deno_terminal::colors::gray(format!(
        "package{} from cache",
        if total_reused == 1 { "" } else { "s" },
      )),
    );
    // one '+' per package as a simple bar visualization (also below)
    if total_reused > 0 {
      log::info!(
        "{}",
        deno_terminal::colors::yellow_bold("+".repeat(total_reused))
      );
    }
    let jsr_downloaded = rep.stats.downloaded_jsr.len();
    log::info!(
      "{} {} {}",
      deno_terminal::colors::gray("Downloaded"),
      jsr_downloaded,
      deno_terminal::colors::gray(format!(
        "package{} from JSR",
        if jsr_downloaded == 1 { "" } else { "s" },
      )),
    );
    if jsr_downloaded > 0 {
      log::info!(
        "{}",
        deno_terminal::colors::green("+".repeat(jsr_downloaded))
      );
    }
    let npm_download = rep.stats.downloaded_npm.len();
    log::info!(
      "{} {} {}",
      deno_terminal::colors::gray("Downloaded"),
      npm_download,
      deno_terminal::colors::gray(format!(
        "package{} from npm",
        if npm_download == 1 { "" } else { "s" },
      )),
    );
    if npm_download > 0 {
      log::info!("{}", deno_terminal::colors::green("+".repeat(npm_download)));
    }
  }
  let CategorizedInstalledDeps {
    normal_deps: installed_normal_deps,
    dev_deps: installed_dev_deps,
  } = categorize_installed_npm_deps(npm_resolver, workspace, install_reporter);
  if !installed_normal_deps.is_empty() || !rep.stats.downloaded_jsr.is_empty() {
    log::info!("");
    log::info!("{}", deno_terminal::colors::cyan("Dependencies:"));
    let mut jsr_packages = rep
      .stats
      .downloaded_jsr
      .clone()
      .into_iter()
      .collect::<Vec<_>>();
    jsr_packages.sort();
    for pkg in jsr_packages {
      // entries are "scope/name@version" built by package_nv_from_url;
      // NOTE(review): rsplit_once relies on the '@' always being present and
      // would panic otherwise
      let (name, version) = pkg.rsplit_once("@").unwrap();
      log::info!(
        "{} {}{} {}",
        deno_terminal::colors::green("+"),
        deno_terminal::colors::gray("jsr:"),
        name,
        deno_terminal::colors::gray(version)
      );
    }
    for pkg in &installed_normal_deps {
      log::info!(
        "{} {}{} {}",
        deno_terminal::colors::green("+"),
        deno_terminal::colors::gray("npm:"),
        pkg.nv.name,
        deno_terminal::colors::gray(pkg.nv.version.to_string())
      );
    }
    log::info!("");
  }
  if !installed_dev_deps.is_empty() {
    log::info!("{}", deno_terminal::colors::cyan("Dev dependencies:"));
    for pkg in &installed_dev_deps {
      log::info!(
        "{} {}{} {}",
        deno_terminal::colors::green("+"),
        deno_terminal::colors::gray("npm:"),
        pkg.nv.name,
        deno_terminal::colors::gray(pkg.nv.version.to_string())
      );
    }
  }
  // flush the warnings deferred by the InstallReporter callbacks
  let warnings = install_reporter.take_scripts_warnings();
  for warning in warnings {
    log::warn!("{}", warning.into_message(sys));
  }
  let deprecation_messages = install_reporter.take_deprecation_message();
  for message in deprecation_messages {
    log::warn!("{}", message);
  }
}
/// Installs the workspace's declared top-level dependencies (bare
/// `deno install`), persists the lockfile if it changed, then prints the
/// install report.
async fn install_top_level(
  flags: Arc<Flags>,
  top_level_flags: InstallTopLevelFlags,
) -> Result<(), AnyError> {
  let start_instant = std::time::Instant::now();
  let factory = CliFactory::from_flags(flags);

  // surface any errors in the package.json
  // (fix: this check was previously performed twice back to back)
  factory
    .npm_installer()
    .await?
    .ensure_no_pkg_json_dep_errors()?;

  // the actual work
  crate::tools::pm::cache_top_level_deps(
    &factory,
    None,
    crate::tools::pm::CacheTopLevelDepsOptions {
      lockfile_only: top_level_flags.lockfile_only,
    },
  )
  .await?;

  if let Some(lockfile) = factory.maybe_lockfile().await? {
    lockfile.write_if_changed()?;
  }

  let install_reporter = factory.install_reporter()?.unwrap().clone();
  let workspace = factory.workspace_resolver().await?;
  let npm_resolver = factory.npm_resolver().await?;
  print_install_report(
    &factory.sys(),
    start_instant.elapsed(),
    &install_reporter,
    workspace,
    npm_resolver,
  );
  Ok(())
}
/// Errors with a helpful hint when a single http(s) URL is passed to the
/// local `deno install`, since that almost certainly was meant as a global
/// install (`deno install -g <url>`).
fn check_if_installs_a_single_package_globally(
  maybe_add_flags: Option<&AddFlags>,
) -> Result<(), AnyError> {
  let Some(add_flags) = maybe_add_flags else {
    return Ok(());
  };
  // only a lone argument can plausibly be a global-install URL
  if add_flags.packages.len() != 1 {
    return Ok(());
  }
  let Ok(url) = Url::parse(&add_flags.packages[0]) else {
    return Ok(());
  };
  if matches!(url.scheme(), "http" | "https") {
    // NOTE(review): the first placeholder receives the scheme, so this reads
    // e.g. `Failed to install "https" specifier` -- confirm whether the full
    // URL was intended there instead
    bail!(
      "Failed to install \"{}\" specifier. If you are trying to install {} globally, run again with `-g` flag:\n deno install -g {}",
      url.scheme(),
      url.as_str(),
      url.as_str()
    );
  }
  Ok(())
}
/// Entry point for `deno install`: dispatches between global shim
/// installation and local project installation.
pub async fn install_command(
  flags: Arc<Flags>,
  install_flags: InstallFlags,
) -> Result<(), AnyError> {
  match install_flags {
    InstallFlags::Local(local_flags) => {
      // catch `deno install <http-url>` attempts that need the `-g` flag
      if let InstallFlagsLocal::Add(add_flags) = &local_flags {
        check_if_installs_a_single_package_globally(Some(add_flags))?;
      }
      install_local(flags, local_flags).await
    }
    InstallFlags::Global(global_flags) => {
      install_global(flags, global_flags).await
    }
  }
}
/// Installs module(s) as named executable shims (`deno install -g`): caches
/// and type-checks each module, then writes a shim into the install root's
/// bin directory. Produces "did you mean" hints for arguments that look like
/// unprefixed jsr/npm package requirements.
async fn install_global(
  flags: Arc<Flags>,
  install_flags_global: InstallFlagsGlobal,
) -> Result<(), AnyError> {
  // ensure the module is cached
  let factory = CliFactory::from_flags(flags.clone());
  let cli_options = factory.cli_options()?;
  let http_client = factory.http_client_provider();
  let deps_http_cache = factory.global_http_cache()?;
  // fetcher used for the registry lookups backing the hints below;
  // ReloadAll so the hints reflect the live registries
  let deps_file_fetcher = create_cli_file_fetcher(
    Default::default(),
    deno_cache_dir::GlobalOrLocalHttpCache::Global(deps_http_cache.clone()),
    http_client.clone(),
    factory.memory_files().clone(),
    factory.sys(),
    CreateCliFileFetcherOptions {
      allow_remote: true,
      cache_setting: CacheSetting::ReloadAll,
      download_log_level: log::Level::Trace,
      progress_bar: None,
    },
  );
  let npmrc = factory.npmrc()?;
  let deps_file_fetcher = Arc::new(deps_file_fetcher);
  let jsr_resolver = Arc::new(JsrFetchResolver::new(
    deps_file_fetcher.clone(),
    factory.jsr_version_resolver()?.clone(),
  ));
  let npm_resolver = Arc::new(NpmFetchResolver::new(
    deps_file_fetcher.clone(),
    npmrc.clone(),
    factory.npm_version_resolver()?.clone(),
  ));

  // a discovered (not explicitly passed) config file is ignored for global
  // installs; warn so the user isn't surprised
  if matches!(flags.config_flag, ConfigFlag::Discover)
    && cli_options.workspace().deno_jsons().next().is_some()
  {
    log::warn!(
      "{} discovered config file will be ignored in the installed command. Use the --config flag if you wish to include it.",
      crate::colors::yellow("Warning")
    );
  }

  for (i, module_url) in install_flags_global.module_urls.iter().enumerate() {
    let entry_text = module_url;
    // hints only apply when the argument isn't an existing local file
    if !cli_options.initial_cwd().join(entry_text).exists() {
      // provide a helpful error message for users migrating from Deno < 3.0
      // (script arguments now require a `--` separator)
      if i == 1
        && install_flags_global.args.is_empty()
        && Url::parse(entry_text).is_err()
      {
        bail!(
          concat!(
            "{} is missing a prefix. Deno 3.0 requires `--` before script arguments in `deno install -g`. ",
            "Did you mean `deno install -g {} -- {}`? Or maybe provide a `jsr:` or `npm:` prefix?",
          ),
          entry_text,
          &install_flags_global.module_urls[0],
          install_flags_global.module_urls[1..].join(" "),
        )
      }
      // check for package requirement missing prefix
      if let Ok(Err(package_req)) =
        super::pm::AddRmPackageReq::parse(entry_text, None)
      {
        // scoped names are checked against jsr first, then npm
        if package_req.name.starts_with("@")
          && jsr_resolver
            .req_to_nv(&package_req)
            .await
            .ok()
            .flatten()
            .is_some()
        {
          bail!(
            "{entry_text} is missing a prefix. Did you mean `{}`?",
            crate::colors::yellow(format!("deno install -g jsr:{package_req}"))
          );
        } else if npm_resolver
          .req_to_nv(&package_req)
          .await
          .ok()
          .flatten()
          .is_some()
        {
          bail!(
            "{entry_text} is missing a prefix. Did you mean `{}`?",
            crate::colors::yellow(format!("deno install -g npm:{package_req}"))
          );
        }
      }
    }
    // cache and type-check the module before writing its shim
    factory
      .main_module_graph_container()
      .await?
      .load_and_type_check_files(
        std::slice::from_ref(module_url),
        CollectSpecifiersOptions {
          include_ignored_specified: true,
        },
      )
      .await?;
    let bin_name_resolver = factory.bin_name_resolver()?;
    // create the install shim
    create_install_shim(
      &bin_name_resolver,
      cli_options.initial_cwd(),
      &flags,
      &install_flags_global,
      module_url,
    )
    .await?;
  }
  Ok(())
}
/// Resolves the shim data for a global install, writes the executable shim
/// and its companion files, and warns when the bin dir is not on PATH.
async fn create_install_shim(
  bin_name_resolver: &BinNameResolver<'_>,
  cwd: &Path,
  flags: &Flags,
  install_flags_global: &InstallFlagsGlobal,
  module_url: &str,
) -> Result<(), AnyError> {
  let shim_data = resolve_shim_data(
    bin_name_resolver,
    cwd,
    flags,
    install_flags_global,
    module_url,
  )
  .await?;

  // ensure directory exists
  if let Ok(metadata) = fs::metadata(&shim_data.installation_dir) {
    if !metadata.is_dir() {
      return Err(anyhow!("Installation path is not a directory"));
    }
  } else {
    fs::create_dir_all(&shim_data.installation_dir)?;
  };

  // refuse to clobber an existing shim unless --force was given
  if shim_data.file_path.exists() && !install_flags_global.force {
    return Err(anyhow!(
      "Existing installation found. Aborting (Use -f to overwrite).",
    ));
  };

  generate_executable_file(&shim_data)?;
  // companion files (e.g. copied config / lockfile)
  for (path, contents) in shim_data.extra_files {
    fs::write(path, contents)?;
  }

  log::info!("✅ Successfully installed {}", shim_data.name);
  log::info!("{}", shim_data.file_path.display());
  if cfg!(windows) {
    // the extensionless sibling shim written for shell use
    let display_path = shim_data.file_path.with_extension("");
    log::info!("{} (shell)", display_path.display());
  }
  let installation_dir_str = shim_data.installation_dir.to_string_lossy();

  if !is_in_path(&shim_data.installation_dir) {
    log::info!("ℹ️ Add {} to PATH", installation_dir_str);
    if cfg!(windows) {
      log::info!(" set PATH=%PATH%;{}", installation_dir_str);
    } else {
      log::info!(" export PATH=\"{}:$PATH\"", installation_dir_str);
    }
  }
  Ok(())
}
/// Everything needed to materialize a global install: the shim's name and
/// destination, the `deno` arguments baked into it, and any companion files
/// to write alongside it.
struct ShimData {
  name: String,
  installation_dir: PathBuf,
  file_path: PathBuf,
  // arguments embedded into the generated shim script
  args: Vec<String>,
  // (path, contents) pairs written next to the shim (e.g. copied config)
  extra_files: Vec<(PathBuf, String)>,
}
async fn resolve_shim_data(
bin_name_resolver: &BinNameResolver<'_>,
cwd: &Path,
flags: &Flags,
install_flags_global: &InstallFlagsGlobal,
module_url: &str,
) -> Result<ShimData, AnyError> {
let installation_dir =
get_installer_bin_dir(cwd, install_flags_global.root.as_deref())?;
// Check if module_url is remote
let module_url = resolve_url_or_path(module_url, cwd)?;
let name = if install_flags_global.name.is_some() {
install_flags_global.name.clone()
} else {
bin_name_resolver.infer_name_from_url(&module_url).await
};
let name = match name {
Some(name) => name,
None => {
return Err(anyhow!(
"An executable name was not provided. One could not be inferred from the URL. Aborting.\n {} {}",
deno_runtime::colors::cyan("hint:"),
"provide one with the `--name` flag"
));
}
};
validate_name(name.as_str())?;
let mut file_path = installation_dir.join(&name);
if cfg!(windows) {
file_path = file_path.with_extension("cmd");
}
let mut extra_files: Vec<(PathBuf, String)> = vec![];
let mut executable_args = vec!["run".to_string()];
executable_args.extend_from_slice(&flags.to_permission_args());
if let Some(url) = flags.location.as_ref() {
executable_args.push("--location".to_string());
executable_args.push(url.to_string());
}
if let Some(CaData::File(ca_file)) = &flags.ca_data {
executable_args.push("--cert".to_string());
executable_args.push(ca_file.to_owned())
}
if let Some(log_level) = flags.log_level {
if log_level == Level::Error {
executable_args.push("--quiet".to_string());
} else {
executable_args.push("--log-level".to_string());
let log_level = match log_level {
Level::Debug => "debug",
Level::Info => "info",
_ => return Err(anyhow!(format!("invalid log level {log_level}"))),
};
executable_args.push(log_level.to_string());
}
}
// we should avoid a default branch here to ensure we continue to cover any
// changes to this flag.
match flags.type_check_mode {
TypeCheckMode::All => executable_args.push("--check=all".to_string()),
TypeCheckMode::None => {}
TypeCheckMode::Local => executable_args.push("--check".to_string()),
}
for feature in &flags.unstable_config.features {
executable_args.push(format!("--unstable-{}", feature));
}
if flags.no_remote {
executable_args.push("--no-remote".to_string());
}
if flags.no_npm {
executable_args.push("--no-npm".to_string());
}
if flags.cached_only {
executable_args.push("--cached-only".to_string());
}
if flags.frozen_lockfile.unwrap_or(false) {
executable_args.push("--frozen".to_string());
}
if resolve_no_prompt(&flags.permissions) {
executable_args.push("--no-prompt".to_string());
}
if !flags.v8_flags.is_empty() {
executable_args.push(format!("--v8-flags={}", flags.v8_flags.join(",")));
}
if let Some(seed) = flags.seed {
executable_args.push("--seed".to_string());
executable_args.push(seed.to_string());
}
if let Some(inspect) = flags.inspect {
executable_args.push(format!("--inspect={inspect}"));
}
if let Some(inspect_brk) = flags.inspect_brk {
executable_args.push(format!("--inspect-brk={inspect_brk}"));
}
if let Some(import_map_path) = &flags.import_map_path {
let import_map_url = resolve_url_or_path(import_map_path, cwd)?;
executable_args.push("--import-map".to_string());
executable_args.push(import_map_url.to_string());
}
if let ConfigFlag::Path(config_path) = &flags.config_flag {
let copy_path = get_hidden_file_with_ext(&file_path, "deno.json");
executable_args.push("--config".to_string());
executable_args.push(copy_path.to_str().unwrap().to_string());
let mut config_text = fs::read_to_string(config_path)
.with_context(|| format!("error reading {config_path}"))?;
// always remove the import map field because when someone specifies `--import-map` we
// don't want that file to be attempted to be loaded and when they don't specify that
// (which is just something we haven't implemented yet)
if let Some(new_text) = remove_import_map_field_from_text(&config_text) {
if flags.import_map_path.is_none() {
log::warn!(
"{} \"importMap\" field in the specified config file we be ignored. Use the --import-map flag instead.",
crate::colors::yellow("Warning"),
);
}
config_text = new_text;
}
extra_files.push((copy_path, config_text));
} else {
executable_args.push("--no-config".to_string());
}
if flags.no_lock {
executable_args.push("--no-lock".to_string());
} else if flags.lock.is_some()
// always use a lockfile for an npm entrypoint unless --no-lock
|| NpmPackageReqReference::from_specifier(&module_url).is_ok()
{
let copy_path = get_hidden_file_with_ext(&file_path, "lock.json");
executable_args.push("--lock".to_string());
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | true |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/tools/installer/bin_name_resolver.rs | cli/tools/installer/bin_name_resolver.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::path::PathBuf;
use deno_core::error::AnyError;
use deno_core::url::Url;
use deno_npm::registry::NpmRegistryApi;
use deno_npm::resolution::NpmPackageVersionResolver;
use deno_npm::resolution::NpmVersionResolver;
use deno_semver::npm::NpmPackageReqReference;
use crate::http_util::HttpClientProvider;
/// Infers a suggested executable ("bin") name for `deno install` from a
/// specifier URL, consulting the npm registry for npm specifiers.
///
/// All dependencies are borrowed for the resolver's lifetime.
pub struct BinNameResolver<'a> {
  http_client_provider: &'a HttpClientProvider,
  npm_registry_api: &'a dyn NpmRegistryApi,
  npm_version_resolver: &'a NpmVersionResolver,
}
impl<'a> BinNameResolver<'a> {
  /// Creates a resolver backed by the given HTTP client provider, npm
  /// registry API and npm version resolver.
  pub fn new(
    http_client_provider: &'a HttpClientProvider,
    npm_registry_api: &'a dyn NpmRegistryApi,
    npm_version_resolver: &'a NpmVersionResolver,
  ) -> Self {
    Self {
      http_client_provider,
      npm_registry_api,
      npm_version_resolver,
    }
  }

  /// Infers an executable name from `url`, or `None` when no reasonable
  /// name can be derived (e.g. a bare domain root, or a scoped npm package
  /// with no usable bin entry).
  pub async fn infer_name_from_url(&self, url: &Url) -> Option<String> {
    // If there's an absolute url with no path, eg. https://my-cli.com
    // perform a request, and see if it redirects another file instead.
    let mut url = url.clone();

    if matches!(url.scheme(), "http" | "https")
      && url.path() == "/"
      && let Ok(client) = self.http_client_provider.get_or_create()
      && let Ok(redirected_url) = client
        .get_redirected_url(url.clone(), &Default::default())
        .await
    {
      url = redirected_url;
    }

    if let Ok(npm_ref) = NpmPackageReqReference::from_specifier(&url) {
      // A single-segment sub path (e.g. `npm:cowsay@1.2/cowthink`) names the
      // bin directly.
      if let Some(sub_path) = npm_ref.sub_path()
        && !sub_path.contains('/')
      {
        return Some(sub_path.to_string());
      }
      // Otherwise ask the registry whether the package declares exactly one
      // bin entry; a failure here is non-fatal (warn and fall through).
      match self.resolve_name_from_npm(&npm_ref).await {
        Ok(Some(value)) => return Some(value),
        Ok(None) => {}
        Err(err) => {
          log::warn!(
            "{} Failed resolving npm specifier information. {:#}",
            deno_runtime::colors::yellow("Warning"),
            err
          );
        }
      }
      // Unscoped package name: use it as-is.
      if !npm_ref.req().name.contains('/') {
        return Some(npm_ref.into_inner().req.name.into_string());
      }
      // Scoped `@scope/cli` packages conventionally name the tool after the
      // scope (e.g. `npm:@slidev/cli` -> `slidev`).
      if let Some(scope_and_pkg) = npm_ref.req().name.strip_prefix('@')
        && let Some((scope, package)) = scope_and_pkg.split_once('/')
        && package == "cli"
      {
        return Some(scope.to_string());
      }
      return None;
    }

    // Non-npm specifier: derive the name from the (percent-decoded) path.
    let percent_decode =
      percent_encoding::percent_decode(url.path().as_bytes());
    #[cfg(unix)]
    let path = {
      use std::os::unix::prelude::OsStringExt;
      PathBuf::from(std::ffi::OsString::from_vec(
        percent_decode.collect::<Vec<u8>>(),
      ))
    };
    #[cfg(windows)]
    let path = PathBuf::from(percent_decode.decode_utf8_lossy().as_ref());

    let mut stem = path.file_stem()?.to_string_lossy();
    // Generic entrypoint names are replaced by the parent directory name
    // (e.g. `/abc/mod.ts` -> `abc`).
    if matches!(stem.as_ref(), "main" | "mod" | "index" | "cli")
      && let Some(parent_name) = path.parent().and_then(|p| p.file_name())
    {
      stem = parent_name.to_string_lossy();
    }

    // if atmark symbol appears in the index other than 0 (e.g. `foo@bar`) we use
    // the former part as the inferred name because the latter part is most likely
    // a version number.
    match stem.find('@') {
      Some(at_index) if at_index > 0 => {
        stem = stem.split_at(at_index).0.to_string().into();
      }
      _ => {}
    }

    Some(stem.to_string())
  }

  /// Fetches the package info for `npm_ref` from the registry and delegates
  /// to [`Self::resolve_name_from_npm_package_info`].
  async fn resolve_name_from_npm(
    &self,
    npm_ref: &NpmPackageReqReference,
  ) -> Result<Option<String>, AnyError> {
    let package_info = self
      .npm_registry_api
      .package_info(&npm_ref.req().name)
      .await?;
    let version_resolver =
      self.npm_version_resolver.get_for_package(&package_info);
    Ok(self.resolve_name_from_npm_package_info(&version_resolver, npm_ref))
  }

  /// Returns the bin name when the best-matching package version declares
  /// exactly one bin entry in map form; `None` otherwise.
  fn resolve_name_from_npm_package_info(
    &self,
    version_resolver: &NpmPackageVersionResolver,
    npm_ref: &NpmPackageReqReference,
  ) -> Option<String> {
    let version_info = version_resolver
      .resolve_best_package_version_info(
        &npm_ref.req().version_req,
        Vec::new().into_iter(),
      )
      .ok()?;
    let bin_entries = version_info.bin.as_ref()?;
    match bin_entries {
      // A plain string bin gives no independent name to infer.
      deno_npm::registry::NpmPackageVersionBinEntry::String(_) => {}
      deno_npm::registry::NpmPackageVersionBinEntry::Map(data) => {
        if data.len() == 1 {
          return Some(data.keys().next().unwrap().clone());
        }
      }
    }
    None
  }
}
#[cfg(test)]
mod test {
  use std::collections::HashMap;

  use deno_core::url::Url;
  use deno_npm::registry::TestNpmRegistryApi;
  use deno_npm::resolution::NpmVersionResolver;

  use super::BinNameResolver;
  use crate::http_util::HttpClientProvider;

  /// Builds a `BinNameResolver` against a test npm registry (seeded with a
  /// single `@google/gemini-cli` package exposing a `gemini` bin entry) and
  /// infers a name for `url`.
  async fn infer_name_from_url(url: &Url) -> Option<String> {
    let _ = rustls::crypto::aws_lc_rs::default_provider().install_default();
    let http_client = HttpClientProvider::new(None, None);
    let registry_api = TestNpmRegistryApi::default();
    registry_api.with_version_info(("@google/gemini-cli", "1.0.0"), |info| {
      info.bin = Some(deno_npm::registry::NpmPackageVersionBinEntry::Map(
        HashMap::from([("gemini".to_string(), "./bin.js".to_string())]),
      ))
    });
    let npm_version_resolver = NpmVersionResolver::default();
    // NOTE: fixed mojibake here — `&registry_api` had been corrupted to
    // `®istry_api` (an HTML-entity mangling of `&reg`), which does not
    // compile.
    let resolver =
      BinNameResolver::new(&http_client, &registry_api, &npm_version_resolver);
    resolver.infer_name_from_url(url).await
  }

  #[tokio::test]
  async fn install_infer_name_from_url() {
    // http(s) specifiers: file stem, with generic entrypoint names replaced
    // by the parent directory name.
    assert_eq!(infer_name_from_url(&Url::parse("https://example.com/abc/server.ts").unwrap()).await, Some("server".to_string()));
    assert_eq!(infer_name_from_url(&Url::parse("https://example.com/abc/main.ts").unwrap()).await, Some("abc".to_string()));
    assert_eq!(infer_name_from_url(&Url::parse("https://example.com/abc/mod.ts").unwrap()).await, Some("abc".to_string()));
    assert_eq!(infer_name_from_url(&Url::parse("https://example.com/ab%20c/mod.ts").unwrap()).await, Some("ab c".to_string()));
    assert_eq!(infer_name_from_url(&Url::parse("https://example.com/abc/index.ts").unwrap()).await, Some("abc".to_string()));
    assert_eq!(infer_name_from_url(&Url::parse("https://example.com/abc/cli.ts").unwrap()).await, Some("abc".to_string()));
    assert_eq!(infer_name_from_url(&Url::parse("https://example.com/main.ts").unwrap()).await, Some("main".to_string()));
    assert_eq!(infer_name_from_url(&Url::parse("https://example.com").unwrap()).await, None);
    // file specifiers behave the same way.
    assert_eq!(infer_name_from_url(&Url::parse("file:///abc/server.ts").unwrap()).await, Some("server".to_string()));
    assert_eq!(infer_name_from_url(&Url::parse("file:///abc/main.ts").unwrap()).await, Some("abc".to_string()));
    assert_eq!(infer_name_from_url(&Url::parse("file:///ab%20c/main.ts").unwrap()).await, Some("ab c".to_string()));
    assert_eq!(infer_name_from_url(&Url::parse("file:///main.ts").unwrap()).await, Some("main".to_string()));
    assert_eq!(infer_name_from_url(&Url::parse("file:///").unwrap()).await, None);
    // `name@version` stems are truncated at the first interior `@`.
    assert_eq!(infer_name_from_url(&Url::parse("https://example.com/abc@0.1.0").unwrap()).await, Some("abc".to_string()));
    assert_eq!(infer_name_from_url(&Url::parse("https://example.com/abc@0.1.0/main.ts").unwrap()).await, Some("abc".to_string()));
    assert_eq!(infer_name_from_url(&Url::parse("https://example.com/abc@def@ghi").unwrap()).await, Some("abc".to_string()));
    // A leading `@` is kept (the `@` is at index 0).
    assert_eq!(infer_name_from_url(&Url::parse("https://example.com/@abc.ts").unwrap()).await, Some("@abc".to_string()));
    assert_eq!(infer_name_from_url(&Url::parse("https://example.com/@abc/mod.ts").unwrap()).await, Some("@abc".to_string()));
    assert_eq!(infer_name_from_url(&Url::parse("file:///@abc.ts").unwrap()).await, Some("@abc".to_string()));
    assert_eq!(infer_name_from_url(&Url::parse("file:///@abc/cli.ts").unwrap()).await, Some("@abc".to_string()));
    // npm specifiers: single-segment sub path wins, then package name rules.
    assert_eq!(infer_name_from_url(&Url::parse("npm:cowsay@1.2/cowthink").unwrap()).await, Some("cowthink".to_string()));
    assert_eq!(infer_name_from_url(&Url::parse("npm:cowsay@1.2/cowthink/test").unwrap()).await, Some("cowsay".to_string()));
    assert_eq!(infer_name_from_url(&Url::parse("npm:cowsay@1.2").unwrap()).await, Some("cowsay".to_string()));
    assert_eq!(infer_name_from_url(&Url::parse("npm:@types/node@1.2").unwrap()).await, None);
    assert_eq!(infer_name_from_url(&Url::parse("npm:@slidev/cli@1.2").unwrap()).await, Some("slidev".to_string()));
    // Resolved via the registry's single bin entry.
    assert_eq!(infer_name_from_url(&Url::parse("npm:@google/gemini-cli").unwrap()).await, Some("gemini".to_string()));
  }
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/bench/lsp.rs | cli/bench/lsp.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::collections::HashMap;
use std::path::Path;
use std::str::FromStr;
use std::time::Duration;
use deno_core::serde::Deserialize;
use deno_core::serde_json;
use deno_core::serde_json::Value;
use deno_core::serde_json::json;
use lsp_types::Uri;
use test_util::PathRef;
use test_util::lsp::LspClientBuilder;
use tower_lsp::lsp_types as lsp;
static FIXTURE_CODE_LENS_TS: &str = include_str!("testdata/code_lens.ts");
static FIXTURE_DB_TS: &str = include_str!("testdata/db.ts");
static FIXTURE_DB_MESSAGES: &[u8] = include_bytes!("testdata/db_messages.json");
static FIXTURE_DECO_APPS: &[u8] =
include_bytes!("testdata/deco_apps_requests.json");
/// Kind of a recorded LSP message in the fixture data; determines which
/// request/notification is replayed in `bench_big_file_edits`.
#[derive(Debug, Deserialize)]
enum FixtureType {
  #[serde(rename = "action")]
  Action,
  #[serde(rename = "change")]
  Change,
  #[serde(rename = "completion")]
  Completion,
  #[serde(rename = "highlight")]
  Highlight,
  #[serde(rename = "hover")]
  Hover,
}
/// One recorded LSP message: its kind plus the raw JSON params to replay.
#[derive(Debug, Deserialize)]
struct FixtureMessage {
  #[serde(rename = "type")]
  fixture_type: FixtureType,
  params: Value,
}
/// replaces the root directory in the URIs of the requests
/// with the given root path
fn patch_uris<'a>(
  reqs: impl IntoIterator<Item = &'a mut tower_lsp::jsonrpc::Request>,
  root: &PathRef,
) {
  for req in reqs {
    let mut params = req.params().unwrap().clone();
    // Only messages carrying a `textDocument` param have a URI to rewrite;
    // everything else is left untouched.
    let new_req = if let Some(doc) = params.get_mut("textDocument") {
      if let Some(uri_val) = doc.get_mut("uri") {
        let uri = uri_val.as_str().unwrap();
        *uri_val =
          Value::from(uri.replace(
            "file:///",
            &format!("file://{}/", root.to_string_lossy()),
          ));
      }
      // `Request` is immutable, so rebuild one with the patched params while
      // preserving the original method and (optional) id.
      let builder = tower_lsp::jsonrpc::Request::build(req.method().to_owned());
      let builder = if let Some(id) = req.id() {
        builder.id(id.clone())
      } else {
        builder
      };
      Some(builder.params(params).finish())
    } else {
      None
    };
    if let Some(new_req) = new_req {
      *req = new_req.request;
    }
  }
}
/// Replays a recorded session of edits and navigation requests against the
/// deco-cx/apps test project and returns the wall time spent answering them.
fn bench_deco_apps_edits(deno_exe: &Path) -> Duration {
  let mut requests: Vec<tower_lsp::jsonrpc::Request> =
    serde_json::from_slice(FIXTURE_DECO_APPS).unwrap();
  let apps =
    test_util::root_path().join("cli/bench/testdata/lsp_benchdata/apps");

  // it's a bit wasteful to do this for every run, but it's the easiest with the way things
  // are currently structured
  patch_uris(&mut requests, &apps);

  let mut client = LspClientBuilder::new()
    .set_root_dir(apps.clone())
    .deno_exe(deno_exe)
    .build();
  client.initialize(|c| {
    c.set_workspace_folders(vec![lsp_types::WorkspaceFolder {
      uri: apps.uri_dir(),
      name: "apps".to_string(),
    }]);
    c.set_deno_enable(true);
    c.set_unstable(true);
    c.set_preload_limit(1000);
    c.set_config(apps.join("deno.json").as_path().to_string_lossy());
  });

  let start = std::time::Instant::now();
  let mut reqs = 0;
  for req in requests {
    // Messages without an id are notifications and get no response.
    if req.id().is_none() {
      client.write_notification(req.method(), req.params());
    } else {
      reqs += 1;
      client.write_jsonrpc(req.method(), req.params());
    }
  }
  // Drain one response per request written above before stopping the clock.
  for _ in 0..reqs {
    let _ = client.read_latest_response();
  }
  let end = start.elapsed();

  // part of the motivation of including this benchmark is to see how we perform
  // with a fairly large number of documents in memory.
  // make sure that's the case
  let res = client.write_request(
    "deno/virtualTextDocument",
    json!({
      "textDocument": {
        "uri": "deno:/status.md"
      }
    }),
  );

  let open_re = lazy_regex::regex!(r"Open: (\d+)");
  let server_re = lazy_regex::regex!(r"Server: (\d+)");
  let res = res.as_str().unwrap().to_string();
  assert!(res.starts_with("# Deno Language Server Status"));
  // Parse the open/server document counts out of the status markdown.
  let open_count = open_re
    .captures(&res)
    .unwrap()
    .get(1)
    .unwrap()
    .as_str()
    .parse::<usize>()
    .unwrap();
  let server_count = server_re
    .captures(&res)
    .unwrap()
    .get(1)
    .unwrap()
    .as_str()
    .parse::<usize>()
    .unwrap();
  let count = open_count + server_count;
  assert!(count > 1000, "count: {}", count);

  client.shutdown();
  end
}
/// A benchmark that opens a 8000+ line TypeScript document, adds a function to
/// the end of the document and does a level of hovering and gets quick fix
/// code actions.
fn bench_big_file_edits(deno_exe: &Path) -> Duration {
  let mut client = LspClientBuilder::new().deno_exe(deno_exe).build();
  client.initialize_default();
  // Each config refresh is acknowledged with this notification.
  let (method, _): (String, Option<Value>) = client.read_notification();
  assert_eq!(method, "deno/didRefreshDenoConfigurationTree");
  client.change_configuration(json!({ "deno": { "enable": true } }));
  let (method, _): (String, Option<Value>) = client.read_notification();
  assert_eq!(method, "deno/didRefreshDenoConfigurationTree");

  client.write_notification(
    "textDocument/didOpen",
    json!({
      "textDocument": {
        "uri": "file:///testdata/db.ts",
        "languageId": "typescript",
        "version": 1,
        "text": FIXTURE_DB_TS
      }
    }),
  );

  client.diagnostic("file:///testdata/db.ts");

  // Replay the recorded message sequence against the open document.
  let messages: Vec<FixtureMessage> =
    serde_json::from_slice(FIXTURE_DB_MESSAGES).unwrap();

  for msg in messages {
    match msg.fixture_type {
      FixtureType::Action => {
        client.write_request("textDocument/codeAction", msg.params);
      }
      FixtureType::Change => {
        client.write_notification("textDocument/didChange", msg.params);
      }
      FixtureType::Completion => {
        client.write_request("textDocument/completion", msg.params);
      }
      FixtureType::Highlight => {
        client.write_request("textDocument/documentHighlight", msg.params);
      }
      FixtureType::Hover => {
        client.write_request("textDocument/hover", msg.params);
      }
    }
  }

  client.write_request("shutdown", json!(null));
  client.write_notification("exit", json!(null));

  client.duration()
}
/// Opens a fixture document with all code lens kinds enabled, requests its
/// code lenses and resolves each one; returns the client's total duration.
fn bench_code_lens(deno_exe: &Path) -> Duration {
  let mut client = LspClientBuilder::new().deno_exe(deno_exe).build();
  client.initialize_default();
  let (method, _): (String, Option<Value>) = client.read_notification();
  assert_eq!(method, "deno/didRefreshDenoConfigurationTree");
  client.change_configuration(json!({ "deno": {
    "enable": true,
    "codeLens": {
      "implementations": true,
      "references": true,
      "test": true,
    },
  } }));
  let (method, _): (String, Option<Value>) = client.read_notification();
  assert_eq!(method, "deno/didRefreshDenoConfigurationTree");

  client.write_notification(
    "textDocument/didOpen",
    json!({
      "textDocument": {
        "uri": "file:///testdata/code_lens.ts",
        "languageId": "typescript",
        "version": 1,
        "text": FIXTURE_CODE_LENS_TS
      }
    }),
  );

  let res = client.write_request_with_res_as::<Vec<lsp::CodeLens>>(
    "textDocument/codeLens",
    json!({
      "textDocument": {
        "uri": "file:///testdata/code_lens.ts"
      }
    }),
  );
  assert!(!res.is_empty());

  // Resolving each lens is part of the measured workload.
  for code_lens in res {
    client.write_request("codeLens/resolve", code_lens);
  }
  client.duration()
}
/// Opens ten small documents, applies an identical single-range edit to each,
/// then formats each one; returns the client's total duration.
fn bench_find_replace(deno_exe: &Path) -> Duration {
  let mut client = LspClientBuilder::new().deno_exe(deno_exe).build();
  client.initialize_default();
  let (method, _): (String, Option<Value>) = client.read_notification();
  assert_eq!(method, "deno/didRefreshDenoConfigurationTree");
  client.change_configuration(json!({ "deno": { "enable": true } }));
  let (method, _): (String, Option<Value>) = client.read_notification();
  assert_eq!(method, "deno/didRefreshDenoConfigurationTree");

  for i in 0..10 {
    client.write_notification(
      "textDocument/didOpen",
      json!({
        "textDocument": {
          "uri": format!("file:///a/file_{i}.ts"),
          "languageId": "typescript",
          "version": 1,
          "text": "console.log(\"000\");\n"
        }
      }),
    );
  }

  // Replace the `000` literal with `111` in every document.
  for i in 0..10 {
    let file_name = format!("file:///a/file_{i}.ts");
    client.write_notification(
      "textDocument/didChange",
      lsp::DidChangeTextDocumentParams {
        text_document: lsp::VersionedTextDocumentIdentifier {
          uri: Uri::from_str(&file_name).unwrap(),
          version: 2,
        },
        content_changes: vec![lsp::TextDocumentContentChangeEvent {
          range: Some(lsp::Range {
            start: lsp::Position {
              line: 0,
              character: 13,
            },
            end: lsp::Position {
              line: 0,
              character: 16,
            },
          }),
          range_length: None,
          text: "111".to_string(),
        }],
      },
    );
  }

  for i in 0..10 {
    let file_name = format!("file:///a/file_{i}.ts");
    client.write_request(
      "textDocument/formatting",
      lsp::DocumentFormattingParams {
        text_document: lsp::TextDocumentIdentifier {
          uri: Uri::from_str(&file_name).unwrap(),
        },
        options: lsp::FormattingOptions {
          tab_size: 2,
          insert_spaces: true,
          ..Default::default()
        },
        work_done_progress_params: Default::default(),
      },
    );
  }

  client.write_request("shutdown", json!(null));
  client.write_notification("exit", json!(null));

  client.duration()
}
/// A test that starts up the LSP, opens a single line document, and exits.
fn bench_startup_shutdown(deno_exe: &Path) -> Duration {
  let mut client = LspClientBuilder::new().deno_exe(deno_exe).build();
  client.initialize_default();
  let (method, _): (String, Option<Value>) = client.read_notification();
  assert_eq!(method, "deno/didRefreshDenoConfigurationTree");
  client.change_configuration(json!({ "deno": { "enable": true } }));
  let (method, _): (String, Option<Value>) = client.read_notification();
  assert_eq!(method, "deno/didRefreshDenoConfigurationTree");

  client.write_notification(
    "textDocument/didOpen",
    json!({
      "textDocument": {
        "uri": "file:///a/file.ts",
        "languageId": "typescript",
        "version": 1,
        "text": "console.log(Deno.args);\n"
      }
    }),
  );

  client.write_request("shutdown", json!(null));
  client.write_notification("exit", json!(null));

  client.duration()
}
/// Generate benchmarks for the LSP server.
///
/// Returns a map from benchmark key to the mean execution time in
/// milliseconds across that benchmark's runs.
pub fn benchmarks(deno_exe: &Path) -> HashMap<String, i64> {
  println!("-> Start benchmarking lsp");
  let mut exec_times = HashMap::new();

  // Each entry previously duplicated the same run/mean/print/insert stanza;
  // `run_lsp_bench` factors that out.
  run_lsp_bench(&mut exec_times, "startup_shutdown", " - Simple Startup/Shutdown ", 10, || bench_startup_shutdown(deno_exe));
  run_lsp_bench(&mut exec_times, "big_file_edits", " - Big Document/Several Edits ", 5, || bench_big_file_edits(deno_exe));
  run_lsp_bench(&mut exec_times, "find_replace", " - Find/Replace", 10, || bench_find_replace(deno_exe));
  run_lsp_bench(&mut exec_times, "code_lens", " - Code Lens", 10, || bench_code_lens(deno_exe));
  run_lsp_bench(&mut exec_times, "deco_apps_edits_nav", " - deco-cx/apps Multiple Edits + Navigation", 5, || bench_deco_apps_edits(deno_exe));

  println!("<- End benchmarking lsp");

  exec_times
}

/// Prints `label`, runs `bench` `runs` times, prints the mean wall time and
/// records it (in milliseconds) under `key` in `exec_times`.
fn run_lsp_bench(
  exec_times: &mut HashMap<String, i64>,
  key: &str,
  label: &str,
  runs: usize,
  mut bench: impl FnMut() -> Duration,
) {
  println!("{label}");
  let mut times = Vec::with_capacity(runs);
  for _ in 0..runs {
    times.push(bench());
  }
  let mean =
    (times.iter().sum::<Duration>() / times.len() as u32).as_millis() as i64;
  println!(" ({} runs, mean: {}ms)", times.len(), mean);
  exec_times.insert(key.to_string(), mean);
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/bench/main.rs | cli/bench/main.rs | // Copyright 2018-2025 the Deno authors. MIT license.
#![allow(clippy::print_stdout)]
#![allow(clippy::print_stderr)]
use std::collections::HashMap;
use std::convert::From;
use std::env;
use std::fs;
use std::path::Path;
use std::path::PathBuf;
use std::process::Command;
use std::process::Stdio;
use std::time::SystemTime;
use deno_core::error::AnyError;
use deno_core::serde_json;
use deno_core::serde_json::Value;
use test_util::PathRef;
mod lsp;
/// Opens `filename` and deserializes its contents into a JSON `Value`.
fn read_json(filename: &Path) -> Result<Value> {
  let file = fs::File::open(filename)?;
  let value = serde_json::from_reader(file)?;
  Ok(value)
}
/// Serializes `value` as JSON into `filename`, creating/truncating the file.
fn write_json(filename: &Path, value: &Value) -> Result<()> {
  let file = fs::File::create(filename)?;
  serde_json::to_writer(file, value)?;
  Ok(())
}
/// The list of the tuples of the benchmark name, arguments and return code
/// (`None` means the run is expected to succeed with exit code 0).
const EXEC_TIME_BENCHMARKS: &[(&str, &[&str], Option<i32>)] = &[
  // we need to run the cold_* benchmarks before the _warm_ ones as they ensure
  // the cache is properly populated, instead of other tests possibly
  // invalidating that cache.
  (
    "cold_hello",
    &["run", "--reload", "tests/testdata/run/002_hello.ts"],
    None,
  ),
  (
    "cold_relative_import",
    &[
      "run",
      "--reload",
      "tests/testdata/run/003_relative_import.ts",
    ],
    None,
  ),
  ("hello", &["run", "tests/testdata/run/002_hello.ts"], None),
  (
    "relative_import",
    &["run", "tests/testdata/run/003_relative_import.ts"],
    None,
  ),
  (
    // intentionally errors: exit code 1 is asserted
    "error_001",
    &["run", "tests/testdata/run/error_001.ts"],
    Some(1),
  ),
  (
    "no_check_hello",
    &[
      "run",
      "--reload",
      "--no-check",
      "tests/testdata/run/002_hello.ts",
    ],
    None,
  ),
  (
    "workers_startup",
    &[
      "run",
      "--allow-read",
      "tests/testdata/workers/bench_startup.ts",
    ],
    None,
  ),
  (
    "workers_round_robin",
    &[
      "run",
      "--allow-read",
      "tests/testdata/workers/bench_round_robin.ts",
    ],
    None,
  ),
  (
    "workers_large_message",
    &[
      "run",
      "--allow-read",
      "tests/testdata/workers/bench_large_message.ts",
    ],
    None,
  ),
  (
    "text_decoder",
    &["run", "tests/testdata/benches/text_decoder_perf.js"],
    None,
  ),
  (
    "text_encoder",
    &["run", "tests/testdata/benches/text_encoder_perf.js"],
    None,
  ),
  (
    "text_encoder_into",
    &["run", "tests/testdata/benches/text_encoder_into_perf.js"],
    None,
  ),
  (
    "response_string",
    &["run", "tests/testdata/benches/response_string_perf.js"],
    None,
  ),
  // TODO(bartlomieju): temporarily disabled, because we can't upgrade `tests/util/std` submodule
  // due to needing it to be published.
  // (
  //   "check",
  //   &[
  //     "check",
  //     "--reload",
  //     "--unstable",
  //     "--config",
  //     "tests/config/deno.json",
  //     "tests/util/std/http/file_server_test.ts",
  //   ],
  //   None,
  // ),
  // (
  //   "no_check",
  //   &[
  //     "cache",
  //     "--reload",
  //     "--no-check",
  //     "--unstable",
  //     "--config",
  //     "tests/config/deno.json",
  //     "tests/util/std/http/file_server_test.ts",
  //   ],
  //   None,
  // ),
];
// The hyperfine result fields retained per benchmark in `run_exec_time`.
const RESULT_KEYS: &[&str] =
  &["mean", "stddev", "user", "system", "min", "max"];
/// Runs every entry of `EXEC_TIME_BENCHMARKS` under `hyperfine` and returns,
/// per benchmark name, the timing statistics listed in `RESULT_KEYS`
/// (parsed from hyperfine's `--export-json` output).
fn run_exec_time(
  deno_exe: &Path,
  target_dir: &PathRef,
) -> Result<HashMap<String, HashMap<String, f64>>> {
  let hyperfine_exe = test_util::prebuilt_tool_path("hyperfine").to_string();

  let benchmark_file = target_dir.join("hyperfine_results.json");
  let benchmark_file_str = benchmark_file.to_string();

  let mut command = [
    hyperfine_exe.as_str(),
    "--export-json",
    benchmark_file_str.as_str(),
    "--warmup",
    "3",
  ]
  .iter()
  .map(|s| s.to_string())
  .collect::<Vec<_>>();

  for (_, args, return_code) in EXEC_TIME_BENCHMARKS {
    let ret_code_test = if let Some(code) = return_code {
      // Bash test which asserts the return code value of the previous command
      // $? contains the return code of the previous command
      format!("; test $? -eq {code}")
    } else {
      "".to_string()
    };
    command.push(format!(
      "{} {} {}",
      deno_exe.to_str().unwrap(),
      args.join(" "),
      ret_code_test
    ));
  }

  test_util::run(
    &command.iter().map(|s| s.as_ref()).collect::<Vec<_>>(),
    None,
    None,
    None,
    true,
  );

  let mut results = HashMap::<String, HashMap<String, f64>>::new();
  let hyperfine_results = read_json(benchmark_file.as_path())?;
  // hyperfine reports results in the same order the commands were passed, so
  // zip them back up with the benchmark definitions.
  for ((name, _, _), data) in EXEC_TIME_BENCHMARKS.iter().zip(
    hyperfine_results
      .as_object()
      .unwrap()
      .get("results")
      .unwrap()
      .as_array()
      .unwrap(),
  ) {
    let data = data.as_object().unwrap().clone();
    results.insert(
      name.to_string(),
      data
        .into_iter()
        .filter(|(key, _)| RESULT_KEYS.contains(&key.as_str()))
        .map(|(key, val)| (key, val.as_f64().unwrap()))
        .collect(),
    );
  }

  Ok(results)
}
/// Sums the sizes of `deps/{prefix}*.rlib` artifacts under `target_dir`,
/// counting only one file per crate name (the part of the file name before
/// the first `-`, i.e. ignoring cargo's metadata-hash suffix).
///
/// # Panics
///
/// Panics if the `deps` directory cannot be read, an entry's name is not
/// valid UTF-8, or no matching rlib is found (total size 0).
fn rlib_size(target_dir: &std::path::Path, prefix: &str) -> i64 {
  let mut size = 0;
  let mut seen = std::collections::HashSet::new();
  for entry in std::fs::read_dir(target_dir.join("deps")).unwrap() {
    let entry = entry.unwrap();
    let os_str = entry.file_name();
    let name = os_str.to_str().unwrap();
    if !(name.starts_with(prefix) && name.ends_with(".rlib")) {
      continue;
    }
    // Crate name without the trailing `-<metadata hash>.rlib` part.
    let crate_name = name.split('-').next().unwrap().to_string();
    // `HashSet::insert` returns false when the crate was already counted,
    // which replaces the previous `contains` + `insert` double lookup.
    if seen.insert(crate_name) {
      size += entry.metadata().unwrap().len();
      println!("check size {name} {size}");
    } else {
      println!("skip {name}");
    }
  }
  assert!(size > 0);
  size as i64
}
// Snapshot artifacts searched for in the build tree by `get_binary_sizes`.
const BINARY_TARGET_FILES: &[&str] = &[
  "CLI_SNAPSHOT.bin",
  "RUNTIME_SNAPSHOT.bin",
  "COMPILER_SNAPSHOT.bin",
];
/// Collects size measurements (bytes) for the deno executable, the swc and
/// v8 rlibs, and any snapshot files found in the build tree.
fn get_binary_sizes(target_dir: &Path) -> Result<HashMap<String, i64>> {
  let mut sizes = HashMap::<String, i64>::new();
  // Tracks the modification time of each snapshot file recorded so far so
  // that only the most recent copy wins.
  let mut mtimes = HashMap::<String, SystemTime>::new();

  sizes.insert(
    "deno".to_string(),
    test_util::deno_exe_path().as_path().metadata()?.len() as i64,
  );

  // add up size for everything in target/release/deps/libswc*
  let swc_size = rlib_size(target_dir, "libswc");
  println!("swc {swc_size} bytes");
  sizes.insert("swc_rlib".to_string(), swc_size);

  let v8_size = rlib_size(target_dir, "libv8");
  println!("v8 {v8_size} bytes");
  sizes.insert("rusty_v8_rlib".to_string(), v8_size);

  // Because cargo's OUT_DIR is not predictable, search the build tree for
  // snapshot related files.
  for file in walkdir::WalkDir::new(target_dir) {
    let file = match file {
      Ok(file) => file,
      Err(_) => continue,
    };
    let filename = file.file_name().to_str().unwrap().to_string();

    if !BINARY_TARGET_FILES.contains(&filename.as_str()) {
      continue;
    }

    let meta = file.metadata()?;
    let file_mtime = meta.modified()?;

    // If multiple copies of a file are found, use the most recent one.
    if let Some(stored_mtime) = mtimes.get(&filename)
      && *stored_mtime > file_mtime
    {
      continue;
    }

    mtimes.insert(filename.clone(), file_mtime);
    sizes.insert(filename, meta.len() as i64);
  }

  Ok(sizes)
}
/// Runs each exec-time benchmark under `time -v` and records the memory
/// figure parsed from its stderr by `test_util::parse_max_mem` (presumably
/// the max resident set size in bytes — confirm against `parse_max_mem`).
fn run_max_mem_benchmark(deno_exe: &Path) -> Result<HashMap<String, i64>> {
  let mut results = HashMap::<String, i64>::new();

  for (name, args, return_code) in EXEC_TIME_BENCHMARKS {
    let proc = Command::new("time")
      .args(["-v", deno_exe.to_str().unwrap()])
      .args(args.iter())
      .stdout(Stdio::null())
      .stderr(Stdio::piped())
      .spawn()?;

    let proc_result = proc.wait_with_output()?;
    // Benchmarks that are expected to fail must fail with exactly this code.
    if let Some(code) = return_code {
      assert_eq!(proc_result.status.code().unwrap(), *code);
    }
    let out = String::from_utf8(proc_result.stderr)?;

    results.insert(
      name.to_string(),
      test_util::parse_max_mem(&out).unwrap() as i64,
    );
  }

  Ok(results)
}
/// Counts the `[[package]]` entries in the repository's `Cargo.lock`,
/// i.e. the total number of crates in the dependency graph.
fn cargo_deps() -> usize {
  use std::io::BufRead;
  let cargo_lock = test_util::root_path().join("Cargo.lock");
  let file = std::fs::File::open(cargo_lock).unwrap();
  let count = std::io::BufReader::new(file)
    .lines()
    .filter(|line| line.as_ref().unwrap().starts_with("[[package]]"))
    .count();
  println!("cargo_deps {count}");
  assert!(count > 10); // Sanity check.
  count
}
// TODO(@littledivy): Remove this, denoland/benchmark_data is deprecated.
/// Aggregated results for one benchmark run, serialized to `bench.json`.
#[derive(Default, serde::Serialize)]
struct BenchResult {
  // RFC 3339 timestamp of the run.
  created_at: String,
  // Git commit the benchmarks were run against.
  sha1: String,

  // TODO(ry) The "benchmark" benchmark should actually be called "exec_time".
  // When this is changed, the historical data in gh-pages branch needs to be
  // changed too.
  benchmark: HashMap<String, HashMap<String, f64>>,
  binary_size: HashMap<String, i64>,
  bundle_size: HashMap<String, i64>,
  cargo_deps: usize,
  // TODO(bartlomieju): remove
  max_latency: HashMap<String, f64>,
  max_memory: HashMap<String, i64>,
  lsp_exec_time: HashMap<String, i64>,
  // TODO(bartlomieju): remove
  req_per_sec: HashMap<String, i64>,
  syscall_count: HashMap<String, i64>,
  thread_count: HashMap<String, i64>,
}
/// Entry point: runs the benchmark suites selected on the command line
/// (either a single suite name or `--bench` for all) and writes the
/// aggregated results to `<target_dir>/bench.json`.
#[tokio::main]
async fn main() -> Result<()> {
  let mut args = env::args();

  let mut benchmarks = vec![
    "exec_time",
    "binary_size",
    "cargo_deps",
    "lsp",
    "strace",
    "mem_usage",
  ];

  let mut found_bench = false;
  // First CLI arg is either a suite-name filter or the `--bench` flag.
  let filter = args.nth(1);
  if let Some(filter) = filter {
    if filter != "--bench" {
      benchmarks.retain(|s| s == &filter);
    } else {
      found_bench = true;
    }
  }

  // Without `--bench` anywhere in the args, do nothing (e.g. plain
  // `cargo test` invocations).
  if !found_bench && !args.any(|s| s == "--bench") {
    return Ok(());
  }

  println!("Starting Deno benchmark");

  let target_dir = test_util::target_dir();
  // Allow overriding the deno executable under test via env var.
  let deno_exe = if let Ok(p) = std::env::var("DENO_BENCH_EXE") {
    PathBuf::from(p)
  } else {
    test_util::deno_exe_path().to_path_buf()
  };
  env::set_current_dir(test_util::root_path())?;

  let mut new_data = BenchResult {
    created_at: chrono::Utc::now()
      .to_rfc3339_opts(chrono::SecondsFormat::Secs, true),
    sha1: test_util::run_collect(
      &["git", "rev-parse", "HEAD"],
      None,
      None,
      None,
      true,
    )
    .0
    .trim()
    .to_string(),
    ..Default::default()
  };

  if benchmarks.contains(&"exec_time") {
    let exec_times = run_exec_time(&deno_exe, &target_dir)?;
    new_data.benchmark = exec_times;
  }

  if benchmarks.contains(&"binary_size") {
    let binary_sizes = get_binary_sizes(target_dir.as_path())?;
    new_data.binary_size = binary_sizes;
  }

  if benchmarks.contains(&"cargo_deps") {
    let cargo_deps = cargo_deps();
    new_data.cargo_deps = cargo_deps;
  }

  if benchmarks.contains(&"lsp") {
    let lsp_exec_times = lsp::benchmarks(&deno_exe);
    new_data.lsp_exec_time = lsp_exec_times;
  }

  // strace is Linux-only.
  if cfg!(target_os = "linux") && benchmarks.contains(&"strace") {
    use std::io::Read;

    let mut thread_count = HashMap::<String, i64>::new();
    let mut syscall_count = HashMap::<String, i64>::new();

    for (name, args, expected_exit_code) in EXEC_TIME_BENCHMARKS {
      let mut file = tempfile::NamedTempFile::new()?;

      let exit_status = Command::new("strace")
        .args([
          "-c",
          "-f",
          "-o",
          file.path().to_str().unwrap(),
          deno_exe.to_str().unwrap(),
        ])
        .args(args.iter())
        .stdout(Stdio::null())
        // Force C locale so strace's numeric output parses predictably.
        .env("LC_NUMERIC", "C")
        .spawn()?
        .wait()?;
      let expected_exit_code = expected_exit_code.unwrap_or(0);
      assert_eq!(exit_status.code(), Some(expected_exit_code));

      let mut output = String::new();
      file.as_file_mut().read_to_string(&mut output)?;

      let strace_result = test_util::parse_strace_output(&output);
      // Thread count = clone (or clone3) syscalls + 1 for the main thread.
      let clone =
        strace_result
          .get("clone")
          .map(|d| d.calls)
          .unwrap_or_else(|| {
            strace_result.get("clone3").map(|d| d.calls).unwrap_or(0)
          })
          + 1;
      let total = strace_result.get("total").unwrap().calls;
      thread_count.insert(name.to_string(), clone as i64);
      syscall_count.insert(name.to_string(), total as i64);
    }

    new_data.thread_count = thread_count;
    new_data.syscall_count = syscall_count;
  }

  if benchmarks.contains(&"mem_usage") {
    let max_memory = run_max_mem_benchmark(&deno_exe)?;
    new_data.max_memory = max_memory;
  }

  write_json(
    target_dir.join("bench.json").as_path(),
    &serde_json::to_value(&new_data)?,
  )?;

  Ok(())
}
/// Convenience alias used throughout this benchmark binary.
pub type Result<T> = std::result::Result<T, AnyError>;
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/bench/lsp_bench_standalone.rs | cli/bench/lsp_bench_standalone.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use deno_bench_util::bencher::Bencher;
use deno_bench_util::bencher::benchmark_group;
use deno_bench_util::bencher::benchmark_main;
use deno_core::serde_json::Value;
use deno_core::serde_json::json;
use test_util::lsp::LspClientBuilder;
// Intended to match the benchmark in quick-lint-js
// https://github.com/quick-lint/quick-lint-js/blob/35207e6616267c6c81be63f47ce97ec2452d60df/benchmark/benchmark-lsp/lsp-benchmarks.cpp#L223-L268
fn incremental_change_wait(bench: &mut Bencher) {
  // Spin up a fresh language server and complete the LSP handshake.
  let mut client = LspClientBuilder::new().build();
  client.initialize_default();
  // The server pushes a config-tree refresh notification after init;
  // consume it so later reads see the expected message order.
  let (method, _): (String, Option<Value>) = client.read_notification();
  assert_eq!(method, "deno/didRefreshDenoConfigurationTree");
  client.change_configuration(json!({ "deno": { "enable": true } }));
  // Changing the configuration triggers another refresh notification.
  let (method, _): (String, Option<Value>) = client.read_notification();
  assert_eq!(method, "deno/didRefreshDenoConfigurationTree");
  // Open the benchmark fixture (express-router source bundled at compile
  // time) as a JavaScript document at version 0.
  client.write_notification(
    "textDocument/didOpen",
    json!({
      "textDocument": {
        "uri": "file:///testdata/express-router.js",
        "languageId": "javascript",
        "version": 0,
        "text": include_str!("testdata/express-router.js")
      }
    }),
  );
  // Prime diagnostics once before the measured loop.
  client.diagnostic("file:///testdata/express-router.js");
  let mut document_version: u64 = 0;
  bench.iter(|| {
    // Unique 6-char token per iteration (e.g. "m00001") so every edit
    // actually changes the document content.
    let text = format!("m{document_version:05}");
    client
      .write_notification(
        "textDocument/didChange",
        json!({
          "textDocument": {
            "version": document_version,
            "uri":"file:///testdata/express-router.js"
          },
          "contentChanges": [
            {"text": text, "range":{"start":{"line":506,"character":39},"end":{"line":506,"character":45}}},
            {"text": text, "range":{"start":{"line":507,"character":8},"end":{"line":507,"character":14}}},
            {"text": text, "range":{"start":{"line":509,"character":10},"end":{"line":509,"character":16}}}
          ]
        })
      );
    // The measured unit of work: diagnostics after an incremental change.
    client.diagnostic("file:///testdata/express-router.js");
    document_version += 1;
  })
}
benchmark_group!(benches, incremental_change_wait);
benchmark_main!(benches);
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/cache/node.rs | cli/cache/node.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use deno_core::error::AnyError;
use deno_core::serde_json;
use deno_resolver::cjs::analyzer::DenoCjsAnalysis;
use deno_resolver::cjs::analyzer::NodeAnalysisCache;
use deno_resolver::cjs::analyzer::NodeAnalysisCacheSourceHash;
use deno_runtime::deno_webstorage::rusqlite::params;
use super::CacheDBHash;
use super::cache_db::CacheDB;
use super::cache_db::CacheDBConfiguration;
use super::cache_db::CacheFailure;
pub static NODE_ANALYSIS_CACHE_DB: CacheDBConfiguration =
CacheDBConfiguration {
table_initializer: concat!(
"CREATE TABLE IF NOT EXISTS cjsanalysiscache (",
"specifier TEXT PRIMARY KEY,",
"source_hash INTEGER NOT NULL,",
"data TEXT NOT NULL",
");"
),
on_version_change: "DELETE FROM cjsanalysiscache;",
preheat_queries: &[],
on_failure: CacheFailure::InMemory,
};
#[derive(Clone)]
pub struct SqliteNodeAnalysisCache {
inner: NodeAnalysisCacheInner,
}
impl SqliteNodeAnalysisCache {
  /// Wraps the given cache database in a node-analysis cache.
  pub fn new(db: CacheDB) -> Self {
    Self {
      inner: NodeAnalysisCacheInner::new(db),
    }
  }

  /// Unwraps a cache operation result, degrading to `T::default()` when
  /// the operation failed. Debug builds panic instead so cache bugs are
  /// noticed during development; release builds only log at debug level.
  fn ensure_ok<T: Default>(res: Result<T, AnyError>) -> T {
    res.unwrap_or_else(|err| {
      // TODO(mmastrac): this behavior predates the refactoring; it may make
      // sense to move it into the cache itself at some point.
      if cfg!(debug_assertions) {
        panic!("Error using esm analysis: {err:#}");
      }
      log::debug!("Error using esm analysis: {:#}", err);
      T::default()
    })
  }
}
impl NodeAnalysisCache for SqliteNodeAnalysisCache {
  /// Hashes module source text into the cache's source-hash representation.
  fn compute_source_hash(&self, source: &str) -> NodeAnalysisCacheSourceHash {
    let hash = CacheDBHash::from_hashable(source);
    NodeAnalysisCacheSourceHash(hash.inner())
  }

  /// Looks up a cached CJS analysis for the given specifier/source hash,
  /// returning `None` on a miss (or on an internal cache error).
  fn get_cjs_analysis(
    &self,
    specifier: &deno_ast::ModuleSpecifier,
    source_hash: NodeAnalysisCacheSourceHash,
  ) -> Option<DenoCjsAnalysis> {
    let hash = CacheDBHash::new(source_hash.0);
    let result = self.inner.get_cjs_analysis(specifier.as_str(), hash);
    Self::ensure_ok(result)
  }

  /// Stores a CJS analysis; internal cache errors are swallowed
  /// (best-effort caching).
  fn set_cjs_analysis(
    &self,
    specifier: &deno_ast::ModuleSpecifier,
    source_hash: NodeAnalysisCacheSourceHash,
    analysis: &DenoCjsAnalysis,
  ) {
    let hash = CacheDBHash::new(source_hash.0);
    let result =
      self.inner.set_cjs_analysis(specifier.as_str(), hash, analysis);
    Self::ensure_ok(result);
  }
}
#[derive(Clone)]
struct NodeAnalysisCacheInner {
conn: CacheDB,
}
impl NodeAnalysisCacheInner {
  pub fn new(conn: CacheDB) -> Self {
    Self { conn }
  }

  /// Fetches the cached analysis for `specifier`, but only when the stored
  /// source hash matches `expected_source_hash` (i.e. the source has not
  /// changed since the analysis was cached).
  pub fn get_cjs_analysis(
    &self,
    specifier: &str,
    expected_source_hash: CacheDBHash,
  ) -> Result<Option<DenoCjsAnalysis>, AnyError> {
    let query = "
    SELECT
      data
    FROM
      cjsanalysiscache
    WHERE
      specifier=?1
      AND source_hash=?2
    LIMIT 1";
    let res = self.conn.query_row(
      query,
      params![specifier, expected_source_hash],
      |row| {
        // data column holds the JSON-serialized DenoCjsAnalysis
        let analysis_info: String = row.get(0)?;
        Ok(serde_json::from_str(&analysis_info)?)
      },
    )?;
    Ok(res)
  }

  /// Upserts the analysis for `specifier`; the specifier is the primary
  /// key, so a newer source hash replaces any prior row.
  pub fn set_cjs_analysis(
    &self,
    specifier: &str,
    source_hash: CacheDBHash,
    cjs_analysis: &DenoCjsAnalysis,
  ) -> Result<(), AnyError> {
    let sql = "
    INSERT OR REPLACE INTO
      cjsanalysiscache (specifier, source_hash, data)
    VALUES
      (?1, ?2, ?3)";
    self.conn.execute(
      sql,
      params![
        specifier,
        source_hash,
        &serde_json::to_string(&cjs_analysis)?,
      ],
    )?;
    Ok(())
  }
}
#[cfg(test)]
mod test {
use deno_resolver::cjs::analyzer::ModuleExportsAndReExports;
use super::*;
#[test]
pub fn node_analysis_cache_general_use() {
let conn = CacheDB::in_memory(&NODE_ANALYSIS_CACHE_DB, "1.0.0");
let cache = NodeAnalysisCacheInner::new(conn);
assert!(
cache
.get_cjs_analysis("file.js", CacheDBHash::new(2))
.unwrap()
.is_none()
);
let cjs_analysis = DenoCjsAnalysis::Cjs(ModuleExportsAndReExports {
exports: vec!["export1".to_string()],
reexports: vec!["re-export1".to_string()],
});
cache
.set_cjs_analysis("file.js", CacheDBHash::new(2), &cjs_analysis)
.unwrap();
assert!(
cache
.get_cjs_analysis("file.js", CacheDBHash::new(3))
.unwrap()
.is_none()
); // different hash
let actual_cjs_analysis = cache
.get_cjs_analysis("file.js", CacheDBHash::new(2))
.unwrap()
.unwrap();
assert_eq!(actual_cjs_analysis, cjs_analysis);
// adding when already exists should not cause issue
cache
.set_cjs_analysis("file.js", CacheDBHash::new(2), &cjs_analysis)
.unwrap();
// recreating with same cli version should still have it
let conn = cache.conn.recreate_with_version("1.0.0");
let cache = NodeAnalysisCacheInner::new(conn);
let actual_analysis = cache
.get_cjs_analysis("file.js", CacheDBHash::new(2))
.unwrap()
.unwrap();
assert_eq!(actual_analysis, cjs_analysis);
// now changing the cli version should clear it
let conn = cache.conn.recreate_with_version("2.0.0");
let cache = NodeAnalysisCacheInner::new(conn);
assert!(
cache
.get_cjs_analysis("file.js", CacheDBHash::new(2))
.unwrap()
.is_none()
);
}
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/cache/caches.rs | cli/cache/caches.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::path::PathBuf;
use std::sync::Arc;
use deno_lib::version::DENO_VERSION_INFO;
use once_cell::sync::OnceCell;
use super::cache_db::CacheDB;
use super::cache_db::CacheDBConfiguration;
use super::check::TYPE_CHECK_CACHE_DB;
use super::code_cache::CODE_CACHE_DB;
use super::fast_check::FAST_CHECK_CACHE_DB;
use super::incremental::INCREMENTAL_CACHE_DB;
use super::module_info::MODULE_INFO_CACHE_DB;
use super::node::NODE_ANALYSIS_CACHE_DB;
use crate::cache::DenoDirProvider;
pub struct Caches {
dir_provider: Arc<DenoDirProvider>,
fmt_incremental_cache_db: OnceCell<CacheDB>,
lint_incremental_cache_db: OnceCell<CacheDB>,
dep_analysis_db: OnceCell<CacheDB>,
fast_check_db: OnceCell<CacheDB>,
node_analysis_db: OnceCell<CacheDB>,
type_checking_cache_db: OnceCell<CacheDB>,
code_cache_db: OnceCell<CacheDB>,
}
impl Caches {
  /// Creates a set of lazily-opened cache databases rooted in the given
  /// Deno directory provider. No database is opened until first use.
  pub fn new(dir: Arc<DenoDirProvider>) -> Self {
    Self {
      dir_provider: dir,
      fmt_incremental_cache_db: Default::default(),
      lint_incremental_cache_db: Default::default(),
      dep_analysis_db: Default::default(),
      fast_check_db: Default::default(),
      node_analysis_db: Default::default(),
      type_checking_cache_db: Default::default(),
      code_cache_db: Default::default(),
    }
  }

  /// Opens (at most once per cell) the database described by `config`.
  /// When no on-disk path is available, falls back to an in-memory
  /// database so callers always get a usable handle.
  fn make_db(
    cell: &OnceCell<CacheDB>,
    config: &'static CacheDBConfiguration,
    path: Option<PathBuf>,
  ) -> CacheDB {
    cell
      .get_or_init(|| {
        if let Some(path) = path {
          CacheDB::from_path(config, path, DENO_VERSION_INFO.deno)
        } else {
          CacheDB::in_memory(config, DENO_VERSION_INFO.deno)
        }
      })
      .clone()
  }

  // Each accessor below lazily opens one cache database. Errors while
  // resolving/creating the deno dir are discarded with `.ok()`, which
  // selects the in-memory fallback in `make_db`.

  pub fn fmt_incremental_cache_db(&self) -> CacheDB {
    Self::make_db(
      &self.fmt_incremental_cache_db,
      &INCREMENTAL_CACHE_DB,
      self
        .dir_provider
        .get_or_create()
        .ok()
        .map(|dir| dir.fmt_incremental_cache_db_file_path()),
    )
  }

  pub fn lint_incremental_cache_db(&self) -> CacheDB {
    Self::make_db(
      &self.lint_incremental_cache_db,
      &INCREMENTAL_CACHE_DB,
      self
        .dir_provider
        .get_or_create()
        .ok()
        .map(|dir| dir.lint_incremental_cache_db_file_path()),
    )
  }

  pub fn dep_analysis_db(&self) -> CacheDB {
    Self::make_db(
      &self.dep_analysis_db,
      &MODULE_INFO_CACHE_DB,
      self
        .dir_provider
        .get_or_create()
        .ok()
        .map(|dir| dir.dep_analysis_db_file_path()),
    )
  }

  pub fn fast_check_db(&self) -> CacheDB {
    Self::make_db(
      &self.fast_check_db,
      &FAST_CHECK_CACHE_DB,
      self
        .dir_provider
        .get_or_create()
        .ok()
        .map(|dir| dir.fast_check_cache_db_file_path()),
    )
  }

  pub fn node_analysis_db(&self) -> CacheDB {
    Self::make_db(
      &self.node_analysis_db,
      &NODE_ANALYSIS_CACHE_DB,
      self
        .dir_provider
        .get_or_create()
        .ok()
        .map(|dir| dir.node_analysis_db_file_path()),
    )
  }

  pub fn type_checking_cache_db(&self) -> CacheDB {
    Self::make_db(
      &self.type_checking_cache_db,
      &TYPE_CHECK_CACHE_DB,
      self
        .dir_provider
        .get_or_create()
        .ok()
        .map(|dir| dir.type_checking_cache_db_file_path()),
    )
  }

  pub fn code_cache_db(&self) -> CacheDB {
    Self::make_db(
      &self.code_cache_db,
      &CODE_CACHE_DB,
      self
        .dir_provider
        .get_or_create()
        .ok()
        .map(|dir| dir.code_cache_db_file_path()),
    )
  }
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/cache/fast_check.rs | cli/cache/fast_check.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use deno_core::error::AnyError;
use deno_graph::fast_check::FastCheckCacheItem;
use deno_graph::fast_check::FastCheckCacheKey;
use deno_runtime::deno_webstorage::rusqlite::params;
use super::cache_db::CacheDB;
use super::cache_db::CacheDBConfiguration;
use super::cache_db::CacheDBHash;
use super::cache_db::CacheFailure;
pub static FAST_CHECK_CACHE_DB: CacheDBConfiguration = CacheDBConfiguration {
table_initializer: concat!(
"CREATE TABLE IF NOT EXISTS fastcheckcache (",
"hash INTEGER PRIMARY KEY,",
"data TEXT NOT NULL",
");"
),
on_version_change: "DELETE FROM fastcheckcache;",
preheat_queries: &[],
on_failure: CacheFailure::Blackhole,
};
#[derive(Clone)]
pub struct FastCheckCache {
inner: FastCheckCacheInner,
}
impl FastCheckCache {
  /// Wraps the given cache database in a fast-check cache.
  pub fn new(db: CacheDB) -> Self {
    Self {
      inner: FastCheckCacheInner::new(db),
    }
  }

  /// Unwraps a cache operation result, degrading to `T::default()` when
  /// the operation failed. Debug builds panic so cache bugs surface
  /// during development; release builds only log at debug level.
  fn ensure_ok<T: Default>(res: Result<T, AnyError>) -> T {
    res.unwrap_or_else(|err| {
      // TODO(mmastrac): this behavior predates the refactoring; it may make
      // sense to move it into the cache itself at some point.
      if cfg!(debug_assertions) {
        panic!("Error using fast check cache: {err:#}");
      }
      log::debug!("Error using fast check cache: {:#}", err);
      T::default()
    })
  }
}
impl deno_graph::fast_check::FastCheckCache for FastCheckCache {
  // Trait-facing wrappers: delegate to the inner SQLite-backed cache and
  // swallow errors via `ensure_ok` (best-effort caching).
  fn get(&self, key: FastCheckCacheKey) -> Option<FastCheckCacheItem> {
    Self::ensure_ok(self.inner.get(key))
  }

  fn set(&self, key: FastCheckCacheKey, value: FastCheckCacheItem) {
    Self::ensure_ok(self.inner.set(key, &value));
  }
}
#[derive(Clone)]
struct FastCheckCacheInner {
conn: CacheDB,
}
impl FastCheckCacheInner {
  pub fn new(conn: CacheDB) -> Self {
    Self { conn }
  }

  /// Looks up a fast-check item by its key hash; `Ok(None)` on a miss.
  pub fn get(
    &self,
    key: FastCheckCacheKey,
  ) -> Result<Option<FastCheckCacheItem>, AnyError> {
    let query = "
    SELECT
      data
    FROM
      fastcheckcache
    WHERE
      hash=?1
    LIMIT 1";
    let res = self.conn.query_row(
      query,
      params![CacheDBHash::new(key.as_u64())],
      |row| {
        // data column holds the bincode-serialized FastCheckCacheItem
        let value: Vec<u8> = row.get(0)?;
        Ok(bincode::deserialize::<FastCheckCacheItem>(&value)?)
      },
    )?;
    Ok(res)
  }

  /// Upserts an item keyed by the hash; replaces any existing row.
  pub fn set(
    &self,
    key: FastCheckCacheKey,
    data: &FastCheckCacheItem,
  ) -> Result<(), AnyError> {
    let sql = "
    INSERT OR REPLACE INTO
      fastcheckcache (hash, data)
    VALUES
      (?1, ?2)";
    self.conn.execute(
      sql,
      params![CacheDBHash::new(key.as_u64()), &bincode::serialize(data)?],
    )?;
    Ok(())
  }
}
#[cfg(test)]
mod test {
use std::collections::BTreeSet;
use deno_ast::ModuleSpecifier;
use deno_graph::fast_check::FastCheckCache as _;
use deno_graph::fast_check::FastCheckCacheModuleItem;
use deno_graph::fast_check::FastCheckCacheModuleItemDiagnostic;
use deno_semver::package::PackageNv;
use super::*;
#[test]
pub fn cache_general_use() {
let conn = CacheDB::in_memory(&FAST_CHECK_CACHE_DB, "1.0.0");
let cache = FastCheckCache::new(conn);
let key = FastCheckCacheKey::build(
cache.hash_seed(),
&PackageNv::from_str("@scope/a@1.0.0").unwrap(),
&Default::default(),
);
let cache = cache.inner;
assert!(cache.get(key).unwrap().is_none());
let value = FastCheckCacheItem {
dependencies: BTreeSet::from([
PackageNv::from_str("@scope/b@1.0.0").unwrap()
]),
modules: vec![(
ModuleSpecifier::parse("https://jsr.io/test.ts").unwrap(),
FastCheckCacheModuleItem::Diagnostic(
FastCheckCacheModuleItemDiagnostic { source_hash: 123 },
),
)],
};
cache.set(key, &value).unwrap();
let stored_value = cache.get(key).unwrap().unwrap();
assert_eq!(stored_value, value);
// adding when already exists should not cause issue
cache.set(key, &value).unwrap();
// recreating with same cli version should still have it
let conn = cache.conn.recreate_with_version("1.0.0");
let cache = FastCheckCacheInner::new(conn);
let stored_value = cache.get(key).unwrap().unwrap();
assert_eq!(stored_value, value);
// now changing the cli version should clear it
let conn = cache.conn.recreate_with_version("2.0.0");
let cache = FastCheckCacheInner::new(conn);
assert!(cache.get(key).unwrap().is_none());
}
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/cache/check.rs | cli/cache/check.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use deno_ast::ModuleSpecifier;
use deno_core::error::AnyError;
use deno_runtime::deno_webstorage::rusqlite::params;
use super::cache_db::CacheDB;
use super::cache_db::CacheDBConfiguration;
use super::cache_db::CacheDBHash;
use super::cache_db::CacheFailure;
pub static TYPE_CHECK_CACHE_DB: CacheDBConfiguration = CacheDBConfiguration {
table_initializer: concat!(
"CREATE TABLE IF NOT EXISTS checkcache (",
"check_hash INT PRIMARY KEY",
");",
"CREATE TABLE IF NOT EXISTS tsbuildinfo (",
"specifier TEXT PRIMARY KEY,",
"text TEXT NOT NULL",
");",
),
on_version_change: concat!(
"DELETE FROM checkcache;",
"DELETE FROM tsbuildinfo;"
),
preheat_queries: &[],
// If the cache fails, just ignore all caching attempts
on_failure: CacheFailure::Blackhole,
};
/// The cache used to tell whether type checking should occur again.
///
/// This simply stores a hash of the inputs of each successful type check
/// and only clears them out when changing CLI versions.
pub struct TypeCheckCache(CacheDB);
impl TypeCheckCache {
  pub fn new(db: CacheDB) -> Self {
    Self(db)
  }

  /// Returns whether a successful type check with this input hash was
  /// previously recorded. Lookup failures count as "not cached" in
  /// release builds and panic in debug builds.
  pub fn has_check_hash(&self, hash: CacheDBHash) -> bool {
    self.hash_check_hash_result(hash).unwrap_or_else(|err| {
      if cfg!(debug_assertions) {
        panic!("Error retrieving hash: {err}");
      }
      log::debug!("Error retrieving hash: {}", err);
      // fail silently when not debugging
      false
    })
  }

  fn hash_check_hash_result(
    &self,
    hash: CacheDBHash,
  ) -> Result<bool, AnyError> {
    self.0.exists(
      "SELECT * FROM checkcache WHERE check_hash=?1 LIMIT 1",
      params![hash],
    )
  }

  /// Records a successful type check; storage failures are non-fatal
  /// in release builds.
  pub fn add_check_hash(&self, check_hash: CacheDBHash) {
    match self.add_check_hash_result(check_hash) {
      Ok(()) => {}
      Err(err) => {
        if cfg!(debug_assertions) {
          panic!("Error saving check hash: {err}");
        }
        log::debug!("Error saving check hash: {}", err);
      }
    }
  }

  fn add_check_hash_result(
    &self,
    check_hash: CacheDBHash,
  ) -> Result<(), AnyError> {
    let sql = "
    INSERT OR REPLACE INTO
      checkcache (check_hash)
    VALUES
      (?1)";
    self.0.execute(sql, params![check_hash])?;
    Ok(())
  }

  /// Fetches the stored tsbuildinfo text for a specifier, treating any
  /// lookup error the same as a miss.
  pub fn get_tsbuildinfo(&self, specifier: &ModuleSpecifier) -> Option<String> {
    self
      .0
      .query_row(
        "SELECT text FROM tsbuildinfo WHERE specifier=?1 LIMIT 1",
        params![specifier.as_str()],
        |row| {
          let text: String = row.get(0)?;
          Ok(text)
        },
      )
      .ok()
      .flatten()
  }

  /// Stores tsbuildinfo text for a specifier; failures are non-fatal in
  /// release builds.
  pub fn set_tsbuildinfo(&self, specifier: &ModuleSpecifier, text: &str) {
    match self.set_tsbuildinfo_result(specifier, text) {
      Ok(()) => {}
      Err(err) => {
        // should never error here, but if it ever does don't fail
        if cfg!(debug_assertions) {
          panic!("Error saving tsbuildinfo: {err}");
        }
        log::debug!("Error saving tsbuildinfo: {}", err);
      }
    }
  }

  fn set_tsbuildinfo_result(
    &self,
    specifier: &ModuleSpecifier,
    text: &str,
  ) -> Result<(), AnyError> {
    self.0.execute(
      "INSERT OR REPLACE INTO tsbuildinfo (specifier, text) VALUES (?1, ?2)",
      params![specifier.as_str(), text],
    )?;
    Ok(())
  }
}
#[cfg(test)]
mod test {
use super::*;
#[test]
pub fn check_cache_general_use() {
let conn = CacheDB::in_memory(&TYPE_CHECK_CACHE_DB, "1.0.0");
let cache = TypeCheckCache::new(conn);
assert!(!cache.has_check_hash(CacheDBHash::new(1)));
cache.add_check_hash(CacheDBHash::new(1));
assert!(cache.has_check_hash(CacheDBHash::new(1)));
assert!(!cache.has_check_hash(CacheDBHash::new(2)));
let specifier1 = ModuleSpecifier::parse("file:///test.json").unwrap();
assert_eq!(cache.get_tsbuildinfo(&specifier1), None);
cache.set_tsbuildinfo(&specifier1, "test");
assert_eq!(cache.get_tsbuildinfo(&specifier1), Some("test".to_string()));
// try changing the cli version (should clear)
let conn = cache.0.recreate_with_version("2.0.0");
let cache = TypeCheckCache::new(conn);
assert!(!cache.has_check_hash(CacheDBHash::new(1)));
cache.add_check_hash(CacheDBHash::new(1));
assert!(cache.has_check_hash(CacheDBHash::new(1)));
assert_eq!(cache.get_tsbuildinfo(&specifier1), None);
cache.set_tsbuildinfo(&specifier1, "test");
assert_eq!(cache.get_tsbuildinfo(&specifier1), Some("test".to_string()));
// recreating the cache should not remove the data because the CLI version is the same
let conn = cache.0.recreate_with_version("2.0.0");
let cache = TypeCheckCache::new(conn);
assert!(cache.has_check_hash(CacheDBHash::new(1)));
assert!(!cache.has_check_hash(CacheDBHash::new(2)));
assert_eq!(cache.get_tsbuildinfo(&specifier1), Some("test".to_string()));
// adding when already exists should not cause issue
cache.add_check_hash(CacheDBHash::new(1));
assert!(cache.has_check_hash(CacheDBHash::new(1)));
cache.set_tsbuildinfo(&specifier1, "other");
assert_eq!(
cache.get_tsbuildinfo(&specifier1),
Some("other".to_string())
);
}
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/cache/code_cache.rs | cli/cache/code_cache.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use deno_ast::ModuleSpecifier;
use deno_core::error::AnyError;
use deno_runtime::code_cache;
use deno_runtime::deno_webstorage::rusqlite::params;
use super::cache_db::CacheDB;
use super::cache_db::CacheDBConfiguration;
use super::cache_db::CacheDBHash;
use super::cache_db::CacheFailure;
pub static CODE_CACHE_DB: CacheDBConfiguration = CacheDBConfiguration {
table_initializer: concat!(
"CREATE TABLE IF NOT EXISTS codecache (",
"specifier TEXT NOT NULL,",
"type INTEGER NOT NULL,",
"source_hash INTEGER NOT NULL,",
"data BLOB NOT NULL,",
"PRIMARY KEY (specifier, type)",
");"
),
on_version_change: "DELETE FROM codecache;",
preheat_queries: &[],
on_failure: CacheFailure::Blackhole,
};
pub struct CodeCache {
inner: CodeCacheInner,
}
impl CodeCache {
  /// Wraps the given cache database in a V8 code cache.
  pub fn new(db: CacheDB) -> Self {
    Self {
      inner: CodeCacheInner::new(db),
    }
  }

  /// Unwraps a cache operation result, degrading to `T::default()` when
  /// the operation failed. Debug builds panic so cache bugs surface
  /// during development; release builds only log at debug level.
  fn ensure_ok<T: Default>(res: Result<T, AnyError>) -> T {
    res.unwrap_or_else(|err| {
      // TODO(mmastrac): this behavior predates the refactoring; it may make
      // sense to move it into the cache itself at some point.
      if cfg!(debug_assertions) {
        panic!("Error using code cache: {err:#}");
      }
      log::debug!("Error using code cache: {:#}", err);
      T::default()
    })
  }

  /// Looks up cached code-cache bytes for a module; `None` on a miss,
  /// stale source hash, or internal cache error.
  pub fn get_sync(
    &self,
    specifier: &ModuleSpecifier,
    code_cache_type: code_cache::CodeCacheType,
    source_hash: u64,
  ) -> Option<Vec<u8>> {
    let hash = CacheDBHash::new(source_hash);
    let result =
      self.inner.get_sync(specifier.as_str(), code_cache_type, hash);
    Self::ensure_ok(result)
  }

  /// Stores code-cache bytes for a module; failures are swallowed
  /// (best-effort caching).
  pub fn set_sync(
    &self,
    specifier: &ModuleSpecifier,
    code_cache_type: code_cache::CodeCacheType,
    source_hash: u64,
    data: &[u8],
  ) {
    let hash = CacheDBHash::new(source_hash);
    let result =
      self
        .inner
        .set_sync(specifier.as_str(), code_cache_type, hash, data);
    Self::ensure_ok(result);
  }
}
impl code_cache::CodeCache for CodeCache {
  // Trait-facing wrappers: delegate to the inherent methods above.
  fn get_sync(
    &self,
    specifier: &ModuleSpecifier,
    code_cache_type: code_cache::CodeCacheType,
    source_hash: u64,
  ) -> Option<Vec<u8>> {
    self.get_sync(specifier, code_cache_type, source_hash)
  }

  fn set_sync(
    &self,
    specifier: ModuleSpecifier,
    code_cache_type: code_cache::CodeCacheType,
    source_hash: u64,
    data: &[u8],
  ) {
    // note: the trait takes the specifier by value; the inherent method
    // borrows it
    self.set_sync(&specifier, code_cache_type, source_hash, data);
  }
}
struct CodeCacheInner {
conn: CacheDB,
}
impl CodeCacheInner {
  pub fn new(conn: CacheDB) -> Self {
    Self { conn }
  }

  /// Fetches cached bytes for (specifier, type) when the stored source
  /// hash still matches; `Ok(None)` on a miss or stale hash.
  pub fn get_sync(
    &self,
    specifier: &str,
    code_cache_type: code_cache::CodeCacheType,
    source_hash: CacheDBHash,
  ) -> Result<Option<Vec<u8>>, AnyError> {
    let query = "
    SELECT
      data
    FROM
      codecache
    WHERE
      specifier=?1 AND type=?2 AND source_hash=?3
    LIMIT 1";
    let params = params![
      specifier,
      serialize_code_cache_type(code_cache_type),
      source_hash,
    ];
    self.conn.query_row(query, params, |row| {
      let value: Vec<u8> = row.get(0)?;
      Ok(value)
    })
  }

  /// Upserts cached bytes; (specifier, type) is the primary key, so a
  /// newer source hash replaces any prior row.
  pub fn set_sync(
    &self,
    specifier: &str,
    code_cache_type: code_cache::CodeCacheType,
    source_hash: CacheDBHash,
    data: &[u8],
  ) -> Result<(), AnyError> {
    let sql = "
    INSERT OR REPLACE INTO
      codecache (specifier, type, source_hash, data)
    VALUES
      (?1, ?2, ?3, ?4)";
    let params = params![
      specifier,
      serialize_code_cache_type(code_cache_type),
      source_hash,
      data
    ];
    self.conn.execute(sql, params)?;
    Ok(())
  }
}
fn serialize_code_cache_type(
code_cache_type: code_cache::CodeCacheType,
) -> i64 {
match code_cache_type {
code_cache::CodeCacheType::Script => 0,
code_cache::CodeCacheType::EsModule => 1,
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
pub fn end_to_end() {
let conn = CacheDB::in_memory(&CODE_CACHE_DB, "1.0.0");
let cache = CodeCacheInner::new(conn);
assert!(
cache
.get_sync(
"file:///foo/bar.js",
code_cache::CodeCacheType::EsModule,
CacheDBHash::new(1),
)
.unwrap()
.is_none()
);
let data_esm = vec![1, 2, 3];
cache
.set_sync(
"file:///foo/bar.js",
code_cache::CodeCacheType::EsModule,
CacheDBHash::new(1),
&data_esm,
)
.unwrap();
assert_eq!(
cache
.get_sync(
"file:///foo/bar.js",
code_cache::CodeCacheType::EsModule,
CacheDBHash::new(1),
)
.unwrap()
.unwrap(),
data_esm
);
assert!(
cache
.get_sync(
"file:///foo/bar.js",
code_cache::CodeCacheType::Script,
CacheDBHash::new(1),
)
.unwrap()
.is_none()
);
let data_script = vec![4, 5, 6];
cache
.set_sync(
"file:///foo/bar.js",
code_cache::CodeCacheType::Script,
CacheDBHash::new(1),
&data_script,
)
.unwrap();
assert_eq!(
cache
.get_sync(
"file:///foo/bar.js",
code_cache::CodeCacheType::Script,
CacheDBHash::new(1),
)
.unwrap()
.unwrap(),
data_script
);
assert_eq!(
cache
.get_sync(
"file:///foo/bar.js",
code_cache::CodeCacheType::EsModule,
CacheDBHash::new(1),
)
.unwrap()
.unwrap(),
data_esm
);
}
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/cache/module_info.rs | cli/cache/module_info.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::sync::Arc;
use deno_ast::MediaType;
use deno_ast::ModuleSpecifier;
use deno_core::error::AnyError;
use deno_core::serde_json;
use deno_error::JsErrorBox;
use deno_graph::analysis::ModuleInfo;
use deno_graph::ast::ParserModuleAnalyzer;
use deno_resolver::cache::ParsedSourceCache;
use deno_runtime::deno_webstorage::rusqlite::params;
use super::cache_db::CacheDB;
use super::cache_db::CacheDBConfiguration;
use super::cache_db::CacheDBHash;
use super::cache_db::CacheFailure;
const SELECT_MODULE_INFO: &str = "
SELECT
module_info
FROM
moduleinfocache
WHERE
specifier=?1
AND media_type=?2
AND source_hash=?3
LIMIT 1";
pub static MODULE_INFO_CACHE_DB: CacheDBConfiguration = CacheDBConfiguration {
table_initializer: concat!(
"CREATE TABLE IF NOT EXISTS moduleinfocache (",
"specifier TEXT PRIMARY KEY,",
"media_type INTEGER NOT NULL,",
"source_hash INTEGER NOT NULL,",
"module_info TEXT NOT NULL",
");"
),
on_version_change: "DELETE FROM moduleinfocache;",
preheat_queries: &[SELECT_MODULE_INFO],
on_failure: CacheFailure::InMemory,
};
/// A cache of `deno_graph::ModuleInfo` objects. Using this leads to a considerable
/// performance improvement because when it exists we can skip parsing a module for
/// deno_graph.
#[derive(Debug)]
pub struct ModuleInfoCache {
conn: CacheDB,
parsed_source_cache: Arc<ParsedSourceCache>,
}
impl ModuleInfoCache {
  #[cfg(test)]
  pub fn new_in_memory(
    version: &'static str,
    parsed_source_cache: Arc<ParsedSourceCache>,
  ) -> Self {
    Self::new(
      CacheDB::in_memory(&MODULE_INFO_CACHE_DB, version),
      parsed_source_cache,
    )
  }

  pub fn new(
    conn: CacheDB,
    parsed_source_cache: Arc<ParsedSourceCache>,
  ) -> Self {
    Self {
      conn,
      parsed_source_cache,
    }
  }

  /// Useful for testing: re-create this cache DB with a different current version.
  #[cfg(test)]
  pub(crate) fn recreate_with_version(self, version: &'static str) -> Self {
    Self {
      conn: self.conn.recreate_with_version(version),
      parsed_source_cache: self.parsed_source_cache,
    }
  }

  /// Fetches the cached module info for a specifier, but only when both
  /// the media type and the source hash match what was stored (a source
  /// change invalidates the row).
  pub fn get_module_info(
    &self,
    specifier: &ModuleSpecifier,
    media_type: MediaType,
    expected_source_hash: CacheDBHash,
  ) -> Result<Option<ModuleInfo>, AnyError> {
    let query = SELECT_MODULE_INFO;
    let res = self.conn.query_row(
      query,
      params![
        &specifier.as_str(),
        serialize_media_type(media_type),
        expected_source_hash,
      ],
      |row| {
        // module_info column holds the JSON-serialized ModuleInfo
        let module_info: String = row.get(0)?;
        let module_info = serde_json::from_str(&module_info)?;
        Ok(module_info)
      },
    )?;
    Ok(res)
  }

  /// Upserts module info; the specifier is the primary key, so a newer
  /// media type/source hash replaces any prior row.
  pub fn set_module_info(
    &self,
    specifier: &ModuleSpecifier,
    media_type: MediaType,
    source_hash: CacheDBHash,
    module_info: &ModuleInfo,
  ) -> Result<(), AnyError> {
    let sql = "
    INSERT OR REPLACE INTO
      moduleinfocache (specifier, media_type, source_hash, module_info)
    VALUES
      (?1, ?2, ?3, ?4)";
    self.conn.execute(
      sql,
      params![
        specifier.as_str(),
        serialize_media_type(media_type),
        source_hash,
        &serde_json::to_string(&module_info)?,
      ],
    )?;
    Ok(())
  }

  /// Borrows this cache as a `deno_graph` module analyzer that consults
  /// the cache before parsing.
  pub fn as_module_analyzer(&self) -> ModuleInfoCacheModuleAnalyzer<'_> {
    ModuleInfoCacheModuleAnalyzer {
      module_info_cache: self,
      parsed_source_cache: &self.parsed_source_cache,
    }
  }
}
impl deno_graph::source::ModuleInfoCacher for ModuleInfoCache {
  /// Best-effort write-through used by deno_graph: hashes the raw source
  /// bytes and stores the analysis; failures are only logged.
  fn cache_module_info(
    &self,
    specifier: &ModuleSpecifier,
    media_type: MediaType,
    source: &Arc<[u8]>,
    module_info: &deno_graph::analysis::ModuleInfo,
  ) {
    log::debug!("Caching module info for {}", specifier);
    let source_hash = CacheDBHash::from_hashable(source);
    let result =
      self.set_module_info(specifier, media_type, source_hash, module_info);
    if let Err(err) = result {
      log::debug!(
        "Error saving module cache info for {}. {:#}",
        specifier,
        err
      );
    }
  }
}
pub struct ModuleInfoCacheModuleAnalyzer<'a> {
module_info_cache: &'a ModuleInfoCache,
parsed_source_cache: &'a Arc<ParsedSourceCache>,
}
impl ModuleInfoCacheModuleAnalyzer<'_> {
  /// Cache lookup that treats errors as misses (logging them at debug
  /// level) so analysis can always fall back to parsing.
  fn load_cached_module_info(
    &self,
    specifier: &ModuleSpecifier,
    media_type: MediaType,
    source_hash: CacheDBHash,
  ) -> Option<ModuleInfo> {
    match self.module_info_cache.get_module_info(
      specifier,
      media_type,
      source_hash,
    ) {
      Ok(Some(info)) => Some(info),
      Ok(None) => None,
      Err(err) => {
        log::debug!(
          "Error loading module cache info for {}. {:#}",
          specifier,
          err
        );
        None
      }
    }
  }

  /// Best-effort cache write; storage errors are only logged.
  fn save_module_info_to_cache(
    &self,
    specifier: &ModuleSpecifier,
    media_type: MediaType,
    source_hash: CacheDBHash,
    module_info: &ModuleInfo,
  ) {
    if let Err(err) = self.module_info_cache.set_module_info(
      specifier,
      media_type,
      source_hash,
      module_info,
    ) {
      log::debug!(
        "Error saving module cache info for {}. {:#}",
        specifier,
        err
      );
    }
  }

  /// Synchronous analysis: serves from the cache when the source hash
  /// matches, otherwise parses on the current thread and caches the
  /// result.
  #[allow(clippy::result_large_err)]
  pub fn analyze_sync(
    &self,
    specifier: &ModuleSpecifier,
    media_type: MediaType,
    source: &Arc<str>,
  ) -> Result<ModuleInfo, deno_ast::ParseDiagnostic> {
    // attempt to load from the cache
    let source_hash = CacheDBHash::from_hashable(source);
    if let Some(info) =
      self.load_cached_module_info(specifier, media_type, source_hash)
    {
      return Ok(info);
    }

    // otherwise, get the module info from the parsed source cache
    let parser = self.parsed_source_cache.as_capturing_parser();
    let analyzer = ParserModuleAnalyzer::new(&parser);
    let module_info =
      analyzer.analyze_sync(specifier, source.clone(), media_type)?;

    // then attempt to cache it
    self.save_module_info_to_cache(
      specifier,
      media_type,
      source_hash,
      &module_info,
    );

    Ok(module_info)
  }
}
#[async_trait::async_trait(?Send)]
impl deno_graph::analysis::ModuleAnalyzer
  for ModuleInfoCacheModuleAnalyzer<'_>
{
  /// Async analysis: serves from the cache when the source hash matches,
  /// otherwise parses on a blocking thread (parsing is CPU-bound) and
  /// caches the result.
  async fn analyze(
    &self,
    specifier: &ModuleSpecifier,
    source: Arc<str>,
    media_type: MediaType,
  ) -> Result<ModuleInfo, JsErrorBox> {
    // attempt to load from the cache
    let source_hash = CacheDBHash::from_hashable(&source);
    if let Some(info) =
      self.load_cached_module_info(specifier, media_type, source_hash)
    {
      return Ok(info);
    }

    // otherwise, get the module info from the parsed source cache
    let module_info = deno_core::unsync::spawn_blocking({
      let cache = self.parsed_source_cache.clone();
      let specifier = specifier.clone();
      move || {
        let parser = cache.as_capturing_parser();
        let analyzer = ParserModuleAnalyzer::new(&parser);
        analyzer
          .analyze_sync(&specifier, source, media_type)
          .map_err(JsErrorBox::from_err)
      }
    })
    .await
    // the blocking task is not cancelled, so join errors only mean a panic
    .unwrap()?;

    // then attempt to cache it
    self.save_module_info_to_cache(
      specifier,
      media_type,
      source_hash,
      &module_info,
    );

    Ok(module_info)
  }
}
// note: there is no deserialize for this because this is only ever
// saved in the db and then used for comparisons
//
// These integers are persisted in the moduleinfocache table's media_type
// column; rows only hit when the stored value matches, so changing an
// existing assignment silently invalidates cached entries.
fn serialize_media_type(media_type: MediaType) -> i64 {
  use MediaType::*;
  match media_type {
    JavaScript => 1,
    Jsx => 2,
    Mjs => 3,
    Cjs => 4,
    TypeScript => 5,
    Mts => 6,
    Cts => 7,
    Dts => 8,
    Dmts => 9,
    Dcts => 10,
    Tsx => 11,
    Json => 12,
    Jsonc => 13,
    Json5 => 14,
    Wasm => 15,
    Css => 16,
    Html => 17,
    SourceMap => 18,
    Sql => 19,
    Unknown => 20,
  }
}
#[cfg(test)]
mod test {
  use deno_graph::PositionRange;
  use deno_graph::analysis::JsDocImportInfo;
  use deno_graph::analysis::SpecifierWithRange;

  use super::*;

  // End-to-end test of the module info cache: miss, store, hit, and the
  // behavior of each key component (specifier, media type, source hash)
  // plus CLI-version-based invalidation.
  #[test]
  pub fn module_info_cache_general_use() {
    let cache = ModuleInfoCache::new_in_memory("1.0.0", Default::default());
    let specifier1 =
      ModuleSpecifier::parse("https://localhost/mod.ts").unwrap();
    let specifier2 =
      ModuleSpecifier::parse("https://localhost/mod2.ts").unwrap();

    // cache miss before anything has been stored
    assert_eq!(
      cache
        .get_module_info(
          &specifier1,
          MediaType::JavaScript,
          CacheDBHash::new(1)
        )
        .unwrap(),
      None
    );

    // store an entry containing a jsdoc import
    let mut module_info = ModuleInfo::default();
    module_info.jsdoc_imports.push(JsDocImportInfo {
      specifier: SpecifierWithRange {
        range: PositionRange {
          start: deno_graph::Position {
            line: 0,
            character: 3,
          },
          end: deno_graph::Position {
            line: 1,
            character: 2,
          },
        },
        text: "test".to_string(),
      },
      resolution_mode: None,
    });
    cache
      .set_module_info(
        &specifier1,
        MediaType::JavaScript,
        CacheDBHash::new(1),
        &module_info,
      )
      .unwrap();

    // exact (specifier, media type, source hash) key hits
    assert_eq!(
      cache
        .get_module_info(
          &specifier1,
          MediaType::JavaScript,
          CacheDBHash::new(1)
        )
        .unwrap(),
      Some(module_info.clone())
    );
    // different specifier misses
    assert_eq!(
      cache
        .get_module_info(
          &specifier2,
          MediaType::JavaScript,
          CacheDBHash::new(1)
        )
        .unwrap(),
      None,
    );

    // different media type
    assert_eq!(
      cache
        .get_module_info(
          &specifier1,
          MediaType::TypeScript,
          CacheDBHash::new(1)
        )
        .unwrap(),
      None,
    );

    // different source hash
    assert_eq!(
      cache
        .get_module_info(
          &specifier1,
          MediaType::JavaScript,
          CacheDBHash::new(2)
        )
        .unwrap(),
      None,
    );

    // try recreating with the same version
    let cache = cache.recreate_with_version("1.0.0");

    // should get it
    assert_eq!(
      cache
        .get_module_info(
          &specifier1,
          MediaType::JavaScript,
          CacheDBHash::new(1)
        )
        .unwrap(),
      Some(module_info)
    );

    // try recreating with a different version
    let cache = cache.recreate_with_version("1.0.1");

    // should no longer exist
    assert_eq!(
      cache
        .get_module_info(
          &specifier1,
          MediaType::JavaScript,
          CacheDBHash::new(1)
        )
        .unwrap(),
      None,
    );
  }
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/cache/mod.rs | cli/cache/mod.rs | // Copyright 2018-2025 the Deno authors. MIT license.
// Submodules making up the CLI's on-disk caching layer.
mod cache_db;
mod caches;
mod check;
mod code_cache;
mod fast_check;
mod incremental;
mod module_info;
mod node;

// Deno dir types specialized to the CLI's sys implementation.
pub type DenoDir = deno_resolver::cache::DenoDir<CliSys>;
pub type DenoDirProvider = deno_resolver::cache::DenoDirProvider<CliSys>;

pub use cache_db::CacheDBHash;
pub use caches::Caches;
pub use check::TypeCheckCache;
pub use code_cache::CodeCache;
/// Permissions used to save a file in the disk caches.
pub use deno_cache_dir::CACHE_PERM;
pub use fast_check::FastCheckCache;
pub use incremental::IncrementalCache;
pub use module_info::ModuleInfoCache;
pub use node::SqliteNodeAnalysisCache;

use crate::sys::CliSys;

// HTTP cache types specialized to the CLI's sys implementation.
pub type GlobalHttpCache = deno_cache_dir::GlobalHttpCache<CliSys>;
pub type LocalLspHttpCache = deno_cache_dir::LocalLspHttpCache<CliSys>;
pub use deno_cache_dir::HttpCache;
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/cache/cache_db.rs | cli/cache/cache_db.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::io::IsTerminal;
use std::path::Path;
use std::path::PathBuf;
use std::sync::Arc;
use deno_core::error::AnyError;
use deno_core::parking_lot::Mutex;
use deno_core::parking_lot::MutexGuard;
use deno_core::unsync::spawn_blocking;
use deno_lib::util::hash::FastInsecureHasher;
use deno_runtime::deno_webstorage::rusqlite;
use deno_runtime::deno_webstorage::rusqlite::Connection;
use deno_runtime::deno_webstorage::rusqlite::OptionalExtension;
use deno_runtime::deno_webstorage::rusqlite::Params;
use once_cell::sync::OnceCell;
/// A `u64` hash value stored in a cache database.
///
/// Persisted as an `i64` column (see the `ToSql`/`FromSql` impls) because
/// SQLite has no unsigned 64-bit integer type.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct CacheDBHash(u64);
impl CacheDBHash {
  /// Wraps a raw `u64` hash value.
  pub fn new(hash: u64) -> Self {
    Self(hash)
  }

  /// Hashes `hashable` with the Deno-version-seeded insecure hasher.
  pub fn from_hashable(hashable: impl std::hash::Hash) -> Self {
    // always write in the deno version just in case
    // the clearing on deno version change doesn't work
    let hash = FastInsecureHasher::new_deno_versioned()
      .write_hashable(hashable)
      .finish();
    Self::new(hash)
  }

  /// Returns the wrapped `u64` value.
  pub fn inner(&self) -> u64 {
    self.0
  }
}
impl rusqlite::types::ToSql for CacheDBHash {
  fn to_sql(&self) -> rusqlite::Result<rusqlite::types::ToSqlOutput<'_>> {
    // sqlite doesn't support u64, but it does support i64 so store
    // this value "incorrectly" as i64 then convert back to u64 on read
    let stored = rusqlite::types::Value::Integer(self.0 as i64);
    Ok(rusqlite::types::ToSqlOutput::Owned(stored))
  }
}
impl rusqlite::types::FromSql for CacheDBHash {
  fn column_result(
    value: rusqlite::types::ValueRef,
  ) -> rusqlite::types::FromSqlResult<Self> {
    // Reverse the i64 storage trick from `ToSql`: reinterpret the bits as u64.
    if let rusqlite::types::ValueRef::Integer(i) = value {
      Ok(Self::new(i as u64))
    } else {
      Err(rusqlite::types::FromSqlError::InvalidType)
    }
  }
}
/// What should the cache do on failure?
#[derive(Debug, Default)]
pub enum CacheFailure {
  /// Return errors if failure mode otherwise unspecified.
  #[default]
  Error,
  /// Create an in-memory cache that is not persistent.
  InMemory,
  /// Create a blackhole cache that ignores writes and returns empty reads.
  Blackhole,
}
/// Configuration SQL and other parameters for a [`CacheDB`].
///
/// All fields are `'static` so instances are typically declared as statics.
#[derive(Debug)]
pub struct CacheDBConfiguration {
  /// SQL to run for a new database.
  pub table_initializer: &'static str,
  /// SQL to run when the version from [`crate::version::deno()`] changes.
  pub on_version_change: &'static str,
  /// Prepared statements to pre-heat while initializing the database.
  pub preheat_queries: &'static [&'static str],
  /// What the cache should do on failure.
  pub on_failure: CacheFailure,
}
impl CacheDBConfiguration {
  /// Builds the full initialization SQL: connection pragmas, the version
  /// info table, then this cache's table initializer, in that order.
  fn create_combined_sql(&self) -> String {
    const COMMON_SQL: &str = concat!(
      "PRAGMA journal_mode=WAL;",
      "PRAGMA synchronous=NORMAL;",
      "PRAGMA temp_store=memory;",
      "PRAGMA page_size=4096;",
      "PRAGMA mmap_size=6000000;",
      "PRAGMA optimize;",
      "CREATE TABLE IF NOT EXISTS info (key TEXT PRIMARY KEY, value TEXT NOT NULL);",
    );
    let mut sql =
      String::with_capacity(COMMON_SQL.len() + self.table_initializer.len());
    sql.push_str(COMMON_SQL);
    sql.push_str(self.table_initializer);
    sql
  }
}
/// State of the lazily-initialized database connection.
#[derive(Debug)]
enum ConnectionState {
  /// Connection opened and initialized successfully.
  Connected(Connection),
  /// Writes are ignored and reads return defaults (see `with_connection`).
  Blackhole,
  /// Initialization failed; the error is cached and re-reported on each use.
  Error(Arc<AnyError>),
}
/// A cache database that eagerly initializes itself off-thread, preventing initialization operations
/// from blocking the main thread.
#[derive(Debug, Clone)]
pub struct CacheDB {
  // TODO(mmastrac): We can probably simplify our thread-safe implementation here
  // Lazily-initialized connection state, shared across clones.
  conn: Arc<Mutex<OnceCell<ConnectionState>>>,
  // `None` for in-memory databases.
  path: Option<PathBuf>,
  config: &'static CacheDBConfiguration,
  version: &'static str,
}
impl Drop for CacheDB {
  fn drop(&mut self) {
    // No need to clean up an in-memory cache in any way -- just drop and go.
    let path = match self.path.take() {
      Some(path) => path,
      _ => return,
    };

    // If Deno is panicking, tokio is sometimes gone before we have a chance to shutdown. In
    // that case, we just allow the drop to happen as expected.
    if tokio::runtime::Handle::try_current().is_err() {
      return;
    }

    // For on-disk caches, see if we're the last holder of the Arc.
    let arc = std::mem::take(&mut self.conn);
    if let Ok(inner) = Arc::try_unwrap(arc) {
      // Hand off SQLite connection to another thread to do the surprisingly expensive cleanup
      let inner = inner.into_inner().into_inner();
      if let Some(conn) = inner {
        spawn_blocking(move || {
          drop(conn);
          log::trace!(
            "Cleaned up SQLite connection at {}",
            path.to_string_lossy()
          );
        });
      }
    }
  }
}
impl CacheDB {
pub fn in_memory(
config: &'static CacheDBConfiguration,
version: &'static str,
) -> Self {
CacheDB {
conn: Arc::new(Mutex::new(OnceCell::new())),
path: None,
config,
version,
}
}
pub fn from_path(
config: &'static CacheDBConfiguration,
path: PathBuf,
version: &'static str,
) -> Self {
log::debug!("Opening cache {}...", path.to_string_lossy());
let new = Self {
conn: Arc::new(Mutex::new(OnceCell::new())),
path: Some(path),
config,
version,
};
new.spawn_eager_init_thread();
new
}
/// Useful for testing: re-create this cache DB with a different current version.
#[cfg(test)]
pub(crate) fn recreate_with_version(mut self, version: &'static str) -> Self {
// By taking the lock, we know there are no initialization threads alive
drop(self.conn.lock());
let arc = std::mem::take(&mut self.conn);
let conn = match Arc::try_unwrap(arc) {
Err(_) => panic!("Failed to unwrap connection"),
Ok(conn) => match conn.into_inner().into_inner() {
Some(ConnectionState::Connected(conn)) => conn,
_ => panic!("Connection had failed and cannot be unwrapped"),
},
};
Self::initialize_connection(self.config, &conn, version).unwrap();
let cell = OnceCell::new();
_ = cell.set(ConnectionState::Connected(conn));
Self {
conn: Arc::new(Mutex::new(cell)),
path: self.path.clone(),
config: self.config,
version,
}
}
fn spawn_eager_init_thread(&self) {
let clone = self.clone();
debug_assert!(tokio::runtime::Handle::try_current().is_ok());
spawn_blocking(move || {
let lock = clone.conn.lock();
clone.initialize(&lock);
});
}
/// Open the connection in memory or on disk.
fn actually_open_connection(
&self,
path: Option<&Path>,
) -> Result<Connection, rusqlite::Error> {
match path {
// This should never fail unless something is very wrong
None => Connection::open_in_memory(),
Some(path) => Connection::open(path),
}
}
/// Attempt to initialize that connection.
fn initialize_connection(
config: &CacheDBConfiguration,
conn: &Connection,
version: &str,
) -> Result<(), rusqlite::Error> {
let sql = config.create_combined_sql();
conn.execute_batch(&sql)?;
// Check the version
let existing_version = conn
.query_row(
"SELECT value FROM info WHERE key='CLI_VERSION' LIMIT 1",
[],
|row| row.get::<_, String>(0),
)
.optional()?
.unwrap_or_default();
// If Deno has been upgraded, run the SQL to update the version
if existing_version != version {
conn.execute_batch(config.on_version_change)?;
let mut stmt = conn
.prepare("INSERT OR REPLACE INTO info (key, value) VALUES (?1, ?2)")?;
stmt.execute(["CLI_VERSION", version])?;
}
// Preheat any prepared queries
for preheat in config.preheat_queries {
drop(conn.prepare_cached(preheat)?);
}
Ok(())
}
/// Open and initialize a connection.
fn open_connection_and_init(
&self,
path: Option<&Path>,
) -> Result<Connection, rusqlite::Error> {
let conn = self.actually_open_connection(path)?;
Self::initialize_connection(self.config, &conn, self.version)?;
Ok(conn)
}
/// This function represents the policy for dealing with corrupted cache files. We try fairly aggressively
/// to repair the situation, and if we can't, we prefer to log noisily and continue with in-memory caches.
fn open_connection(&self) -> Result<ConnectionState, AnyError> {
open_connection(self.config, self.path.as_deref(), |maybe_path| {
self.open_connection_and_init(maybe_path)
})
}
fn initialize<'a>(
&self,
lock: &'a MutexGuard<OnceCell<ConnectionState>>,
) -> &'a ConnectionState {
lock.get_or_init(|| match self.open_connection() {
Ok(conn) => conn,
Err(e) => ConnectionState::Error(e.into()),
})
}
pub fn with_connection<T: Default>(
&self,
f: impl FnOnce(&Connection) -> Result<T, AnyError>,
) -> Result<T, AnyError> {
let lock = self.conn.lock();
let conn = self.initialize(&lock);
match conn {
ConnectionState::Blackhole => {
// Cache is a blackhole - nothing in or out.
Ok(T::default())
}
ConnectionState::Error(e) => {
// This isn't ideal because we lose the original underlying error
let err = AnyError::msg(e.clone().to_string());
Err(err)
}
ConnectionState::Connected(conn) => f(conn),
}
}
#[cfg(test)]
pub fn ensure_connected(&self) -> Result<(), AnyError> {
self.with_connection(|_| Ok(()))
}
pub fn execute(
&self,
sql: &'static str,
params: impl Params,
) -> Result<usize, AnyError> {
self.with_connection(|conn| {
let mut stmt = conn.prepare_cached(sql)?;
let res = stmt.execute(params)?;
Ok(res)
})
}
pub fn exists(
&self,
sql: &'static str,
params: impl Params,
) -> Result<bool, AnyError> {
self.with_connection(|conn| {
let mut stmt = conn.prepare_cached(sql)?;
let res = stmt.exists(params)?;
Ok(res)
})
}
/// Query a row from the database with a mapping function.
pub fn query_row<T, F>(
&self,
sql: &'static str,
params: impl Params,
f: F,
) -> Result<Option<T>, AnyError>
where
F: FnOnce(&rusqlite::Row<'_>) -> Result<T, AnyError>,
{
let res = self.with_connection(|conn| {
let mut stmt = conn.prepare_cached(sql)?;
let mut rows = stmt.query(params)?;
if let Some(row) = rows.next()? {
let res = f(row)?;
Ok(Some(res))
} else {
Ok(None)
}
})?;
Ok(res)
}
}
/// This function represents the policy for dealing with corrupted cache files. We try fairly aggressively
/// to repair the situation, and if we can't, we prefer to log noisily and continue with in-memory caches.
///
/// Strategy: try opening once; on failure, ensure the parent directory exists
/// and retry; on failure again, delete the file and retry one last time;
/// finally fall back to the configured [`CacheFailure`] mode.
fn open_connection(
  config: &CacheDBConfiguration,
  path: Option<&Path>,
  open_connection_and_init: impl Fn(
    Option<&Path>,
  ) -> Result<Connection, rusqlite::Error>,
) -> Result<ConnectionState, AnyError> {
  // Success on first try? We hope that this is the case.
  let err = match open_connection_and_init(path) {
    Ok(conn) => return Ok(ConnectionState::Connected(conn)),
    Err(err) => err,
  };

  // `path` is already an `Option<&Path>`, so pattern match it directly
  // (the previous `.as_ref()` needlessly produced an `&&Path`)
  let Some(path) = path else {
    // If an in-memory DB fails, that's game over
    log::error!("Failed to initialize in-memory cache database.");
    return Err(err.into());
  };

  // reduce logging for readonly file system
  if let rusqlite::Error::SqliteFailure(ffi_err, _) = &err
    && ffi_err.code == rusqlite::ErrorCode::ReadOnly
  {
    log::debug!(
      "Failed creating cache db. Folder readonly: {}",
      path.display()
    );
    return handle_failure_mode(config, err, open_connection_and_init);
  }

  // ensure the parent directory exists
  if let Some(parent) = path.parent() {
    match std::fs::create_dir_all(parent) {
      Ok(_) => {
        log::debug!("Created parent directory for cache db.");
      }
      Err(err) => {
        log::debug!("Failed creating the cache db parent dir: {:#}", err);
      }
    }
  }

  // There are rare times in the tests when we can't initialize a cache DB the first time, but it succeeds the second time, so
  // we don't log these at a debug level.
  log::trace!(
    "Could not initialize cache database '{}', retrying... ({err:?})",
    path.to_string_lossy(),
  );

  // Try a second time
  let err = match open_connection_and_init(Some(path)) {
    Ok(conn) => return Ok(ConnectionState::Connected(conn)),
    Err(err) => err,
  };

  // Failed, try deleting it
  let is_tty = std::io::stderr().is_terminal();
  log::log!(
    if is_tty {
      log::Level::Warn
    } else {
      log::Level::Trace
    },
    "Could not initialize cache database '{}', deleting and retrying... ({err:?})",
    path.to_string_lossy()
  );
  if std::fs::remove_file(path).is_ok() {
    // Try a third time if we successfully deleted it
    let res = open_connection_and_init(Some(path));
    if let Ok(conn) = res {
      return Ok(ConnectionState::Connected(conn));
    };
  }

  log_failure_mode(path, is_tty, config);
  handle_failure_mode(config, err, open_connection_and_init)
}
fn log_failure_mode(path: &Path, is_tty: bool, config: &CacheDBConfiguration) {
match config.on_failure {
CacheFailure::InMemory => {
log::log!(
if is_tty {
log::Level::Error
} else {
log::Level::Trace
},
"Failed to open cache file '{}', opening in-memory cache.",
path.display()
);
}
CacheFailure::Blackhole => {
log::log!(
if is_tty {
log::Level::Error
} else {
log::Level::Trace
},
"Failed to open cache file '{}', performance may be degraded.",
path.display()
);
}
CacheFailure::Error => {
log::error!(
"Failed to open cache file '{}', expect further errors.",
path.display()
);
}
}
}
/// Produces the final connection state after all open/repair attempts failed,
/// according to the configured failure policy.
fn handle_failure_mode(
  config: &CacheDBConfiguration,
  err: rusqlite::Error,
  open_connection_and_init: impl Fn(
    Option<&Path>,
  ) -> Result<Connection, rusqlite::Error>,
) -> Result<ConnectionState, AnyError> {
  match config.on_failure {
    CacheFailure::InMemory => {
      // fall back to a non-persistent database
      let conn = open_connection_and_init(None)?;
      Ok(ConnectionState::Connected(conn))
    }
    CacheFailure::Blackhole => Ok(ConnectionState::Blackhole),
    CacheFailure::Error => Err(err.into()),
  }
}
#[cfg(test)]
mod tests {
  use test_util::TempDir;

  use super::*;

  // Valid configuration used for the happy-path tests.
  static TEST_DB: CacheDBConfiguration = CacheDBConfiguration {
    table_initializer: "create table if not exists test(value TEXT);",
    on_version_change: "delete from test;",
    preheat_queries: &[],
    on_failure: CacheFailure::InMemory,
  };

  // Configurations with intentionally invalid SQL to exercise failure modes.
  static TEST_DB_BLACKHOLE: CacheDBConfiguration = CacheDBConfiguration {
    table_initializer: "syntax error", // intentionally cause an error
    on_version_change: "",
    preheat_queries: &[],
    on_failure: CacheFailure::Blackhole,
  };

  static TEST_DB_ERROR: CacheDBConfiguration = CacheDBConfiguration {
    table_initializer: "syntax error", // intentionally cause an error
    on_version_change: "",
    preheat_queries: &[],
    on_failure: CacheFailure::Error,
  };

  static BAD_SQL_TEST_DB: CacheDBConfiguration = CacheDBConfiguration {
    table_initializer: "bad sql;",
    on_version_change: "delete from test;",
    preheat_queries: &[],
    on_failure: CacheFailure::InMemory,
  };

  #[tokio::test]
  async fn simple_database() {
    let db = CacheDB::in_memory(&TEST_DB, "1.0");
    db.ensure_connected()
      .expect("Failed to initialize in-memory database");

    db.execute("insert into test values (?1)", [1]).unwrap();
    let res = db
      .query_row("select * from test", [], |row| {
        Ok(row.get::<_, String>(0).unwrap())
      })
      .unwrap();
    assert_eq!(res, Some("1".into()));
  }

  #[tokio::test]
  async fn bad_sql() {
    // an in-memory db has no fallback, so bad initializer SQL must error
    let db = CacheDB::in_memory(&BAD_SQL_TEST_DB, "1.0");
    db.ensure_connected()
      .expect_err("Expected to fail, but succeeded");
  }

  #[tokio::test]
  async fn failure_mode_in_memory() {
    let temp_dir = TempDir::new();
    let path = temp_dir.path().join("data").to_path_buf();
    // fail every on-disk attempt; the InMemory policy should still connect
    let state = open_connection(&TEST_DB, Some(path.as_path()), |maybe_path| {
      match maybe_path {
        // this error was chosen because it was an error easy to construct
        Some(_) => Err(rusqlite::Error::SqliteSingleThreadedMode),
        None => Ok(Connection::open_in_memory().unwrap()),
      }
    })
    .unwrap();
    assert!(matches!(state, ConnectionState::Connected(_)));
  }

  #[tokio::test]
  async fn failure_mode_blackhole() {
    let temp_dir = TempDir::new();
    let path = temp_dir.path().join("data");
    let db = CacheDB::from_path(&TEST_DB_BLACKHOLE, path.to_path_buf(), "1.0");
    db.ensure_connected()
      .expect("Should have created a database");

    // a blackhole cache accepts writes but reads nothing back
    db.execute("insert into test values (?1)", [1]).unwrap();
    let res = db
      .query_row("select * from test", [], |row| {
        Ok(row.get::<_, String>(0).unwrap())
      })
      .unwrap();
    assert_eq!(res, None);
  }

  #[tokio::test]
  async fn failure_mode_error() {
    let temp_dir = TempDir::new();
    let path = temp_dir.path().join("data");
    let db = CacheDB::from_path(&TEST_DB_ERROR, path.to_path_buf(), "1.0");
    db.ensure_connected().expect_err("Should have failed");

    // every subsequent operation keeps reporting the cached failure
    db.execute("insert into test values (?1)", [1])
      .expect_err("Should have failed");
    db.query_row("select * from test", [], |row| {
      Ok(row.get::<_, String>(0).unwrap())
    })
    .expect_err("Should have failed");
  }

  #[test]
  fn cache_db_hash_max_u64_value() {
    // boundary values must round-trip through the i64 storage trick
    assert_same_serialize_deserialize(CacheDBHash::new(u64::MAX));
    assert_same_serialize_deserialize(CacheDBHash::new(u64::MAX - 1));
    assert_same_serialize_deserialize(CacheDBHash::new(u64::MIN));
    assert_same_serialize_deserialize(CacheDBHash::new(1));
  }

  fn assert_same_serialize_deserialize(original_hash: CacheDBHash) {
    use rusqlite::ToSql;
    use rusqlite::types::FromSql;
    use rusqlite::types::ValueRef;

    let value = original_hash.to_sql().unwrap();
    match value {
      rusqlite::types::ToSqlOutput::Owned(rusqlite::types::Value::Integer(
        value,
      )) => {
        let value_ref = ValueRef::Integer(value);
        assert_eq!(
          original_hash,
          CacheDBHash::column_result(value_ref).unwrap()
        );
      }
      _ => unreachable!(),
    }
  }
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/cache/incremental.rs | cli/cache/incremental.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::collections::HashMap;
use std::path::Path;
use std::path::PathBuf;
use deno_core::error::AnyError;
use deno_core::parking_lot::Mutex;
use deno_core::unsync::JoinHandle;
use deno_core::unsync::spawn;
use deno_runtime::deno_webstorage::rusqlite::params;
use super::cache_db::CacheDB;
use super::cache_db::CacheDBConfiguration;
use super::cache_db::CacheDBHash;
use super::cache_db::CacheFailure;
/// Database configuration for the incremental cache: one row per file path
/// storing the configuration state hash and the file's source hash.
pub static INCREMENTAL_CACHE_DB: CacheDBConfiguration = CacheDBConfiguration {
  table_initializer: concat!(
    "CREATE TABLE IF NOT EXISTS incrementalcache (",
    "file_path TEXT PRIMARY KEY,",
    "state_hash INTEGER NOT NULL,",
    "source_hash INTEGER NOT NULL",
    ");"
  ),
  on_version_change: "DELETE FROM incrementalcache;",
  preheat_queries: &[],
  // If the cache fails, just ignore all caching attempts
  on_failure: CacheFailure::Blackhole,
};
/// Cache used to skip formatting/linting a file again when we
/// know it is already formatted or has no lint diagnostics.
///
/// Thin public wrapper over [`IncrementalCacheInner`].
pub struct IncrementalCache(IncrementalCacheInner);
impl IncrementalCache {
  /// Creates the cache, pre-loading stored hashes for `initial_file_paths`.
  pub fn new(
    db: CacheDB,
    state_hash: CacheDBHash,
    initial_file_paths: &[PathBuf],
  ) -> Self {
    IncrementalCache(IncrementalCacheInner::new(
      db,
      state_hash,
      initial_file_paths,
    ))
  }

  /// Returns true when `file_text` hashes to the value that was stored for
  /// `file_path` when this cache was created.
  pub fn is_file_same(&self, file_path: &Path, file_text: &str) -> bool {
    self.0.is_file_same(file_path, file_text)
  }

  /// Queues an asynchronous update of the stored hash for `file_path`.
  pub fn update_file(&self, file_path: &Path, file_text: &str) {
    self.0.update_file(file_path, file_text)
  }

  /// Waits until all queued updates have been written to the database.
  pub async fn wait_completion(&self) {
    self.0.wait_completion().await;
  }
}
/// Messages sent to the background task that persists cache updates.
enum ReceiverMessage {
  // persist this (path, source hash) pair
  Update(PathBuf, CacheDBHash),
  // stop the background task
  Exit,
}
struct IncrementalCacheInner {
  // hashes loaded from the db for the initial set of files
  previous_hashes: HashMap<PathBuf, CacheDBHash>,
  // channel to the background write task
  sender: tokio::sync::mpsc::UnboundedSender<ReceiverMessage>,
  // handle to the background write task; taken by `wait_completion`
  handle: Mutex<Option<JoinHandle<()>>>,
}
impl IncrementalCacheInner {
  pub fn new(
    db: CacheDB,
    state_hash: CacheDBHash,
    initial_file_paths: &[PathBuf],
  ) -> Self {
    let sql_cache = SqlIncrementalCache::new(db, state_hash);
    Self::from_sql_incremental_cache(sql_cache, initial_file_paths)
  }

  fn from_sql_incremental_cache(
    cache: SqlIncrementalCache,
    initial_file_paths: &[PathBuf],
  ) -> Self {
    // pre-load the stored hashes for the files we are about to process
    let mut previous_hashes = HashMap::new();
    for path in initial_file_paths {
      if let Some(hash) = cache.get_source_hash(path) {
        previous_hashes.insert(path.to_path_buf(), hash);
      }
    }

    let (sender, mut receiver) =
      tokio::sync::mpsc::unbounded_channel::<ReceiverMessage>();

    // sqlite isn't `Sync`, so we do all the updating on a dedicated task
    let handle = spawn(async move {
      while let Some(message) = receiver.recv().await {
        match message {
          ReceiverMessage::Update(path, hash) => {
            // write errors are deliberately ignored (best-effort cache)
            let _ = cache.set_source_hash(&path, hash);
          }
          ReceiverMessage::Exit => break,
        }
      }
    });

    IncrementalCacheInner {
      previous_hashes,
      sender,
      handle: Mutex::new(Some(handle)),
    }
  }

  /// Returns true when `file_text` hashes to the stored hash for `file_path`.
  pub fn is_file_same(&self, file_path: &Path, file_text: &str) -> bool {
    match self.previous_hashes.get(file_path) {
      Some(hash) => *hash == CacheDBHash::from_hashable(file_text),
      None => false,
    }
  }

  /// Queues a db write for `file_path`'s new hash (skipped when unchanged).
  pub fn update_file(&self, file_path: &Path, file_text: &str) {
    let hash = CacheDBHash::from_hashable(file_text);
    if let Some(previous_hash) = self.previous_hashes.get(file_path)
      && *previous_hash == hash
    {
      return; // do not bother updating the db file because nothing has changed
    }

    let _ = self
      .sender
      .send(ReceiverMessage::Update(file_path.to_path_buf(), hash));
  }

  /// Signals the write task to exit, then waits for it to flush and finish.
  pub async fn wait_completion(&self) {
    if self.sender.send(ReceiverMessage::Exit).is_err() {
      return;
    }
    let handle = self.handle.lock().take();
    if let Some(handle) = handle {
      handle.await.unwrap();
    }
  }
}
/// Direct SQL access to the incremental cache table.
struct SqlIncrementalCache {
  // underlying cache database connection
  conn: CacheDB,
  /// A hash of the state used to produce the formatting/linting other than
  /// the CLI version. This state is a hash of the configuration and ensures
  /// we format/lint a file when the configuration changes.
  state_hash: CacheDBHash,
}
impl SqlIncrementalCache {
  pub fn new(conn: CacheDB, state_hash: CacheDBHash) -> Self {
    Self { conn, state_hash }
  }

  /// Looks up the stored source hash for `path`, returning `None` on a miss.
  /// A db error panics in debug builds and is swallowed in release builds.
  pub fn get_source_hash(&self, path: &Path) -> Option<CacheDBHash> {
    match self.get_source_hash_result(path) {
      Ok(option) => option,
      Err(err) => {
        if cfg!(debug_assertions) {
          panic!("Error retrieving hash: {err}");
        } else {
          // fail silently when not debugging
          None
        }
      }
    }
  }

  /// Fallible lookup keyed by both the file path and the current state hash.
  fn get_source_hash_result(
    &self,
    path: &Path,
  ) -> Result<Option<CacheDBHash>, AnyError> {
    let query = "
      SELECT
        source_hash
      FROM
        incrementalcache
      WHERE
        file_path=?1
        AND state_hash=?2
      LIMIT 1";
    let res = self.conn.query_row(
      query,
      params![path.to_string_lossy(), self.state_hash],
      |row| {
        let hash: CacheDBHash = row.get(0)?;
        Ok(hash)
      },
    )?;
    Ok(res)
  }

  /// Upserts the source hash for `path` under the current state hash.
  pub fn set_source_hash(
    &self,
    path: &Path,
    source_hash: CacheDBHash,
  ) -> Result<(), AnyError> {
    let sql = "
      INSERT OR REPLACE INTO
        incrementalcache (file_path, state_hash, source_hash)
      VALUES
        (?1, ?2, ?3)";
    self.conn.execute(
      sql,
      params![path.to_string_lossy(), self.state_hash, source_hash],
    )?;
    Ok(())
  }
}
#[cfg(test)]
mod test {
  use std::path::PathBuf;

  use super::*;

  #[test]
  pub fn sql_cache_general_use() {
    let conn = CacheDB::in_memory(&INCREMENTAL_CACHE_DB, "1.0.0");
    let cache = SqlIncrementalCache::new(conn, CacheDBHash::new(1));
    let path = PathBuf::from("/mod.ts");

    // miss before anything is stored, hit afterwards
    assert_eq!(cache.get_source_hash(&path), None);
    cache.set_source_hash(&path, CacheDBHash::new(2)).unwrap();
    assert_eq!(cache.get_source_hash(&path), Some(CacheDBHash::new(2)));

    // try changing the cli version (should clear)
    let conn = cache.conn.recreate_with_version("2.0.0");
    let mut cache = SqlIncrementalCache::new(conn, CacheDBHash::new(1));
    assert_eq!(cache.get_source_hash(&path), None);

    // add back the file to the cache
    cache.set_source_hash(&path, CacheDBHash::new(2)).unwrap();
    assert_eq!(cache.get_source_hash(&path), Some(CacheDBHash::new(2)));

    // try changing the state hash
    cache.state_hash = CacheDBHash::new(2);
    assert_eq!(cache.get_source_hash(&path), None);
    cache.state_hash = CacheDBHash::new(1);

    // should return now that everything is back
    assert_eq!(cache.get_source_hash(&path), Some(CacheDBHash::new(2)));

    // recreating the cache should not remove the data because the CLI version and state hash is the same
    let conn = cache.conn.recreate_with_version("2.0.0");
    let cache = SqlIncrementalCache::new(conn, CacheDBHash::new(1));
    assert_eq!(cache.get_source_hash(&path), Some(CacheDBHash::new(2)));

    // now try replacing and using another path
    cache.set_source_hash(&path, CacheDBHash::new(3)).unwrap();
    cache.set_source_hash(&path, CacheDBHash::new(4)).unwrap();
    let path2 = PathBuf::from("/mod2.ts");
    cache.set_source_hash(&path2, CacheDBHash::new(5)).unwrap();
    assert_eq!(cache.get_source_hash(&path), Some(CacheDBHash::new(4)));
    assert_eq!(cache.get_source_hash(&path2), Some(CacheDBHash::new(5)));
  }

  #[tokio::test]
  pub async fn incremental_cache_general_use() {
    let conn = CacheDB::in_memory(&INCREMENTAL_CACHE_DB, "1.0.0");
    let sql_cache = SqlIncrementalCache::new(conn, CacheDBHash::new(1));
    let file_path = PathBuf::from("/mod.ts");
    let file_text = "test";
    let file_hash = CacheDBHash::from_hashable(file_text);
    sql_cache.set_source_hash(&file_path, file_hash).unwrap();
    let cache = IncrementalCacheInner::from_sql_incremental_cache(
      sql_cache,
      std::slice::from_ref(&file_path),
    );

    // pre-loaded hash matches the same text, not different text
    assert!(cache.is_file_same(&file_path, "test"));
    assert!(!cache.is_file_same(&file_path, "other"));

    // just ensure this doesn't panic
    cache.update_file(&file_path, "other");
  }
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/rt/node.rs | cli/rt/node.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::borrow::Cow;
use std::sync::Arc;
use deno_core::url::Url;
use deno_error::JsErrorBox;
use deno_lib::standalone::binary::CjsExportAnalysisEntry;
use deno_media_type::MediaType;
use deno_resolver::loader::NpmModuleLoader;
use deno_resolver::npm::DenoInNpmPackageChecker;
use deno_resolver::npm::NpmReqResolver;
use deno_runtime::deno_fs::FileSystem;
use deno_runtime::deno_permissions::CheckedPath;
use node_resolver::DenoIsBuiltInNodeModuleChecker;
use node_resolver::analyze::CjsAnalysis;
use node_resolver::analyze::CjsAnalysisExports;
use node_resolver::analyze::EsmAnalysisMode;
use node_resolver::analyze::NodeCodeTranslator;
use crate::binary::StandaloneModules;
use crate::file_system::DenoRtSys;
// Type aliases binding the generic resolver/node machinery to the
// deno_rt (compiled executable) system implementation (`DenoRtSys`).

pub type DenoRtCjsTracker =
  deno_resolver::cjs::CjsTracker<DenoInNpmPackageChecker, DenoRtSys>;
pub type DenoRtNpmResolver = deno_resolver::npm::NpmResolver<DenoRtSys>;
pub type DenoRtNpmModuleLoader = NpmModuleLoader<
  CjsCodeAnalyzer,
  DenoInNpmPackageChecker,
  DenoIsBuiltInNodeModuleChecker,
  DenoRtNpmResolver,
  DenoRtSys,
>;
pub type DenoRtNodeCodeTranslator = NodeCodeTranslator<
  CjsCodeAnalyzer,
  DenoInNpmPackageChecker,
  DenoIsBuiltInNodeModuleChecker,
  DenoRtNpmResolver,
  DenoRtSys,
>;
pub type DenoRtNodeResolver = deno_runtime::deno_node::NodeResolver<
  DenoInNpmPackageChecker,
  DenoRtNpmResolver,
  DenoRtSys,
>;
pub type DenoRtNpmReqResolver = NpmReqResolver<
  DenoInNpmPackageChecker,
  DenoIsBuiltInNodeModuleChecker,
  DenoRtNpmResolver,
  DenoRtSys,
>;
/// CJS analyzer for compiled executables: looks up the CJS export analysis
/// that was precomputed and stored in the standalone binary's modules
/// instead of analyzing source at runtime.
pub struct CjsCodeAnalyzer {
  cjs_tracker: Arc<DenoRtCjsTracker>,
  modules: Arc<StandaloneModules>,
  sys: DenoRtSys,
}
impl CjsCodeAnalyzer {
  pub fn new(
    cjs_tracker: Arc<DenoRtCjsTracker>,
    modules: Arc<StandaloneModules>,
    sys: DenoRtSys,
  ) -> Self {
    Self {
      cjs_tracker,
      modules,
      sys,
    }
  }

  /// Determines whether `specifier` is ESM or CJS (with its exports) using
  /// the precomputed export analysis stored in the standalone binary.
  /// Falls back to assuming ESM when no stored analysis exists.
  fn inner_cjs_analysis<'a>(
    &self,
    specifier: &Url,
    source: Cow<'a, str>,
  ) -> Result<CjsAnalysis<'a>, JsErrorBox> {
    let media_type = MediaType::from_specifier(specifier);
    if media_type == MediaType::Json {
      return Ok(CjsAnalysis::Cjs(CjsAnalysisExports {
        exports: vec![],
        reexports: vec![],
      }));
    }

    // note: no need to clone the Arc here; the tracker methods
    // only require a shared reference
    let is_maybe_cjs = self
      .cjs_tracker
      .is_maybe_cjs(specifier, media_type)
      .map_err(JsErrorBox::from_err)?;
    let analysis = if is_maybe_cjs {
      let data = self
        .modules
        .read(specifier)?
        .and_then(|d| d.cjs_export_analysis);
      match data {
        Some(data) => {
          let data: CjsExportAnalysisEntry = bincode::deserialize(&data)
            .map_err(|err| JsErrorBox::generic(err.to_string()))?;
          match data {
            CjsExportAnalysisEntry::Esm => {
              self.cjs_tracker.set_is_known_script(specifier, false);
              CjsAnalysis::Esm(source, None)
            }
            CjsExportAnalysisEntry::Cjs(exports) => {
              self.cjs_tracker.set_is_known_script(specifier, true);
              CjsAnalysis::Cjs(CjsAnalysisExports {
                exports,
                reexports: Vec::new(), // already resolved
              })
            }
            CjsExportAnalysisEntry::Error(err) => {
              return Err(JsErrorBox::generic(err));
            }
          }
        }
        None => {
          if log::log_enabled!(log::Level::Debug) {
            if self.sys.is_specifier_in_vfs(specifier) {
              log::debug!(
                "No CJS export analysis was stored for '{}'. Assuming ESM. This might indicate a bug in Deno.",
                specifier
              );
            } else {
              log::debug!(
                "Analyzing potentially CommonJS files is not supported at runtime in a compiled executable ({}). Assuming ESM.",
                specifier
              );
            }
          }
          // assume ESM as we don't have access to swc here
          CjsAnalysis::Esm(source, None)
        }
      }
    } else {
      CjsAnalysis::Esm(source, None)
    };
    Ok(analysis)
  }
}
#[async_trait::async_trait(?Send)]
impl node_resolver::analyze::CjsCodeAnalyzer for CjsCodeAnalyzer {
  async fn analyze_cjs<'a>(
    &self,
    specifier: &Url,
    source: Option<Cow<'a, str>>,
    _esm_analysis_mode: EsmAnalysisMode,
  ) -> Result<CjsAnalysis<'a>, JsErrorBox> {
    // When the caller doesn't provide the source, read it from disk. If the
    // specifier isn't a file URL or the file can't be read, report an empty
    // CJS analysis rather than erroring.
    let source = match source {
      Some(source) => source,
      None => {
        if let Ok(path) = deno_path_util::url_to_file_path(specifier) {
          // PERMISSIONS: This is ok because it's just being used for cjs analysis
          let path = CheckedPath::unsafe_new(Cow::Owned(path));
          // todo(dsherret): should this use the sync method instead?
          if let Ok(source_from_file) =
            self.sys.read_text_file_lossy_async(path.into_owned()).await
          {
            source_from_file
          } else {
            return Ok(CjsAnalysis::Cjs(CjsAnalysisExports {
              exports: vec![],
              reexports: vec![],
            }));
          }
        } else {
          return Ok(CjsAnalysis::Cjs(CjsAnalysisExports {
            exports: vec![],
            reexports: vec![],
          }));
        }
      }
    };
    self.inner_cjs_analysis(specifier, source)
  }
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/rt/file_system.rs | cli/rt/file_system.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::borrow::Cow;
use std::cell::RefCell;
use std::collections::HashSet;
use std::io::ErrorKind;
use std::io::SeekFrom;
use std::ops::Range;
use std::path::Path;
use std::path::PathBuf;
#[cfg(unix)]
use std::process::Stdio as StdStdio;
use std::rc::Rc;
use std::sync::Arc;
use std::time::Duration;
use std::time::SystemTime;
use deno_core::BufMutView;
use deno_core::BufView;
use deno_core::ResourceHandleFd;
use deno_lib::standalone::virtual_fs::FileSystemCaseSensitivity;
use deno_lib::standalone::virtual_fs::OffsetWithLength;
use deno_lib::standalone::virtual_fs::VfsEntry;
use deno_lib::standalone::virtual_fs::VfsEntryRef;
use deno_lib::standalone::virtual_fs::VirtualDirectory;
use deno_lib::standalone::virtual_fs::VirtualFile;
use deno_runtime::deno_fs::FileSystem;
use deno_runtime::deno_fs::FsDirEntry;
use deno_runtime::deno_fs::FsFileType;
use deno_runtime::deno_fs::OpenOptions;
use deno_runtime::deno_fs::RealFs;
use deno_runtime::deno_io;
use deno_runtime::deno_io::fs::File as DenoFile;
use deno_runtime::deno_io::fs::FsError;
use deno_runtime::deno_io::fs::FsResult;
use deno_runtime::deno_io::fs::FsStat;
use deno_runtime::deno_napi::DenoRtNativeAddonLoader;
use deno_runtime::deno_napi::DenoRtNativeAddonLoaderRc;
use deno_runtime::deno_permissions::CheckedPath;
use deno_runtime::deno_permissions::CheckedPathBuf;
#[cfg(windows)]
use deno_subprocess_windows::Stdio as StdStdio;
use sys_traits::FsCopy;
use sys_traits::boxed::BoxedFsDirEntry;
use sys_traits::boxed::BoxedFsMetadataValue;
use sys_traits::boxed::FsMetadataBoxed;
use sys_traits::boxed::FsReadDirBoxed;
use url::Url;
/// System/file-system implementation for compiled executables, backed by the
/// binary's file-backed virtual file system; paths outside the VFS are
/// delegated to the real file system (see `is_path_within` usage below).
#[derive(Debug, Clone)]
pub struct DenoRtSys(Arc<FileBackedVfs>);
impl DenoRtSys {
pub fn new(vfs: Arc<FileBackedVfs>) -> Self {
Self(vfs)
}
pub fn as_deno_rt_native_addon_loader(&self) -> DenoRtNativeAddonLoaderRc {
self.0.clone()
}
pub fn is_specifier_in_vfs(&self, specifier: &Url) -> bool {
deno_path_util::url_to_file_path(specifier)
.map(|p| self.is_in_vfs(&p))
.unwrap_or(false)
}
pub fn is_in_vfs(&self, path: &Path) -> bool {
self.0.is_path_within(path)
}
fn error_if_in_vfs(&self, path: &Path) -> FsResult<()> {
if self.0.is_path_within(path) {
Err(FsError::NotSupported)
} else {
Ok(())
}
}
fn copy_to_real_path(
&self,
oldpath: &CheckedPath,
newpath: &CheckedPath,
) -> std::io::Result<u64> {
let old_file = self.0.file_entry(oldpath)?;
let old_file_bytes = self.0.read_file_all(old_file)?;
let len = old_file_bytes.len() as u64;
RealFs
.write_file_sync(
newpath,
OpenOptions {
read: false,
write: true,
create: true,
truncate: true,
append: false,
create_new: false,
custom_flags: None,
mode: None,
},
&old_file_bytes,
)
.map_err(|err| err.into_io_error())?;
Ok(len)
}
}
/// `deno_fs::FileSystem` dispatcher: read operations on VFS paths are served
/// from `FileBackedVfs`, all other paths fall through to `RealFs`. Every
/// mutating operation on a VFS path fails via `error_if_in_vfs` because the
/// embedded file system is read-only.
#[async_trait::async_trait(?Send)]
impl FileSystem for DenoRtSys {
  fn cwd(&self) -> FsResult<PathBuf> {
    RealFs.cwd()
  }
  fn tmp_dir(&self) -> FsResult<PathBuf> {
    RealFs.tmp_dir()
  }
  fn chdir(&self, path: &CheckedPath) -> FsResult<()> {
    self.error_if_in_vfs(path)?;
    RealFs.chdir(path)
  }
  fn umask(&self, mask: Option<u32>) -> FsResult<u32> {
    RealFs.umask(mask)
  }
  fn open_sync(
    &self,
    path: &CheckedPath,
    options: OpenOptions,
  ) -> FsResult<Rc<dyn DenoFile>> {
    if self.0.is_path_within(path) {
      // `options` are not consulted for VFS files — they are always
      // opened read-only.
      Ok(Rc::new(self.0.open_file(path)?))
    } else {
      RealFs.open_sync(path, options)
    }
  }
  async fn open_async<'a>(
    &'a self,
    path: CheckedPathBuf,
    options: OpenOptions,
  ) -> FsResult<Rc<dyn DenoFile>> {
    if self.0.is_path_within(&path) {
      Ok(Rc::new(self.0.open_file(&path)?))
    } else {
      RealFs.open_async(path, options).await
    }
  }
  fn mkdir_sync(
    &self,
    path: &CheckedPath,
    recursive: bool,
    mode: Option<u32>,
  ) -> FsResult<()> {
    self.error_if_in_vfs(path)?;
    RealFs.mkdir_sync(path, recursive, mode)
  }
  async fn mkdir_async(
    &self,
    path: CheckedPathBuf,
    recursive: bool,
    mode: Option<u32>,
  ) -> FsResult<()> {
    self.error_if_in_vfs(&path)?;
    RealFs.mkdir_async(path, recursive, mode).await
  }
  // `mode` is `u32` on unix and `i32` elsewhere, matching the trait's
  // per-platform signatures.
  #[cfg(unix)]
  fn chmod_sync(&self, path: &CheckedPath, mode: u32) -> FsResult<()> {
    self.error_if_in_vfs(path)?;
    RealFs.chmod_sync(path, mode)
  }
  #[cfg(not(unix))]
  fn chmod_sync(&self, path: &CheckedPath, mode: i32) -> FsResult<()> {
    self.error_if_in_vfs(path)?;
    RealFs.chmod_sync(path, mode)
  }
  #[cfg(unix)]
  async fn chmod_async(&self, path: CheckedPathBuf, mode: u32) -> FsResult<()> {
    self.error_if_in_vfs(&path)?;
    RealFs.chmod_async(path, mode).await
  }
  #[cfg(not(unix))]
  async fn chmod_async(&self, path: CheckedPathBuf, mode: i32) -> FsResult<()> {
    self.error_if_in_vfs(&path)?;
    RealFs.chmod_async(path, mode).await
  }
  fn chown_sync(
    &self,
    path: &CheckedPath,
    uid: Option<u32>,
    gid: Option<u32>,
  ) -> FsResult<()> {
    self.error_if_in_vfs(path)?;
    RealFs.chown_sync(path, uid, gid)
  }
  async fn chown_async(
    &self,
    path: CheckedPathBuf,
    uid: Option<u32>,
    gid: Option<u32>,
  ) -> FsResult<()> {
    self.error_if_in_vfs(&path)?;
    RealFs.chown_async(path, uid, gid).await
  }
  fn exists_sync(&self, path: &CheckedPath) -> bool {
    if self.0.is_path_within(path) {
      self.0.exists(path)
    } else {
      RealFs.exists_sync(path)
    }
  }
  async fn exists_async(&self, path: CheckedPathBuf) -> FsResult<bool> {
    if self.0.is_path_within(&path) {
      Ok(self.0.exists(&path))
    } else {
      RealFs.exists_async(path).await
    }
  }
  fn lchmod_sync(&self, path: &CheckedPath, mode: u32) -> FsResult<()> {
    self.error_if_in_vfs(path)?;
    RealFs.lchmod_sync(path, mode)
  }
  async fn lchmod_async(
    &self,
    path: CheckedPathBuf,
    mode: u32,
  ) -> FsResult<()> {
    self.error_if_in_vfs(&path)?;
    RealFs.lchmod_async(path, mode).await
  }
  fn lchown_sync(
    &self,
    path: &CheckedPath,
    uid: Option<u32>,
    gid: Option<u32>,
  ) -> FsResult<()> {
    self.error_if_in_vfs(path)?;
    RealFs.lchown_sync(path, uid, gid)
  }
  async fn lchown_async(
    &self,
    path: CheckedPathBuf,
    uid: Option<u32>,
    gid: Option<u32>,
  ) -> FsResult<()> {
    self.error_if_in_vfs(&path)?;
    RealFs.lchown_async(path, uid, gid).await
  }
  fn remove_sync(&self, path: &CheckedPath, recursive: bool) -> FsResult<()> {
    self.error_if_in_vfs(path)?;
    RealFs.remove_sync(path, recursive)
  }
  async fn remove_async(
    &self,
    path: CheckedPathBuf,
    recursive: bool,
  ) -> FsResult<()> {
    self.error_if_in_vfs(&path)?;
    RealFs.remove_async(path, recursive).await
  }
  // Copying *out of* the VFS is supported (it materializes the bytes onto
  // the real file system); copying *into* it is not.
  fn copy_file_sync(
    &self,
    oldpath: &CheckedPath,
    newpath: &CheckedPath,
  ) -> FsResult<()> {
    self.error_if_in_vfs(newpath)?;
    if self.0.is_path_within(oldpath) {
      self
        .copy_to_real_path(oldpath, newpath)
        .map(|_| ())
        .map_err(FsError::Io)
    } else {
      RealFs.copy_file_sync(oldpath, newpath)
    }
  }
  async fn copy_file_async(
    &self,
    oldpath: CheckedPathBuf,
    newpath: CheckedPathBuf,
  ) -> FsResult<()> {
    self.error_if_in_vfs(&newpath)?;
    if self.0.is_path_within(&oldpath) {
      // VFS reads are synchronous I/O, so run the copy on a blocking thread
      // rather than stalling the async runtime.
      let fs = self.clone();
      tokio::task::spawn_blocking(move || {
        fs.copy_to_real_path(
          &oldpath.as_checked_path(),
          &newpath.as_checked_path(),
        )
        .map(|_| ())
        .map_err(FsError::Io)
      })
      .await?
    } else {
      RealFs.copy_file_async(oldpath, newpath).await
    }
  }
  // NOTE(review): unlike `copy_file_*`, `cp_*` does not special-case a VFS
  // source, so `cp` from inside the VFS delegates to RealFs — confirm this
  // is intentional.
  fn cp_sync(&self, from: &CheckedPath, to: &CheckedPath) -> FsResult<()> {
    self.error_if_in_vfs(to)?;
    RealFs.cp_sync(from, to)
  }
  async fn cp_async(
    &self,
    from: CheckedPathBuf,
    to: CheckedPathBuf,
  ) -> FsResult<()> {
    self.error_if_in_vfs(&to)?;
    RealFs.cp_async(from, to).await
  }
  fn stat_sync(&self, path: &CheckedPath) -> FsResult<FsStat> {
    if self.0.is_path_within(path) {
      Ok(self.0.stat(path)?.as_fs_stat())
    } else {
      RealFs.stat_sync(path)
    }
  }
  async fn stat_async(&self, path: CheckedPathBuf) -> FsResult<FsStat> {
    if self.0.is_path_within(&path) {
      Ok(self.0.stat(&path)?.as_fs_stat())
    } else {
      RealFs.stat_async(path).await
    }
  }
  fn lstat_sync(&self, path: &CheckedPath) -> FsResult<FsStat> {
    if self.0.is_path_within(path) {
      Ok(self.0.lstat(path)?.as_fs_stat())
    } else {
      RealFs.lstat_sync(path)
    }
  }
  async fn lstat_async(&self, path: CheckedPathBuf) -> FsResult<FsStat> {
    if self.0.is_path_within(&path) {
      Ok(self.0.lstat(&path)?.as_fs_stat())
    } else {
      RealFs.lstat_async(path).await
    }
  }
  fn realpath_sync(&self, path: &CheckedPath) -> FsResult<PathBuf> {
    if self.0.is_path_within(path) {
      Ok(self.0.canonicalize(path)?)
    } else {
      RealFs.realpath_sync(path)
    }
  }
  async fn realpath_async(&self, path: CheckedPathBuf) -> FsResult<PathBuf> {
    if self.0.is_path_within(&path) {
      Ok(self.0.canonicalize(&path)?)
    } else {
      RealFs.realpath_async(path).await
    }
  }
  fn read_dir_sync(&self, path: &CheckedPath) -> FsResult<Vec<FsDirEntry>> {
    if self.0.is_path_within(path) {
      Ok(self.0.read_dir(path)?)
    } else {
      RealFs.read_dir_sync(path)
    }
  }
  async fn read_dir_async(
    &self,
    path: CheckedPathBuf,
  ) -> FsResult<Vec<FsDirEntry>> {
    if self.0.is_path_within(&path) {
      Ok(self.0.read_dir(&path)?)
    } else {
      RealFs.read_dir_async(path).await
    }
  }
  fn rename_sync(
    &self,
    oldpath: &CheckedPath,
    newpath: &CheckedPath,
  ) -> FsResult<()> {
    self.error_if_in_vfs(oldpath)?;
    self.error_if_in_vfs(newpath)?;
    RealFs.rename_sync(oldpath, newpath)
  }
  async fn rename_async(
    &self,
    oldpath: CheckedPathBuf,
    newpath: CheckedPathBuf,
  ) -> FsResult<()> {
    self.error_if_in_vfs(&oldpath)?;
    self.error_if_in_vfs(&newpath)?;
    RealFs.rename_async(oldpath, newpath).await
  }
  fn link_sync(
    &self,
    oldpath: &CheckedPath,
    newpath: &CheckedPath,
  ) -> FsResult<()> {
    self.error_if_in_vfs(oldpath)?;
    self.error_if_in_vfs(newpath)?;
    RealFs.link_sync(oldpath, newpath)
  }
  async fn link_async(
    &self,
    oldpath: CheckedPathBuf,
    newpath: CheckedPathBuf,
  ) -> FsResult<()> {
    self.error_if_in_vfs(&oldpath)?;
    self.error_if_in_vfs(&newpath)?;
    RealFs.link_async(oldpath, newpath).await
  }
  fn symlink_sync(
    &self,
    oldpath: &CheckedPath,
    newpath: &CheckedPath,
    file_type: Option<FsFileType>,
  ) -> FsResult<()> {
    self.error_if_in_vfs(oldpath)?;
    self.error_if_in_vfs(newpath)?;
    RealFs.symlink_sync(oldpath, newpath, file_type)
  }
  async fn symlink_async(
    &self,
    oldpath: CheckedPathBuf,
    newpath: CheckedPathBuf,
    file_type: Option<FsFileType>,
  ) -> FsResult<()> {
    self.error_if_in_vfs(&oldpath)?;
    self.error_if_in_vfs(&newpath)?;
    RealFs.symlink_async(oldpath, newpath, file_type).await
  }
  fn read_link_sync(&self, path: &CheckedPath) -> FsResult<PathBuf> {
    if self.0.is_path_within(path) {
      Ok(self.0.read_link(path)?)
    } else {
      RealFs.read_link_sync(path)
    }
  }
  async fn read_link_async(&self, path: CheckedPathBuf) -> FsResult<PathBuf> {
    if self.0.is_path_within(&path) {
      Ok(self.0.read_link(&path)?)
    } else {
      RealFs.read_link_async(path).await
    }
  }
  fn truncate_sync(&self, path: &CheckedPath, len: u64) -> FsResult<()> {
    self.error_if_in_vfs(path)?;
    RealFs.truncate_sync(path, len)
  }
  async fn truncate_async(
    &self,
    path: CheckedPathBuf,
    len: u64,
  ) -> FsResult<()> {
    self.error_if_in_vfs(&path)?;
    RealFs.truncate_async(path, len).await
  }
  fn utime_sync(
    &self,
    path: &CheckedPath,
    atime_secs: i64,
    atime_nanos: u32,
    mtime_secs: i64,
    mtime_nanos: u32,
  ) -> FsResult<()> {
    self.error_if_in_vfs(path)?;
    RealFs.utime_sync(path, atime_secs, atime_nanos, mtime_secs, mtime_nanos)
  }
  async fn utime_async(
    &self,
    path: CheckedPathBuf,
    atime_secs: i64,
    atime_nanos: u32,
    mtime_secs: i64,
    mtime_nanos: u32,
  ) -> FsResult<()> {
    self.error_if_in_vfs(&path)?;
    RealFs
      .utime_async(path, atime_secs, atime_nanos, mtime_secs, mtime_nanos)
      .await
  }
  fn lutime_sync(
    &self,
    path: &CheckedPath,
    atime_secs: i64,
    atime_nanos: u32,
    mtime_secs: i64,
    mtime_nanos: u32,
  ) -> FsResult<()> {
    self.error_if_in_vfs(path)?;
    RealFs.lutime_sync(path, atime_secs, atime_nanos, mtime_secs, mtime_nanos)
  }
  async fn lutime_async(
    &self,
    path: CheckedPathBuf,
    atime_secs: i64,
    atime_nanos: u32,
    mtime_secs: i64,
    mtime_nanos: u32,
  ) -> FsResult<()> {
    self.error_if_in_vfs(&path)?;
    RealFs
      .lutime_async(path, atime_secs, atime_nanos, mtime_secs, mtime_nanos)
      .await
  }
}
/// `sys_traits` hard-link support, implemented by delegating to the
/// permission-checked `FileSystem::link_sync` above.
impl sys_traits::BaseFsHardLink for DenoRtSys {
  #[inline]
  fn base_fs_hard_link(&self, src: &Path, dst: &Path) -> std::io::Result<()> {
    self
      .link_sync(
        // PERMISSIONS: this is ok because JS code will never use sys_traits. Probably
        // we should flip this so that the `deno_fs::FileSystem` implementation uses `sys_traits`
        // rather than this calling into `deno_fs::FileSystem`
        &CheckedPath::unsafe_new(Cow::Borrowed(src)),
        &CheckedPath::unsafe_new(Cow::Borrowed(dst)),
      )
      .map_err(|err| err.into_io_error())
  }
}
/// `sys_traits` whole-file read, delegating to `FileSystem::read_file_sync`
/// (which serves VFS paths from memory and everything else from disk).
impl sys_traits::BaseFsRead for DenoRtSys {
  #[inline]
  fn base_fs_read(&self, path: &Path) -> std::io::Result<Cow<'static, [u8]>> {
    self
      .read_file_sync(
        // PERMISSIONS: this is ok because JS code will never use sys_traits. Probably
        // we should flip this so that the `deno_fs::FileSystem` implementation uses `sys_traits`
        // rather than this calling into `deno_fs::FileSystem`
        &CheckedPath::unsafe_new(Cow::Borrowed(path)),
        OpenOptions::read(),
      )
      .map_err(|err| err.into_io_error())
  }
}
/// Metadata for a VFS entry. Only the file type and length are real; unix
/// attribute fields (dev/ino/mode/…) are reported as zero and timestamps as
/// unsupported, since the embedded VFS does not store them.
impl sys_traits::FsMetadataValue for FileBackedVfsMetadata {
  fn file_type(&self) -> sys_traits::FileType {
    self.file_type
  }
  fn len(&self) -> u64 {
    self.len
  }
  fn accessed(&self) -> std::io::Result<SystemTime> {
    Err(not_supported("accessed time"))
  }
  fn created(&self) -> std::io::Result<SystemTime> {
    Err(not_supported("created time"))
  }
  fn changed(&self) -> std::io::Result<SystemTime> {
    Err(not_supported("changed time"))
  }
  // NOTE(review): the metadata carries an `mtime` field (see the
  // `FsFileMetadata` impl for `FsFileAdapter`), yet `modified()` reports
  // unsupported — confirm that is intentional.
  fn modified(&self) -> std::io::Result<SystemTime> {
    Err(not_supported("modified time"))
  }
  fn dev(&self) -> std::io::Result<u64> {
    Ok(0)
  }
  fn ino(&self) -> std::io::Result<u64> {
    Ok(0)
  }
  fn mode(&self) -> std::io::Result<u32> {
    Ok(0)
  }
  fn nlink(&self) -> std::io::Result<u64> {
    Ok(0)
  }
  fn uid(&self) -> std::io::Result<u32> {
    Ok(0)
  }
  fn gid(&self) -> std::io::Result<u32> {
    Ok(0)
  }
  fn rdev(&self) -> std::io::Result<u64> {
    Ok(0)
  }
  fn blksize(&self) -> std::io::Result<u64> {
    Ok(0)
  }
  fn blocks(&self) -> std::io::Result<u64> {
    Ok(0)
  }
  fn is_block_device(&self) -> std::io::Result<bool> {
    Ok(false)
  }
  fn is_char_device(&self) -> std::io::Result<bool> {
    Ok(false)
  }
  fn is_fifo(&self) -> std::io::Result<bool> {
    Ok(false)
  }
  fn is_socket(&self) -> std::io::Result<bool> {
    Ok(false)
  }
  fn file_attributes(&self) -> std::io::Result<u32> {
    Ok(0)
  }
}
/// Builds the `ErrorKind::Unsupported` error returned for operations and
/// attributes the embedded (deno compile) virtual file system cannot
/// provide; `name` is a short description of the operation or attribute.
fn not_supported(name: &str) -> std::io::Error {
  let message =
    format!("{name} is not supported for an embedded deno compile file");
  std::io::Error::new(ErrorKind::Unsupported, message)
}
/// Directory-entry view over a VFS entry: the name/type come from the stored
/// metadata and the full path is rebuilt from the parent directory path.
impl sys_traits::FsDirEntry for FileBackedVfsDirEntry {
  type Metadata = BoxedFsMetadataValue;
  #[allow(mismatched_lifetime_syntaxes)]
  fn file_name(&self) -> Cow<std::ffi::OsStr> {
    Cow::Borrowed(self.metadata.name.as_ref())
  }
  fn file_type(&self) -> std::io::Result<sys_traits::FileType> {
    Ok(self.metadata.file_type)
  }
  fn metadata(&self) -> std::io::Result<Self::Metadata> {
    Ok(BoxedFsMetadataValue(Box::new(self.metadata.clone())))
  }
  fn path(&self) -> Cow<'_, Path> {
    Cow::Owned(self.parent_path.join(&self.metadata.name))
  }
}
/// Directory listing: VFS directories come from the in-memory tree (entries
/// are infallible once listed), real directories from the OS.
impl sys_traits::BaseFsReadDir for DenoRtSys {
  type ReadDirEntry = BoxedFsDirEntry;
  fn base_fs_read_dir(
    &self,
    path: &Path,
  ) -> std::io::Result<
    Box<dyn Iterator<Item = std::io::Result<Self::ReadDirEntry>>>,
  > {
    if self.0.is_path_within(path) {
      let entries = self.0.read_dir_with_metadata(path)?;
      Ok(Box::new(
        entries.map(|entry| Ok(BoxedFsDirEntry::new(entry))),
      ))
    } else {
      #[allow(clippy::disallowed_types)] // ok because we're implementing the fs
      sys_traits::impls::RealSys.fs_read_dir_boxed(path)
    }
  }
}
/// Canonicalization, delegating to the VFS-aware `realpath_sync` above.
impl sys_traits::BaseFsCanonicalize for DenoRtSys {
  #[inline]
  fn base_fs_canonicalize(&self, path: &Path) -> std::io::Result<PathBuf> {
    self
      .realpath_sync(
        // PERMISSIONS: this is ok because JS code will never use sys_traits. Probably
        // we should flip this so that the `deno_fs::FileSystem` implementation uses `sys_traits`
        // rather than this calling into `deno_fs::FileSystem`
        &CheckedPath::unsafe_new(Cow::Borrowed(path)),
      )
      .map_err(|err| err.into_io_error())
  }
}
/// `stat`/`lstat` for `sys_traits` callers: VFS paths are answered from the
/// in-memory tree, everything else from the real file system.
impl sys_traits::BaseFsMetadata for DenoRtSys {
  type Metadata = BoxedFsMetadataValue;
  #[inline]
  fn base_fs_metadata(&self, path: &Path) -> std::io::Result<Self::Metadata> {
    if self.0.is_path_within(path) {
      Ok(BoxedFsMetadataValue::new(self.0.stat(path)?))
    } else {
      #[allow(clippy::disallowed_types)] // ok because we're implementing the fs
      sys_traits::impls::RealSys.fs_metadata_boxed(path)
    }
  }
  #[inline]
  fn base_fs_symlink_metadata(
    &self,
    path: &Path,
  ) -> std::io::Result<Self::Metadata> {
    if self.0.is_path_within(path) {
      Ok(BoxedFsMetadataValue::new(self.0.lstat(path)?))
    } else {
      #[allow(clippy::disallowed_types)] // ok because we're implementing the fs
      sys_traits::impls::RealSys.fs_symlink_metadata_boxed(path)
    }
  }
}
/// File copy: the destination must be outside the (read-only) VFS; a VFS
/// source is materialized onto disk via `copy_to_real_path`.
impl sys_traits::BaseFsCopy for DenoRtSys {
  #[inline]
  fn base_fs_copy(&self, from: &Path, to: &Path) -> std::io::Result<u64> {
    self
      .error_if_in_vfs(to)
      .map_err(|err| err.into_io_error())?;
    if self.0.is_path_within(from) {
      self.copy_to_real_path(
        // PERMISSIONS: this is ok because JS code will never use sys_traits. Probably
        // we should flip this so that the `deno_fs::FileSystem` implementation uses `sys_traits`
        // rather than this calling into `deno_fs::FileSystem`
        &CheckedPath::unsafe_new(Cow::Borrowed(from)),
        &CheckedPath::unsafe_new(Cow::Borrowed(to)),
      )
    } else {
      #[allow(clippy::disallowed_types)] // ok because we're implementing the fs
      sys_traits::impls::RealSys.fs_copy(from, to)
    }
  }
}
/// Copy-on-write file cloning is never attempted; returning `Unsupported`
/// makes callers fall back to a regular copy.
impl sys_traits::BaseFsCloneFile for DenoRtSys {
  fn base_fs_clone_file(
    &self,
    _from: &Path,
    _to: &Path,
  ) -> std::io::Result<()> {
    // will cause a fallback in the code that uses this
    Err(not_supported("cloning files"))
  }
}
/// Directory creation, delegating to the VFS-guarded `mkdir_sync` above.
impl sys_traits::BaseFsCreateDir for DenoRtSys {
  #[inline]
  fn base_fs_create_dir(
    &self,
    path: &Path,
    options: &sys_traits::CreateDirOptions,
  ) -> std::io::Result<()> {
    self
      .mkdir_sync(
        // PERMISSIONS: this is ok because JS code will never use sys_traits. Probably
        // we should flip this so that the `deno_fs::FileSystem` implementation uses `sys_traits`
        // rather than this calling into `deno_fs::FileSystem`
        &CheckedPath::unsafe_new(Cow::Borrowed(path)),
        options.recursive,
        options.mode,
      )
      .map_err(|err| err.into_io_error())
  }
}
/// Single-file removal (non-recursive), delegating to the VFS-guarded
/// `remove_sync` above.
impl sys_traits::BaseFsRemoveFile for DenoRtSys {
  #[inline]
  fn base_fs_remove_file(&self, path: &Path) -> std::io::Result<()> {
    self
      .remove_sync(
        // PERMISSIONS: this is ok because JS code will never use sys_traits. Probably
        // we should flip this so that the `deno_fs::FileSystem` implementation uses `sys_traits`
        // rather than this calling into `deno_fs::FileSystem`
        &CheckedPath::unsafe_new(Cow::Borrowed(path)),
        false,
      )
      .map_err(|err| err.into_io_error())
  }
}
/// Rename, delegating to the VFS-guarded `rename_sync` above (both ends must
/// be outside the VFS).
impl sys_traits::BaseFsRename for DenoRtSys {
  #[inline]
  fn base_fs_rename(&self, from: &Path, to: &Path) -> std::io::Result<()> {
    self
      .rename_sync(
        // PERMISSIONS: this is ok because JS code will never use sys_traits. Probably
        // we should flip this so that the `deno_fs::FileSystem` implementation uses `sys_traits`
        // rather than this calling into `deno_fs::FileSystem`
        &CheckedPath::unsafe_new(Cow::Borrowed(from)),
        &CheckedPath::unsafe_new(Cow::Borrowed(to)),
      )
      .map_err(|err| err.into_io_error())
  }
}
/// Open-file handle that is either a real OS file or a read-only VFS file;
/// the trait impls below dispatch on the variant.
pub enum FsFileAdapter {
  Real(sys_traits::impls::RealFsFile),
  Vfs(FileBackedVfsFile),
}
impl sys_traits::FsFile for FsFileAdapter {}
/// Raw handle/fd access: only real files have one; VFS files report `None`.
impl sys_traits::FsFileAsRaw for FsFileAdapter {
  #[cfg(windows)]
  fn fs_file_as_raw_handle(&self) -> Option<std::os::windows::io::RawHandle> {
    match self {
      Self::Real(file) => file.fs_file_as_raw_handle(),
      Self::Vfs(_) => None,
    }
  }
  #[cfg(unix)]
  fn fs_file_as_raw_fd(&self) -> Option<std::os::fd::RawFd> {
    match self {
      Self::Real(file) => file.fs_file_as_raw_fd(),
      Self::Vfs(_) => None,
    }
  }
}
/// Metadata from an open handle; for VFS files it is synthesized from the
/// virtual file's stored name, length, and mtime.
impl sys_traits::FsFileMetadata for FsFileAdapter {
  #[inline]
  fn fs_file_metadata(&self) -> std::io::Result<BoxedFsMetadataValue> {
    match self {
      Self::Real(file) => file.fs_file_metadata(),
      Self::Vfs(file) => Ok(BoxedFsMetadataValue::new(FileBackedVfsMetadata {
        file_type: sys_traits::FileType::File,
        name: file.file.name.clone(),
        len: file.file.offset.len,
        mtime: file.file.mtime,
      })),
    }
  }
}
// Sync/permission operations: no-ops for VFS files (nothing to flush on a
// read-only, in-binary file), delegated for real files.
impl sys_traits::FsFileSyncData for FsFileAdapter {
  fn fs_file_sync_data(&mut self) -> std::io::Result<()> {
    match self {
      Self::Real(file) => file.fs_file_sync_data(),
      Self::Vfs(_) => Ok(()),
    }
  }
}
impl sys_traits::FsFileSyncAll for FsFileAdapter {
  fn fs_file_sync_all(&mut self) -> std::io::Result<()> {
    match self {
      Self::Real(file) => file.fs_file_sync_all(),
      Self::Vfs(_) => Ok(()),
    }
  }
}
impl sys_traits::FsFileSetPermissions for FsFileAdapter {
  #[inline]
  fn fs_file_set_permissions(&mut self, mode: u32) -> std::io::Result<()> {
    match self {
      Self::Real(file) => file.fs_file_set_permissions(mode),
      Self::Vfs(_) => Ok(()),
    }
  }
}
// std::io adapters: reads and seeks work on both variants; writes fail with
// `Unsupported` on VFS files because the embedded file system is read-only.
impl std::io::Read for FsFileAdapter {
  #[inline]
  fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
    match self {
      Self::Real(file) => file.read(buf),
      Self::Vfs(file) => file.read_to_buf(buf),
    }
  }
}
impl std::io::Seek for FsFileAdapter {
  fn seek(&mut self, pos: std::io::SeekFrom) -> std::io::Result<u64> {
    match self {
      Self::Real(file) => file.seek(pos),
      Self::Vfs(file) => file.seek(pos),
    }
  }
}
impl std::io::Write for FsFileAdapter {
  #[inline]
  fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
    match self {
      Self::Real(file) => file.write(buf),
      Self::Vfs(_) => Err(not_supported("writing files")),
    }
  }
  #[inline]
  fn flush(&mut self) -> std::io::Result<()> {
    match self {
      Self::Real(file) => file.flush(),
      Self::Vfs(_) => Err(not_supported("writing files")),
    }
  }
}
// Mutating/locking operations are unsupported on VFS files and delegated for
// real files; a VFS file is never a terminal.
impl sys_traits::FsFileSetLen for FsFileAdapter {
  #[inline]
  fn fs_file_set_len(&mut self, len: u64) -> std::io::Result<()> {
    match self {
      Self::Real(file) => file.fs_file_set_len(len),
      Self::Vfs(_) => Err(not_supported("setting file length")),
    }
  }
}
impl sys_traits::FsFileSetTimes for FsFileAdapter {
  fn fs_file_set_times(
    &mut self,
    times: sys_traits::FsFileTimes,
  ) -> std::io::Result<()> {
    match self {
      Self::Real(file) => file.fs_file_set_times(times),
      Self::Vfs(_) => Err(not_supported("setting file times")),
    }
  }
}
impl sys_traits::FsFileLock for FsFileAdapter {
  fn fs_file_lock(
    &mut self,
    mode: sys_traits::FsFileLockMode,
  ) -> std::io::Result<()> {
    match self {
      Self::Real(file) => file.fs_file_lock(mode),
      Self::Vfs(_) => Err(not_supported("locking files")),
    }
  }
  fn fs_file_try_lock(
    &mut self,
    mode: sys_traits::FsFileLockMode,
  ) -> std::io::Result<()> {
    match self {
      Self::Real(file) => file.fs_file_try_lock(mode),
      Self::Vfs(_) => Err(not_supported("locking files")),
    }
  }
  fn fs_file_unlock(&mut self) -> std::io::Result<()> {
    match self {
      Self::Real(file) => file.fs_file_unlock(),
      Self::Vfs(_) => Err(not_supported("unlocking files")),
    }
  }
}
impl sys_traits::FsFileIsTerminal for FsFileAdapter {
  #[inline]
  fn fs_file_is_terminal(&self) -> bool {
    match self {
      Self::Real(file) => file.fs_file_is_terminal(),
      Self::Vfs(_) => false,
    }
  }
}
/// Open for `sys_traits` callers: VFS paths yield a read-only `Vfs` handle
/// (open `options` are not consulted for them), other paths open normally.
impl sys_traits::BaseFsOpen for DenoRtSys {
  type File = FsFileAdapter;
  fn base_fs_open(
    &self,
    path: &Path,
    options: &sys_traits::OpenOptions,
  ) -> std::io::Result<Self::File> {
    if self.0.is_path_within(path) {
      Ok(FsFileAdapter::Vfs(self.0.open_file(path)?))
    } else {
      #[allow(clippy::disallowed_types)] // ok because we're implementing the fs
      Ok(FsFileAdapter::Real(
        sys_traits::impls::RealSys.base_fs_open(path, options)?,
      ))
    }
  }
}
/// Directory symlink creation, delegating to the VFS-guarded `symlink_sync`
/// with an explicit `Directory` type (relevant on Windows).
impl sys_traits::BaseFsSymlinkDir for DenoRtSys {
  fn base_fs_symlink_dir(&self, src: &Path, dst: &Path) -> std::io::Result<()> {
    self
      .symlink_sync(
        // PERMISSIONS: this is ok because JS code will never use sys_traits. Probably
        // we should flip this so that the `deno_fs::FileSystem` implementation uses `sys_traits`
        // rather than this calling into `deno_fs::FileSystem`
        &CheckedPath::unsafe_new(Cow::Borrowed(src)),
        &CheckedPath::unsafe_new(Cow::Borrowed(dst)),
        Some(FsFileType::Directory),
      )
      .map_err(|err| err.into_io_error())
  }
}
// Environment/system queries have nothing to do with the VFS, so they all
// pass straight through to the real system implementation.
impl sys_traits::SystemRandom for DenoRtSys {
  #[inline]
  fn sys_random(&self, buf: &mut [u8]) -> std::io::Result<()> {
    #[allow(clippy::disallowed_types)] // ok because we're implementing the fs
    sys_traits::impls::RealSys.sys_random(buf)
  }
}
impl sys_traits::SystemTimeNow for DenoRtSys {
  #[inline]
  fn sys_time_now(&self) -> SystemTime {
    #[allow(clippy::disallowed_types)] // ok because we're implementing the fs
    sys_traits::impls::RealSys.sys_time_now()
  }
}
impl sys_traits::ThreadSleep for DenoRtSys {
  #[inline]
  fn thread_sleep(&self, dur: Duration) {
    #[allow(clippy::disallowed_types)] // ok because we're implementing the fs
    sys_traits::impls::RealSys.thread_sleep(dur)
  }
}
impl sys_traits::EnvCurrentDir for DenoRtSys {
  #[inline]
  fn env_current_dir(&self) -> std::io::Result<PathBuf> {
    #[allow(clippy::disallowed_types)] // ok because we're implementing the fs
    sys_traits::impls::RealSys.env_current_dir()
  }
}
impl sys_traits::EnvHomeDir for DenoRtSys {
  #[inline]
  fn env_home_dir(&self) -> Option<PathBuf> {
    #[allow(clippy::disallowed_types)] // ok because we're implementing the fs
    sys_traits::impls::RealSys.env_home_dir()
  }
}
impl sys_traits::BaseEnvVar for DenoRtSys {
  fn base_env_var_os(
    &self,
    key: &std::ffi::OsStr,
  ) -> Option<std::ffi::OsString> {
    #[allow(clippy::disallowed_types)] // ok because we're implementing the fs
    sys_traits::impls::RealSys.base_env_var_os(key)
  }
}
/// The root of the embedded virtual file system: the in-memory directory
/// tree, the absolute path it is mounted at, and the byte offset where file
/// data begins inside the backing binary.
#[derive(Debug)]
pub struct VfsRoot {
  pub dir: VirtualDirectory,
  pub root_path: PathBuf,
  pub start_file_offset: u64,
}
impl VfsRoot {
  /// Resolves `path` to a VFS entry, following symlinks (including a
  /// trailing one). Returns the fully resolved path alongside the entry.
  fn find_entry<'a>(
    &'a self,
    path: &Path,
    case_sensitivity: FileSystemCaseSensitivity,
  ) -> std::io::Result<(PathBuf, VfsEntryRef<'a>)> {
    self.find_entry_inner(path, &mut HashSet::new(), case_sensitivity)
  }
  /// Symlink-following loop: re-resolves until the entry is not a symlink.
  /// `seen` records every symlink path visited so cycles error out instead
  /// of looping forever.
  fn find_entry_inner<'a>(
    &'a self,
    path: &Path,
    seen: &mut HashSet<PathBuf>,
    case_sensitivity: FileSystemCaseSensitivity,
  ) -> std::io::Result<(PathBuf, VfsEntryRef<'a>)> {
    let mut path = Cow::Borrowed(path);
    loop {
      let (resolved_path, entry) =
        self.find_entry_no_follow_inner(&path, seen, case_sensitivity)?;
      match entry {
        VfsEntryRef::Symlink(symlink) => {
          if !seen.insert(path.to_path_buf()) {
            return Err(std::io::Error::other("circular symlinks"));
          }
          path = Cow::Owned(symlink.resolve_dest_from_root(&self.root_path));
        }
        _ => {
          return Ok((resolved_path, entry));
        }
      }
    }
  }
  /// Like `find_entry`, but a trailing symlink is returned as-is
  /// (lstat-style). Intermediate symlinks are still followed.
  fn find_entry_no_follow(
    &self,
    path: &Path,
    case_sensitivity: FileSystemCaseSensitivity,
  ) -> std::io::Result<(PathBuf, VfsEntryRef<'_>)> {
    self.find_entry_no_follow_inner(path, &mut HashSet::new(), case_sensitivity)
  }
  /// Walks `path` component by component from the VFS root. Paths outside
  /// `root_path` are `NotFound`. When a component lands on a symlinked
  /// directory, the link is resolved (sharing `seen` for cycle detection)
  /// and `final_path` is rewritten to the resolved location.
  fn find_entry_no_follow_inner<'a>(
    &'a self,
    path: &Path,
    seen: &mut HashSet<PathBuf>,
    case_sensitivity: FileSystemCaseSensitivity,
  ) -> std::io::Result<(PathBuf, VfsEntryRef<'a>)> {
    let relative_path = match path.strip_prefix(&self.root_path) {
      Ok(p) => p,
      Err(_) => {
        return Err(std::io::Error::new(
          std::io::ErrorKind::NotFound,
          "path not found",
        ));
      }
    };
    let mut final_path = self.root_path.clone();
    let mut current_entry = VfsEntryRef::Dir(&self.dir);
    for component in relative_path.components() {
      let component = component.as_os_str();
      let current_dir = match current_entry {
        VfsEntryRef::Dir(dir) => {
          final_path.push(component);
          dir
        }
        VfsEntryRef::Symlink(symlink) => {
          let dest = symlink.resolve_dest_from_root(&self.root_path);
          let (resolved_path, entry) =
            self.find_entry_inner(&dest, seen, case_sensitivity)?;
          final_path = resolved_path; // overwrite with the new resolved path
          match entry {
            VfsEntryRef::Dir(dir) => {
              final_path.push(component);
              dir
            }
            _ => {
              // symlink resolved to a non-directory mid-path
              return Err(std::io::Error::new(
                std::io::ErrorKind::NotFound,
                "path not found",
              ));
            }
          }
        }
        _ => {
          // tried to descend into a file
          return Err(std::io::Error::new(
            std::io::ErrorKind::NotFound,
            "path not found",
          ));
        }
      };
      let component = component.to_string_lossy();
      current_entry = current_dir
        .entries
        .get_by_name(&component, case_sensitivity)
        .ok_or_else(|| {
          std::io::Error::new(std::io::ErrorKind::NotFound, "path not found")
        })?
        .as_ref();
    }
    Ok((final_path, current_entry))
  }
}
/// An "open" handle to a virtual file: the file's entry, a cursor position
/// (in a `RefCell` so read/seek can take `&self`), and the backing VFS used
/// to read the actual bytes.
pub struct FileBackedVfsFile {
  file: VirtualFile,
  pos: RefCell<u64>,
  vfs: Arc<FileBackedVfs>,
}
impl FileBackedVfsFile {
/// Moves the cursor per `std::io::Seek` semantics and returns the new
/// position:
/// - `Start(pos)`: cursor = `pos`
/// - `End(offset)`: cursor = file length + `offset` (signed)
/// - `Current(offset)`: cursor = current position + `offset` (signed)
///
/// Seeking before the start of the file returns a `PermissionDenied` error
/// (matching the Windows OS error text).
pub fn seek(&self, pos: SeekFrom) -> std::io::Result<u64> {
  match pos {
    SeekFrom::Start(pos) => {
      *self.pos.borrow_mut() = pos;
      Ok(pos)
    }
    SeekFrom::End(offset) => {
      // Guard: a negative offset larger than the file would move the
      // cursor before the beginning (and underflow the u64 below).
      if offset < 0 && -offset as u64 > self.file.offset.len {
        let msg = "An attempt was made to move the file pointer before the beginning of the file.";
        Err(std::io::Error::new(
          std::io::ErrorKind::PermissionDenied,
          msg,
        ))
      } else {
        let mut current_pos = self.pos.borrow_mut();
        // `End(offset)` means length + offset: a non-negative offset seeks
        // past EOF, a negative one seeks backwards from EOF. The previous
        // code had these inverted (subtracting non-negative offsets —
        // which could also underflow — and adding negative ones).
        *current_pos = if offset >= 0 {
          self.file.offset.len + (offset as u64)
        } else {
          self.file.offset.len - (-offset as u64)
        };
        Ok(*current_pos)
      }
    }
    SeekFrom::Current(offset) => {
      let mut current_pos = self.pos.borrow_mut();
      if offset >= 0 {
        *current_pos += offset as u64;
      } else if -offset as u64 > *current_pos {
        // would move before the beginning of the file
        return Err(std::io::Error::new(
          std::io::ErrorKind::PermissionDenied,
          "An attempt was made to move the file pointer before the beginning of the file.",
        ));
      } else {
        *current_pos -= -offset as u64;
      }
      Ok(*current_pos)
    }
  }
}
pub fn read_to_buf(&self, buf: &mut [u8]) -> std::io::Result<usize> {
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | true |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/rt/code_cache.rs | cli/rt/code_cache.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::collections::HashMap;
use std::io::BufReader;
use std::io::BufWriter;
use std::io::Read;
use std::io::Write;
use std::path::Path;
use std::path::PathBuf;
use std::sync::Arc;
use deno_core::anyhow::bail;
use deno_core::error::AnyError;
use deno_core::parking_lot::Mutex;
use deno_core::unsync::sync::AtomicFlag;
use deno_lib::util::hash::FastInsecureHasher;
use deno_path_util::get_atomic_path;
use deno_runtime::code_cache::CodeCache;
use deno_runtime::code_cache::CodeCacheType;
use url::Url;
/// On the first run of a compiled binary there is no cache file yet, so
/// entries are collected and written out; on subsequent runs the existing
/// file is deserialized and drained.
enum CodeCacheStrategy {
  FirstRun(FirstRunCodeCacheStrategy),
  SubsequentRun(SubsequentRunCodeCacheStrategy),
}
/// One cached V8 code-cache blob plus the hash of the source it was compiled
/// from (used to invalidate stale entries).
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct DenoCompileCodeCacheEntry {
  pub source_hash: u64,
  pub data: Vec<u8>,
}
/// V8 code cache for `deno compile` binaries, persisted to a single file.
pub struct DenoCompileCodeCache {
  strategy: CodeCacheStrategy,
}
impl DenoCompileCodeCache {
  /// Chooses the strategy by attempting to deserialize `file_path`:
  /// success means a subsequent run (serve from the loaded entries); any
  /// failure (missing file, corrupt data, `cache_key` mismatch) means a
  /// first run that will rebuild the file.
  pub fn new(file_path: PathBuf, cache_key: u64) -> Self {
    // attempt to deserialize the cache data
    match deserialize(&file_path, cache_key) {
      Ok(data) => {
        log::debug!(
          "Loaded {} code cache entries from {}",
          data.len(),
          file_path.display()
        );
        Self {
          strategy: CodeCacheStrategy::SubsequentRun(
            SubsequentRunCodeCacheStrategy {
              is_finished: AtomicFlag::lowered(),
              data: Mutex::new(data),
            },
          ),
        }
      }
      Err(err) => {
        log::debug!(
          "Failed to deserialize code cache from {}: {:#}",
          file_path.display(),
          err
        );
        Self {
          strategy: CodeCacheStrategy::FirstRun(FirstRunCodeCacheStrategy {
            cache_key,
            file_path,
            is_finished: AtomicFlag::lowered(),
            data: Mutex::new(FirstRunCodeCacheData {
              cache: HashMap::new(),
              add_count: 0,
            }),
          }),
        }
      }
    }
  }
  /// Upcasts to the `deno_core` trait object.
  pub fn for_deno_core(self: Arc<Self>) -> Arc<dyn CodeCache> {
    self.clone()
  }
  /// `false` once the strategy has finished (file written, or all loaded
  /// entries consumed) — callers can stop consulting the cache.
  pub fn enabled(&self) -> bool {
    match &self.strategy {
      CodeCacheStrategy::FirstRun(strategy) => {
        !strategy.is_finished.is_raised()
      }
      CodeCacheStrategy::SubsequentRun(strategy) => {
        !strategy.is_finished.is_raised()
      }
    }
  }
}
impl CodeCache for DenoCompileCodeCache {
  /// First run: always a miss, but each miss bumps `add_count` so `set_sync`
  /// knows how many entries to expect before serializing. Subsequent run:
  /// entries are moved out of the deserialized map.
  fn get_sync(
    &self,
    specifier: &Url,
    code_cache_type: CodeCacheType,
    source_hash: u64,
  ) -> Option<Vec<u8>> {
    match &self.strategy {
      CodeCacheStrategy::FirstRun(strategy) => {
        if !strategy.is_finished.is_raised() {
          // we keep track of how many times the cache is requested
          // then serialize the cache when we get that number of
          // "set" calls
          strategy.data.lock().add_count += 1;
        }
        None
      }
      CodeCacheStrategy::SubsequentRun(strategy) => {
        if strategy.is_finished.is_raised() {
          return None;
        }
        strategy.take_from_cache(specifier, code_cache_type, source_hash)
      }
    }
  }
  /// First run: records the entry; once every pending `get_sync` has been
  /// matched by a `set_sync` (`add_count` drained to zero), the cache is
  /// serialized to disk exactly once and further use is disabled.
  /// Subsequent run: a no-op (the file already exists).
  fn set_sync(
    &self,
    specifier: Url,
    code_cache_type: CodeCacheType,
    source_hash: u64,
    bytes: &[u8],
  ) {
    match &self.strategy {
      CodeCacheStrategy::FirstRun(strategy) => {
        if strategy.is_finished.is_raised() {
          return;
        }
        // decide under the lock whether this call completes the set; the
        // actual file write happens after the lock is released
        let data_to_serialize = {
          let mut data = strategy.data.lock();
          data.cache.insert(
            (specifier.to_string(), code_cache_type),
            DenoCompileCodeCacheEntry {
              source_hash,
              data: bytes.to_vec(),
            },
          );
          if data.add_count != 0 {
            data.add_count -= 1;
          }
          if data.add_count == 0 {
            // don't allow using the cache anymore
            strategy.is_finished.raise();
            if data.cache.is_empty() {
              None
            } else {
              Some(std::mem::take(&mut data.cache))
            }
          } else {
            None
          }
        };
        if let Some(cache_data) = &data_to_serialize {
          strategy.write_cache_data(cache_data);
        }
      }
      CodeCacheStrategy::SubsequentRun(_) => {
        // do nothing
      }
    }
  }
}
// Cache entries are keyed by (specifier string, code cache type).
type CodeCacheKey = (String, CodeCacheType);
/// Mutable first-run state: the collected entries plus the number of
/// outstanding `get_sync` calls not yet matched by a `set_sync`.
struct FirstRunCodeCacheData {
  cache: HashMap<CodeCacheKey, DenoCompileCodeCacheEntry>,
  add_count: usize,
}
struct FirstRunCodeCacheStrategy {
  // key written into the file header and validated on the next run
  cache_key: u64,
  file_path: PathBuf,
  // raised once the cache file has been written (or writing was abandoned)
  is_finished: AtomicFlag,
  data: Mutex<FirstRunCodeCacheData>,
}
impl FirstRunCodeCacheStrategy {
  /// Serializes the collected entries to a temp file and atomically renames
  /// it into place. All failures are best-effort: they are logged at debug
  /// level and the temp file is cleaned up, but never propagated (a missing
  /// code cache only costs startup time on the next run).
  fn write_cache_data(
    &self,
    cache_data: &HashMap<CodeCacheKey, DenoCompileCodeCacheEntry>,
  ) {
    let count = cache_data.len();
    let temp_file =
      get_atomic_path(&sys_traits::impls::RealSys, &self.file_path);
    match serialize(&temp_file, self.cache_key, cache_data) {
      Ok(()) => {
        if let Err(err) = std::fs::rename(&temp_file, &self.file_path) {
          log::debug!("Failed to rename code cache: {}", err);
          let _ = std::fs::remove_file(&temp_file);
        } else {
          log::debug!("Serialized {} code cache entries", count);
        }
      }
      Err(err) => {
        let _ = std::fs::remove_file(&temp_file);
        log::debug!("Failed to serialize code cache: {}", err);
      }
    }
  }
}
/// Entries deserialized from the cache file; they are moved out one by one
/// and `is_finished` is raised once the map is empty.
struct SubsequentRunCodeCacheStrategy {
  is_finished: AtomicFlag,
  data: Mutex<HashMap<CodeCacheKey, DenoCompileCodeCacheEntry>>,
}
impl SubsequentRunCodeCacheStrategy {
  /// Removes and returns the entry for (`specifier`, `code_cache_type`),
  /// but only if its recorded `source_hash` still matches (a mismatch means
  /// the source changed and the cached blob is stale — it is discarded).
  /// Raises `is_finished` once the last entry has been taken.
  fn take_from_cache(
    &self,
    specifier: &Url,
    code_cache_type: CodeCacheType,
    source_hash: u64,
  ) -> Option<Vec<u8>> {
    let mut data = self.data.lock();
    // todo(dsherret): how to avoid the clone here?
    let entry = data.remove(&(specifier.to_string(), code_cache_type))?;
    if entry.source_hash != source_hash {
      return None;
    }
    if data.is_empty() {
      self.is_finished.raise();
    }
    Some(entry.data)
  }
}
/// File format (see `serialize_with_writer` for the authoritative write
/// order):
/// - <header>
///   - <u64: cache key>
///   - <u32: number of entries>
/// - <[entry length]> - u64 * number of entries
/// - <[entry]>
///   - <[u8]: entry data>
///   - <u8: code cache type (0 = es module, 1 = script)>
///   - <String: specifier>
///   - <u32: specifier length>
///   - <u64: source hash>
///   - <u64: entry data hash>
///
/// Note: the specifier length is written *after* the specifier bytes —
/// presumably so a reader can locate fields from the end of each entry
/// (confirm against the deserializer).
fn serialize(
  file_path: &Path,
  cache_key: u64,
  cache: &HashMap<CodeCacheKey, DenoCompileCodeCacheEntry>,
) -> Result<(), AnyError> {
  // create-or-truncate, then stream entries through a buffered writer
  let cache_file = std::fs::OpenOptions::new()
    .create(true)
    .truncate(true)
    .write(true)
    .open(file_path)?;
  let mut writer = BufWriter::new(cache_file);
  serialize_with_writer(&mut writer, cache_key, cache)
}
fn serialize_with_writer<T: Write>(
writer: &mut BufWriter<T>,
cache_key: u64,
cache: &HashMap<CodeCacheKey, DenoCompileCodeCacheEntry>,
) -> Result<(), AnyError> {
// header
writer.write_all(&cache_key.to_le_bytes())?;
writer.write_all(&(cache.len() as u32).to_le_bytes())?;
// lengths of each entry
for ((specifier, _), entry) in cache {
let len: u64 =
entry.data.len() as u64 + specifier.len() as u64 + 1 + 4 + 8 + 8;
writer.write_all(&len.to_le_bytes())?;
}
// entries
for ((specifier, code_cache_type), entry) in cache {
writer.write_all(&entry.data)?;
writer.write_all(&[match code_cache_type {
CodeCacheType::EsModule => 0,
CodeCacheType::Script => 1,
}])?;
writer.write_all(specifier.as_bytes())?;
writer.write_all(&(specifier.len() as u32).to_le_bytes())?;
writer.write_all(&entry.source_hash.to_le_bytes())?;
let hash: u64 = FastInsecureHasher::new_without_deno_version()
.write(&entry.data)
.finish();
writer.write_all(&hash.to_le_bytes())?;
}
writer.flush()?;
Ok(())
}
fn deserialize(
file_path: &Path,
expected_cache_key: u64,
) -> Result<HashMap<CodeCacheKey, DenoCompileCodeCacheEntry>, AnyError> {
let cache_file = std::fs::File::open(file_path)?;
let mut reader = BufReader::new(cache_file);
deserialize_with_reader(&mut reader, expected_cache_key)
}
fn deserialize_with_reader<T: Read>(
reader: &mut BufReader<T>,
expected_cache_key: u64,
) -> Result<HashMap<CodeCacheKey, DenoCompileCodeCacheEntry>, AnyError> {
// it's very important to use this below so that a corrupt cache file
// doesn't cause a memory allocation error
fn new_vec_sized<T: Clone>(
capacity: usize,
default_value: T,
) -> Result<Vec<T>, AnyError> {
let mut vec = Vec::new();
vec.try_reserve(capacity)?;
vec.resize(capacity, default_value);
Ok(vec)
}
fn try_subtract(a: usize, b: usize) -> Result<usize, AnyError> {
if a < b {
bail!("Integer underflow");
}
Ok(a - b)
}
let mut header_bytes = vec![0; 8 + 4];
reader.read_exact(&mut header_bytes)?;
let actual_cache_key = u64::from_le_bytes(header_bytes[..8].try_into()?);
if actual_cache_key != expected_cache_key {
// cache bust
bail!("Cache key mismatch");
}
let len = u32::from_le_bytes(header_bytes[8..].try_into()?) as usize;
// read the lengths for each entry found in the file
let entry_len_bytes_capacity = len * 8;
let mut entry_len_bytes = new_vec_sized(entry_len_bytes_capacity, 0)?;
reader.read_exact(&mut entry_len_bytes)?;
let mut lengths = Vec::new();
lengths.try_reserve(len)?;
for i in 0..len {
let pos = i * 8;
lengths.push(
u64::from_le_bytes(entry_len_bytes[pos..pos + 8].try_into()?) as usize,
);
}
let mut map = HashMap::new();
map.try_reserve(len)?;
for len in lengths {
let mut buffer = new_vec_sized(len, 0)?;
reader.read_exact(&mut buffer)?;
let entry_data_hash_start_pos = try_subtract(buffer.len(), 8)?;
let expected_entry_data_hash =
u64::from_le_bytes(buffer[entry_data_hash_start_pos..].try_into()?);
let source_hash_start_pos = try_subtract(entry_data_hash_start_pos, 8)?;
let source_hash = u64::from_le_bytes(
buffer[source_hash_start_pos..entry_data_hash_start_pos].try_into()?,
);
let specifier_end_pos = try_subtract(source_hash_start_pos, 4)?;
let specifier_len = u32::from_le_bytes(
buffer[specifier_end_pos..source_hash_start_pos].try_into()?,
) as usize;
let specifier_start_pos = try_subtract(specifier_end_pos, specifier_len)?;
let specifier = String::from_utf8(
buffer[specifier_start_pos..specifier_end_pos].to_vec(),
)?;
let code_cache_type_pos = try_subtract(specifier_start_pos, 1)?;
let code_cache_type = match buffer[code_cache_type_pos] {
0 => CodeCacheType::EsModule,
1 => CodeCacheType::Script,
_ => bail!("Invalid code cache type"),
};
buffer.truncate(code_cache_type_pos);
let actual_entry_data_hash: u64 =
FastInsecureHasher::new_without_deno_version()
.write(&buffer)
.finish();
if expected_entry_data_hash != actual_entry_data_hash {
bail!("Hash mismatch.")
}
map.insert(
(specifier, code_cache_type),
DenoCompileCodeCacheEntry {
source_hash,
data: buffer,
},
);
}
Ok(map)
}
#[cfg(test)]
mod test {
use test_util::TempDir;
use super::*;
#[test]
fn serialize_deserialize() {
let cache_key = 123456;
let cache = {
let mut cache = HashMap::new();
cache.insert(
("specifier1".to_string(), CodeCacheType::EsModule),
DenoCompileCodeCacheEntry {
source_hash: 1,
data: vec![1, 2, 3],
},
);
cache.insert(
("specifier2".to_string(), CodeCacheType::EsModule),
DenoCompileCodeCacheEntry {
source_hash: 2,
data: vec![4, 5, 6],
},
);
cache.insert(
("specifier2".to_string(), CodeCacheType::Script),
DenoCompileCodeCacheEntry {
source_hash: 2,
data: vec![6, 5, 1],
},
);
cache
};
let mut buffer = Vec::new();
serialize_with_writer(&mut BufWriter::new(&mut buffer), cache_key, &cache)
.unwrap();
let deserialized =
deserialize_with_reader(&mut BufReader::new(&buffer[..]), cache_key)
.unwrap();
assert_eq!(cache, deserialized);
}
#[test]
fn serialize_deserialize_empty() {
let cache_key = 1234;
let cache = HashMap::new();
let mut buffer = Vec::new();
serialize_with_writer(&mut BufWriter::new(&mut buffer), cache_key, &cache)
.unwrap();
let deserialized =
deserialize_with_reader(&mut BufReader::new(&buffer[..]), cache_key)
.unwrap();
assert_eq!(cache, deserialized);
}
#[test]
fn serialize_deserialize_corrupt() {
let buffer = "corrupttestingtestingtesting".as_bytes().to_vec();
let err = deserialize_with_reader(&mut BufReader::new(&buffer[..]), 1234)
.unwrap_err();
assert_eq!(err.to_string(), "Cache key mismatch");
}
#[test]
fn code_cache() {
let temp_dir = TempDir::new();
let file_path = temp_dir.path().join("cache.bin").to_path_buf();
let url1 = Url::parse("https://deno.land/example1.js").unwrap();
let url2 = Url::parse("https://deno.land/example2.js").unwrap();
// first run
{
let code_cache = DenoCompileCodeCache::new(file_path.clone(), 1234);
assert!(
code_cache
.get_sync(&url1, CodeCacheType::EsModule, 0)
.is_none()
);
assert!(
code_cache
.get_sync(&url2, CodeCacheType::EsModule, 1)
.is_none()
);
assert!(code_cache.enabled());
code_cache.set_sync(url1.clone(), CodeCacheType::EsModule, 0, &[1, 2, 3]);
assert!(code_cache.enabled());
assert!(!file_path.exists());
code_cache.set_sync(url2.clone(), CodeCacheType::EsModule, 1, &[2, 1, 3]);
assert!(file_path.exists()); // now the new code cache exists
assert!(!code_cache.enabled()); // no longer enabled
}
// second run
{
let code_cache = DenoCompileCodeCache::new(file_path.clone(), 1234);
assert!(code_cache.enabled());
let result1 = code_cache
.get_sync(&url1, CodeCacheType::EsModule, 0)
.unwrap();
assert!(code_cache.enabled());
let result2 = code_cache
.get_sync(&url2, CodeCacheType::EsModule, 1)
.unwrap();
assert!(!code_cache.enabled()); // no longer enabled
assert_eq!(result1, vec![1, 2, 3]);
assert_eq!(result2, vec![2, 1, 3]);
}
// new cache key first run
{
let code_cache = DenoCompileCodeCache::new(file_path.clone(), 54321);
assert!(
code_cache
.get_sync(&url1, CodeCacheType::EsModule, 0)
.is_none()
);
assert!(
code_cache
.get_sync(&url2, CodeCacheType::EsModule, 1)
.is_none()
);
code_cache.set_sync(url1.clone(), CodeCacheType::EsModule, 0, &[2, 2, 3]);
code_cache.set_sync(url2.clone(), CodeCacheType::EsModule, 1, &[3, 2, 3]);
}
// new cache key second run
{
let code_cache = DenoCompileCodeCache::new(file_path.clone(), 54321);
let result1 = code_cache
.get_sync(&url1, CodeCacheType::EsModule, 0)
.unwrap();
assert_eq!(result1, vec![2, 2, 3]);
assert!(
code_cache
.get_sync(&url2, CodeCacheType::EsModule, 5) // different hash will cause none
.is_none()
);
}
}
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/rt/build.rs | cli/rt/build.rs | // Copyright 2018-2025 the Deno authors. MIT license.
fn main() {
// Skip building from docs.rs.
if std::env::var_os("DOCS_RS").is_some() {
return;
}
deno_runtime::deno_napi::print_linker_flags("denort");
deno_runtime::deno_webgpu::print_linker_flags("denort");
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/rt/run.rs | cli/rt/run.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::borrow::Cow;
use std::path::Path;
use std::path::PathBuf;
use std::rc::Rc;
use std::sync::Arc;
use std::sync::OnceLock;
use deno_cache_dir::npm::NpmCacheDir;
use deno_config::workspace::ResolverWorkspaceJsrPackage;
use deno_core::FastString;
use deno_core::ModuleLoadOptions;
use deno_core::ModuleLoadReferrer;
use deno_core::ModuleLoader;
use deno_core::ModuleSourceCode;
use deno_core::ModuleType;
use deno_core::RequestedModuleType;
use deno_core::ResolutionKind;
use deno_core::SourceCodeCacheInfo;
use deno_core::error::AnyError;
use deno_core::error::ModuleLoaderError;
use deno_core::futures::FutureExt;
use deno_core::futures::future::LocalBoxFuture;
use deno_core::url::Url;
use deno_core::v8_set_flags;
use deno_error::JsErrorBox;
use deno_lib::args::CaData;
use deno_lib::args::RootCertStoreLoadError;
use deno_lib::args::get_root_cert_store;
use deno_lib::args::npm_pkg_req_ref_to_binary_command;
use deno_lib::loader::as_deno_resolver_requested_module_type;
use deno_lib::loader::loaded_module_source_to_module_source_code;
use deno_lib::loader::module_type_from_media_and_requested_type;
use deno_lib::npm::NpmRegistryReadPermissionChecker;
use deno_lib::npm::NpmRegistryReadPermissionCheckerMode;
use deno_lib::npm::create_npm_process_state_provider;
use deno_lib::standalone::binary::NodeModules;
use deno_lib::util::hash::FastInsecureHasher;
use deno_lib::util::text_encoding::from_utf8_lossy_cow;
use deno_lib::util::text_encoding::from_utf8_lossy_owned;
use deno_lib::util::v8::construct_v8_flags;
use deno_lib::worker::CreateModuleLoaderResult;
use deno_lib::worker::LibMainWorkerFactory;
use deno_lib::worker::LibMainWorkerOptions;
use deno_lib::worker::ModuleLoaderFactory;
use deno_lib::worker::StorageKeyResolver;
use deno_media_type::MediaType;
use deno_npm::npm_rc::ResolvedNpmRc;
use deno_npm::resolution::NpmResolutionSnapshot;
use deno_package_json::PackageJsonDepValue;
use deno_resolver::DenoResolveErrorKind;
use deno_resolver::cjs::CjsTracker;
use deno_resolver::cjs::IsCjsResolutionMode;
use deno_resolver::loader::NpmModuleLoader;
use deno_resolver::npm::ByonmNpmResolverCreateOptions;
use deno_resolver::npm::CreateInNpmPkgCheckerOptions;
use deno_resolver::npm::DenoInNpmPackageChecker;
use deno_resolver::npm::NpmReqResolver;
use deno_resolver::npm::NpmReqResolverOptions;
use deno_resolver::npm::NpmResolver;
use deno_resolver::npm::NpmResolverCreateOptions;
use deno_resolver::npm::managed::ManagedInNpmPkgCheckerCreateOptions;
use deno_resolver::npm::managed::ManagedNpmResolverCreateOptions;
use deno_resolver::npm::managed::NpmResolutionCell;
use deno_resolver::workspace::MappedResolution;
use deno_resolver::workspace::SloppyImportsOptions;
use deno_resolver::workspace::WorkspaceResolver;
use deno_runtime::FeatureChecker;
use deno_runtime::WorkerExecutionMode;
use deno_runtime::WorkerLogLevel;
use deno_runtime::code_cache::CodeCache;
use deno_runtime::deno_fs::FileSystem;
use deno_runtime::deno_node::NodeRequireLoader;
use deno_runtime::deno_node::create_host_defined_options;
use deno_runtime::deno_permissions::Permissions;
use deno_runtime::deno_permissions::PermissionsContainer;
use deno_runtime::deno_tls::RootCertStoreProvider;
use deno_runtime::deno_tls::rustls::RootCertStore;
use deno_runtime::deno_web::BlobStore;
use deno_runtime::permissions::RuntimePermissionDescriptorParser;
use deno_semver::npm::NpmPackageReqReference;
use node_resolver::DenoIsBuiltInNodeModuleChecker;
use node_resolver::NodeResolutionKind;
use node_resolver::NodeResolver;
use node_resolver::PackageJsonResolver;
use node_resolver::PackageJsonThreadLocalCache;
use node_resolver::ResolutionMode;
use node_resolver::analyze::CjsModuleExportAnalyzer;
use node_resolver::analyze::NodeCodeTranslator;
use node_resolver::cache::NodeResolutionSys;
use node_resolver::errors::PackageJsonLoadError;
use crate::binary::DenoCompileModuleSource;
use crate::binary::StandaloneData;
use crate::binary::StandaloneModules;
use crate::code_cache::DenoCompileCodeCache;
use crate::file_system::DenoRtSys;
use crate::file_system::FileBackedVfs;
use crate::node::CjsCodeAnalyzer;
use crate::node::DenoRtCjsTracker;
use crate::node::DenoRtNodeCodeTranslator;
use crate::node::DenoRtNodeResolver;
use crate::node::DenoRtNpmModuleLoader;
use crate::node::DenoRtNpmReqResolver;
struct SharedModuleLoaderState {
cjs_tracker: Arc<DenoRtCjsTracker>,
code_cache: Option<Arc<DenoCompileCodeCache>>,
modules: Arc<StandaloneModules>,
node_code_translator: Arc<DenoRtNodeCodeTranslator>,
node_resolver: Arc<DenoRtNodeResolver>,
npm_module_loader: Arc<DenoRtNpmModuleLoader>,
npm_registry_permission_checker: NpmRegistryReadPermissionChecker<DenoRtSys>,
npm_req_resolver: Arc<DenoRtNpmReqResolver>,
vfs: Arc<FileBackedVfs>,
workspace_resolver: WorkspaceResolver<DenoRtSys>,
}
impl SharedModuleLoaderState {
fn get_code_cache(
&self,
specifier: &Url,
source: &[u8],
) -> Option<SourceCodeCacheInfo> {
let Some(code_cache) = &self.code_cache else {
return None;
};
if !code_cache.enabled() {
return None;
}
// deno version is already included in the root cache key
let hash = FastInsecureHasher::new_without_deno_version()
.write_hashable(source)
.finish();
let data = code_cache.get_sync(
specifier,
deno_runtime::code_cache::CodeCacheType::EsModule,
hash,
);
Some(SourceCodeCacheInfo {
hash,
data: data.map(Cow::Owned),
})
}
}
#[derive(Clone)]
struct EmbeddedModuleLoader {
shared: Arc<SharedModuleLoaderState>,
sys: DenoRtSys,
}
impl std::fmt::Debug for EmbeddedModuleLoader {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("EmbeddedModuleLoader").finish()
}
}
impl ModuleLoader for EmbeddedModuleLoader {
fn resolve(
&self,
raw_specifier: &str,
referrer: &str,
_kind: ResolutionKind,
) -> Result<Url, ModuleLoaderError> {
let referrer = if referrer == "." {
let current_dir = std::env::current_dir().unwrap();
deno_core::resolve_path(".", ¤t_dir)
.map_err(JsErrorBox::from_err)?
} else {
Url::parse(referrer).map_err(|err| {
JsErrorBox::type_error(format!(
"Referrer uses invalid specifier: {}",
err
))
})?
};
let resolution_mode = if self
.shared
.cjs_tracker
.is_maybe_cjs(&referrer, MediaType::from_specifier(&referrer))
.map_err(JsErrorBox::from_err)?
{
ResolutionMode::Require
} else {
ResolutionMode::Import
};
if self.shared.node_resolver.in_npm_package(&referrer) {
return self
.shared
.node_resolver
.resolve(
raw_specifier,
&referrer,
resolution_mode,
NodeResolutionKind::Execution,
)
.and_then(|res| res.into_url())
.map_err(JsErrorBox::from_err);
}
let mapped_resolution = self.shared.workspace_resolver.resolve(
raw_specifier,
&referrer,
deno_resolver::workspace::ResolutionKind::Execution,
);
match mapped_resolution {
Ok(MappedResolution::WorkspaceJsrPackage { specifier, .. }) => {
Ok(specifier)
}
Ok(MappedResolution::WorkspaceNpmPackage {
target_pkg_json: pkg_json,
sub_path,
..
}) => Ok(
self
.shared
.node_resolver
.resolve_package_subpath_from_deno_module(
pkg_json.dir_path(),
sub_path.as_deref(),
Some(&referrer),
resolution_mode,
NodeResolutionKind::Execution,
)
.map_err(JsErrorBox::from_err)
.and_then(|url_or_path| {
url_or_path.into_url().map_err(JsErrorBox::from_err)
})?,
),
Ok(MappedResolution::PackageJson {
dep_result,
sub_path,
alias,
..
}) => match dep_result
.as_ref()
.map_err(|e| JsErrorBox::from_err(e.clone()))?
{
PackageJsonDepValue::File(_) => Err(JsErrorBox::from_err(
DenoResolveErrorKind::UnsupportedPackageJsonFileSpecifier.into_box(),
)),
PackageJsonDepValue::JsrReq(_) => Err(JsErrorBox::from_err(
DenoResolveErrorKind::UnsupportedPackageJsonJsrReq.into_box(),
)),
PackageJsonDepValue::Req(req) => Ok(
self
.shared
.npm_req_resolver
.resolve_req_with_sub_path(
req,
sub_path.as_deref(),
&referrer,
resolution_mode,
NodeResolutionKind::Execution,
)
.map_err(JsErrorBox::from_err)
.and_then(|url_or_path| {
url_or_path.into_url().map_err(JsErrorBox::from_err)
})?,
),
PackageJsonDepValue::Workspace(version_req) => {
let pkg_folder = self
.shared
.workspace_resolver
.resolve_workspace_pkg_json_folder_for_pkg_json_dep(
alias,
version_req,
)
.map_err(JsErrorBox::from_err)?;
Ok(
self
.shared
.node_resolver
.resolve_package_subpath_from_deno_module(
pkg_folder,
sub_path.as_deref(),
Some(&referrer),
resolution_mode,
NodeResolutionKind::Execution,
)
.map_err(JsErrorBox::from_err)
.and_then(|url_or_path| {
url_or_path.into_url().map_err(JsErrorBox::from_err)
})?,
)
}
},
Ok(MappedResolution::PackageJsonImport { pkg_json }) => self
.shared
.node_resolver
.resolve_package_import(
raw_specifier,
Some(&node_resolver::UrlOrPathRef::from_url(&referrer)),
Some(pkg_json),
resolution_mode,
NodeResolutionKind::Execution,
)
.map_err(JsErrorBox::from_err)
.and_then(|url_or_path| {
url_or_path.into_url().map_err(JsErrorBox::from_err)
}),
Ok(MappedResolution::Normal { specifier, .. }) => {
if let Ok(reference) =
NpmPackageReqReference::from_specifier(&specifier)
{
return self
.shared
.npm_req_resolver
.resolve_req_reference(
&reference,
&referrer,
resolution_mode,
NodeResolutionKind::Execution,
)
.map_err(JsErrorBox::from_err)
.and_then(|url_or_path| {
url_or_path.into_url().map_err(JsErrorBox::from_err)
});
}
if specifier.scheme() == "jsr"
&& let Some(specifier) = self
.shared
.modules
.resolve_specifier(&specifier)
.map_err(JsErrorBox::from_err)?
{
return Ok(specifier.clone());
}
Ok(
self
.shared
.node_resolver
.handle_if_in_node_modules(&specifier)
.unwrap_or(specifier),
)
}
Err(err)
if err.is_unmapped_bare_specifier() && referrer.scheme() == "file" =>
{
let maybe_res = self
.shared
.npm_req_resolver
.resolve_if_for_npm_pkg(
raw_specifier,
&referrer,
resolution_mode,
NodeResolutionKind::Execution,
)
.map_err(JsErrorBox::from_err)?;
if let Some(res) = maybe_res {
return res.into_url().map_err(JsErrorBox::from_err);
}
Err(JsErrorBox::from_err(err))
}
Err(err) => Err(JsErrorBox::from_err(err)),
}
}
fn get_host_defined_options<'s>(
&self,
scope: &mut deno_core::v8::PinScope<'s, '_>,
name: &str,
) -> Option<deno_core::v8::Local<'s, deno_core::v8::Data>> {
let name = Url::parse(name).ok()?;
if self.shared.node_resolver.in_npm_package(&name) {
Some(create_host_defined_options(scope))
} else {
None
}
}
fn load(
&self,
original_specifier: &Url,
maybe_referrer: Option<&ModuleLoadReferrer>,
options: ModuleLoadOptions,
) -> deno_core::ModuleLoadResponse {
if original_specifier.scheme() == "data" {
let data_url_text =
match deno_media_type::data_url::RawDataUrl::parse(original_specifier)
.and_then(|url| url.decode())
{
Ok(response) => response,
Err(err) => {
return deno_core::ModuleLoadResponse::Sync(Err(
JsErrorBox::type_error(format!("{:#}", err)),
));
}
};
return deno_core::ModuleLoadResponse::Sync(Ok(
deno_core::ModuleSource::new(
deno_core::ModuleType::JavaScript,
ModuleSourceCode::String(data_url_text.into()),
original_specifier,
None,
),
));
}
if self.shared.node_resolver.in_npm_package(original_specifier) {
let shared = self.shared.clone();
let original_specifier = original_specifier.clone();
let maybe_referrer = maybe_referrer.map(|r| r.specifier.clone());
return deno_core::ModuleLoadResponse::Async(
async move {
let code_source = shared
.npm_module_loader
.load(
Cow::Borrowed(&original_specifier),
maybe_referrer.as_ref(),
&as_deno_resolver_requested_module_type(
&options.requested_module_type,
),
)
.await
.map_err(JsErrorBox::from_err)?;
let code_cache_entry = match options.requested_module_type {
RequestedModuleType::None => shared.get_code_cache(
&code_source.specifier,
code_source.source.as_bytes(),
),
RequestedModuleType::Other(_)
| RequestedModuleType::Json
| RequestedModuleType::Text
| RequestedModuleType::Bytes => None,
};
Ok(deno_core::ModuleSource::new_with_redirect(
module_type_from_media_and_requested_type(
code_source.media_type,
&options.requested_module_type,
),
loaded_module_source_to_module_source_code(code_source.source),
&original_specifier,
&code_source.specifier,
code_cache_entry,
))
}
.boxed_local(),
);
}
match self.shared.modules.read(original_specifier) {
Ok(Some(module)) => {
match options.requested_module_type {
RequestedModuleType::Text | RequestedModuleType::Bytes => {
let module_source = DenoCompileModuleSource::Bytes(module.data);
return deno_core::ModuleLoadResponse::Sync(Ok(
deno_core::ModuleSource::new_with_redirect(
match options.requested_module_type {
RequestedModuleType::Text => ModuleType::Text,
RequestedModuleType::Bytes => ModuleType::Bytes,
_ => unreachable!(),
},
match options.requested_module_type {
RequestedModuleType::Text => module_source.into_for_v8(),
RequestedModuleType::Bytes => {
ModuleSourceCode::Bytes(module_source.into_bytes_for_v8())
}
_ => unreachable!(),
},
original_specifier,
module.specifier,
None,
),
));
}
RequestedModuleType::Other(_)
| RequestedModuleType::None
| RequestedModuleType::Json => {
// ignore
}
}
let media_type = module.media_type;
let (module_specifier, module_type, module_source) =
module.into_parts();
let is_maybe_cjs = match self
.shared
.cjs_tracker
.is_maybe_cjs(original_specifier, media_type)
{
Ok(is_maybe_cjs) => is_maybe_cjs,
Err(err) => {
return deno_core::ModuleLoadResponse::Sync(Err(
JsErrorBox::type_error(format!("{:?}", err)),
));
}
};
if is_maybe_cjs {
let original_specifier = original_specifier.clone();
let module_specifier = module_specifier.clone();
let shared = self.shared.clone();
deno_core::ModuleLoadResponse::Async(
async move {
let source = match module_source {
DenoCompileModuleSource::String(string) => {
Cow::Borrowed(string)
}
DenoCompileModuleSource::Bytes(module_code_bytes) => {
match module_code_bytes {
Cow::Owned(bytes) => {
Cow::Owned(from_utf8_lossy_owned(bytes))
}
Cow::Borrowed(bytes) => String::from_utf8_lossy(bytes),
}
}
};
let source = shared
.node_code_translator
.translate_cjs_to_esm(&module_specifier, Some(source))
.await
.map_err(JsErrorBox::from_err)?;
let module_source = match source {
Cow::Owned(source) => ModuleSourceCode::String(source.into()),
Cow::Borrowed(source) => {
ModuleSourceCode::String(FastString::from_static(source))
}
};
let code_cache_entry = shared
.get_code_cache(&module_specifier, module_source.as_bytes());
Ok(deno_core::ModuleSource::new_with_redirect(
module_type,
module_source,
&original_specifier,
&module_specifier,
code_cache_entry,
))
}
.boxed_local(),
)
} else {
let module_source = module_source.into_for_v8();
let code_cache_entry = self
.shared
.get_code_cache(module_specifier, module_source.as_bytes());
deno_core::ModuleLoadResponse::Sync(Ok(
deno_core::ModuleSource::new_with_redirect(
module_type,
module_source,
original_specifier,
module_specifier,
code_cache_entry,
),
))
}
}
Ok(None) => {
deno_core::ModuleLoadResponse::Sync(Err(JsErrorBox::type_error(
format!("Module not found: {}", original_specifier),
)))
}
Err(err) => deno_core::ModuleLoadResponse::Sync(Err(
JsErrorBox::type_error(format!("{:?}", err)),
)),
}
}
fn code_cache_ready(
&self,
specifier: Url,
source_hash: u64,
code_cache_data: &[u8],
) -> LocalBoxFuture<'static, ()> {
if let Some(code_cache) = &self.shared.code_cache {
code_cache.set_sync(
specifier,
deno_runtime::code_cache::CodeCacheType::EsModule,
source_hash,
code_cache_data,
);
}
std::future::ready(()).boxed_local()
}
fn get_source_map(&self, file_name: &str) -> Option<Cow<'_, [u8]>> {
let url = Url::parse(file_name).ok()?;
let data = self.shared.modules.read(&url).ok()??;
data.source_map
}
fn load_external_source_map(
&self,
source_map_url: &str,
) -> Option<Cow<'_, [u8]>> {
let url = Url::parse(source_map_url).ok()?;
let data = self.shared.modules.read(&url).ok()??;
Some(Cow::Owned(data.data.to_vec()))
}
fn source_map_source_exists(&self, source_url: &str) -> Option<bool> {
use sys_traits::FsMetadata;
let specifier = Url::parse(source_url).ok()?;
// only bother checking this for npm packages that might depend on this
if self.shared.node_resolver.in_npm_package(&specifier)
&& let Ok(path) = deno_path_util::url_to_file_path(&specifier)
{
return self.sys.fs_is_file(path).ok();
}
Some(true)
}
fn get_source_mapped_source_line(
&self,
file_name: &str,
line_number: usize,
) -> Option<String> {
let specifier = Url::parse(file_name).ok()?;
let data = self.shared.modules.read(&specifier).ok()??;
let source = String::from_utf8_lossy(&data.data);
// Do NOT use .lines(): it skips the terminating empty line.
// (due to internally using_terminator() instead of .split())
let lines: Vec<&str> = source.split('\n').collect();
if line_number >= lines.len() {
Some(format!(
"{} Couldn't format source line: Line {} is out of bounds (source may have changed at runtime)",
crate::colors::yellow("Warning"),
line_number + 1,
))
} else {
Some(lines[line_number].to_string())
}
}
}
impl NodeRequireLoader for EmbeddedModuleLoader {
fn ensure_read_permission<'a>(
&self,
permissions: &mut PermissionsContainer,
path: Cow<'a, Path>,
) -> Result<Cow<'a, Path>, JsErrorBox> {
if self.shared.modules.has_file(&path) {
// allow reading if the file is in the snapshot
return Ok(path);
}
self
.shared
.npm_registry_permission_checker
.ensure_read_permission(permissions, path)
.map_err(JsErrorBox::from_err)
}
fn load_text_file_lossy(
&self,
path: &std::path::Path,
) -> Result<FastString, JsErrorBox> {
let file_entry = self
.shared
.vfs
.file_entry(path)
.map_err(JsErrorBox::from_err)?;
let file_bytes = self
.shared
.vfs
.read_file_offset_with_len(
file_entry.transpiled_offset.unwrap_or(file_entry.offset),
)
.map_err(JsErrorBox::from_err)?;
Ok(match from_utf8_lossy_cow(file_bytes) {
Cow::Borrowed(s) => FastString::from_static(s),
Cow::Owned(s) => s.into(),
})
}
fn is_maybe_cjs(
&self,
specifier: &Url,
) -> Result<bool, PackageJsonLoadError> {
let media_type = MediaType::from_specifier(specifier);
self.shared.cjs_tracker.is_maybe_cjs(specifier, media_type)
}
}
struct StandaloneModuleLoaderFactory {
shared: Arc<SharedModuleLoaderState>,
sys: DenoRtSys,
}
impl StandaloneModuleLoaderFactory {
pub fn create_result(&self) -> CreateModuleLoaderResult {
let loader = Rc::new(EmbeddedModuleLoader {
shared: self.shared.clone(),
sys: self.sys.clone(),
});
CreateModuleLoaderResult {
module_loader: loader.clone(),
node_require_loader: loader,
}
}
}
impl ModuleLoaderFactory for StandaloneModuleLoaderFactory {
fn create_for_main(
&self,
_root_permissions: PermissionsContainer,
) -> CreateModuleLoaderResult {
self.create_result()
}
fn create_for_worker(
&self,
_parent_permissions: PermissionsContainer,
_permissions: PermissionsContainer,
) -> CreateModuleLoaderResult {
self.create_result()
}
}
struct StandaloneRootCertStoreProvider {
ca_stores: Option<Vec<String>>,
ca_data: Option<CaData>,
cell: OnceLock<Result<RootCertStore, RootCertStoreLoadError>>,
}
impl RootCertStoreProvider for StandaloneRootCertStoreProvider {
fn get_or_try_init(&self) -> Result<&RootCertStore, JsErrorBox> {
self
.cell
// get_or_try_init was not stable yet when this was written
.get_or_init(|| {
get_root_cert_store(None, self.ca_stores.clone(), self.ca_data.clone())
})
.as_ref()
.map_err(|err| JsErrorBox::from_err(err.clone()))
}
}
pub async fn run(
fs: Arc<dyn FileSystem>,
sys: DenoRtSys,
data: StandaloneData,
) -> Result<i32, AnyError> {
let StandaloneData {
metadata,
modules,
npm_snapshot,
root_path,
vfs,
} = data;
let root_cert_store_provider = Arc::new(StandaloneRootCertStoreProvider {
ca_stores: metadata.ca_stores,
ca_data: metadata.ca_data.map(CaData::Bytes),
cell: Default::default(),
});
// use a dummy npm registry url
let npm_registry_url = Url::parse("https://localhost/").unwrap();
let root_dir_url = Arc::new(Url::from_directory_path(&root_path).unwrap());
let main_module = root_dir_url.join(&metadata.entrypoint_key).unwrap();
let npm_global_cache_dir = root_path.join(".deno_compile_node_modules");
let pkg_json_resolver = Arc::new(PackageJsonResolver::new(
sys.clone(),
Some(Arc::new(PackageJsonThreadLocalCache)),
));
let npm_registry_permission_checker = {
let mode = match &metadata.node_modules {
Some(NodeModules::Managed {
node_modules_dir: Some(path),
}) => NpmRegistryReadPermissionCheckerMode::Local(PathBuf::from(path)),
Some(NodeModules::Byonm { .. }) => {
NpmRegistryReadPermissionCheckerMode::Byonm
}
Some(NodeModules::Managed {
node_modules_dir: None,
})
| None => NpmRegistryReadPermissionCheckerMode::Global(
npm_global_cache_dir.clone(),
),
};
NpmRegistryReadPermissionChecker::new(sys.clone(), mode)
};
let node_resolution_sys = NodeResolutionSys::new(sys.clone(), None);
let (in_npm_pkg_checker, npm_resolver) = match metadata.node_modules {
Some(NodeModules::Managed { node_modules_dir }) => {
// create an npmrc that uses the fake npm_registry_url to resolve packages
let npmrc = Arc::new(ResolvedNpmRc {
default_config: deno_npm::npm_rc::RegistryConfigWithUrl {
registry_url: npm_registry_url.clone(),
config: Default::default(),
},
scopes: Default::default(),
registry_configs: Default::default(),
});
let npm_cache_dir = Arc::new(NpmCacheDir::new(
&sys,
npm_global_cache_dir,
npmrc.get_all_known_registries_urls(),
));
let snapshot = npm_snapshot.unwrap();
let maybe_node_modules_path = node_modules_dir
.map(|node_modules_dir| root_path.join(node_modules_dir));
let in_npm_pkg_checker =
DenoInNpmPackageChecker::new(CreateInNpmPkgCheckerOptions::Managed(
ManagedInNpmPkgCheckerCreateOptions {
root_cache_dir_url: npm_cache_dir.root_dir_url(),
maybe_node_modules_path: maybe_node_modules_path.as_deref(),
},
));
let npm_resolution =
Arc::new(NpmResolutionCell::new(NpmResolutionSnapshot::new(snapshot)));
let npm_resolver = NpmResolver::<DenoRtSys>::new::<DenoRtSys>(
NpmResolverCreateOptions::Managed(ManagedNpmResolverCreateOptions {
npm_resolution,
npm_cache_dir,
sys: sys.clone(),
maybe_node_modules_path,
npm_system_info: Default::default(),
npmrc,
}),
);
(in_npm_pkg_checker, npm_resolver)
}
Some(NodeModules::Byonm {
root_node_modules_dir,
}) => {
let root_node_modules_dir =
root_node_modules_dir.map(|p| vfs.root().join(p));
let in_npm_pkg_checker =
DenoInNpmPackageChecker::new(CreateInNpmPkgCheckerOptions::Byonm);
let npm_resolver = NpmResolver::<DenoRtSys>::new::<DenoRtSys>(
NpmResolverCreateOptions::Byonm(ByonmNpmResolverCreateOptions {
sys: node_resolution_sys.clone(),
pkg_json_resolver: pkg_json_resolver.clone(),
root_node_modules_dir,
}),
);
(in_npm_pkg_checker, npm_resolver)
}
None => {
// Packages from different registries are already inlined in the binary,
// so no need to create actual `.npmrc` configuration.
let npmrc = create_default_npmrc();
let npm_cache_dir = Arc::new(NpmCacheDir::new(
&sys,
npm_global_cache_dir,
npmrc.get_all_known_registries_urls(),
));
let in_npm_pkg_checker =
DenoInNpmPackageChecker::new(CreateInNpmPkgCheckerOptions::Managed(
ManagedInNpmPkgCheckerCreateOptions {
root_cache_dir_url: npm_cache_dir.root_dir_url(),
maybe_node_modules_path: None,
},
));
let npm_resolution = Arc::new(NpmResolutionCell::default());
let npm_resolver = NpmResolver::<DenoRtSys>::new::<DenoRtSys>(
NpmResolverCreateOptions::Managed(ManagedNpmResolverCreateOptions {
npm_resolution,
sys: sys.clone(),
npm_cache_dir,
maybe_node_modules_path: None,
npm_system_info: Default::default(),
npmrc: create_default_npmrc(),
}),
);
(in_npm_pkg_checker, npm_resolver)
}
};
let has_node_modules_dir = npm_resolver.root_node_modules_path().is_some();
let node_resolver = Arc::new(NodeResolver::new(
in_npm_pkg_checker.clone(),
DenoIsBuiltInNodeModuleChecker,
npm_resolver.clone(),
pkg_json_resolver.clone(),
node_resolution_sys,
node_resolver::NodeResolverOptions::default(),
));
let require_modules = metadata
.require_modules
.iter()
.map(|key| root_dir_url.join(key).unwrap())
.collect::<Vec<_>>();
let cjs_tracker = Arc::new(CjsTracker::new(
in_npm_pkg_checker.clone(),
pkg_json_resolver.clone(),
if metadata.unstable_config.detect_cjs {
IsCjsResolutionMode::ImplicitTypeCommonJs
} else if metadata.workspace_resolver.package_jsons.is_empty() {
IsCjsResolutionMode::Disabled
} else {
IsCjsResolutionMode::ExplicitTypeCommonJs
},
require_modules.clone(),
));
let npm_req_resolver = Arc::new(NpmReqResolver::new(NpmReqResolverOptions {
sys: sys.clone(),
in_npm_pkg_checker: in_npm_pkg_checker.clone(),
node_resolver: node_resolver.clone(),
npm_resolver: npm_resolver.clone(),
}));
let cjs_esm_code_analyzer =
CjsCodeAnalyzer::new(cjs_tracker.clone(), modules.clone(), sys.clone());
let cjs_module_export_analyzer = Arc::new(CjsModuleExportAnalyzer::new(
cjs_esm_code_analyzer,
in_npm_pkg_checker,
node_resolver.clone(),
npm_resolver.clone(),
pkg_json_resolver.clone(),
sys.clone(),
));
let node_code_translator = Arc::new(NodeCodeTranslator::new(
cjs_module_export_analyzer,
node_resolver::analyze::NodeCodeTranslatorMode::ModuleLoader,
));
let workspace_resolver = {
let import_map = match metadata.workspace_resolver.import_map {
Some(import_map) => Some(
import_map::parse_from_json_with_options(
root_dir_url.join(&import_map.specifier).unwrap(),
&import_map.json,
import_map::ImportMapOptions {
address_hook: None,
expand_imports: true,
},
)?
.import_map,
),
None => None,
};
let pkg_jsons = metadata
.workspace_resolver
.package_jsons
.into_iter()
.map(|(relative_path, json)| {
let path = root_dir_url
.join(&relative_path)
.unwrap()
.to_file_path()
.unwrap();
let pkg_json =
deno_package_json::PackageJson::load_from_value(path, json)?;
Ok(Arc::new(pkg_json))
})
.collect::<Result<Vec<_>, AnyError>>()?;
WorkspaceResolver::new_raw(
root_dir_url.clone(),
import_map,
metadata
.workspace_resolver
.jsr_pkgs
.iter()
.map(|pkg| ResolverWorkspaceJsrPackage {
is_link: false, // only used for enhancing the diagnostic, which isn't shown in deno compile
base: root_dir_url.join(&pkg.relative_base).unwrap(),
name: pkg.name.clone(),
version: pkg.version.clone(),
exports: pkg.exports.clone(),
})
.collect(),
pkg_jsons,
metadata.workspace_resolver.pkg_json_resolution,
if metadata.unstable_config.sloppy_imports {
SloppyImportsOptions::Enabled
} else {
SloppyImportsOptions::Unspecified
},
Default::default(),
sys.clone(),
)
};
let code_cache = match metadata.code_cache_key {
Some(code_cache_key) => Some(Arc::new(DenoCompileCodeCache::new(
root_path.with_file_name(format!(
"{}.cache",
root_path.file_name().unwrap().to_string_lossy()
)),
code_cache_key,
))),
None => {
log::debug!("Code cache disabled.");
None
}
};
let module_loader_factory = StandaloneModuleLoaderFactory {
shared: Arc::new(SharedModuleLoaderState {
cjs_tracker: cjs_tracker.clone(),
code_cache: code_cache.clone(),
modules,
node_code_translator: node_code_translator.clone(),
node_resolver: node_resolver.clone(),
npm_module_loader: Arc::new(NpmModuleLoader::new(
cjs_tracker.clone(),
node_code_translator,
sys.clone(),
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | true |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/rt/integration_tests_runner.rs | cli/rt/integration_tests_runner.rs | // Copyright 2018-2025 the Deno authors. MIT license.
/// No-op entry point.
///
/// This binary target exists only so that `cargo test` builds the
/// executable as a side effect; it performs no work itself.
pub fn main() {
  // this file exists to cause the executable to be built when running cargo test
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/rt/binary.rs | cli/rt/binary.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::borrow::Cow;
use std::collections::HashMap;
use std::ffi::OsString;
use std::io::ErrorKind;
use std::path::Path;
use std::path::PathBuf;
use std::sync::Arc;
use deno_core::FastString;
use deno_core::ModuleCodeBytes;
use deno_core::ModuleSourceCode;
use deno_core::ModuleType;
use deno_core::anyhow::Context;
use deno_core::anyhow::bail;
use deno_core::error::AnyError;
use deno_core::serde_json;
use deno_core::url::Url;
use deno_error::JsError;
use deno_error::JsErrorBox;
use deno_lib::standalone::binary::DenoRtDeserializable;
use deno_lib::standalone::binary::MAGIC_BYTES;
use deno_lib::standalone::binary::Metadata;
use deno_lib::standalone::binary::RemoteModuleEntry;
use deno_lib::standalone::binary::SpecifierDataStore;
use deno_lib::standalone::binary::SpecifierId;
use deno_lib::standalone::virtual_fs::VirtualDirectory;
use deno_lib::standalone::virtual_fs::VirtualDirectoryEntries;
use deno_media_type::MediaType;
use deno_npm::NpmPackageId;
use deno_npm::resolution::SerializedNpmResolutionSnapshot;
use deno_npm::resolution::SerializedNpmResolutionSnapshotPackage;
use deno_npm::resolution::ValidSerializedNpmResolutionSnapshot;
use deno_semver::StackString;
use deno_semver::package::PackageReq;
use indexmap::IndexMap;
use sys_traits::FsRead;
use thiserror::Error;
use crate::file_system::FileBackedVfs;
use crate::file_system::VfsRoot;
/// Everything extracted from a `deno compile` standalone binary's
/// embedded data section, ready for the runtime to use.
pub struct StandaloneData {
  /// Deserialized run metadata (argv, env vars, otel config, etc.).
  pub metadata: Metadata,
  /// Lookup for embedded remote modules and VFS-backed files.
  pub modules: Arc<StandaloneModules>,
  /// Pre-resolved npm dependency snapshot, if npm packages were bundled.
  pub npm_snapshot: Option<ValidSerializedNpmResolutionSnapshot>,
  /// Synthetic root directory (under the temp dir) the VFS is mounted at.
  pub root_path: PathBuf,
  /// Virtual file system backed by the binary's embedded file data.
  pub vfs: Arc<FileBackedVfs>,
}
/// This function will try to run this binary as a standalone binary
/// produced by `deno compile`. It determines if this is a standalone
/// binary by looking for the embedded data section marked with the
/// magic string `d3n0l4nd`. If found, the embedded bundle is
/// deserialized and returned; otherwise an error is returned.
pub fn extract_standalone(
  cli_args: Cow<[OsString]>,
) -> Result<StandaloneData, AnyError> {
  let data = find_section()?;
  // Derive an executable-specific root directory in the temp dir; the VFS
  // presents the embedded files as if they lived under this path.
  let root_path = {
    let maybe_current_exe = std::env::current_exe().ok();
    let current_exe_name = maybe_current_exe
      .as_ref()
      .and_then(|p| p.file_name())
      .map(|p| p.to_string_lossy())
      // should never happen
      .unwrap_or_else(|| Cow::Borrowed("binary"));
    std::env::temp_dir().join(format!("deno-compile-{}", current_exe_name))
  };
  let root_url = deno_path_util::url_from_directory_path(&root_path)?;
  let DeserializedDataSection {
    mut metadata,
    npm_snapshot,
    modules_store: remote_modules,
    vfs_root_entries,
    vfs_files_data,
  } = deserialize_binary_data_section(&root_url, data)?;
  let cli_args = cli_args.into_owned();
  // Skip argv[0] (the executable path). saturating_sub guards against a
  // usize underflow panic in the pathological case of an empty argv
  // (possible when spawned via exec without arguments).
  metadata.argv.reserve(cli_args.len().saturating_sub(1));
  for arg in cli_args.into_iter().skip(1) {
    metadata.argv.push(arg.into_string().unwrap());
  }
  let vfs = {
    let fs_root = VfsRoot {
      dir: VirtualDirectory {
        // align the name of the directory with the root dir
        name: root_path
          .file_name()
          .unwrap()
          .to_string_lossy()
          .into_owned(),
        entries: vfs_root_entries,
      },
      root_path: root_path.clone(),
      start_file_offset: 0,
    };
    Arc::new(FileBackedVfs::new(
      Cow::Borrowed(vfs_files_data),
      fs_root,
      metadata.vfs_case_sensitivity,
    ))
  };
  Ok(StandaloneData {
    metadata,
    modules: Arc::new(StandaloneModules {
      modules: remote_modules,
      vfs: vfs.clone(),
    }),
    npm_snapshot,
    root_path,
    vfs,
  })
}
/// Locates the embedded standalone data section (`d3n0l4nd`) in the
/// current executable via libsui, falling back to a raw file scan on
/// Windows when section lookup fails or is explicitly requested.
fn find_section() -> Result<&'static [u8], AnyError> {
  // Escape hatch to force the file-based fallback (Windows only).
  #[cfg(windows)]
  if std::env::var_os("DENO_INTERNAL_RT_USE_FILE_FALLBACK").is_some() {
    return read_from_file_fallback();
  }
  match libsui::find_section("d3n0l4nd")
    .context("Failed reading standalone binary section.")
  {
    Ok(Some(data)) => Ok(data),
    // The section API worked but the marker is absent: this is not a
    // standalone binary.
    Ok(None) => bail!("Could not find standalone binary section."),
    Err(err) => {
      // On Windows, try scanning the executable file directly before
      // surfacing the original error.
      #[cfg(windows)]
      if let Ok(data) = read_from_file_fallback() {
        return Ok(data);
      }
      Err(err)
    }
  }
}
/// This is a temporary hacky fallback until we can find
/// a fix for https://github.com/denoland/deno/issues/28982
#[cfg(windows)]
fn read_from_file_fallback() -> Result<&'static [u8], AnyError> {
  use std::sync::OnceLock;
  // Naive forward substring search over raw bytes.
  fn find_in_bytes(bytes: &[u8], needle: &[u8]) -> Option<usize> {
    bytes.windows(needle.len()).position(|n| n == needle)
  }
  // Keep the file and its mapping alive for the whole process so the
  // returned &'static [u8] stays valid.
  static FILE: OnceLock<std::fs::File> = OnceLock::new();
  static MMAP_FILE: OnceLock<memmap2::Mmap> = OnceLock::new();
  // "D3N0L4ND" encoded as UTF-16LE (resource section name)
  const RESOURCE_SECTION_HEADER_NAME: &[u8] = &[
    0x44, 0x00, 0x33, 0x00, 0x4E, 0x00, 0x30, 0x00, 0x4C, 0x00, 0x34, 0x00,
    0x4E, 0x00, 0x44, 0x00,
  ];
  const MAGIC_BYTES: &[u8] = b"d3n0l4nd";
  let file_path = std::env::current_exe()?;
  let file = FILE.get_or_init(|| std::fs::File::open(file_path).unwrap());
  let mmap = MMAP_FILE.get_or_init(|| {
    // SAFETY: memory mapped file creation
    unsafe { memmap2::Mmap::map(file).unwrap() }
  });
  // the code in this file will cause this to appear twice in the binary,
  // so skip over the first one
  let Some(marker_pos) = find_in_bytes(mmap, RESOURCE_SECTION_HEADER_NAME)
  else {
    bail!("Failed to find first section name.");
  };
  let next_bytes = &mmap[marker_pos + RESOURCE_SECTION_HEADER_NAME.len()..];
  let Some(marker_pos) =
    find_in_bytes(next_bytes, RESOURCE_SECTION_HEADER_NAME)
  else {
    bail!("Failed to find second section name.");
  };
  let next_bytes =
    &next_bytes[marker_pos + RESOURCE_SECTION_HEADER_NAME.len()..];
  // The payload is bracketed by ascii magic bytes at both ends; take
  // everything from the first occurrence to the last one (inclusive).
  let Some(ascii_pos) = find_in_bytes(next_bytes, MAGIC_BYTES) else {
    bail!("Failed to find first magic bytes.");
  };
  let next_bytes = &next_bytes[ascii_pos..];
  let Some(last_pos) = next_bytes
    .windows(MAGIC_BYTES.len())
    .rposition(|w| w == MAGIC_BYTES)
  else {
    bail!("Failed to find end magic bytes.")
  };
  Ok(&next_bytes[..last_pos + MAGIC_BYTES.len()])
}
/// The parsed pieces of the binary's data section, in the order they are
/// laid out on disk (metadata, npm snapshot, modules, VFS).
pub struct DeserializedDataSection {
  pub metadata: Metadata,
  /// `None` when no npm packages were bundled.
  pub npm_snapshot: Option<ValidSerializedNpmResolutionSnapshot>,
  pub modules_store: RemoteModulesStore,
  /// Directory tree of the embedded virtual file system.
  pub vfs_root_entries: VirtualDirectoryEntries,
  /// Raw backing bytes for VFS file contents (offsets index into this).
  pub vfs_files_data: &'static [u8],
}
/// Parses the embedded data section. The layout is strictly sequential:
/// magic bytes, then metadata, npm snapshot, specifier table, redirects,
/// remote modules, VFS tree + file data, and closing magic bytes.
pub fn deserialize_binary_data_section(
  root_dir_url: &Url,
  data: &'static [u8],
) -> Result<DeserializedDataSection, AnyError> {
  // Consumes MAGIC_BYTES from the front of `input`; the bool reports
  // whether they matched.
  fn read_magic_bytes(input: &[u8]) -> Result<(&[u8], bool), AnyError> {
    if input.len() < MAGIC_BYTES.len() {
      bail!("Unexpected end of data. Could not find magic bytes.");
    }
    let (magic_bytes, input) = input.split_at(MAGIC_BYTES.len());
    if magic_bytes != MAGIC_BYTES {
      return Ok((input, false));
    }
    Ok((input, true))
  }
  let (input, found) = read_magic_bytes(data)?;
  if !found {
    bail!("Did not find magic bytes.");
  }
  // 1. Metadata
  let (input, data) =
    read_bytes_with_u64_len(input).context("reading metadata")?;
  let metadata: Metadata =
    serde_json::from_slice(data).context("deserializing metadata")?;
  // 2. Npm snapshot (zero-length section means no npm packages bundled)
  let (input, data) =
    read_bytes_with_u64_len(input).context("reading npm snapshot")?;
  let npm_snapshot = if data.is_empty() {
    None
  } else {
    Some(deserialize_npm_snapshot(data).context("deserializing npm snapshot")?)
  };
  // 3. Specifiers
  let (input, specifiers_store) =
    SpecifierStore::deserialize(root_dir_url, input)
      .context("deserializing specifiers")?;
  // 4. Redirects
  let (input, redirects_store) =
    SpecifierDataStore::<SpecifierId>::deserialize(input)
      .context("deserializing redirects")?;
  // 5. Remote modules
  let (input, remote_modules_store) =
    SpecifierDataStore::<RemoteModuleEntry<'static>>::deserialize(input)
      .context("deserializing remote modules")?;
  // 6. VFS
  let (input, data) = read_bytes_with_u64_len(input).context("vfs")?;
  let vfs_root_entries: VirtualDirectoryEntries =
    serde_json::from_slice(data).context("deserializing vfs data")?;
  let (input, vfs_files_data) =
    read_bytes_with_u64_len(input).context("reading vfs files data")?;
  // finally ensure we read the magic bytes at the end
  let (_input, found) = read_magic_bytes(input)?;
  if !found {
    bail!("Could not find magic bytes at end of data.");
  }
  let modules_store = RemoteModulesStore::new(
    specifiers_store,
    redirects_store,
    remote_modules_store,
  );
  Ok(DeserializedDataSection {
    metadata,
    npm_snapshot,
    modules_store,
    vfs_root_entries,
    vfs_files_data,
  })
}
struct SpecifierStore {
data: IndexMap<Arc<Url>, SpecifierId>,
reverse: IndexMap<SpecifierId, Arc<Url>>,
}
impl SpecifierStore {
pub fn deserialize<'a>(
root_dir_url: &Url,
input: &'a [u8],
) -> std::io::Result<(&'a [u8], Self)> {
let (input, len) = read_u32_as_usize(input)?;
let mut data = IndexMap::with_capacity(len);
let mut reverse = IndexMap::with_capacity(len);
let mut input = input;
for _ in 0..len {
let (new_input, specifier_str) = read_string_lossy(input)?;
let specifier = match Url::parse(&specifier_str) {
Ok(url) => url,
Err(err) => match root_dir_url.join(&specifier_str) {
Ok(url) => url,
Err(_) => {
return Err(std::io::Error::new(
std::io::ErrorKind::InvalidData,
err,
));
}
},
};
let (new_input, id) = SpecifierId::deserialize(new_input)?;
let specifier = Arc::new(specifier);
data.insert(specifier.clone(), id);
reverse.insert(id, specifier);
input = new_input;
}
Ok((input, Self { data, reverse }))
}
pub fn get_id(&self, specifier: &Url) -> Option<SpecifierId> {
self.data.get(specifier).cloned()
}
pub fn get_specifier(&self, specifier_id: SpecifierId) -> Option<&Url> {
self.reverse.get(&specifier_id).map(|url| url.as_ref())
}
}
/// Unified lookup over the binary's embedded modules: remote modules from
/// the module store and local files from the virtual file system.
pub struct StandaloneModules {
  modules: RemoteModulesStore,
  vfs: Arc<FileBackedVfs>,
}
impl StandaloneModules {
  /// Resolves redirects for remote specifiers; `file:` specifiers are
  /// always returned as-is.
  pub fn resolve_specifier<'a>(
    &'a self,
    specifier: &'a Url,
  ) -> Result<Option<&'a Url>, TooManyRedirectsError> {
    if specifier.scheme() == "file" {
      Ok(Some(specifier))
    } else {
      self.modules.resolve_specifier(specifier)
    }
  }
  /// Returns true when the path exists in the embedded VFS.
  pub fn has_file(&self, path: &Path) -> bool {
    self.vfs.file_entry(path).is_ok()
  }
  /// Reads a module's bytes plus any pre-computed artifacts (transpiled
  /// source, source map, CJS export analysis). For `file:` specifiers the
  /// VFS is consulted first, then the real file system; remote specifiers
  /// go to the embedded module store. Returns `Ok(None)` when not found.
  pub fn read<'a>(
    &'a self,
    specifier: &'a Url,
  ) -> Result<Option<DenoCompileModuleData<'a>>, JsErrorBox> {
    if specifier.scheme() == "file" {
      let path = deno_path_util::url_to_file_path(specifier)
        .map_err(JsErrorBox::from_err)?;
      let mut transpiled = None;
      let mut source_map = None;
      let mut cjs_export_analysis = None;
      let mut is_valid_utf8 = false;
      let bytes = match self.vfs.file_entry(&path) {
        Ok(entry) => {
          let bytes = self
            .vfs
            .read_file_all(entry)
            .map_err(JsErrorBox::from_err)?;
          is_valid_utf8 = entry.is_valid_utf8;
          // Side-channel artifacts are optional; failures to read them
          // are silently treated as absent.
          transpiled = entry
            .transpiled_offset
            .and_then(|t| self.vfs.read_file_offset_with_len(t).ok());
          source_map = entry
            .source_map_offset
            .and_then(|t| self.vfs.read_file_offset_with_len(t).ok());
          cjs_export_analysis = entry
            .cjs_export_analysis_offset
            .and_then(|t| self.vfs.read_file_offset_with_len(t).ok());
          bytes
        }
        Err(err) if err.kind() == ErrorKind::NotFound => {
          // actually use the real file system here
          #[allow(clippy::disallowed_types)]
          match sys_traits::impls::RealSys.fs_read(&path) {
            Ok(bytes) => bytes,
            Err(err) if err.kind() == ErrorKind::NotFound => {
              return Ok(None);
            }
            Err(err) => return Err(JsErrorBox::from_err(err)),
          }
        }
        Err(err) => return Err(JsErrorBox::from_err(err)),
      };
      Ok(Some(DenoCompileModuleData {
        media_type: MediaType::from_specifier(specifier),
        specifier,
        is_valid_utf8,
        data: bytes,
        transpiled,
        source_map,
        cjs_export_analysis,
      }))
    } else {
      self.modules.read(specifier).map_err(JsErrorBox::from_err)
    }
  }
}
/// A single module's bytes and optional derived artifacts, as stored in
/// the standalone binary.
pub struct DenoCompileModuleData<'a> {
  pub specifier: &'a Url,
  pub media_type: MediaType,
  /// True when `data` is known to be valid UTF-8, allowing a zero-copy
  /// string view in `into_parts`.
  pub is_valid_utf8: bool,
  pub data: Cow<'static, [u8]>,
  /// Pre-transpiled source (preferred over `data` when present).
  pub transpiled: Option<Cow<'static, [u8]>>,
  pub source_map: Option<Cow<'static, [u8]>>,
  pub cjs_export_analysis: Option<Cow<'static, [u8]>>,
}
impl<'a> DenoCompileModuleData<'a> {
  /// Splits into (specifier, module type, source), picking the transpiled
  /// bytes when available and mapping the media type to the module type
  /// V8 should load it as.
  pub fn into_parts(self) -> (&'a Url, ModuleType, DenoCompileModuleSource) {
    // Zero-copy conversion to a &str when the bytes are borrowed and
    // known-valid UTF-8; otherwise fall back to raw bytes.
    fn into_string_unsafe(
      is_valid_utf8: bool,
      data: Cow<'static, [u8]>,
    ) -> DenoCompileModuleSource {
      match data {
        Cow::Borrowed(d) if is_valid_utf8 => {
          DenoCompileModuleSource::String(
            // SAFETY: we know this is a valid utf8 string
            unsafe { std::str::from_utf8_unchecked(d) },
          )
        }
        Cow::Borrowed(_) => DenoCompileModuleSource::Bytes(data),
        Cow::Owned(d) => DenoCompileModuleSource::Bytes(Cow::Owned(d)),
      }
    }
    let data = self.transpiled.unwrap_or(self.data);
    let (media_type, source) = match self.media_type {
      MediaType::JavaScript
      | MediaType::Jsx
      | MediaType::Mjs
      | MediaType::Cjs
      | MediaType::TypeScript
      | MediaType::Mts
      | MediaType::Cts
      | MediaType::Dts
      | MediaType::Dmts
      | MediaType::Dcts
      | MediaType::Tsx => (
        ModuleType::JavaScript,
        into_string_unsafe(self.is_valid_utf8, data),
      ),
      MediaType::Json => (
        ModuleType::Json,
        into_string_unsafe(self.is_valid_utf8, data),
      ),
      MediaType::Wasm => {
        (ModuleType::Wasm, DenoCompileModuleSource::Bytes(data))
      }
      // just assume javascript if we made it here
      MediaType::Css
      | MediaType::Html
      | MediaType::Jsonc
      | MediaType::Json5
      | MediaType::SourceMap
      | MediaType::Sql
      | MediaType::Unknown => {
        (ModuleType::JavaScript, DenoCompileModuleSource::Bytes(data))
      }
    };
    (self.specifier, media_type, source)
  }
}
#[derive(Debug)]
pub enum DenoCompileModuleSource {
  String(&'static str),
  Bytes(Cow<'static, [u8]>),
}
impl DenoCompileModuleSource {
  /// Converts the source into the representation deno_core expects for
  /// module code.
  pub fn into_for_v8(self) -> ModuleSourceCode {
    // todo(https://github.com/denoland/deno_core/pull/943): store whether
    // the string is ascii or not ahead of time so we can avoid the is_ascii()
    // check in FastString::from_static
    match self {
      DenoCompileModuleSource::String(text) => {
        ModuleSourceCode::String(FastString::from_static(text))
      }
      DenoCompileModuleSource::Bytes(bytes) => {
        ModuleSourceCode::Bytes(module_source_into_bytes(bytes))
      }
    }
  }
  /// Converts the source into raw module code bytes.
  pub fn into_bytes_for_v8(self) -> ModuleCodeBytes {
    match self {
      Self::String(text) => text.as_bytes().into(),
      Self::Bytes(bytes) => module_source_into_bytes(bytes),
    }
  }
}
fn module_source_into_bytes(data: Cow<'static, [u8]>) -> ModuleCodeBytes {
match data {
Cow::Borrowed(d) => d.into(),
Cow::Owned(d) => d.into_boxed_slice().into(),
}
}
/// Raised when following serialized redirects for a specifier exceeds the
/// redirect limit (indicating a cycle or an overly long chain).
#[derive(Debug, Error, JsError)]
#[class(generic)]
#[error("Too many redirects resolving: {0}")]
pub struct TooManyRedirectsError(Url);
/// Store of remote (non-`file:`) modules embedded in the binary, with
/// redirect information recorded at compile time.
pub struct RemoteModulesStore {
  specifiers: SpecifierStore,
  redirects: SpecifierDataStore<SpecifierId>,
  remote_modules: SpecifierDataStore<RemoteModuleEntry<'static>>,
}
impl RemoteModulesStore {
  fn new(
    specifiers: SpecifierStore,
    redirects: SpecifierDataStore<SpecifierId>,
    remote_modules: SpecifierDataStore<RemoteModuleEntry<'static>>,
  ) -> Self {
    Self {
      specifiers,
      redirects,
      remote_modules,
    }
  }
  /// Follows recorded redirects for `specifier` (at most 10 hops) and
  /// returns the final specifier, or `Ok(None)` when unknown.
  pub fn resolve_specifier<'a>(
    &'a self,
    specifier: &'a Url,
  ) -> Result<Option<&'a Url>, TooManyRedirectsError> {
    let Some(mut current) = self.specifiers.get_id(specifier) else {
      return Ok(None);
    };
    let mut count = 0;
    loop {
      if count > 10 {
        return Err(TooManyRedirectsError(specifier.clone()));
      }
      match self.redirects.get(current) {
        Some(to) => {
          current = *to;
          count += 1;
        }
        None => {
          // No redirect hops taken: hand back the caller's borrow so the
          // lifetime stays tied to the input, not the store.
          if count == 0 {
            return Ok(Some(specifier));
          } else {
            return Ok(self.specifiers.get_specifier(current));
          }
        }
      }
    }
  }
  /// Reads the module data for `original_specifier`, following redirects
  /// (at most 10 hops). Returns `Ok(None)` when the specifier or its
  /// redirect target is not in the store.
  pub fn read<'a>(
    &'a self,
    original_specifier: &'a Url,
  ) -> Result<Option<DenoCompileModuleData<'a>>, TooManyRedirectsError> {
    #[allow(clippy::ptr_arg)]
    fn handle_cow_ref(data: &Cow<'static, [u8]>) -> Cow<'static, [u8]> {
      match data {
        Cow::Borrowed(data) => Cow::Borrowed(data),
        Cow::Owned(data) => {
          // this variant should never happen because the data
          // should always be borrowed static in denort
          debug_assert!(false);
          Cow::Owned(data.clone())
        }
      }
    }
    let mut count = 0;
    let Some(mut specifier) = self.specifiers.get_id(original_specifier) else {
      return Ok(None);
    };
    loop {
      if count > 10 {
        return Err(TooManyRedirectsError(original_specifier.clone()));
      }
      match self.redirects.get(specifier) {
        Some(to) => {
          specifier = *to;
          count += 1;
        }
        None => {
          let Some(entry) = self.remote_modules.get(specifier) else {
            return Ok(None);
          };
          return Ok(Some(DenoCompileModuleData {
            specifier: if count == 0 {
              original_specifier
            } else {
              self.specifiers.get_specifier(specifier).unwrap()
            },
            media_type: entry.media_type,
            is_valid_utf8: entry.is_valid_utf8,
            data: handle_cow_ref(&entry.data),
            transpiled: entry.maybe_transpiled.as_ref().map(handle_cow_ref),
            source_map: entry.maybe_source_map.as_ref().map(handle_cow_ref),
            cjs_export_analysis: entry
              .maybe_cjs_export_analysis
              .as_ref()
              .map(handle_cow_ref),
          }));
        }
      }
    }
  }
}
/// Deserializes the bundled npm resolution snapshot. Layout: package-id
/// table, then root-package requirements, then per-package dependency
/// lists (indexed into the id table).
fn deserialize_npm_snapshot(
  input: &[u8],
) -> Result<ValidSerializedNpmResolutionSnapshot, AnyError> {
  fn parse_id(input: &[u8]) -> Result<(&[u8], NpmPackageId), AnyError> {
    let (input, id) = read_string_lossy(input)?;
    let id = NpmPackageId::from_serialized(&id)?;
    Ok((input, id))
  }
  // Parses a `req -> package` root mapping entry; ids are stored as
  // indexes into the package-id table.
  #[allow(clippy::needless_lifetimes)] // clippy bug
  #[allow(clippy::type_complexity)]
  fn parse_root_package<'a>(
    id_to_npm_id: &'a impl Fn(usize) -> Result<NpmPackageId, AnyError>,
  ) -> impl Fn(&[u8]) -> Result<(&[u8], (PackageReq, NpmPackageId)), AnyError> + 'a
  {
    |input| {
      let (input, req) = read_string_lossy(input)?;
      let req = PackageReq::from_str(&req)?;
      let (input, id) = read_u32_as_usize(input)?;
      Ok((input, (req, id_to_npm_id(id)?)))
    }
  }
  // Parses a single `name -> package` dependency entry.
  #[allow(clippy::needless_lifetimes)] // clippy bug
  #[allow(clippy::type_complexity)]
  fn parse_package_dep<'a>(
    id_to_npm_id: &'a impl Fn(usize) -> Result<NpmPackageId, AnyError>,
  ) -> impl Fn(&[u8]) -> Result<(&[u8], (StackString, NpmPackageId)), AnyError> + 'a
  {
    |input| {
      let (input, req) = read_string_lossy(input)?;
      let (input, id) = read_u32_as_usize(input)?;
      let req = StackString::from_cow(req);
      Ok((input, (req, id_to_npm_id(id)?)))
    }
  }
  // Parses one package record; only the dependency map is serialized —
  // all other fields use defaults.
  fn parse_package<'a>(
    input: &'a [u8],
    id: NpmPackageId,
    id_to_npm_id: &impl Fn(usize) -> Result<NpmPackageId, AnyError>,
  ) -> Result<(&'a [u8], SerializedNpmResolutionSnapshotPackage), AnyError> {
    let (input, deps_len) = read_u32_as_usize(input)?;
    let (input, dependencies) =
      parse_hashmap_n_times(input, deps_len, parse_package_dep(id_to_npm_id))?;
    Ok((
      input,
      SerializedNpmResolutionSnapshotPackage {
        id,
        system: Default::default(),
        dist: Default::default(),
        dependencies,
        optional_dependencies: Default::default(),
        optional_peer_dependencies: Default::default(),
        has_bin: false,
        has_scripts: false,
        is_deprecated: false,
        extra: Default::default(),
      },
    ))
  }
  let (input, packages_len) = read_u32_as_usize(input)?;
  // get a hashmap of all the npm package ids to their serialized ids
  let (input, data_ids_to_npm_ids) =
    parse_vec_n_times(input, packages_len, parse_id)
      .context("deserializing id")?;
  let data_id_to_npm_id = |id: usize| {
    data_ids_to_npm_ids
      .get(id)
      .cloned()
      .ok_or_else(|| deno_core::anyhow::anyhow!("Invalid npm package id"))
  };
  let (input, root_packages_len) = read_u32_as_usize(input)?;
  let (input, root_packages) = parse_hashmap_n_times(
    input,
    root_packages_len,
    parse_root_package(&data_id_to_npm_id),
  )
  .context("deserializing root package")?;
  let (input, packages) =
    parse_vec_n_times_with_index(input, packages_len, |input, index| {
      parse_package(input, data_id_to_npm_id(index)?, &data_id_to_npm_id)
    })
    .context("deserializing package")?;
  // The snapshot must consume the whole buffer; leftovers indicate a
  // format mismatch.
  if !input.is_empty() {
    bail!("Unexpected data left over");
  }
  Ok(
    SerializedNpmResolutionSnapshot {
      packages,
      root_packages,
    }
    // this is ok because we have already verified that all the
    // identifiers found in the snapshot are valid via the
    // npm package id -> npm package id mapping
    .into_valid_unsafe(),
  )
}
/// Runs `parse` exactly `times` times against a shrinking input slice,
/// collecting the resulting key/value pairs into a `HashMap`. Returns the
/// unread remainder alongside the map.
fn parse_hashmap_n_times<TKey: std::cmp::Eq + std::hash::Hash, TValue>(
  mut input: &[u8],
  times: usize,
  parse: impl Fn(&[u8]) -> Result<(&[u8], (TKey, TValue)), AnyError>,
) -> Result<(&[u8], HashMap<TKey, TValue>), AnyError> {
  let mut entries = HashMap::with_capacity(times);
  for _ in 0..times {
    let (rest, (key, value)) = parse(input)?;
    entries.insert(key, value);
    input = rest;
  }
  Ok((input, entries))
}
/// Runs `parse` exactly `times` times, collecting results into a `Vec`.
/// Thin wrapper over the indexed variant that ignores the index.
fn parse_vec_n_times<TResult>(
  input: &[u8],
  times: usize,
  parse: impl Fn(&[u8]) -> Result<(&[u8], TResult), AnyError>,
) -> Result<(&[u8], Vec<TResult>), AnyError> {
  parse_vec_n_times_with_index(input, times, |data, _| parse(data))
}
/// Runs `parse` exactly `times` times against a shrinking input slice,
/// passing each invocation its zero-based index. Returns the unread
/// remainder alongside the collected results.
fn parse_vec_n_times_with_index<TResult>(
  mut input: &[u8],
  times: usize,
  parse: impl Fn(&[u8], usize) -> Result<(&[u8], TResult), AnyError>,
) -> Result<(&[u8], Vec<TResult>), AnyError> {
  let mut items = Vec::with_capacity(times);
  for index in 0..times {
    let (rest, item) = parse(input, index)?;
    items.push(item);
    input = rest;
  }
  Ok((input, items))
}
/// Reads a little-endian u64 length prefix, then that many bytes.
/// Returns `(remainder, data)`.
fn read_bytes_with_u64_len(input: &[u8]) -> std::io::Result<(&[u8], &[u8])> {
  let (rest, len) = read_u64(input)?;
  read_bytes(rest, len as usize)
}
/// Reads a little-endian u32 length prefix, then that many bytes.
/// Returns `(remainder, data)`.
fn read_bytes_with_u32_len(input: &[u8]) -> std::io::Result<(&[u8], &[u8])> {
  let (rest, len) = read_u32_as_usize(input)?;
  read_bytes(rest, len)
}
/// Splits off the first `len` bytes of `input`, failing with
/// `InvalidData` when the buffer is too short. Returns `(remainder, taken)`.
fn read_bytes(input: &[u8], len: usize) -> std::io::Result<(&[u8], &[u8])> {
  check_has_len(input, len)?;
  let (taken, rest) = input.split_at(len);
  Ok((rest, taken))
}
/// Verifies that `input` contains at least `len` bytes, producing an
/// `InvalidData` error otherwise.
#[inline(always)]
fn check_has_len(input: &[u8], len: usize) -> std::io::Result<()> {
  if input.len() >= len {
    return Ok(());
  }
  Err(std::io::Error::new(
    std::io::ErrorKind::InvalidData,
    "Unexpected end of data",
  ))
}
/// Reads a u32-length-prefixed string, replacing invalid UTF-8 sequences
/// with the replacement character. Returns `(remainder, string)`.
fn read_string_lossy(input: &[u8]) -> std::io::Result<(&[u8], Cow<'_, str>)> {
  let (rest, bytes) = read_bytes_with_u32_len(input)?;
  Ok((rest, String::from_utf8_lossy(bytes)))
}
/// Reads a little-endian u32 and widens it to usize.
/// Returns `(remainder, value)`.
fn read_u32_as_usize(input: &[u8]) -> std::io::Result<(&[u8], usize)> {
  let (rest, raw) = read_bytes(input, 4)?;
  let value = u32::from_le_bytes(raw.try_into().unwrap());
  Ok((rest, value as usize))
}
/// Reads a little-endian u64. Returns `(remainder, value)`.
fn read_u64(input: &[u8]) -> std::io::Result<(&[u8], u64)> {
  let (rest, raw) = read_bytes(input, 8)?;
  Ok((rest, u64::from_le_bytes(raw.try_into().unwrap())))
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/rt/main.rs | cli/rt/main.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::borrow::Cow;
use std::env;
use std::sync::Arc;
use deno_core::error::AnyError;
use deno_lib::util::result::js_error_downcast_ref;
use deno_lib::version::otel_runtime_config;
use deno_runtime::deno_telemetry::OtelConfig;
use deno_runtime::fmt_errors::format_js_error;
use deno_runtime::tokio_util::create_and_run_current_thread_with_maybe_metrics;
use deno_terminal::colors;
use indexmap::IndexMap;
use self::binary::extract_standalone;
use self::file_system::DenoRtSys;
mod binary;
mod code_cache;
mod file_system;
mod node;
mod run;
/// Callback invoked when an unstable API is used without its flag:
/// logs the required `--unstable-*` flag and exits with code 70.
pub(crate) fn unstable_exit_cb(feature: &str, api_name: &str) {
  log::error!(
    "Unstable API '{api_name}'. The `--unstable-{}` flag must be provided.",
    feature
  );
  deno_runtime::exit(70);
}
/// Logs `message` as a red "error:" line and terminates the process with
/// `code`. Never returns.
fn exit_with_message(message: &str, code: i32) -> ! {
  log::error!(
    "{}: {}",
    colors::red_bold("error"),
    // avoid a doubled "error:" prefix when the message already has one
    message.trim_start_matches("error: ")
  );
  deno_runtime::exit(code);
}
/// Unwraps `result`, exiting the process with code 1 on error. JS errors
/// are pretty-printed; other errors use their Debug representation.
fn unwrap_or_exit<T>(result: Result<T, AnyError>) -> T {
  match result {
    Ok(value) => value,
    Err(error) => {
      let error_string = match js_error_downcast_ref(&error) {
        Some(js_error) => format_js_error(js_error, None),
        None => format!("{:?}", error),
      };
      exit_with_message(&error_string, 1);
    }
  }
}
/// Applies environment variables bundled from a `.env` file, without
/// overriding variables already set (with valid unicode) in the process
/// environment.
fn load_env_vars(env_vars: &IndexMap<String, String>) {
  for (key, value) in env_vars {
    if env::var(key).is_ok() {
      continue;
    }
    #[allow(clippy::undocumented_unsafe_blocks)]
    unsafe {
      std::env::set_var(key, value)
    };
  }
}
fn main() {
  // Bootstrap logging with defaults; re-initialized below once the
  // embedded metadata (log level, otel config) is available.
  init_logging(None, None);
  deno_runtime::deno_permissions::mark_standalone();
  // Install the TLS crypto provider before any TLS use.
  rustls::crypto::aws_lc_rs::default_provider()
    .install_default()
    .unwrap();
  let args: Vec<_> = env::args_os().collect();
  // Extract the embedded bundle synchronously, before entering the
  // async runtime.
  let standalone = extract_standalone(Cow::Owned(args));
  let future = async move {
    match standalone {
      Ok(data) => {
        deno_runtime::deno_telemetry::init(
          otel_runtime_config(),
          data.metadata.otel_config.clone(),
        )?;
        init_logging(
          data.metadata.log_level,
          Some(data.metadata.otel_config.clone()),
        );
        load_env_vars(&data.metadata.env_vars_from_env_file);
        let sys = DenoRtSys::new(data.vfs.clone());
        let exit_code = run::run(Arc::new(sys.clone()), sys, data).await?;
        deno_runtime::exit(exit_code);
      }
      Err(err) => Err(err),
    }
  };
  // Any error (including a missing data section) terminates the process
  // with a formatted message.
  unwrap_or_exit::<()>(create_and_run_current_thread_with_maybe_metrics(
    future,
  ));
}
/// Initializes the shared logger with an optional level override and
/// optional OpenTelemetry configuration.
fn init_logging(
  maybe_level: Option<log::Level>,
  otel_config: Option<OtelConfig>,
) {
  deno_lib::util::logger::init(deno_lib::util::logger::InitLoggingOptions {
    maybe_level,
    otel_config,
    on_log_start: || {},
    on_log_end: || {},
  })
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/lib/lib.rs | cli/lib/lib.rs | // Copyright 2018-2025 the Deno authors. MIT license.
pub mod args;
pub mod loader;
pub mod npm;
pub mod shared;
pub mod standalone;
pub mod sys;
pub mod util;
pub mod version;
pub mod worker;
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/lib/version.rs | cli/lib/version.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::borrow::Cow;
use deno_runtime::deno_telemetry::OtelRuntimeConfig;
use crate::shared::ReleaseChannel;
/// Builds the OpenTelemetry runtime identification ("deno" plus the
/// current version string) reported with telemetry.
pub fn otel_runtime_config() -> OtelRuntimeConfig {
  OtelRuntimeConfig {
    runtime_name: Cow::Borrowed("deno"),
    runtime_version: Cow::Borrowed(crate::version::DENO_VERSION_INFO.deno),
  }
}
// Full git hash baked in at build time.
const GIT_COMMIT_HASH: &str = env!("GIT_COMMIT_HASH");
// Bundled TypeScript compiler version.
const TYPESCRIPT: &str = "5.9.2";
/// The Deno semver baked in at build time.
pub const DENO_VERSION: &str = env!("DENO_VERSION");
// TODO(bartlomieju): ideally we could remove this const.
const IS_CANARY: bool = option_env!("DENO_CANARY").is_some();
// TODO(bartlomieju): this is temporary, to allow Homebrew to cut RC releases as well
const IS_RC: bool = option_env!("DENO_RC").is_some();
/// Lazily-computed version information for the running binary. The
/// release channel is read from the `denover` binary section when
/// available, falling back to compile-time flags.
pub static DENO_VERSION_INFO: std::sync::LazyLock<DenoVersionInfo> =
  std::sync::LazyLock::new(|| {
    // NOTE(review): section lookup is skipped for debug x86_64 macOS
    // builds — presumably libsui misbehaves there; TODO confirm.
    #[cfg(not(all(
      debug_assertions,
      target_os = "macos",
      target_arch = "x86_64"
    )))]
    let release_channel = libsui::find_section("denover")
      .ok()
      .flatten()
      .and_then(|buf| std::str::from_utf8(buf).ok())
      .and_then(|str_| ReleaseChannel::deserialize(str_).ok())
      .unwrap_or({
        if IS_CANARY {
          ReleaseChannel::Canary
        } else if IS_RC {
          ReleaseChannel::Rc
        } else {
          ReleaseChannel::Stable
        }
      });
    // Fallback path: compile-time flags only (no section lookup).
    #[cfg(all(debug_assertions, target_os = "macos", target_arch = "x86_64"))]
    let release_channel = if IS_CANARY {
      ReleaseChannel::Canary
    } else if IS_RC {
      ReleaseChannel::Rc
    } else {
      ReleaseChannel::Stable
    };
    DenoVersionInfo {
      // Canary builds append the short commit hash to the semver.
      deno: if release_channel == ReleaseChannel::Canary {
        concat!(env!("DENO_VERSION"), "+", env!("GIT_COMMIT_HASH_SHORT"))
      } else {
        env!("DENO_VERSION")
      },
      release_channel,
      git_hash: GIT_COMMIT_HASH,
      // Keep in sync with `deno` field.
      user_agent: if release_channel == ReleaseChannel::Canary {
        concat!(
          "Deno/",
          env!("DENO_VERSION"),
          "+",
          env!("GIT_COMMIT_HASH_SHORT")
        )
      } else {
        concat!("Deno/", env!("DENO_VERSION"))
      },
      typescript: TYPESCRIPT,
    }
  });
/// Version details of the current Deno binary, computed once at startup.
pub struct DenoVersionInfo {
  /// Human-readable version of the current Deno binary.
  ///
  /// For stable release, a semver, eg. `v1.46.2`.
  /// For canary release, a semver + 7-char git hash, eg. `v1.46.3+asdfqwq`.
  pub deno: &'static str,
  /// Channel this binary was released on (stable, rc, canary, ...).
  pub release_channel: ReleaseChannel,
  /// A full git hash.
  pub git_hash: &'static str,
  /// A user-agent header that will be used in HTTP client.
  pub user_agent: &'static str,
  /// Version of the bundled TypeScript compiler.
  pub typescript: &'static str,
}
impl DenoVersionInfo {
  /// For stable release, a semver like, eg. `v1.46.2`.
  /// For canary release a full git hash, eg. `9bdab6fb6b93eb43b1930f40987fa4997287f9c8`.
  pub fn version_or_git_hash(&self) -> &'static str {
    match self.release_channel {
      ReleaseChannel::Canary => self.git_hash,
      _ => DENO_VERSION,
    }
  }
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/lib/args.rs | cli/lib/args.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::io::BufReader;
use std::io::Cursor;
use std::path::PathBuf;
use base64::prelude::BASE64_STANDARD;
use base64::prelude::Engine;
use deno_npm::resolution::PackageIdNotFoundError;
use deno_npm::resolution::ValidSerializedNpmResolutionSnapshot;
use deno_npm_installer::process_state::NpmProcessState;
use deno_npm_installer::process_state::NpmProcessStateFromEnvVarSys;
use deno_npm_installer::process_state::NpmProcessStateKind;
use deno_runtime::UNSTABLE_ENV_VAR_NAMES;
use deno_runtime::colors;
use deno_runtime::deno_tls::deno_native_certs::load_native_certs;
use deno_runtime::deno_tls::rustls;
use deno_runtime::deno_tls::rustls::RootCertStore;
use deno_runtime::deno_tls::rustls_pemfile;
use deno_runtime::deno_tls::webpki_roots;
use deno_semver::npm::NpmPackageReqReference;
use serde::Deserialize;
use serde::Serialize;
use thiserror::Error;
/// Resolves the binary command name for an npm package reference.
///
/// Prefers the explicit sub path (e.g. the `b` in `npm:a/b`); otherwise
/// falls back to the package name itself.
pub fn npm_pkg_req_ref_to_binary_command(
  req_ref: &NpmPackageReqReference,
) -> &str {
  match req_ref.sub_path() {
    Some(sub_path) => sub_path,
    None => &req_ref.req().name,
  }
}
/// Whether permission-access stack traces were requested via
/// the `DENO_TRACE_PERMISSIONS=1` environment variable.
pub fn has_trace_permissions_enabled() -> bool {
  const TRACE_PERMISSIONS_VAR: &str = "DENO_TRACE_PERMISSIONS";
  has_flag_env_var(TRACE_PERMISSIONS_VAR)
}
/// Returns `true` only when the environment variable `name` is set to
/// exactly `"1"`. An unset variable, or any other value (including `"true"`),
/// counts as disabled.
pub fn has_flag_env_var(name: &str) -> bool {
  std::env::var_os(name).is_some_and(|value| value == "1")
}
/// Source of certificate-authority data supplied by the user
/// (via `--cert` or the `DENO_CERT` env var).
#[derive(Clone, Debug, Eq, PartialEq)]
pub enum CaData {
  /// The string is a file path
  File(String),
  /// The string holds the actual certificate
  Bytes(Vec<u8>),
}
impl CaData {
  /// Parses user-supplied CA input. A `base64:` prefix marks inline
  /// base64-encoded certificate bytes; anything else is treated as a
  /// file path. Returns `None` only when the base64 payload is invalid.
  pub fn parse(input: String) -> Option<Self> {
    match input.strip_prefix("base64:") {
      Some(encoded) => BASE64_STANDARD.decode(encoded).ok().map(CaData::Bytes),
      None => Some(CaData::File(input)),
    }
  }
}
/// Errors that can occur while building the root certificate store
/// in [`get_root_cert_store`].
#[derive(Error, Debug, Clone, deno_error::JsError)]
#[class(generic)]
pub enum RootCertStoreLoadError {
  /// A store name other than `system` or `mozilla` was requested.
  #[error(
    "Unknown certificate store \"{0}\" specified (allowed: \"system,mozilla\")"
  )]
  UnknownStore(String),
  /// The provided PEM data could not be parsed/added.
  #[error("Unable to add pem file to certificate store: {0}")]
  FailedAddPemFile(String),
  /// The CA file path could not be opened.
  #[error("Failed opening CA file: {0}")]
  CaFileOpenError(String),
  /// The OS-native certificate store could not be read.
  #[error("Failed to load platform certificates: {0}")]
  FailedNativeCerts(String),
}
/// Create and populate a root cert store based on the passed options and
/// environment.
///
/// Store selection: explicit `maybe_ca_stores`, else the comma-separated
/// `DENO_TLS_CA_STORE` env var, else just `"mozilla"`. Extra CA data:
/// explicit `maybe_ca_data`, else the `DENO_CERT` env var.
/// `maybe_root_path`, when set, is used to resolve a relative CA file path.
///
/// # Errors
/// Returns a [`RootCertStoreLoadError`] when an unknown store name is given,
/// native certs cannot be loaded, the CA file cannot be opened, or the PEM
/// data cannot be parsed.
pub fn get_root_cert_store(
  maybe_root_path: Option<PathBuf>,
  maybe_ca_stores: Option<Vec<String>>,
  maybe_ca_data: Option<CaData>,
) -> Result<RootCertStore, RootCertStoreLoadError> {
  let mut root_cert_store = RootCertStore::empty();
  // Explicit argument wins; otherwise fall back to the env var, splitting on
  // commas and dropping empty entries; otherwise default to "mozilla".
  let ca_stores: Vec<String> = maybe_ca_stores
    .or_else(|| {
      let env_ca_store = std::env::var("DENO_TLS_CA_STORE").ok()?;
      Some(
        env_ca_store
          .split(',')
          .map(|s| s.trim().to_string())
          .filter(|s| !s.is_empty())
          .collect(),
      )
    })
    .unwrap_or_else(|| vec!["mozilla".to_string()]);
  for store in ca_stores.iter() {
    match store.as_str() {
      "mozilla" => {
        // Bundled webpki (Mozilla) roots.
        root_cert_store.extend(webpki_roots::TLS_SERVER_ROOTS.to_vec());
      }
      "system" => {
        // OS-native certificate store. Individual certificates that fail to
        // parse are logged (with a hex dump for diagnosis) but do not abort
        // loading — best-effort by design.
        let roots = load_native_certs().map_err(|err| {
          RootCertStoreLoadError::FailedNativeCerts(err.to_string())
        })?;
        for root in roots {
          if let Err(err) = root_cert_store
            .add(rustls::pki_types::CertificateDer::from(root.0.clone()))
          {
            log::error!(
              "{}",
              colors::yellow(&format!(
                "Unable to add system certificate to certificate store: {:?}",
                err
              ))
            );
            let hex_encoded_root = faster_hex::hex_string(&root.0);
            log::error!("{}", colors::gray(&hex_encoded_root));
          }
        }
      }
      _ => {
        return Err(RootCertStoreLoadError::UnknownStore(store.clone()));
      }
    }
  }
  // Additional user-provided CA certificates, appended after the store roots.
  let ca_data = maybe_ca_data
    .or_else(|| std::env::var("DENO_CERT").ok().and_then(CaData::parse));
  if let Some(ca_data) = ca_data {
    let result = match ca_data {
      CaData::File(ca_file) => {
        // Relative paths are resolved against the provided root path.
        let ca_file = if let Some(root) = &maybe_root_path {
          root.join(&ca_file)
        } else {
          PathBuf::from(ca_file)
        };
        let certfile = std::fs::File::open(ca_file).map_err(|err| {
          RootCertStoreLoadError::CaFileOpenError(err.to_string())
        })?;
        let mut reader = BufReader::new(certfile);
        rustls_pemfile::certs(&mut reader).collect::<Result<Vec<_>, _>>()
      }
      CaData::Bytes(data) => {
        let mut reader = BufReader::new(Cursor::new(data));
        rustls_pemfile::certs(&mut reader).collect::<Result<Vec<_>, _>>()
      }
    };
    match result {
      Ok(certs) => {
        root_cert_store.add_parsable_certificates(certs);
      }
      Err(e) => {
        return Err(RootCertStoreLoadError::FailedAddPemFile(e.to_string()));
      }
    }
  }
  Ok(root_cert_store)
}
/// Reads (once per process) the npm resolution state handed down from a
/// parent Deno process via an environment variable, memoizing the result.
///
/// Returns `None` when the env var is absent/empty or the state fails to
/// deserialize (the failure is logged).
pub fn npm_process_state(
  sys: &impl NpmProcessStateFromEnvVarSys,
) -> Option<&'static NpmProcessState> {
  // Memoized for the lifetime of the process; the initializer runs at most
  // once even across threads.
  static NPM_PROCESS_STATE: std::sync::OnceLock<Option<NpmProcessState>> =
    std::sync::OnceLock::new();
  NPM_PROCESS_STATE
    .get_or_init(|| {
      use deno_runtime::deno_process::NPM_RESOLUTION_STATE_FD_ENV_VAR_NAME;
      let fd_or_path = std::env::var_os(NPM_RESOLUTION_STATE_FD_ENV_VAR_NAME)?;
      // The env var is consumed here so it is not observed again later in
      // this process — NOTE(review): presumably also to keep child processes
      // from inheriting it; confirm against spawn sites.
      #[allow(clippy::undocumented_unsafe_blocks)]
      unsafe {
        std::env::remove_var(NPM_RESOLUTION_STATE_FD_ENV_VAR_NAME)
      };
      if fd_or_path.is_empty() {
        return None;
      }
      NpmProcessState::from_env_var(sys, fd_or_path)
        .inspect_err(|e| {
          log::error!("failed to resolve npm process state: {}", e);
        })
        .ok()
    })
    .as_ref()
}
/// Extracts and validates the serialized npm resolution snapshot from the
/// inherited process state, if one was passed down from a parent process.
///
/// # Errors
/// Returns [`PackageIdNotFoundError`] when the snapshot fails validation.
pub fn resolve_npm_resolution_snapshot(
  sys: &impl NpmProcessStateFromEnvVarSys,
) -> Result<Option<ValidSerializedNpmResolutionSnapshot>, PackageIdNotFoundError>
{
  match npm_process_state(sys).map(|s| &s.kind) {
    Some(NpmProcessStateKind::Snapshot(snapshot)) => {
      // TODO(bartlomieju): remove this clone
      Ok(Some(snapshot.clone().into_valid()?))
    }
    _ => Ok(None),
  }
}
/// Collected `--unstable-*` flag state for the current invocation.
#[derive(Clone, Default, Debug, Eq, PartialEq, Serialize, Deserialize)]
pub struct UnstableConfig {
  // TODO(bartlomieju): remove in Deno 2.5
  pub legacy_flag_enabled: bool, // --unstable
  pub bare_node_builtins: bool,
  pub detect_cjs: bool,
  pub lazy_dynamic_imports: bool,
  pub raw_imports: bool,
  pub sloppy_imports: bool,
  pub npm_lazy_caching: bool,
  pub tsgo: bool,
  pub features: Vec<String>, // --unstable-kv --unstable-cron
}
impl UnstableConfig {
  /// Turns on any unstable flags whose corresponding environment variable is
  /// set to `"1"`. Flags already enabled (e.g. via CLI) are left untouched.
  pub fn fill_with_env(&mut self) {
    // Each entry pairs a config field with the env var that can force it on.
    // Disjoint field borrows keep the borrow checker happy here.
    let flag_vars: [(&mut bool, &str); 6] = [
      (
        &mut self.bare_node_builtins,
        UNSTABLE_ENV_VAR_NAMES.bare_node_builtins,
      ),
      (
        &mut self.lazy_dynamic_imports,
        UNSTABLE_ENV_VAR_NAMES.lazy_dynamic_imports,
      ),
      (
        &mut self.npm_lazy_caching,
        UNSTABLE_ENV_VAR_NAMES.npm_lazy_caching,
      ),
      (&mut self.tsgo, UNSTABLE_ENV_VAR_NAMES.tsgo),
      (&mut self.raw_imports, UNSTABLE_ENV_VAR_NAMES.raw_imports),
      (&mut self.sloppy_imports, UNSTABLE_ENV_VAR_NAMES.sloppy_imports),
    ];
    for (flag, var_name) in flag_vars {
      if !*flag && has_flag_env_var(var_name) {
        *flag = true;
      }
    }
  }

  /// Enables the set of unstable features implied by Node.js compatibility
  /// mode, adding the `node-globals` feature exactly once.
  pub fn enable_node_compat(&mut self) {
    self.bare_node_builtins = true;
    self.sloppy_imports = true;
    self.detect_cjs = true;
    if self.features.iter().all(|f| f != "node-globals") {
      self.features.push("node-globals".to_string());
    }
  }
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/lib/build.rs | cli/lib/build.rs | // Copyright 2018-2025 the Deno authors. MIT license.
/// Build script: embeds the Deno version (from `version.txt`) and the current
/// git commit hash (full + 7-char short form) as compile-time env vars.
fn main() {
  // todo(dsherret): remove this after Deno 242.0 is published and then
  // align the version of this crate with Deno then. We need to wait because
  // there was previously a deno_lib 2.4.0 published (https://crates.io/crates/deno_lib/versions)
  let version_path = std::path::Path::new(".").join("version.txt");
  println!("cargo:rerun-if-changed={}", version_path.display());
  #[allow(clippy::disallowed_methods)]
  let version_text = std::fs::read_to_string(version_path).unwrap();
  println!("cargo:rustc-env=DENO_VERSION={}", version_text);

  let hash = git_commit_hash();
  println!("cargo:rustc-env=GIT_COMMIT_HASH={}", hash);
  println!("cargo:rerun-if-env-changed=GIT_COMMIT_HASH");
  // "UNKNOWN" is exactly 7 chars, so this slice is always in bounds.
  println!("cargo:rustc-env=GIT_COMMIT_HASH_SHORT={}", &hash[..7]);
}
/// Returns the full 40-character git commit hash of HEAD, or `"UNKNOWN"`
/// when it cannot be determined (no `git` binary, not a git repository, or
/// unexpected output).
///
/// The return value is guaranteed to be either exactly 40 chars or the
/// 7-char `"UNKNOWN"`, so `main`'s `&hash[..7]` slice can never panic.
fn git_commit_hash() -> String {
  if let Ok(output) = std::process::Command::new("git")
    .arg("rev-list")
    .arg("-1")
    .arg("HEAD")
    .output()
  {
    if output.status.success() {
      // Previously this did `&output.stdout[..40]` + `unwrap()`, which
      // panics on short or non-UTF-8 output; use checked access instead.
      output
        .stdout
        .get(..40)
        .and_then(|bytes| std::str::from_utf8(bytes).ok())
        .map(|hash| hash.to_string())
        .unwrap_or_else(|| "UNKNOWN".to_string())
    } else {
      // When not in git repository
      // (e.g. when the user install by `cargo install deno`)
      "UNKNOWN".to_string()
    }
  } else {
    // When there is no git command for some reason
    "UNKNOWN".to_string()
  }
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/lib/worker.rs | cli/lib/worker.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::path::Path;
use std::path::PathBuf;
use std::rc::Rc;
use std::sync::Arc;
use deno_bundle_runtime::BundleProvider;
use deno_core::error::JsError;
use deno_node::NodeRequireLoaderRc;
use deno_node::ops::ipc::ChildIpcSerialization;
use deno_path_util::url_from_file_path;
use deno_path_util::url_to_file_path;
use deno_resolver::npm::DenoInNpmPackageChecker;
use deno_resolver::npm::NpmResolver;
use deno_runtime::BootstrapOptions;
use deno_runtime::FeatureChecker;
use deno_runtime::UNSTABLE_FEATURES;
use deno_runtime::WorkerExecutionMode;
use deno_runtime::WorkerLogLevel;
use deno_runtime::colors;
use deno_runtime::deno_core;
use deno_runtime::deno_core::CompiledWasmModuleStore;
use deno_runtime::deno_core::Extension;
use deno_runtime::deno_core::JsRuntime;
use deno_runtime::deno_core::LocalInspectorSession;
use deno_runtime::deno_core::ModuleLoader;
use deno_runtime::deno_core::SharedArrayBufferStore;
use deno_runtime::deno_core::error::CoreError;
use deno_runtime::deno_core::v8;
use deno_runtime::deno_fs;
use deno_runtime::deno_napi::DenoRtNativeAddonLoaderRc;
use deno_runtime::deno_node::NodeExtInitServices;
use deno_runtime::deno_node::NodeRequireLoader;
use deno_runtime::deno_node::NodeResolver;
use deno_runtime::deno_permissions::PermissionsContainer;
use deno_runtime::deno_process::NpmProcessStateProviderRc;
use deno_runtime::deno_telemetry::OtelConfig;
use deno_runtime::deno_tls::RootCertStoreProvider;
use deno_runtime::deno_web::BlobStore;
use deno_runtime::deno_web::InMemoryBroadcastChannel;
use deno_runtime::fmt_errors::format_js_error;
use deno_runtime::inspector_server::InspectorServer;
use deno_runtime::inspector_server::MainInspectorSessionChannel;
use deno_runtime::ops::worker_host::CreateWebWorkerCb;
use deno_runtime::web_worker::WebWorker;
use deno_runtime::web_worker::WebWorkerOptions;
use deno_runtime::web_worker::WebWorkerServiceOptions;
use deno_runtime::worker::MainWorker;
use deno_runtime::worker::WorkerOptions;
use deno_runtime::worker::WorkerServiceOptions;
use node_resolver::UrlOrPath;
use node_resolver::errors::ResolvePkgJsonBinExportError;
use url::Url;
use crate::args::has_trace_permissions_enabled;
use crate::sys::DenoLibSys;
use crate::util::checksum;
/// The pair of loaders produced by a [`ModuleLoaderFactory`] for one worker:
/// an ES module loader plus the matching Node `require` loader.
pub struct CreateModuleLoaderResult {
  pub module_loader: Rc<dyn ModuleLoader>,
  pub node_require_loader: Rc<dyn NodeRequireLoader>,
}
/// Factory for per-worker module loaders.
///
/// Implementations create loader pairs for the main worker and for web
/// workers (the latter scoped by the worker's permission set).
pub trait ModuleLoaderFactory: Send + Sync {
  /// Creates the loaders for the main worker using its root permissions.
  fn create_for_main(
    &self,
    root_permissions: PermissionsContainer,
  ) -> CreateModuleLoaderResult;
  /// Creates the loaders for a web worker, given both the spawning worker's
  /// permissions and the new worker's own permissions.
  fn create_for_worker(
    &self,
    parent_permissions: PermissionsContainer,
    permissions: PermissionsContainer,
  ) -> CreateModuleLoaderResult;
}
/// How a storage key (used to namespace origin storage) is derived.
enum StorageKeyResolverStrategy {
  /// A fixed key was decided up-front (possibly none at all).
  Specified(Option<String>),
  /// Derive the key from the main module's URL at resolution time.
  UseMainModule,
}

/// Resolves the storage key for a given main module (see
/// [`StorageKeyResolver::resolve_storage_key`]).
pub struct StorageKeyResolver(StorageKeyResolverStrategy);
impl StorageKeyResolver {
  /// Builds a resolver from an explicit `--location` URL.
  ///
  /// The ASCII serialization of the location's origin becomes the storage
  /// key — except for opaque origins, which are not reproducible across
  /// runs and therefore yield no storage key.
  pub fn from_flag(location: &Url) -> Self {
    let origin = location.origin();
    let maybe_key = origin.is_tuple().then(|| origin.ascii_serialization());
    Self(StorageKeyResolverStrategy::Specified(maybe_key))
  }

  /// Uses the config file's URL string as the storage key.
  pub fn from_config_file_url(url: &Url) -> Self {
    Self(StorageKeyResolverStrategy::Specified(Some(url.to_string())))
  }

  /// Derives the storage key from the main module at resolution time.
  pub fn new_use_main_module() -> Self {
    Self(StorageKeyResolverStrategy::UseMainModule)
  }

  /// Creates a storage key resolver that will always resolve to being empty.
  pub fn empty() -> Self {
    Self(StorageKeyResolverStrategy::Specified(None))
  }

  /// Resolves the storage key to use based on the current flags, config, or main module.
  pub fn resolve_storage_key(&self, main_module: &Url) -> Option<String> {
    match &self.0 {
      // Use the value fixed at construction time...
      StorageKeyResolverStrategy::Specified(value) => value.clone(),
      // ...or fall back to the main module's URL.
      StorageKeyResolverStrategy::UseMainModule => {
        Some(main_module.to_string())
      }
    }
  }
}
/// Directory used for the Cache API's backing storage.
///
/// Lives under the OS temp dir so storage size never needs to be managed by
/// Deno itself.
pub fn get_cache_storage_dir() -> PathBuf {
  // ok because this won't ever be used by the js runtime
  #[allow(clippy::disallowed_methods)]
  let temp_root = std::env::temp_dir();
  temp_root.join("deno_cache")
}
/// By default V8 uses 1.4Gb heap limit which is meant for browser tabs.
/// Instead probe for the total memory on the system and use it instead
/// as a default. In case the platform is Linux and `DENO_USE_CGROUPS` is set,
/// parse cgroup config to get the cgroup-constrained memory limit.
///
/// Returns `None` when the memory limit cannot be determined, in which case
/// V8's built-in default applies.
pub fn create_isolate_create_params<TSys: DenoLibSys>(
  // This is used only in Linux to get cgroup-constrained memory limit.
  #[allow(unused_variables)] sys: &TSys,
) -> Option<v8::CreateParams> {
  #[cfg(any(target_os = "android", target_os = "linux"))]
  {
    // Linux/Android: may be cgroup-constrained (see `mod linux`).
    linux::get_memory_limit(sys).map(|memory_limit| {
      v8::CreateParams::default()
        .heap_limits_from_system_memory(memory_limit, 0)
    })
  }
  #[cfg(not(any(target_os = "android", target_os = "linux")))]
  {
    // Elsewhere: size the heap from total physical memory.
    let maybe_mem_info = deno_runtime::deno_os::sys_info::mem_info();
    maybe_mem_info.map(|mem_info| {
      v8::CreateParams::default()
        .heap_limits_from_system_memory(mem_info.total, 0)
    })
  }
}
#[cfg(any(target_os = "android", target_os = "linux"))]
/// Linux/Android-only helpers for determining the cgroup-constrained memory
/// limit of the current process.
mod linux {
  /// Get memory limit with cgroup (either v1 or v2) taken into account.
  ///
  /// Falls back to total system memory whenever `DENO_USE_CGROUPS` is unset
  /// or any step of the cgroup lookup fails.
  pub(super) fn get_memory_limit<TSys: crate::sys::DenoLibSys>(
    sys: &TSys,
  ) -> Option<u64> {
    let system_total_memory = deno_runtime::deno_os::sys_info::mem_info()
      .map(|mem_info| mem_info.total);
    // For performance, parse cgroup config only when DENO_USE_CGROUPS is set
    if std::env::var("DENO_USE_CGROUPS").is_err() {
      return system_total_memory;
    }
    let Ok(self_cgroup) = sys.fs_read_to_string("/proc/self/cgroup") else {
      return system_total_memory;
    };
    let limit = match parse_self_cgroup(&self_cgroup) {
      // cgroup v1: memory controller lives under /sys/fs/cgroup/memory.
      CgroupVersion::V1 { cgroup_relpath } => {
        let limit_path = std::path::Path::new("/sys/fs/cgroup/memory")
          .join(cgroup_relpath)
          .join("memory.limit_in_bytes");
        sys
          .fs_read_to_string(limit_path)
          .ok()
          .and_then(|s| s.trim().parse::<u64>().ok())
      }
      // cgroup v2: unified hierarchy, limit file is "memory.max".
      // Note: a value of "max" (unlimited) fails the u64 parse and falls
      // through to the system total below.
      CgroupVersion::V2 { cgroup_relpath } => {
        let limit_path = std::path::Path::new("/sys/fs/cgroup")
          .join(cgroup_relpath)
          .join("memory.max");
        sys
          .fs_read_to_string(limit_path)
          .ok()
          .and_then(|s| s.trim().parse::<u64>().ok())
      }
      CgroupVersion::None => system_total_memory,
    };
    limit.or(system_total_memory)
  }

  /// Which cgroup hierarchy manages this process's memory, and the
  /// process's relative path inside that hierarchy.
  enum CgroupVersion<'a> {
    V1 { cgroup_relpath: &'a str },
    V2 { cgroup_relpath: &'a str },
    None,
  }

  fn parse_self_cgroup(self_cgroup_content: &str) -> CgroupVersion<'_> {
    // Initialize the cgroup version as None. This will be updated based on the parsed lines.
    let mut cgroup_version = CgroupVersion::None;
    // Iterate through each line in the cgroup content. Each line represents a cgroup entry.
    for line in self_cgroup_content.lines() {
      // Split the line into parts using ":" as the delimiter. The format is typically:
      // "<hierarchy_id>:<subsystems>:<cgroup_path>"
      let split = line.split(":").collect::<Vec<_>>();
      match &split[..] {
        // If the line specifies "memory" as the subsystem, it indicates cgroup v1 is used
        // for memory management. Extract the relative path and update the cgroup version.
        [_, "memory", cgroup_v1_relpath] => {
          cgroup_version = CgroupVersion::V1 {
            cgroup_relpath: cgroup_v1_relpath
              .strip_prefix("/")
              .unwrap_or(cgroup_v1_relpath),
          };
          // Break early since v1 explicitly manages memory, and no further checks are needed.
          break;
        }
        // If the line starts with "0::", it indicates cgroup v2 is used. However, in hybrid
        // mode, memory might still be managed by v1. Continue checking other lines to confirm.
        ["0", "", cgroup_v2_relpath] => {
          cgroup_version = CgroupVersion::V2 {
            cgroup_relpath: cgroup_v2_relpath
              .strip_prefix("/")
              .unwrap_or(cgroup_v2_relpath),
          };
        }
        _ => {}
      }
    }
    cgroup_version
  }

  #[test]
  fn test_parse_self_cgroup_v2() {
    let self_cgroup = "0::/user.slice/user-1000.slice/session-3.scope";
    let cgroup_version = parse_self_cgroup(self_cgroup);
    assert!(matches!(
      cgroup_version,
      CgroupVersion::V2 { cgroup_relpath } if cgroup_relpath == "user.slice/user-1000.slice/session-3.scope"
    ));
  }

  #[test]
  fn test_parse_self_cgroup_hybrid() {
    // Hybrid mode: both a v2 entry ("0::") and a v1 memory controller are
    // present — the v1 memory controller must win.
    let self_cgroup = r#"12:rdma:/
11:blkio:/user.slice
10:devices:/user.slice
9:cpu,cpuacct:/user.slice
8:pids:/user.slice/user-1000.slice/session-3.scope
7:memory:/user.slice/user-1000.slice/session-3.scope
6:perf_event:/
5:freezer:/
4:net_cls,net_prio:/
3:hugetlb:/
2:cpuset:/
1:name=systemd:/user.slice/user-1000.slice/session-3.scope
0::/user.slice/user-1000.slice/session-3.scope
"#;
    let cgroup_version = parse_self_cgroup(self_cgroup);
    assert!(matches!(
      cgroup_version,
      CgroupVersion::V1 { cgroup_relpath } if cgroup_relpath == "user.slice/user-1000.slice/session-3.scope"
    ));
  }

  #[test]
  fn test_parse_self_cgroup_v1() {
    let self_cgroup = r#"11:hugetlb:/
10:pids:/user.slice/user-1000.slice
9:perf_event:/
8:devices:/user.slice
7:net_cls,net_prio:/
6:memory:/
5:blkio:/
4:cpuset:/
3:cpu,cpuacct:/
2:freezer:/
1:name=systemd:/user.slice/user-1000.slice/session-2.scope
"#;
    let cgroup_version = parse_self_cgroup(self_cgroup);
    assert!(matches!(
      cgroup_version,
      CgroupVersion::V1 { cgroup_relpath } if cgroup_relpath.is_empty()
    ));
  }
}
/// Errors from resolving an npm package's binary entrypoint
/// (see [`LibMainWorkerFactory::resolve_npm_binary_entrypoint`]).
#[derive(Debug, thiserror::Error, deno_error::JsError)]
pub enum ResolveNpmBinaryEntrypointError {
  #[class(inherit)]
  #[error(transparent)]
  PathToUrl(#[from] deno_path_util::PathToUrlError),
  /// The bin export could not be resolved and no fallback applied.
  #[class(inherit)]
  #[error(transparent)]
  ResolvePkgJsonBinExport(ResolvePkgJsonBinExportError),
  /// Both the bin-export resolution and the regular-resolution fallback
  /// failed; both errors are reported.
  #[class(generic)]
  #[error("{original:#}\n\nFallback failed: {fallback:#}")]
  Fallback {
    fallback: ResolveNpmBinaryEntrypointFallbackError,
    original: ResolvePkgJsonBinExportError,
  },
}
/// Errors from the regular-node-resolution fallback used when a package has
/// no matching bin export.
#[derive(Debug, thiserror::Error, deno_error::JsError)]
pub enum ResolveNpmBinaryEntrypointFallbackError {
  #[class(inherit)]
  #[error(transparent)]
  PackageSubpathResolve(
    node_resolver::errors::PackageSubpathFromDenoModuleResolveError,
  ),
  /// Resolution produced a path that does not exist on disk.
  #[class(generic)]
  #[error("Cannot find module '{0}'")]
  ModuleNotFound(UrlOrPath),
}
/// Invocation-wide options used when constructing main workers and web
/// workers. Most fields are copied into [`BootstrapOptions`] / worker
/// options at worker-creation time.
pub struct LibMainWorkerOptions {
  /// Script arguments exposed to the runtime (`Deno.args`).
  pub argv: Vec<String>,
  pub log_level: WorkerLogLevel,
  pub enable_op_summary_metrics: bool,
  pub enable_raw_imports: bool,
  pub enable_testing_features: bool,
  pub has_node_modules_dir: bool,
  /// Break on the first statement when the inspector attaches.
  pub inspect_brk: bool,
  /// Wait for an inspector session before executing.
  pub inspect_wait: bool,
  pub trace_ops: Option<Vec<String>>,
  pub is_inspecting: bool,
  /// If this is a `deno compile`-ed executable.
  pub is_standalone: bool,
  // If the runtime should try to use `export default { fetch }`
  pub auto_serve: bool,
  pub location: Option<Url>,
  pub argv0: Option<String>,
  pub node_debug: Option<String>,
  pub otel_config: OtelConfig,
  /// Base folder for per-origin storage; must be set whenever the storage
  /// key resolver can return a key (see `create_custom_worker`).
  pub origin_data_folder_path: Option<PathBuf>,
  /// Seed for deterministic `Math.random()` etc., when provided.
  pub seed: Option<u64>,
  pub unsafely_ignore_certificate_errors: Option<Vec<String>>,
  pub skip_op_registration: bool,
  pub node_ipc_init: Option<(i64, ChildIpcSerialization)>,
  pub no_legacy_abort: bool,
  pub startup_snapshot: Option<&'static [u8]>,
  pub serve_port: Option<u16>,
  pub serve_host: Option<String>,
  /// Initial working directory used when formatting JS errors.
  pub maybe_initial_cwd: Option<Url>,
}
/// Stores shared across a worker-factory tree so that web workers can
/// exchange compiled WASM modules and SharedArrayBuffers with their parent.
#[derive(Default, Clone)]
pub struct LibWorkerFactoryRoots {
  pub compiled_wasm_module_store: CompiledWasmModuleStore,
  pub shared_array_buffer_store: SharedArrayBufferStore,
}
/// Services and configuration shared (behind an `Arc`) between the main
/// worker and every web worker it spawns, so worker-creation callbacks can
/// be cheaply cloned.
struct LibWorkerFactorySharedState<TSys: DenoLibSys> {
  blob_store: Arc<BlobStore>,
  broadcast_channel: InMemoryBroadcastChannel,
  code_cache: Option<Arc<dyn deno_runtime::code_cache::CodeCache>>,
  compiled_wasm_module_store: CompiledWasmModuleStore,
  deno_rt_native_addon_loader: Option<DenoRtNativeAddonLoaderRc>,
  feature_checker: Arc<FeatureChecker>,
  fs: Arc<dyn deno_fs::FileSystem>,
  maybe_coverage_dir: Option<PathBuf>,
  maybe_inspector_server: Option<Arc<InspectorServer>>,
  // Channel used to hand the main worker's inspector session sender to
  // web workers for debugging (set in `create_custom_worker`).
  main_inspector_session_tx: MainInspectorSessionChannel,
  module_loader_factory: Box<dyn ModuleLoaderFactory>,
  node_resolver:
    Arc<NodeResolver<DenoInNpmPackageChecker, NpmResolver<TSys>, TSys>>,
  npm_process_state_provider: NpmProcessStateProviderRc,
  pkg_json_resolver: Arc<node_resolver::PackageJsonResolver<TSys>>,
  root_cert_store_provider: Arc<dyn RootCertStoreProvider>,
  shared_array_buffer_store: SharedArrayBufferStore,
  storage_key_resolver: StorageKeyResolver,
  sys: TSys,
  options: LibMainWorkerOptions,
  bundle_provider: Option<Arc<dyn BundleProvider>>,
}
impl<TSys: DenoLibSys> LibWorkerFactorySharedState<TSys> {
  /// Collects the ids of all unstable features currently enabled in the
  /// given checker, for passing to the runtime bootstrap.
  fn resolve_unstable_features(
    &self,
    feature_checker: &FeatureChecker,
  ) -> Vec<i32> {
    let mut unstable_features = Vec::with_capacity(UNSTABLE_FEATURES.len());
    for feature in UNSTABLE_FEATURES {
      if feature_checker.check(feature.name) {
        unstable_features.push(feature.id);
      }
    }
    unstable_features
  }

  /// Bundles the Node.js-compat services (resolvers + require loader) for
  /// initializing the `deno_node` extension in a worker.
  fn create_node_init_services(
    &self,
    node_require_loader: NodeRequireLoaderRc,
  ) -> NodeExtInitServices<DenoInNpmPackageChecker, NpmResolver<TSys>, TSys> {
    NodeExtInitServices {
      node_require_loader,
      node_resolver: self.node_resolver.clone(),
      pkg_json_resolver: self.pkg_json_resolver.clone(),
      sys: self.sys.clone(),
    }
  }

  /// Builds the callback used to spawn web workers. The callback captures a
  /// clone of this shared state and recursively produces another callback of
  /// the same kind, so nested workers inherit the same services.
  fn create_web_worker_callback(
    self: &Arc<Self>,
    stdio: deno_runtime::deno_io::Stdio,
  ) -> Arc<CreateWebWorkerCb> {
    let shared = self.clone();
    Arc::new(move |args| {
      let maybe_inspector_server = shared.maybe_inspector_server.clone();
      // Loaders scoped to this worker's (and its parent's) permissions.
      let CreateModuleLoaderResult {
        module_loader,
        node_require_loader,
      } = shared.module_loader_factory.create_for_worker(
        args.parent_permissions.clone(),
        args.permissions.clone(),
      );
      // Recursive: workers spawned by this worker go through the same path.
      let create_web_worker_cb =
        shared.create_web_worker_callback(stdio.clone());
      let maybe_storage_key = shared
        .storage_key_resolver
        .resolve_storage_key(&args.main_module);
      let cache_storage_dir = maybe_storage_key.map(|key| {
        // TODO(@satyarohith): storage quota management
        get_cache_storage_dir().join(checksum::r#gen(&[key.as_bytes()]))
      });
      // TODO(bartlomieju): this is cruft, update FeatureChecker to spit out
      // list of enabled features.
      let feature_checker = shared.feature_checker.clone();
      let unstable_features =
        shared.resolve_unstable_features(feature_checker.as_ref());
      let services = WebWorkerServiceOptions {
        deno_rt_native_addon_loader: shared.deno_rt_native_addon_loader.clone(),
        root_cert_store_provider: Some(shared.root_cert_store_provider.clone()),
        module_loader,
        fs: shared.fs.clone(),
        node_services: Some(
          shared.create_node_init_services(node_require_loader),
        ),
        blob_store: shared.blob_store.clone(),
        broadcast_channel: shared.broadcast_channel.clone(),
        shared_array_buffer_store: Some(
          shared.shared_array_buffer_store.clone(),
        ),
        compiled_wasm_module_store: Some(
          shared.compiled_wasm_module_store.clone(),
        ),
        maybe_inspector_server,
        main_inspector_session_tx: shared.main_inspector_session_tx.clone(),
        feature_checker,
        npm_process_state_provider: Some(
          shared.npm_process_state_provider.clone(),
        ),
        permissions: args.permissions,
        bundle_provider: shared.bundle_provider.clone(),
      };
      let maybe_initial_cwd = shared.options.maybe_initial_cwd.clone();
      let options = WebWorkerOptions {
        name: args.name,
        main_module: args.main_module.clone(),
        worker_id: args.worker_id,
        bootstrap: BootstrapOptions {
          deno_version: crate::version::DENO_VERSION_INFO.deno.to_string(),
          args: shared.options.argv.clone(),
          cpu_count: std::thread::available_parallelism()
            .map(|p| p.get())
            .unwrap_or(1),
          log_level: shared.options.log_level,
          enable_op_summary_metrics: shared.options.enable_op_summary_metrics,
          enable_testing_features: shared.options.enable_testing_features,
          locale: deno_core::v8::icu::get_language_tag(),
          // Workers use their own main module as location, unlike the main
          // worker which uses the configured `--location`.
          location: Some(args.main_module),
          color_level: colors::get_color_level(),
          unstable_features,
          user_agent: crate::version::DENO_VERSION_INFO.user_agent.to_string(),
          inspect: shared.options.is_inspecting,
          is_standalone: shared.options.is_standalone,
          auto_serve: shared.options.auto_serve,
          has_node_modules_dir: shared.options.has_node_modules_dir,
          argv0: shared.options.argv0.clone(),
          node_debug: shared.options.node_debug.clone(),
          // IPC is only wired up for the main worker, never web workers.
          node_ipc_init: None,
          mode: WorkerExecutionMode::Worker,
          serve_port: shared.options.serve_port,
          serve_host: shared.options.serve_host.clone(),
          otel_config: shared.options.otel_config.clone(),
          no_legacy_abort: shared.options.no_legacy_abort,
          close_on_idle: args.close_on_idle,
        },
        extensions: vec![],
        startup_snapshot: shared.options.startup_snapshot,
        create_params: create_isolate_create_params(&shared.sys),
        unsafely_ignore_certificate_errors: shared
          .options
          .unsafely_ignore_certificate_errors
          .clone(),
        seed: shared.options.seed,
        create_web_worker_cb,
        format_js_error_fn: Some(Arc::new(move |a| {
          format_js_error(a, maybe_initial_cwd.as_ref())
        })),
        worker_type: args.worker_type,
        stdio: stdio.clone(),
        cache_storage_dir,
        trace_ops: shared.options.trace_ops.clone(),
        close_on_idle: args.close_on_idle,
        maybe_worker_metadata: args.maybe_worker_metadata,
        maybe_coverage_dir: shared.maybe_coverage_dir.clone(),
        enable_raw_imports: shared.options.enable_raw_imports,
        enable_stack_trace_arg_in_ops: has_trace_permissions_enabled(),
      };
      WebWorker::bootstrap_from_options(services, options)
    })
  }
}
/// Factory for constructing main workers (and, indirectly, their web
/// workers) from a shared bundle of services.
pub struct LibMainWorkerFactory<TSys: DenoLibSys> {
  shared: Arc<LibWorkerFactorySharedState<TSys>>,
}
impl<TSys: DenoLibSys> LibMainWorkerFactory<TSys> {
  /// Assembles the shared state used by all workers created through this
  /// factory. `roots` carries the cross-worker WASM/SharedArrayBuffer
  /// stores; everything else is service wiring.
  #[allow(clippy::too_many_arguments)]
  pub fn new(
    blob_store: Arc<BlobStore>,
    code_cache: Option<Arc<dyn deno_runtime::code_cache::CodeCache>>,
    deno_rt_native_addon_loader: Option<DenoRtNativeAddonLoaderRc>,
    feature_checker: Arc<FeatureChecker>,
    fs: Arc<dyn deno_fs::FileSystem>,
    maybe_coverage_dir: Option<PathBuf>,
    maybe_inspector_server: Option<Arc<InspectorServer>>,
    module_loader_factory: Box<dyn ModuleLoaderFactory>,
    node_resolver: Arc<
      NodeResolver<DenoInNpmPackageChecker, NpmResolver<TSys>, TSys>,
    >,
    npm_process_state_provider: NpmProcessStateProviderRc,
    pkg_json_resolver: Arc<node_resolver::PackageJsonResolver<TSys>>,
    root_cert_store_provider: Arc<dyn RootCertStoreProvider>,
    storage_key_resolver: StorageKeyResolver,
    sys: TSys,
    options: LibMainWorkerOptions,
    roots: LibWorkerFactoryRoots,
    bundle_provider: Option<Arc<dyn BundleProvider>>,
  ) -> Self {
    Self {
      shared: Arc::new(LibWorkerFactorySharedState {
        blob_store,
        broadcast_channel: Default::default(),
        code_cache,
        compiled_wasm_module_store: roots.compiled_wasm_module_store,
        deno_rt_native_addon_loader,
        feature_checker,
        fs,
        maybe_coverage_dir,
        maybe_inspector_server,
        main_inspector_session_tx: MainInspectorSessionChannel::new(),
        module_loader_factory,
        node_resolver,
        npm_process_state_provider,
        pkg_json_resolver,
        root_cert_store_provider,
        shared_array_buffer_store: roots.shared_array_buffer_store,
        storage_key_resolver,
        sys,
        options,
        bundle_provider,
      }),
    }
  }

  /// Convenience wrapper around [`Self::create_custom_worker`] with no extra
  /// extensions, default stdio, and no preconfigured runtime.
  #[allow(clippy::result_large_err)]
  pub fn create_main_worker(
    &self,
    mode: WorkerExecutionMode,
    permissions: PermissionsContainer,
    main_module: Url,
    preload_modules: Vec<Url>,
    require_modules: Vec<Url>,
  ) -> Result<LibMainWorker, CoreError> {
    self.create_custom_worker(
      mode,
      main_module,
      preload_modules,
      require_modules,
      permissions,
      vec![],
      Default::default(),
      None,
    )
  }

  /// Creates and bootstraps the main worker: builds the module loaders,
  /// resolves storage/cache directories from the storage key, assembles
  /// services and bootstrap options, and wires up the inspector channel for
  /// web-worker debugging.
  #[allow(clippy::result_large_err)]
  #[allow(clippy::too_many_arguments)]
  pub fn create_custom_worker(
    &self,
    mode: WorkerExecutionMode,
    main_module: Url,
    preload_modules: Vec<Url>,
    require_modules: Vec<Url>,
    permissions: PermissionsContainer,
    custom_extensions: Vec<Extension>,
    stdio: deno_runtime::deno_io::Stdio,
    unconfigured_runtime: Option<deno_runtime::UnconfiguredRuntime>,
  ) -> Result<LibMainWorker, CoreError> {
    let shared = &self.shared;
    let CreateModuleLoaderResult {
      module_loader,
      node_require_loader,
    } = shared
      .module_loader_factory
      .create_for_main(permissions.clone());
    // TODO(bartlomieju): this is cruft, update FeatureChecker to spit out
    // list of enabled features.
    let feature_checker = shared.feature_checker.clone();
    let unstable_features =
      shared.resolve_unstable_features(feature_checker.as_ref());
    let maybe_storage_key = shared
      .storage_key_resolver
      .resolve_storage_key(&main_module);
    // Per-origin storage (e.g. localStorage) lives under the configured
    // origin data folder, keyed by a checksum of the storage key.
    let origin_storage_dir: Option<PathBuf> =
      maybe_storage_key.as_ref().map(|key| {
        shared
          .options
          .origin_data_folder_path
          .as_ref()
          .unwrap() // must be set if storage key resolver returns a value
          .join(checksum::r#gen(&[key.as_bytes()]))
      });
    let cache_storage_dir = maybe_storage_key.map(|key| {
      // TODO(@satyarohith): storage quota management
      get_cache_storage_dir().join(checksum::r#gen(&[key.as_bytes()]))
    });
    let services = WorkerServiceOptions {
      deno_rt_native_addon_loader: shared.deno_rt_native_addon_loader.clone(),
      root_cert_store_provider: Some(shared.root_cert_store_provider.clone()),
      module_loader,
      fs: shared.fs.clone(),
      node_services: Some(
        shared.create_node_init_services(node_require_loader),
      ),
      npm_process_state_provider: Some(
        shared.npm_process_state_provider.clone(),
      ),
      blob_store: shared.blob_store.clone(),
      broadcast_channel: shared.broadcast_channel.clone(),
      fetch_dns_resolver: Default::default(),
      shared_array_buffer_store: Some(shared.shared_array_buffer_store.clone()),
      compiled_wasm_module_store: Some(
        shared.compiled_wasm_module_store.clone(),
      ),
      feature_checker,
      permissions,
      v8_code_cache: shared.code_cache.clone(),
      bundle_provider: shared.bundle_provider.clone(),
    };
    let maybe_initial_cwd = shared.options.maybe_initial_cwd.clone();
    let options = WorkerOptions {
      bootstrap: BootstrapOptions {
        deno_version: crate::version::DENO_VERSION_INFO.deno.to_string(),
        args: shared.options.argv.clone(),
        cpu_count: std::thread::available_parallelism()
          .map(|p| p.get())
          .unwrap_or(1),
        log_level: shared.options.log_level,
        enable_op_summary_metrics: shared.options.enable_op_summary_metrics,
        enable_testing_features: shared.options.enable_testing_features,
        locale: deno_core::v8::icu::get_language_tag(),
        location: shared.options.location.clone(),
        color_level: colors::get_color_level(),
        unstable_features,
        user_agent: crate::version::DENO_VERSION_INFO.user_agent.to_string(),
        inspect: shared.options.is_inspecting,
        is_standalone: shared.options.is_standalone,
        auto_serve: shared.options.auto_serve,
        has_node_modules_dir: shared.options.has_node_modules_dir,
        argv0: shared.options.argv0.clone(),
        node_debug: shared.options.node_debug.clone(),
        node_ipc_init: shared.options.node_ipc_init,
        mode,
        no_legacy_abort: shared.options.no_legacy_abort,
        serve_port: shared.options.serve_port,
        serve_host: shared.options.serve_host.clone(),
        otel_config: shared.options.otel_config.clone(),
        close_on_idle: true,
      },
      extensions: custom_extensions,
      startup_snapshot: shared.options.startup_snapshot,
      create_params: create_isolate_create_params(&shared.sys),
      unsafely_ignore_certificate_errors: shared
        .options
        .unsafely_ignore_certificate_errors
        .clone(),
      seed: shared.options.seed,
      format_js_error_fn: Some(Arc::new(move |e| {
        format_js_error(e, maybe_initial_cwd.as_ref())
      })),
      create_web_worker_cb: shared.create_web_worker_callback(stdio.clone()),
      maybe_inspector_server: shared.maybe_inspector_server.clone(),
      should_break_on_first_statement: shared.options.inspect_brk,
      should_wait_for_inspector_session: shared.options.inspect_wait,
      trace_ops: shared.options.trace_ops.clone(),
      cache_storage_dir,
      origin_storage_dir,
      stdio,
      skip_op_registration: shared.options.skip_op_registration,
      enable_raw_imports: shared.options.enable_raw_imports,
      enable_stack_trace_arg_in_ops: has_trace_permissions_enabled(),
      unconfigured_runtime,
    };
    let mut worker =
      MainWorker::bootstrap_from_options(&main_module, services, options);
    worker.setup_memory_trim_handler();
    // Store the main inspector session sender for worker debugging
    let inspector = worker.js_runtime.inspector();
    let session_tx = inspector.get_session_sender();
    shared.main_inspector_session_tx.set(session_tx);
    Ok(LibMainWorker {
      main_module,
      preload_modules,
      require_modules,
      worker,
    })
  }

  /// Resolves the file URL to execute for an npm package binary, first via
  /// the package's bin exports, then (for explicit sub paths) via regular
  /// node resolution as a fallback.
  #[allow(clippy::result_large_err)]
  pub fn resolve_npm_binary_entrypoint(
    &self,
    package_folder: &Path,
    sub_path: Option<&str>,
  ) -> Result<Url, ResolveNpmBinaryEntrypointError> {
    match self
      .shared
      .node_resolver
      .resolve_binary_export(package_folder, sub_path)
    {
      Ok(bin_value) => Ok(url_from_file_path(bin_value.path())?),
      Err(original_err) => {
        // if the binary entrypoint was not found, fallback to regular node resolution
        let result =
          self.resolve_binary_entrypoint_fallback(package_folder, sub_path);
        match result {
          Ok(Some(path)) => Ok(url_from_file_path(&path)?),
          Ok(None) => {
            // No fallback was attempted — surface the original error.
            Err(ResolveNpmBinaryEntrypointError::ResolvePkgJsonBinExport(
              original_err,
            ))
          }
          Err(fallback_err) => Err(ResolveNpmBinaryEntrypointError::Fallback {
            original: original_err,
            fallback: fallback_err,
          }),
        }
      }
    }
  }

  /// resolve the binary entrypoint using regular node resolution
  fn resolve_binary_entrypoint_fallback(
    &self,
    package_folder: &Path,
    sub_path: Option<&str>,
  ) -> Result<Option<PathBuf>, ResolveNpmBinaryEntrypointFallbackError> {
    // only fallback if the user specified a sub path
    if sub_path.is_none() {
      // it's confusing to users if the package doesn't have any binary
      // entrypoint and we just execute the main script which will likely
      // have blank output, so do not resolve the entrypoint in this case
      return Ok(None);
    }
    let specifier = self
      .shared
      .node_resolver
      .resolve_package_subpath_from_deno_module(
        package_folder,
        sub_path,
        /* referrer */ None,
        node_resolver::ResolutionMode::Import,
        node_resolver::NodeResolutionKind::Execution,
      )
      .map_err(
        ResolveNpmBinaryEntrypointFallbackError::PackageSubpathResolve,
      )?;
    let path = match specifier {
      UrlOrPath::Url(ref url) => match url_to_file_path(url) {
        Ok(path) => path,
        Err(_) => {
          // Non-file URL (cannot be executed as a binary) — report not found.
          return Err(ResolveNpmBinaryEntrypointFallbackError::ModuleNotFound(
            specifier,
          ));
        }
      },
      UrlOrPath::Path(path) => path,
    };
    // Resolution can succeed for paths that don't exist; verify on disk.
    if self.shared.sys.fs_exists_no_err(&path) {
      Ok(Some(path))
    } else {
      Err(ResolveNpmBinaryEntrypointFallbackError::ModuleNotFound(
        UrlOrPath::Path(path),
      ))
    }
  }
}
/// A configured main worker together with the module roots it should
/// execute: the main module plus any preload/require side modules.
pub struct LibMainWorker {
  main_module: Url,
  // side modules evaluated before the main module (see
  // `execute_preload_modules`)
  preload_modules: Vec<Url>,
  // side modules loaded via `--require`; forced to CJS downstream
  require_modules: Vec<Url>,
  worker: MainWorker,
}

impl LibMainWorker {
  /// Consumes the wrapper, returning the underlying `MainWorker`.
  pub fn into_main_worker(self) -> MainWorker {
    self.worker
  }

  /// The entrypoint module URL this worker was created for.
  pub fn main_module(&self) -> &Url {
    &self.main_module
  }

  pub fn js_runtime(&mut self) -> &mut JsRuntime {
    &mut self.worker.js_runtime
  }

  #[inline]
  pub fn create_inspector_session(
    &mut self,
    cb: deno_core::InspectorSessionSend,
  ) -> LocalInspectorSession {
    self.worker.create_inspector_session(cb)
  }

  // The dispatch_* methods below forward web/node lifecycle events to
  // the underlying worker.
  #[inline]
  pub fn dispatch_load_event(&mut self) -> Result<(), Box<JsError>> {
    self.worker.dispatch_load_event()
  }

  #[inline]
  pub fn dispatch_beforeunload_event(&mut self) -> Result<bool, Box<JsError>> {
    self.worker.dispatch_beforeunload_event()
  }

  #[inline]
  pub fn dispatch_process_beforeexit_event(
    &mut self,
  ) -> Result<bool, Box<JsError>> {
    self.worker.dispatch_process_beforeexit_event()
  }

  #[inline]
  pub fn dispatch_unload_event(&mut self) -> Result<(), Box<JsError>> {
    self.worker.dispatch_unload_event()
  }

  #[inline]
  pub fn dispatch_process_exit_event(&mut self) -> Result<(), Box<JsError>> {
    self.worker.dispatch_process_exit_event()
  }

  /// Loads and evaluates `main_module` as the main module.
  pub async fn execute_main_module(&mut self) -> Result<(), CoreError> {
    let id = self.worker.preload_main_module(&self.main_module).await?;
    self.worker.evaluate_module(id).await
  }

  /// Loads and evaluates `main_module` as a side module instead.
  pub async fn execute_side_module(&mut self) -> Result<(), CoreError> {
    let id = self.worker.preload_side_module(&self.main_module).await?;
    self.worker.evaluate_module(id).await
  }

  /// Evaluates all preload modules, then all require modules, running
  /// the event loop to completion after each one.
  pub async fn execute_preload_modules(&mut self) -> Result<(), CoreError> {
    for preload_module_url in self.preload_modules.iter() {
      let id = self.worker.preload_side_module(preload_module_url).await?;
      self.worker.evaluate_module(id).await?;
      self.worker.run_event_loop(false).await?;
    }
    // Even though we load as ESM here, these files will be forced to be loaded as CJS
    // because of checks in get_known_mode_with_is_script
    for require_module_url in self.require_modules.iter() {
      let id = self.worker.preload_side_module(require_module_url).await?;
      self.worker.evaluate_module(id).await?;
      self.worker.run_event_loop(false).await?;
    }
    Ok(())
  }

  /// Runs the full worker lifecycle: preload modules, main module, load
  /// event, then the event loop until both the web (`beforeunload`) and
  /// node (`beforeExit`) handlers allow exit, followed by unload/exit
  /// events. Returns the worker's exit code.
  pub async fn run(&mut self) -> Result<i32, CoreError> {
    log::debug!("main_module {}", self.main_module);
    // Run preload modules first if they were defined
    self.execute_preload_modules().await?;
    self.execute_main_module().await?;
    self.worker.dispatch_load_event()?;
    loop {
      self
        .worker
        .run_event_loop(/* wait for inspector */ false)
        .await?;
      // a beforeunload or beforeExit listener may schedule more work, in
      // which case we keep the event loop running
      let web_continue = self.worker.dispatch_beforeunload_event()?;
      if !web_continue {
        let node_continue = self.worker.dispatch_process_beforeexit_event()?;
        if !node_continue {
          break;
        }
      }
    }
    self.worker.dispatch_unload_event()?;
    self.worker.dispatch_process_exit_event()?;
    Ok(self.worker.exit_code())
  }

  #[inline]
  pub async fn run_event_loop(
    &mut self,
    wait_for_inspector: bool,
  ) -> Result<(), CoreError> {
    self.worker.run_event_loop(wait_for_inspector).await
  }

  #[inline]
  pub fn exit_code(&self) -> i32 {
    self.worker.exit_code()
  }
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn storage_key_resolver_test() {
let resolver =
StorageKeyResolver(StorageKeyResolverStrategy::UseMainModule);
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | true |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/lib/loader.rs | cli/lib/loader.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::borrow::Cow;
use deno_media_type::MediaType;
use deno_resolver::loader::LoadedModuleSource;
use deno_runtime::deno_core::FastString;
use deno_runtime::deno_core::ModuleSourceCode;
use deno_runtime::deno_core::ModuleType;
use deno_runtime::deno_core::RequestedModuleType;
/// Maps a file's media type plus the module type the importer requested
/// (e.g. via an import attribute) to the module type the runtime should
/// instantiate.
pub fn module_type_from_media_and_requested_type(
  media_type: MediaType,
  requested_module_type: &RequestedModuleType,
) -> ModuleType {
  // an explicit text/bytes request wins regardless of the media type
  if matches!(requested_module_type, RequestedModuleType::Text) {
    return ModuleType::Text;
  }
  if matches!(requested_module_type, RequestedModuleType::Bytes) {
    return ModuleType::Bytes;
  }
  // otherwise (None / Other / Json) derive it from the media type
  match media_type {
    MediaType::Json => ModuleType::Json,
    MediaType::Wasm => ModuleType::Wasm,
    _ => ModuleType::JavaScript,
  }
}
/// Converts a resolver-layer `LoadedModuleSource` into deno_core's
/// `ModuleSourceCode`, preserving whether the data is borrowed/static
/// or owned so that no copies are made.
pub fn loaded_module_source_to_module_source_code(
  loaded_module_source: LoadedModuleSource,
) -> ModuleSourceCode {
  match loaded_module_source {
    LoadedModuleSource::ArcStr(text) => ModuleSourceCode::String(text.into()),
    LoadedModuleSource::ArcBytes(bytes) => {
      ModuleSourceCode::Bytes(bytes.into())
    }
    LoadedModuleSource::String(text) => match text {
      // static text can be wrapped without allocating
      Cow::Borrowed(static_text) => {
        ModuleSourceCode::String(FastString::from_static(static_text))
      }
      Cow::Owned(text) => ModuleSourceCode::String(text.into()),
    },
    LoadedModuleSource::Bytes(bytes) => match bytes {
      Cow::Borrowed(static_bytes) => {
        ModuleSourceCode::Bytes(static_bytes.into())
      }
      Cow::Owned(bytes) => {
        ModuleSourceCode::Bytes(bytes.into_boxed_slice().into())
      }
    },
  }
}
/// Translates deno_core's `RequestedModuleType` into the equivalent
/// `deno_resolver` enum (a mechanical one-to-one mapping between the
/// two crates' types).
pub fn as_deno_resolver_requested_module_type(
  value: &RequestedModuleType,
) -> deno_resolver::loader::RequestedModuleType<'_> {
  match value {
    RequestedModuleType::None => {
      deno_resolver::loader::RequestedModuleType::None
    }
    RequestedModuleType::Json => {
      deno_resolver::loader::RequestedModuleType::Json
    }
    RequestedModuleType::Text => {
      deno_resolver::loader::RequestedModuleType::Text
    }
    RequestedModuleType::Bytes => {
      deno_resolver::loader::RequestedModuleType::Bytes
    }
    // `Other` borrows the custom type string rather than cloning it
    RequestedModuleType::Other(text) => {
      deno_resolver::loader::RequestedModuleType::Other(text)
    }
  }
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/lib/shared.rs | cli/lib/shared.rs | // Copyright 2018-2025 the Deno authors. MIT license.
/// This module is shared between build script and the binaries. Use it sparsely.
use thiserror::Error;
/// Error returned by `ReleaseChannel::deserialize` when given a name
/// that does not match any channel.
#[derive(Debug, Error)]
#[error("Unrecognized release channel: {0}")]
pub struct UnrecognizedReleaseChannelError(pub String);

/// The release channel a Deno build belongs to.
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum ReleaseChannel {
  /// Stable version, eg. 1.45.4, 2.0.0, 2.1.0
  #[allow(unused)]
  Stable,
  /// Pointing to a git hash
  #[allow(unused)]
  Canary,
  /// Long term support release
  #[allow(unused)]
  Lts,
  /// Release candidate, eg. 1.46.0-rc.0, 2.0.0-rc.1
  #[allow(unused)]
  Rc,
}

impl ReleaseChannel {
  /// Human-readable channel name for display purposes.
  #[allow(unused)]
  pub fn name(&self) -> &str {
    match self {
      Self::Stable => "stable",
      Self::Canary => "canary",
      Self::Rc => "release candidate",
      Self::Lts => "long term support",
    }
  }

  // NOTE(bartlomieju): do not ever change these values, tools like `patchver`
  // rely on them.
  /// Stable machine-readable identifier; round-trips via `deserialize`.
  #[allow(unused)]
  pub fn serialize(&self) -> String {
    match self {
      Self::Stable => "stable",
      Self::Canary => "canary",
      Self::Rc => "rc",
      Self::Lts => "lts",
    }
    .to_string()
  }

  // NOTE(bartlomieju): do not ever change these values, tools like `patchver`
  // rely on them.
  /// Inverse of `serialize`; errors on any unrecognized name.
  #[allow(unused)]
  pub fn deserialize(
    str_: &str,
  ) -> Result<Self, UnrecognizedReleaseChannelError> {
    Ok(match str_ {
      "stable" => Self::Stable,
      "canary" => Self::Canary,
      "rc" => Self::Rc,
      "lts" => Self::Lts,
      unknown => {
        return Err(UnrecognizedReleaseChannelError(unknown.to_string()));
      }
    })
  }
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/lib/sys.rs | cli/lib/sys.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use deno_node::ExtNodeSys;
use sys_traits::FsCanonicalize;
use sys_traits::FsCreateDirAll;
use sys_traits::FsMetadata;
use sys_traits::FsOpen;
use sys_traits::FsRead;
use sys_traits::FsReadDir;
use sys_traits::FsRemoveFile;
use sys_traits::FsRename;
use sys_traits::SystemRandom;
use sys_traits::ThreadSleep;
/// The system-capability bundle this crate requires from its host:
/// file-system access, sleeping, and randomness, plus whatever
/// `deno_node` needs (`ExtNodeSys`). `#[sys_traits::auto_impl]`
/// provides a blanket impl for any type satisfying the supertraits.
#[sys_traits::auto_impl]
pub trait DenoLibSys:
  FsCanonicalize
  + FsCreateDirAll
  + FsReadDir
  + FsMetadata
  + FsOpen
  + FsRemoveFile
  + FsRename
  + FsRead
  + ThreadSleep
  + SystemRandom
  + ExtNodeSys
  + Clone
  + Send
  + Sync
  + std::fmt::Debug
  + 'static
{
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/lib/util/logger.rs | cli/lib/util/logger.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::io::Write;
use std::sync::Arc;
use std::sync::OnceLock;
use arc_swap::ArcSwap;
use deno_runtime::deno_telemetry;
use deno_runtime::deno_telemetry::OtelConfig;
use deno_runtime::deno_telemetry::OtelConsoleConfig;
// The swappable state of the CLI logger: the wrapped env_logger plus
// how console output should interact with OpenTelemetry.
struct CliLoggerInner {
  otel_console_config: OtelConsoleConfig,
  logger: env_logger::Logger,
}

/// Process-wide logger wrapping `env_logger`, optionally mirroring or
/// redirecting records to OpenTelemetry, and invoking start/end hooks
/// around each record.
struct CliLogger {
  // ArcSwap lets `init` atomically replace the configuration after the
  // logger has already been registered with the `log` crate.
  inner: ArcSwap<CliLoggerInner>,
  on_log_start: fn(),
  on_log_end: fn(),
}

impl CliLogger {
  // the currently-configured maximum level filter
  pub fn filter(&self) -> log::LevelFilter {
    self.inner.load().logger.filter()
  }
}

impl log::Log for CliLogger {
  fn enabled(&self, metadata: &log::Metadata) -> bool {
    self.inner.load().logger.enabled(metadata)
  }

  fn log(&self, record: &log::Record) {
    if self.enabled(record.metadata()) {
      (self.on_log_start)();
      match self.inner.load().otel_console_config {
        // log locally only
        OtelConsoleConfig::Ignore => {
          self.inner.load().logger.log(record);
        }
        // log locally and also forward to telemetry
        OtelConsoleConfig::Capture => {
          self.inner.load().logger.log(record);
          deno_telemetry::handle_log(record);
        }
        // forward to telemetry only, suppressing local output
        OtelConsoleConfig::Replace => {
          deno_telemetry::handle_log(record);
        }
      }
      (self.on_log_end)();
    }
  }

  fn flush(&self) {
    self.inner.load().logger.flush();
  }
}
/// Options for `init`.
pub struct InitLoggingOptions {
  // hooks invoked before/after each emitted record (e.g. so a progress
  // bar can be hidden and redrawn around log output)
  pub on_log_start: fn(),
  pub on_log_end: fn(),
  // defaults to Info when None
  pub maybe_level: Option<log::Level>,
  pub otel_config: Option<OtelConfig>,
}

// Registered once via `log::set_logger`; its inner state is swapped on
// each `init` call.
static LOGGER: OnceLock<CliLogger> = OnceLock::new();
/// Installs (or re-configures) the global CLI logger.
///
/// Builds an `env_logger` honoring the `DENO_LOG` / `DENO_LOG_STYLE`
/// env vars, caps a number of noisy dependency modules, and wires the
/// result into the process-wide `CliLogger`. Safe to call more than
/// once: the first call registers the logger with `log`, later calls
/// swap in the new configuration.
pub fn init(options: InitLoggingOptions) {
  let log_level = options.maybe_level.unwrap_or(log::Level::Info);
  let logger = env_logger::Builder::from_env(
    env_logger::Env::new()
      // Use `DENO_LOG` and `DENO_LOG_STYLE` instead of `RUST_` prefix
      .filter_or("DENO_LOG", log_level.to_level_filter().to_string())
      .write_style("DENO_LOG_STYLE"),
  )
  // https://github.com/denoland/deno/issues/6641
  .filter_module("rustyline", log::LevelFilter::Off)
  // wgpu crates (gfx_backend), have a lot of useless INFO and WARN logs
  .filter_module("wgpu", log::LevelFilter::Error)
  .filter_module("gfx", log::LevelFilter::Error)
  .filter_module("globset", log::LevelFilter::Error)
  // used to make available the lsp_debug which is then filtered out at runtime
  // in the cli logger
  .filter_module("deno::lsp::performance", log::LevelFilter::Debug)
  .filter_module("rustls", log::LevelFilter::Off)
  // swc_ecma_codegen's `srcmap!` macro emits error-level spans only on debug
  // build:
  // https://github.com/swc-project/swc/blob/74d6478be1eb8cdf1df096c360c159db64b64d8a/crates/swc_ecma_codegen/src/macros.rs#L112
  // We suppress them here to avoid flooding our CI logs in integration tests.
  .filter_module("swc_ecma_codegen", log::LevelFilter::Off)
  .filter_module("swc_common::source_map", log::LevelFilter::Off)
  .filter_module("swc_ecma_transforms_optimization", log::LevelFilter::Off)
  .filter_module("swc_ecma_parser", log::LevelFilter::Error)
  .filter_module("swc_ecma_lexer", log::LevelFilter::Error)
  // Suppress span lifecycle logs since they are too verbose
  .filter_module("tracing::span", log::LevelFilter::Off)
  .filter_module("tower_lsp", log::LevelFilter::Trace)
  .filter_module("opentelemetry_sdk", log::LevelFilter::Off)
  // for deno_compile, this is too verbose
  .filter_module("editpe", log::LevelFilter::Error)
  // too verbose
  .filter_module("cranelift_codegen", log::LevelFilter::Off)
  .write_style(if deno_terminal::colors::use_color() {
    env_logger::WriteStyle::Always
  } else {
    env_logger::WriteStyle::Never
  })
  .format(|buf, record| {
    // target becomes "module_path:line" when the line number is known
    let mut target = record.target().to_string();
    if let Some(line_no) = record.line() {
      target.push(':');
      target.push_str(&line_no.to_string());
    }
    if record.level() <= log::Level::Info
      || (record.target() == "deno::lsp::performance"
        && record.level() == log::Level::Debug)
    {
      // Print ERROR, WARN, INFO and lsp_debug logs as they are
      writeln!(buf, "{}", record.args())
    } else {
      // Add prefix to DEBUG or TRACE logs
      writeln!(
        buf,
        "{} RS - {} - {}",
        record.level(),
        target,
        record.args()
      )
    }
  })
  .build();
  let otel_console_config = options
    .otel_config
    .map(|c| c.console)
    .unwrap_or(OtelConsoleConfig::Ignore);
  // First call: register with a placeholder inner logger; the real
  // configuration is swapped in just below. Later calls reuse the
  // already-registered CliLogger.
  let cli_logger = LOGGER.get_or_init(move || CliLogger {
    on_log_start: options.on_log_start,
    on_log_end: options.on_log_end,
    inner: ArcSwap::new(Arc::new(CliLoggerInner {
      logger: env_logger::Builder::new().build(),
      otel_console_config,
    })),
  });
  cli_logger.inner.swap(Arc::new(CliLoggerInner {
    logger,
    otel_console_config,
  }));
  // set_logger errors if a logger is already installed (re-init case);
  // ignore it since the swap above already applied the new config
  let _ = log::set_logger(cli_logger);
  log::set_max_level(cli_logger.filter());
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/lib/util/v8.rs | cli/lib/util/v8.rs | // Copyright 2018-2025 the Deno authors. MIT license.
/// Builds the final V8 flag list in precedence order: built-in
/// defaults, then flags from the environment, then CLI flags. The first
/// element is a dummy argv[0] that V8's flag parser requires but
/// ignores.
#[inline(always)]
pub fn construct_v8_flags(
  default_v8_flags: &[String],
  v8_flags: &[String],
  env_v8_flags: Vec<String>,
) -> Vec<String> {
  let mut flags = Vec::with_capacity(
    1 + default_v8_flags.len() + env_v8_flags.len() + v8_flags.len(),
  );
  flags.push("UNUSED_BUT_NECESSARY_ARG0".to_owned());
  flags.extend_from_slice(default_v8_flags);
  flags.extend(env_v8_flags);
  flags.extend_from_slice(v8_flags);
  flags
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/lib/util/text_encoding.rs | cli/lib/util/text_encoding.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::borrow::Cow;
use std::sync::Arc;
/// Returns `true` if `bytes` is valid UTF-8.
pub fn is_valid_utf8(bytes: &[u8]) -> bool {
  // `str::from_utf8` validates without allocating; this is clearer than
  // the previous trick of checking that `from_utf8_lossy` borrowed.
  std::str::from_utf8(bytes).is_ok()
}
// todo(https://github.com/rust-lang/rust/issues/129436): remove once stabilized
/// Lossily converts owned bytes to a `String`, reusing the original
/// allocation when the bytes are already valid UTF-8.
#[inline(always)]
pub fn from_utf8_lossy_owned(bytes: Vec<u8>) -> String {
  match String::from_utf8(bytes) {
    // already valid: the Vec's allocation is reused as-is
    Ok(text) => text,
    // invalid: fall back to a lossy copy with replacement characters
    Err(err) => String::from_utf8_lossy(err.as_bytes()).into_owned(),
  }
}

/// Lossy bytes-to-string conversion that keeps the borrowed/owned
/// distinction of the input `Cow`, allocating only when replacement
/// characters are required.
#[inline(always)]
pub fn from_utf8_lossy_cow(bytes: Cow<'_, [u8]>) -> Cow<'_, str> {
  match bytes {
    Cow::Owned(owned_bytes) => Cow::Owned(from_utf8_lossy_owned(owned_bytes)),
    Cow::Borrowed(borrowed_bytes) => String::from_utf8_lossy(borrowed_bytes),
  }
}
/// Converts an `Arc<str>` to an `Arc<[u8]>`.
///
/// Zero-cost: reinterprets the existing allocation instead of copying.
#[allow(dead_code)]
pub fn arc_str_to_bytes(arc_str: Arc<str>) -> Arc<[u8]> {
  let raw = Arc::into_raw(arc_str);
  // SAFETY: This is safe because they have the same memory layout.
  unsafe { Arc::from_raw(raw as *const [u8]) }
}
/// Converts an `Arc<[u8]>` to an `Arc<str>` if able.
///
/// Returns the UTF-8 validation error when the bytes are not valid
/// UTF-8; otherwise succeeds without copying.
#[allow(dead_code)]
pub fn arc_u8_to_arc_str(
  arc_u8: Arc<[u8]>,
) -> Result<Arc<str>, std::str::Utf8Error> {
  // Check that the string is valid UTF-8.
  std::str::from_utf8(&arc_u8)?;
  // SAFETY: the string is valid UTF-8, and the layout Arc<[u8]> is the same as
  // Arc<str>. This is proven by the From<Arc<str>> impl for Arc<[u8]> from the
  // standard library.
  Ok(unsafe {
    std::mem::transmute::<std::sync::Arc<[u8]>, std::sync::Arc<str>>(arc_u8)
  })
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/lib/util/checksum.rs | cli/lib/util/checksum.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use aws_lc_rs::digest::Context;
use aws_lc_rs::digest::SHA256;
/// Generate a SHA256 checksum of a slice of byte-slice-like things.
///
/// The inputs are hashed in order as one continuous stream, so
/// `gen(&[b"ab"])` and `gen(&[b"a", b"b"])` yield the same digest.
/// Returns the digest as a lowercase hex string.
pub fn r#gen(v: &[impl AsRef<[u8]>]) -> String {
  let mut ctx = Context::new(&SHA256);
  for src in v {
    ctx.update(src.as_ref());
  }
  faster_hex::hex_string(ctx.finish().as_ref())
}
#[cfg(test)]
mod tests {
  use super::*;

  #[test]
  fn test_gen() {
    let actual = r#gen(&[b"hello world"]);
    // the well-known SHA-256 digest of "hello world"
    assert_eq!(
      actual,
      "b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9"
    );
  }
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/lib/util/result.rs | cli/lib/util/result.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::convert::Infallible;
use deno_error::JsErrorBox;
use deno_error::JsErrorClass;
use deno_resolver::DenoResolveError;
use deno_resolver::DenoResolveErrorKind;
use deno_runtime::deno_core::error::AnyError;
use deno_runtime::deno_core::error::CoreError;
use deno_runtime::deno_core::error::CoreErrorKind;
/// Extension trait for unwrapping `Result`s whose error type can never
/// be constructed.
pub trait InfallibleResultExt<T> {
  /// Extracts the `Ok` value; statically cannot panic because
  /// `Infallible` has no values.
  fn unwrap_infallible(self) -> T;
}

impl<T> InfallibleResultExt<T> for Result<T, Infallible> {
  fn unwrap_infallible(self) -> T {
    // the closure can never run: matching the uninhabited `Infallible`
    // proves to the compiler that the error arm is unreachable
    self.unwrap_or_else(|never| match never {})
  }
}
/// Attempts to extract a JavaScript exception (`JsError`) from an
/// `AnyError`, looking through the `JsErrorBox` and `CoreError`
/// wrappers it may be nested inside.
pub fn js_error_downcast_ref(
  err: &AnyError,
) -> Option<&deno_runtime::deno_core::error::JsError> {
  any_and_jserrorbox_downcast_ref(err)
    .or_else(|| {
      // also check for a JsError carried inside a CoreError
      err
        .downcast_ref::<CoreError>()
        .and_then(|e| match e.as_kind() {
          CoreErrorKind::Js(e) => Some(e),
          _ => None,
        })
    })
    .map(|v| &**v)
}
/// Downcasts an `AnyError` to a concrete error type `E`, checking the
/// error itself and then inside `JsErrorBox` / `CoreError::JsBox`
/// wrappers.
pub fn any_and_jserrorbox_downcast_ref<
  E: std::error::Error + Send + Sync + 'static,
>(
  err: &AnyError,
) -> Option<&E> {
  err
    .downcast_ref::<E>()
    .or_else(|| {
      // E wrapped directly in a JsErrorBox
      err
        .downcast_ref::<JsErrorBox>()
        .and_then(|e| e.get_ref().downcast_ref::<E>())
    })
    .or_else(|| {
      // E wrapped in a boxed error inside a CoreError
      err
        .downcast_ref::<CoreError>()
        .and_then(|e| match e.as_kind() {
          CoreErrorKind::JsBox(e) => e.get_ref().downcast_ref::<E>(),
          _ => None,
        })
    })
}
/// Extracts a `DenoResolveErrorKind` from a `JsErrorBox`, whether it
/// was boxed as the full `DenoResolveError` or as the kind directly.
pub fn downcast_ref_deno_resolve_error(
  err: &JsErrorBox,
) -> Option<&DenoResolveErrorKind> {
  err
    .get_ref()
    .downcast_ref::<DenoResolveError>()
    .map(|e| e.as_kind())
    .or_else(|| err.get_ref().downcast_ref::<DenoResolveErrorKind>())
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/lib/util/mod.rs | cli/lib/util/mod.rs | // Copyright 2018-2025 the Deno authors. MIT license.
pub mod checksum;
pub mod hash;
pub mod logger;
pub mod result;
pub mod text_encoding;
pub mod v8;
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/lib/util/hash.rs | cli/lib/util/hash.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::hash::Hasher;
/// A very fast insecure hasher that uses the xxHash algorithm.
#[derive(Debug, Clone)]
pub struct FastInsecureHasher(twox_hash::XxHash64);

impl FastInsecureHasher {
  pub fn new_without_deno_version() -> Self {
    Self(Default::default())
  }

  /// Hasher pre-seeded with the current Deno version string so derived
  /// hashes change (and caches invalidate) across Deno upgrades.
  pub fn new_deno_versioned() -> Self {
    let mut hasher = Self::new_without_deno_version();
    hasher.write_str(crate::version::DENO_VERSION_INFO.deno);
    hasher
  }

  // The write_* methods return `&mut Self` so calls can be chained.
  pub fn write_str(&mut self, text: &str) -> &mut Self {
    self.write(text.as_bytes());
    self
  }

  pub fn write(&mut self, bytes: &[u8]) -> &mut Self {
    self.0.write(bytes);
    self
  }

  pub fn write_u8(&mut self, value: u8) -> &mut Self {
    self.0.write_u8(value);
    self
  }

  pub fn write_u64(&mut self, value: u64) -> &mut Self {
    self.0.write_u64(value);
    self
  }

  pub fn write_hashable(
    &mut self,
    hashable: impl std::hash::Hash,
  ) -> &mut Self {
    hashable.hash(&mut self.0);
    self
  }

  /// Returns the final 64-bit hash of everything written so far.
  pub fn finish(&self) -> u64 {
    self.0.finish()
  }
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/lib/npm/permission_checker.rs | cli/lib/npm/permission_checker.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::borrow::Cow;
use std::collections::HashMap;
use std::io::ErrorKind;
use std::path::Path;
use std::path::PathBuf;
use deno_error::JsErrorBox;
use deno_runtime::deno_permissions::OpenAccessKind;
use deno_runtime::deno_permissions::PermissionsContainer;
use parking_lot::Mutex;
use crate::sys::DenoLibSys;
/// Determines which npm package paths may be read without an explicit
/// read permission grant.
#[derive(Debug)]
pub enum NpmRegistryReadPermissionCheckerMode {
  // byonm: any path under a node_modules directory is allowed
  Byonm,
  // global npm cache rooted at the given directory
  Global(PathBuf),
  // local node_modules rooted at the given directory
  Local(PathBuf),
}

#[derive(Debug)]
pub struct NpmRegistryReadPermissionChecker<TSys: DenoLibSys> {
  sys: TSys,
  // canonicalization cache: requested path -> canonical path
  cache: Mutex<HashMap<PathBuf, PathBuf>>,
  mode: NpmRegistryReadPermissionCheckerMode,
}

// Wraps the io::Error from a failed canonicalization with the path
// that triggered it.
#[derive(Debug, thiserror::Error, deno_error::JsError)]
#[class(inherit)]
#[error("failed canonicalizing '{path}'")]
struct EnsureRegistryReadPermissionError {
  path: PathBuf,
  #[source]
  #[inherit]
  source: std::io::Error,
}
impl<TSys: DenoLibSys> NpmRegistryReadPermissionChecker<TSys> {
  pub fn new(sys: TSys, mode: NpmRegistryReadPermissionCheckerMode) -> Self {
    Self {
      sys,
      cache: Default::default(),
      mode,
    }
  }

  /// Checks whether `path` may be read as part of npm package handling
  /// and returns the (possibly canonicalized) path to use from then on.
  ///
  /// Paths inside the registry / node_modules directory are allowed
  /// without a permission prompt; anything else falls through to a
  /// regular read permission check.
  #[must_use = "the resolved return value to mitigate time-of-check to time-of-use issues"]
  pub fn ensure_read_permission<'a>(
    &self,
    permissions: &mut PermissionsContainer,
    path: Cow<'a, Path>,
  ) -> Result<Cow<'a, Path>, JsErrorBox> {
    if permissions.query_read_all() {
      return Ok(path); // skip permissions checks below
    }
    match &self.mode {
      NpmRegistryReadPermissionCheckerMode::Byonm => {
        // byonm: anything under a node_modules directory is allowed
        if path.components().any(|c| c.as_os_str() == "node_modules") {
          Ok(path)
        } else {
          permissions
            .check_open(path, OpenAccessKind::Read, None)
            .map(|p| p.into_path())
            .map_err(JsErrorBox::from_err)
        }
      }
      NpmRegistryReadPermissionCheckerMode::Global(registry_path)
      | NpmRegistryReadPermissionCheckerMode::Local(registry_path) => {
        // allow reading if it's in the node_modules
        // reject any `..` component so the prefix check can't be escaped
        let is_path_in_node_modules = path.starts_with(registry_path)
          && path
            .components()
            .all(|c| !matches!(c, std::path::Component::ParentDir));
        if is_path_in_node_modules {
          let mut cache = self.cache.lock();
          // canonicalize through a cache; Ok(None) means the path does
          // not exist on disk
          let mut canonicalize =
            |path: &Path| -> Result<Option<PathBuf>, JsErrorBox> {
              match cache.get(path) {
                Some(canon) => Ok(Some(canon.clone())),
                None => match self.sys.fs_canonicalize(path) {
                  Ok(canon) => {
                    cache.insert(path.to_path_buf(), canon.clone());
                    Ok(Some(canon))
                  }
                  Err(e) => {
                    if e.kind() == ErrorKind::NotFound {
                      return Ok(None);
                    }
                    Err(JsErrorBox::from_err(
                      EnsureRegistryReadPermissionError {
                        path: path.to_path_buf(),
                        source: e,
                      },
                    ))
                  }
                },
              }
            };
          if let Some(registry_path_canon) = canonicalize(registry_path)? {
            if let Some(path_canon) = canonicalize(&path)? {
              // compare canonical forms so symlinks can't escape the
              // registry directory
              if path_canon.starts_with(registry_path_canon) {
                return Ok(Cow::Owned(path_canon));
              }
            } else if path.starts_with(registry_path_canon)
              || path.starts_with(registry_path)
            {
              // the path doesn't exist yet, but is textually inside the
              // registry directory, so allow it
              return Ok(path);
            }
          }
        }
        // not provably inside the registry: do a normal permission check
        permissions
          .check_open(path, OpenAccessKind::Read, None)
          .map(|p| p.into_path())
          .map_err(JsErrorBox::from_err)
      }
    }
  }
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/lib/npm/mod.rs | cli/lib/npm/mod.rs | // Copyright 2018-2025 the Deno authors. MIT license.
mod permission_checker;
use std::sync::Arc;
use deno_npm_installer::process_state::NpmProcessState;
use deno_npm_installer::process_state::NpmProcessStateKind;
use deno_resolver::npm::ByonmNpmResolver;
use deno_resolver::npm::ManagedNpmResolverRc;
use deno_resolver::npm::NpmResolver;
use deno_runtime::deno_process::NpmProcessStateProvider;
use deno_runtime::deno_process::NpmProcessStateProviderRc;
pub use permission_checker::NpmRegistryReadPermissionChecker;
pub use permission_checker::NpmRegistryReadPermissionCheckerMode;
use crate::sys::DenoLibSys;
/// Creates the `NpmProcessStateProvider` matching the resolver kind;
/// the provider serializes npm resolution state for handing to child
/// processes.
pub fn create_npm_process_state_provider<TSys: DenoLibSys>(
  npm_resolver: &NpmResolver<TSys>,
) -> NpmProcessStateProviderRc {
  match npm_resolver {
    NpmResolver::Byonm(byonm_npm_resolver) => {
      Arc::new(ByonmNpmProcessStateProvider(byonm_npm_resolver.clone()))
    }
    NpmResolver::Managed(managed_npm_resolver) => {
      Arc::new(ManagedNpmProcessStateProvider(managed_npm_resolver.clone()))
    }
  }
}
/// Process-state provider for the managed (Deno-controlled) npm
/// resolver: serializes the resolution snapshot plus the node_modules
/// directory location.
#[derive(Debug)]
pub struct ManagedNpmProcessStateProvider<TSys: DenoLibSys>(
  pub ManagedNpmResolverRc<TSys>,
);

impl<TSys: DenoLibSys> NpmProcessStateProvider
  for ManagedNpmProcessStateProvider<TSys>
{
  fn get_npm_process_state(&self) -> String {
    NpmProcessState::new_managed(
      self.0.resolution().serialized_valid_snapshot(),
      self.0.root_node_modules_path(),
    )
    .as_serialized()
  }
}

/// Process-state provider for byonm ("bring your own node_modules"):
/// only the local node_modules path needs forwarding.
#[derive(Debug)]
pub struct ByonmNpmProcessStateProvider<TSys: DenoLibSys>(
  pub Arc<ByonmNpmResolver<TSys>>,
);

impl<TSys: DenoLibSys> NpmProcessStateProvider
  for ByonmNpmProcessStateProvider<TSys>
{
  fn get_npm_process_state(&self) -> String {
    NpmProcessState {
      kind: NpmProcessStateKind::Byonm,
      local_node_modules_path: self
        .0
        .root_node_modules_path()
        .map(|p| p.to_string_lossy().into_owned()),
    }
    .as_serialized()
  }
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/lib/standalone/mod.rs | cli/lib/standalone/mod.rs | // Copyright 2018-2025 the Deno authors. MIT license.
pub mod binary;
pub mod virtual_fs;
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/lib/standalone/binary.rs | cli/lib/standalone/binary.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::borrow::Cow;
use std::collections::BTreeMap;
use deno_media_type::MediaType;
use deno_resolver::workspace::PackageJsonDepResolution;
use deno_runtime::deno_permissions::PermissionsOptions;
use deno_runtime::deno_telemetry::OtelConfig;
use deno_semver::Version;
use indexmap::IndexMap;
use serde::Deserialize;
use serde::Serialize;
use url::Url;
use super::virtual_fs::FileSystemCaseSensitivity;
use crate::args::UnstableConfig;
// Marker bytes identifying embedded standalone-binary data.
pub const MAGIC_BYTES: &[u8; 8] = b"d3n0l4nd";

/// Deserializer for the standalone binary format: parses a value from
/// the front of `input` and returns the remaining bytes plus the value.
pub trait DenoRtDeserializable<'a>: Sized {
  fn deserialize(input: &'a [u8]) -> std::io::Result<(&'a [u8], Self)>;
}

impl<'a> DenoRtDeserializable<'a> for Cow<'a, [u8]> {
  fn deserialize(input: &'a [u8]) -> std::io::Result<(&'a [u8], Self)> {
    // encoded as a u32 length prefix followed by the raw bytes;
    // borrows from the input rather than copying
    let (input, data) = read_bytes_with_u32_len(input)?;
    Ok((input, Cow::Borrowed(data)))
  }
}

/// Serialization counterpart of `DenoRtDeserializable`.
pub trait DenoRtSerializable<'a> {
  fn serialize(
    &'a self,
    builder: &mut capacity_builder::BytesBuilder<'a, Vec<u8>>,
  );
}
/// How node_modules is provided inside a compiled binary.
#[derive(Deserialize, Serialize)]
pub enum NodeModules {
  Managed {
    /// Relative path for the node_modules directory in the vfs.
    node_modules_dir: Option<String>,
  },
  Byonm {
    root_node_modules_dir: Option<String>,
  },
}

#[derive(Deserialize, Serialize)]
pub struct SerializedWorkspaceResolverImportMap {
  pub specifier: String,
  // raw import map JSON text
  pub json: String,
}

#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct SerializedResolverWorkspaceJsrPackage {
  pub relative_base: String,
  pub name: String,
  pub version: Option<Version>,
  pub exports: IndexMap<String, String>,
}

#[derive(Deserialize, Serialize)]
pub struct SerializedWorkspaceResolver {
  pub import_map: Option<SerializedWorkspaceResolverImportMap>,
  pub jsr_pkgs: Vec<SerializedResolverWorkspaceJsrPackage>,
  // BTreeMap keeps ordering deterministic; presumably keyed by
  // package.json path — TODO confirm against the writer
  pub package_jsons: BTreeMap<String, serde_json::Value>,
  pub pkg_json_resolution: PackageJsonDepResolution,
}

// Note: Don't use hashmaps/hashsets. Ensure the serialization
// is deterministic.
/// The configuration a compiled (standalone) binary needs to
/// reconstruct its runtime state.
#[derive(Deserialize, Serialize)]
pub struct Metadata {
  pub argv: Vec<String>,
  pub seed: Option<u64>,
  pub code_cache_key: Option<u64>,
  pub permissions: PermissionsOptions,
  pub location: Option<Url>,
  pub v8_flags: Vec<String>,
  pub log_level: Option<log::Level>,
  pub ca_stores: Option<Vec<String>>,
  pub ca_data: Option<Vec<u8>>,
  pub unsafely_ignore_certificate_errors: Option<Vec<String>>,
  pub env_vars_from_env_file: IndexMap<String, String>,
  pub workspace_resolver: SerializedWorkspaceResolver,
  pub entrypoint_key: String,
  pub preload_modules: Vec<String>,
  pub require_modules: Vec<String>,
  pub node_modules: Option<NodeModules>,
  pub unstable_config: UnstableConfig,
  pub otel_config: OtelConfig,
  pub vfs_case_sensitivity: FileSystemCaseSensitivity,
}
/// Compact numeric id assigned to a specifier; used as a key in the
/// serialized data stores instead of repeating full specifier text.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub struct SpecifierId(u32);

impl SpecifierId {
  pub fn new(id: u32) -> Self {
    Self(id)
  }
}

// All three impls below encode the id as a little-endian u32.
impl<'a> capacity_builder::BytesAppendable<'a> for SpecifierId {
  fn append_to_builder<TBytes: capacity_builder::BytesType>(
    self,
    builder: &mut capacity_builder::BytesBuilder<'a, TBytes>,
  ) {
    builder.append_le(self.0);
  }
}

impl<'a> DenoRtSerializable<'a> for SpecifierId {
  fn serialize(
    &'a self,
    builder: &mut capacity_builder::BytesBuilder<'a, Vec<u8>>,
  ) {
    builder.append_le(self.0);
  }
}

impl<'a> DenoRtDeserializable<'a> for SpecifierId {
  fn deserialize(input: &'a [u8]) -> std::io::Result<(&'a [u8], Self)> {
    let (input, id) = read_u32(input)?;
    Ok((input, Self(id)))
  }
}
/// Result of analyzing a module for CommonJS exports.
#[derive(Deserialize, Serialize)]
pub enum CjsExportAnalysisEntry {
  Esm,
  // the discovered CJS export names
  Cjs(Vec<String>),
  // analysis failed; the error message is preserved
  Error(String),
}

// Bit flags recording which optional payloads follow a module entry
// (and whether its data is known-valid UTF-8).
const HAS_TRANSPILED_FLAG: u8 = 1 << 0;
const HAS_SOURCE_MAP_FLAG: u8 = 1 << 1;
const HAS_CJS_EXPORT_ANALYSIS_FLAG: u8 = 1 << 2;
const HAS_VALID_UTF8_FLAG: u8 = 1 << 3;

/// A remote module embedded in the binary: its source bytes plus
/// optional transpiled output, source map, and CJS export analysis.
pub struct RemoteModuleEntry<'a> {
  pub media_type: MediaType,
  pub is_valid_utf8: bool,
  pub data: Cow<'a, [u8]>,
  pub maybe_transpiled: Option<Cow<'a, [u8]>>,
  pub maybe_source_map: Option<Cow<'a, [u8]>>,
  pub maybe_cjs_export_analysis: Option<Cow<'a, [u8]>>,
}

impl<'a> DenoRtSerializable<'a> for RemoteModuleEntry<'a> {
  // Layout: media type byte, u32-length-prefixed data, flags byte,
  // then only those optional payloads whose flag bit is set (in the
  // order transpiled, source map, cjs analysis). Must stay in sync
  // with the `DenoRtDeserializable` impl below.
  fn serialize(
    &'a self,
    builder: &mut capacity_builder::BytesBuilder<'a, Vec<u8>>,
  ) {
    // writes a u32 length prefix + bytes only when data is present
    fn append_maybe_data<'a>(
      builder: &mut capacity_builder::BytesBuilder<'a, Vec<u8>>,
      maybe_data: Option<&'a [u8]>,
    ) {
      if let Some(data) = maybe_data {
        builder.append_le(data.len() as u32);
        builder.append(data);
      }
    }

    let mut has_data_flags = 0;
    if self.is_valid_utf8 {
      has_data_flags |= HAS_VALID_UTF8_FLAG;
    }
    if self.maybe_transpiled.is_some() {
      has_data_flags |= HAS_TRANSPILED_FLAG;
    }
    if self.maybe_source_map.is_some() {
      has_data_flags |= HAS_SOURCE_MAP_FLAG;
    }
    if self.maybe_cjs_export_analysis.is_some() {
      has_data_flags |= HAS_CJS_EXPORT_ANALYSIS_FLAG;
    }
    builder.append(serialize_media_type(self.media_type));
    builder.append_le(self.data.len() as u32);
    builder.append(self.data.as_ref());
    builder.append(has_data_flags);
    append_maybe_data(builder, self.maybe_transpiled.as_deref());
    append_maybe_data(builder, self.maybe_source_map.as_deref());
    append_maybe_data(builder, self.maybe_cjs_export_analysis.as_deref());
  }
}
impl<'a> DenoRtDeserializable<'a> for RemoteModuleEntry<'a> {
  // Mirrors the serialization layout above: media type byte, data,
  // flags byte, then only the optional payloads whose flag bit is set.
  fn deserialize(input: &'a [u8]) -> std::io::Result<(&'a [u8], Self)> {
    #[allow(clippy::type_complexity)]
    // reads a u32-length-prefixed payload only when `flag` is set
    fn deserialize_data_if_has_flag(
      input: &[u8],
      has_data_flags: u8,
      flag: u8,
    ) -> std::io::Result<(&[u8], Option<Cow<'_, [u8]>>)> {
      if has_data_flags & flag != 0 {
        let (input, bytes) = read_bytes_with_u32_len(input)?;
        Ok((input, Some(Cow::Borrowed(bytes))))
      } else {
        Ok((input, None))
      }
    }

    let (input, media_type) = MediaType::deserialize(input)?;
    let (input, data) = read_bytes_with_u32_len(input)?;
    let (input, has_data_flags) = read_u8(input)?;
    let (input, maybe_transpiled) =
      deserialize_data_if_has_flag(input, has_data_flags, HAS_TRANSPILED_FLAG)?;
    let (input, maybe_source_map) =
      deserialize_data_if_has_flag(input, has_data_flags, HAS_SOURCE_MAP_FLAG)?;
    let is_valid_utf8 = has_data_flags & HAS_VALID_UTF8_FLAG != 0;
    let (input, maybe_cjs_export_analysis) = deserialize_data_if_has_flag(
      input,
      has_data_flags,
      HAS_CJS_EXPORT_ANALYSIS_FLAG,
    )?;
    Ok((
      input,
      Self {
        media_type,
        data: Cow::Borrowed(data),
        is_valid_utf8,
        maybe_transpiled,
        maybe_source_map,
        maybe_cjs_export_analysis,
      },
    ))
  }
}
/// Maps a `MediaType` to its stable one-byte wire value. The numbering
/// must stay in sync with `MediaType::deserialize` below; changing a
/// value breaks previously serialized data.
fn serialize_media_type(media_type: MediaType) -> u8 {
  match media_type {
    MediaType::JavaScript => 0,
    MediaType::Jsx => 1,
    MediaType::Mjs => 2,
    MediaType::Cjs => 3,
    MediaType::TypeScript => 4,
    MediaType::Mts => 5,
    MediaType::Cts => 6,
    MediaType::Dts => 7,
    MediaType::Dmts => 8,
    MediaType::Dcts => 9,
    MediaType::Tsx => 10,
    MediaType::Json => 11,
    MediaType::Jsonc => 12,
    MediaType::Json5 => 13,
    MediaType::Wasm => 14,
    MediaType::Css => 15,
    MediaType::Html => 16,
    MediaType::SourceMap => 17,
    MediaType::Sql => 18,
    MediaType::Unknown => 19,
  }
}
impl<'a> DenoRtDeserializable<'a> for MediaType {
  /// Reads the one-byte media type value written by
  /// `serialize_media_type`, erroring with `InvalidData` on unknown
  /// values so corrupt input fails loudly instead of mis-typing a module.
  fn deserialize(input: &'a [u8]) -> std::io::Result<(&'a [u8], Self)> {
    let (input, value) = read_u8(input)?;
    let value = match value {
      0 => MediaType::JavaScript,
      1 => MediaType::Jsx,
      2 => MediaType::Mjs,
      3 => MediaType::Cjs,
      4 => MediaType::TypeScript,
      5 => MediaType::Mts,
      6 => MediaType::Cts,
      7 => MediaType::Dts,
      8 => MediaType::Dmts,
      9 => MediaType::Dcts,
      10 => MediaType::Tsx,
      11 => MediaType::Json,
      12 => MediaType::Jsonc,
      13 => MediaType::Json5,
      14 => MediaType::Wasm,
      15 => MediaType::Css,
      16 => MediaType::Html,
      17 => MediaType::SourceMap,
      18 => MediaType::Sql,
      19 => MediaType::Unknown,
      value => {
        return Err(std::io::Error::new(
          std::io::ErrorKind::InvalidData,
          format!("Unknown media type value: {value}"),
        ));
      }
    };
    Ok((input, value))
  }
}
/// Data stored keyed by specifier.
pub struct SpecifierDataStore<TData> {
  // IndexMap keeps insertion order, making serialization deterministic.
  data: IndexMap<SpecifierId, TData>,
}
impl<TData> Default for SpecifierDataStore<TData> {
  fn default() -> Self {
    Self {
      data: IndexMap::new(),
    }
  }
}
impl<TData> SpecifierDataStore<TData> {
  /// Creates a store preallocated for `capacity` entries.
  pub fn with_capacity(capacity: usize) -> Self {
    Self {
      data: IndexMap::with_capacity(capacity),
    }
  }
  /// Iterates over `(specifier, data)` pairs in insertion order.
  pub fn iter(&self) -> impl Iterator<Item = (SpecifierId, &TData)> {
    self.data.iter().map(|(k, v)| (*k, v))
  }
  #[allow(clippy::len_without_is_empty)]
  pub fn len(&self) -> usize {
    self.data.len()
  }
  pub fn contains(&self, specifier: SpecifierId) -> bool {
    self.data.contains_key(&specifier)
  }
  /// Inserts the value, overwriting any existing entry for the specifier.
  pub fn add(&mut self, specifier: SpecifierId, value: TData) {
    self.data.insert(specifier, value);
  }
  pub fn get(&self, specifier: SpecifierId) -> Option<&TData> {
    self.data.get(&specifier)
  }
}
impl<'a, TData> SpecifierDataStore<TData>
where
  TData: DenoRtSerializable<'a> + 'a,
{
  /// Writes a u32 entry count followed by each `(specifier, value)` pair.
  /// Must stay in sync with the `DenoRtDeserializable` impl below.
  pub fn serialize(
    &'a self,
    builder: &mut capacity_builder::BytesBuilder<'a, Vec<u8>>,
  ) {
    builder.append_le(self.len() as u32);
    for (specifier, value) in self.iter() {
      builder.append(specifier);
      value.serialize(builder);
    }
  }
}
impl<'a, TData> DenoRtDeserializable<'a> for SpecifierDataStore<TData>
where
  TData: DenoRtDeserializable<'a>,
{
  /// Reads the u32 entry count then that many `(SpecifierId, TData)`
  /// pairs, mirroring `SpecifierDataStore::serialize`.
  fn deserialize(input: &'a [u8]) -> std::io::Result<(&'a [u8], Self)> {
    let (mut input, len) = read_u32_as_usize(input)?;
    let mut data = IndexMap::with_capacity(len);
    for _ in 0..len {
      let (remaining, specifier) = SpecifierId::deserialize(input)?;
      let (remaining, value) = TData::deserialize(remaining)?;
      data.insert(specifier, value);
      input = remaining;
    }
    Ok((input, Self { data }))
  }
}
/// Reads a little-endian u32 length prefix, then that many bytes.
/// Returns `(remaining_input, bytes)`.
fn read_bytes_with_u32_len(input: &[u8]) -> std::io::Result<(&[u8], &[u8])> {
  let (rest, len) = read_u32_as_usize(input)?;
  read_bytes(rest, len)
}

/// Reads a little-endian u32 and widens it to `usize`.
fn read_u32_as_usize(input: &[u8]) -> std::io::Result<(&[u8], usize)> {
  let (rest, value) = read_u32(input)?;
  Ok((rest, value as usize))
}

/// Reads a little-endian u32 from the front of `input`.
fn read_u32(input: &[u8]) -> std::io::Result<(&[u8], u32)> {
  let (rest, bytes) = read_bytes(input, 4)?;
  // `bytes` is exactly 4 bytes long, so the conversion cannot fail.
  Ok((rest, u32::from_le_bytes(bytes.try_into().unwrap())))
}

/// Reads a single byte from the front of `input`.
fn read_u8(input: &[u8]) -> std::io::Result<(&[u8], u8)> {
  check_has_len(input, 1)?;
  // length was just verified, so a first element exists
  let (byte, rest) = input.split_first().unwrap();
  Ok((rest, *byte))
}

/// Splits off the first `len` bytes, returning `(remaining, taken)`.
fn read_bytes(input: &[u8], len: usize) -> std::io::Result<(&[u8], &[u8])> {
  check_has_len(input, len)?;
  let (taken, rest) = input.split_at(len);
  Ok((rest, taken))
}

/// Errors with `InvalidData` when fewer than `len` bytes remain.
#[inline(always)]
fn check_has_len(input: &[u8], len: usize) -> std::io::Result<()> {
  if input.len() >= len {
    Ok(())
  } else {
    Err(std::io::Error::new(
      std::io::ErrorKind::InvalidData,
      "Unexpected end of data",
    ))
  }
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/lib/standalone/virtual_fs.rs | cli/lib/standalone/virtual_fs.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::borrow::Cow;
use std::cmp::Ordering;
use std::collections::HashMap;
use std::collections::HashSet;
use std::collections::VecDeque;
use std::collections::hash_map::Entry;
use std::fmt;
use std::io::Read;
use std::path::Path;
use std::path::PathBuf;
use std::time::SystemTime;
use deno_path_util::normalize_path;
use deno_path_util::strip_unc_prefix;
use deno_runtime::colors;
use deno_runtime::deno_core::anyhow::Context;
use deno_runtime::deno_core::anyhow::bail;
use deno_runtime::deno_core::error::AnyError;
use indexmap::IndexSet;
use serde::Deserialize;
use serde::Deserializer;
use serde::Serialize;
use serde::Serializer;
use serde::de;
use serde::de::SeqAccess;
use serde::de::Visitor;
use crate::util::text_encoding::is_valid_utf8;
#[derive(Debug, PartialEq, Eq)]
pub enum WindowsSystemRootablePath {
  /// The root of the system above any drive letters.
  WindowSystemRoot,
  Path(PathBuf),
}
impl WindowsSystemRootablePath {
  /// The file system root for the target OS: the pseudo-root above drive
  /// letters on Windows, `/` everywhere else.
  pub fn root_for_current_os() -> Self {
    if cfg!(windows) {
      Self::WindowSystemRoot
    } else {
      Self::Path(PathBuf::from("/"))
    }
  }
  /// Joins a single path component onto this root.
  pub fn join(&self, name_component: &str) -> PathBuf {
    // this method doesn't handle multiple components
    debug_assert!(
      !name_component.contains('\\'),
      "Invalid component: {}",
      name_component
    );
    debug_assert!(
      !name_component.contains('/'),
      "Invalid component: {}",
      name_component
    );
    match self {
      // the first component under the pseudo-root is a windows drive letter
      Self::WindowSystemRoot => PathBuf::from(format!("{}\\", name_component)),
      Self::Path(path) => path.join(name_component),
    }
  }
}
/// Whether name lookups in the virtual file system compare
/// case-sensitively; serialized compactly as "s"/"i".
#[derive(Debug, Copy, Clone, Serialize, Deserialize)]
pub enum FileSystemCaseSensitivity {
  #[serde(rename = "s")]
  Sensitive,
  #[serde(rename = "i")]
  Insensitive,
}
/// Directory entries kept sorted by name so lookups can binary search.
#[derive(Debug, Default, Serialize, Deserialize)]
pub struct VirtualDirectoryEntries(Vec<VfsEntry>);
impl VirtualDirectoryEntries {
  /// Creates entries from a possibly-unsorted list, sorting by name so
  /// that `binary_search` works.
  pub fn new(mut entries: Vec<VfsEntry>) -> Self {
    // needs to be sorted by name
    entries.sort_by(|a, b| a.name().cmp(b.name()));
    Self(entries)
  }
  pub fn iter_mut(&mut self) -> std::slice::IterMut<'_, VfsEntry> {
    self.0.iter_mut()
  }
  pub fn iter(&self) -> std::slice::Iter<'_, VfsEntry> {
    self.0.iter()
  }
  /// Takes ownership of the entries, leaving an empty list behind.
  pub fn take_inner(&mut self) -> Vec<VfsEntry> {
    std::mem::take(&mut self.0)
  }
  pub fn is_empty(&self) -> bool {
    self.0.is_empty()
  }
  pub fn len(&self) -> usize {
    self.0.len()
  }
  /// Looks up an entry by name honoring the file system's case sensitivity.
  pub fn get_by_name(
    &self,
    name: &str,
    case_sensitivity: FileSystemCaseSensitivity,
  ) -> Option<&VfsEntry> {
    self
      .binary_search(name, case_sensitivity)
      .ok()
      .map(|index| &self.0[index])
  }
  pub fn get_mut_by_name(
    &mut self,
    name: &str,
    case_sensitivity: FileSystemCaseSensitivity,
  ) -> Option<&mut VfsEntry> {
    self
      .binary_search(name, case_sensitivity)
      .ok()
      .map(|index| &mut self.0[index])
  }
  pub fn get_mut_by_index(&mut self, index: usize) -> Option<&mut VfsEntry> {
    self.0.get_mut(index)
  }
  pub fn get_by_index(&self, index: usize) -> Option<&VfsEntry> {
    self.0.get(index)
  }
  /// Binary searches the sorted entries by name, returning `Ok(index)` on
  /// a match or `Err(insertion_index)` like `slice::binary_search`.
  pub fn binary_search(
    &self,
    name: &str,
    case_sensitivity: FileSystemCaseSensitivity,
  ) -> Result<usize, usize> {
    match case_sensitivity {
      FileSystemCaseSensitivity::Sensitive => {
        self.0.binary_search_by(|e| e.name().cmp(name))
      }
      // ASCII-only case folding: compares char-by-char, falling back to
      // byte-length ordering when one name is a prefix of the other.
      // NOTE(review): non-ASCII case differences compare as distinct —
      // assumed an intentional approximation; confirm before relying on
      // insensitive matching for non-ASCII names.
      FileSystemCaseSensitivity::Insensitive => self.0.binary_search_by(|e| {
        e.name()
          .chars()
          .zip(name.chars())
          .map(|(a, b)| a.to_ascii_lowercase().cmp(&b.to_ascii_lowercase()))
          .find(|&ord| ord != Ordering::Equal)
          .unwrap_or_else(|| e.name().len().cmp(&name.len()))
      }),
    }
  }
  /// Inserts the entry at its sorted position, replacing any same-named
  /// entry. Returns the index it was stored at.
  pub fn insert(
    &mut self,
    entry: VfsEntry,
    case_sensitivity: FileSystemCaseSensitivity,
  ) -> usize {
    match self.binary_search(entry.name(), case_sensitivity) {
      Ok(index) => {
        self.0[index] = entry;
        index
      }
      Err(insert_index) => {
        self.0.insert(insert_index, entry);
        insert_index
      }
    }
  }
  /// Modifies the existing entry named `name`, or inserts a new one when
  /// absent. Returns the entry's index.
  pub fn insert_or_modify(
    &mut self,
    name: &str,
    case_sensitivity: FileSystemCaseSensitivity,
    on_insert: impl FnOnce() -> VfsEntry,
    on_modify: impl FnOnce(&mut VfsEntry),
  ) -> usize {
    match self.binary_search(name, case_sensitivity) {
      Ok(index) => {
        on_modify(&mut self.0[index]);
        index
      }
      Err(insert_index) => {
        self.0.insert(insert_index, on_insert());
        insert_index
      }
    }
  }
  /// Removes and returns the entry at `index`; panics when out of bounds.
  pub fn remove(&mut self, index: usize) -> VfsEntry {
    self.0.remove(index)
  }
}
/// A directory node in the virtual file system.
#[derive(Debug, Serialize, Deserialize)]
pub struct VirtualDirectory {
  #[serde(rename = "n")]
  pub name: String,
  // should be sorted by name
  #[serde(rename = "e")]
  pub entries: VirtualDirectoryEntries,
}
/// Location of a payload within the concatenated file data section.
#[derive(Debug, Clone, Copy)]
pub struct OffsetWithLength {
  pub offset: u64,
  pub len: u64,
}
// serialize as an array in order to save space
impl Serialize for OffsetWithLength {
  /// Emits `[offset, len]`, mirroring the visitor in the `Deserialize`
  /// impl below.
  fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
  where
    S: Serializer,
  {
    [self.offset, self.len].serialize(serializer)
  }
}
impl<'de> Deserialize<'de> for OffsetWithLength {
  /// Deserializes from the `[offset, len]` array form produced by the
  /// `Serialize` impl above.
  fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
  where
    D: Deserializer<'de>,
  {
    struct OffsetWithLengthVisitor;
    impl<'de> Visitor<'de> for OffsetWithLengthVisitor {
      type Value = OffsetWithLength;
      fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
        formatter.write_str("an array with two elements: [offset, len]")
      }
      fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
      where
        A: SeqAccess<'de>,
      {
        // both elements are required; a short array is an error
        let offset = seq
          .next_element()?
          .ok_or_else(|| de::Error::invalid_length(0, &self))?;
        let len = seq
          .next_element()?
          .ok_or_else(|| de::Error::invalid_length(1, &self))?;
        Ok(OffsetWithLength { offset, len })
      }
    }
    deserializer.deserialize_seq(OffsetWithLengthVisitor)
  }
}
/// A file node in the virtual file system; contents live in the shared
/// data section and are referenced by offset/length.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VirtualFile {
  #[serde(rename = "n")]
  pub name: String,
  // location of the raw file bytes in the data section
  #[serde(rename = "o")]
  pub offset: OffsetWithLength,
  #[serde(default, rename = "u", skip_serializing_if = "is_false")]
  pub is_valid_utf8: bool,
  // optional derived payloads; omitted from serialization when absent
  #[serde(rename = "m", skip_serializing_if = "Option::is_none")]
  pub transpiled_offset: Option<OffsetWithLength>,
  #[serde(rename = "c", skip_serializing_if = "Option::is_none")]
  pub cjs_export_analysis_offset: Option<OffsetWithLength>,
  #[serde(rename = "s", skip_serializing_if = "Option::is_none")]
  pub source_map_offset: Option<OffsetWithLength>,
  #[serde(rename = "t", skip_serializing_if = "Option::is_none")]
  pub mtime: Option<u128>, // mtime in milliseconds
}
/// Serde helper for `skip_serializing_if`: omit fields whose value is
/// `false`.
fn is_false(value: &bool) -> bool {
  !*value
}
/// A symlink destination stored as individual path components (no root).
#[derive(Debug, Serialize, Deserialize)]
pub struct VirtualSymlinkParts(Vec<String>);
impl VirtualSymlinkParts {
  /// Builds parts from a path, dropping the root component so only the
  /// named components remain.
  pub fn from_path(path: &Path) -> Self {
    let parts = path
      .components()
      .filter(|component| !matches!(component, std::path::Component::RootDir))
      .map(|component| component.as_os_str().to_string_lossy().into_owned())
      .collect();
    Self(parts)
  }

  /// Takes ownership of the parts, leaving an empty list behind.
  pub fn take_parts(&mut self) -> Vec<String> {
    std::mem::take(&mut self.0)
  }

  pub fn parts(&self) -> &[String] {
    &self.0
  }

  pub fn set_parts(&mut self, parts: Vec<String>) {
    self.0 = parts;
  }

  /// Joins the parts with `/` for display purposes.
  pub fn display(&self) -> String {
    self.0.join("/")
  }
}
/// A symlink node in the virtual file system.
#[derive(Debug, Serialize, Deserialize)]
pub struct VirtualSymlink {
  #[serde(rename = "n")]
  pub name: String,
  // destination path components, relative to the VFS root
  #[serde(rename = "p")]
  pub dest_parts: VirtualSymlinkParts,
}
impl VirtualSymlink {
  /// Resolves the symlink destination to an absolute path under `root`.
  pub fn resolve_dest_from_root(&self, root: &Path) -> PathBuf {
    self
      .dest_parts
      .0
      .iter()
      .fold(root.to_path_buf(), |mut dest, part| {
        dest.push(part);
        dest
      })
  }
}
/// A borrowed view of a [`VfsEntry`].
#[derive(Debug, Copy, Clone)]
pub enum VfsEntryRef<'a> {
  Dir(&'a VirtualDirectory),
  File(&'a VirtualFile),
  Symlink(&'a VirtualSymlink),
}
impl VfsEntryRef<'_> {
  /// The entry's file name (its last path component).
  pub fn name(&self) -> &str {
    match self {
      Self::Dir(dir) => &dir.name,
      Self::File(file) => &file.name,
      Self::Symlink(symlink) => &symlink.name,
    }
  }
}
// todo(dsherret): we should store this more efficiently in the binary
/// An owned node in the virtual file system tree.
#[derive(Debug, Serialize, Deserialize)]
pub enum VfsEntry {
  Dir(VirtualDirectory),
  File(VirtualFile),
  Symlink(VirtualSymlink),
}
impl VfsEntry {
  /// The entry's file name (its last path component).
  pub fn name(&self) -> &str {
    match self {
      Self::Dir(dir) => &dir.name,
      Self::File(file) => &file.name,
      Self::Symlink(symlink) => &symlink.name,
    }
  }
  /// Borrows this entry as a [`VfsEntryRef`].
  pub fn as_ref(&self) -> VfsEntryRef<'_> {
    match self {
      VfsEntry::Dir(dir) => VfsEntryRef::Dir(dir),
      VfsEntry::File(file) => VfsEntryRef::File(file),
      VfsEntry::Symlink(symlink) => VfsEntryRef::Symlink(symlink),
    }
  }
}
/// Special directory name that `VfsBuilder::build` preserves when
/// collapsing single-child root directories.
pub static DENO_COMPILE_GLOBAL_NODE_MODULES_DIR_NAME: &str =
  ".deno_compile_node_modules";
/// The finished virtual file system produced by [`VfsBuilder::build`].
#[derive(Debug)]
pub struct BuiltVfs {
  pub root_path: WindowsSystemRootablePath,
  pub case_sensitivity: FileSystemCaseSensitivity,
  pub entries: VirtualDirectoryEntries,
  // deduplicated file contents, referenced by entries via offset/length
  pub files: Vec<Vec<u8>>,
}
/// Backing storage for file contents, deduplicated by content checksum.
#[derive(Debug, Default)]
struct FilesData {
  files: Vec<Vec<u8>>,
  // next offset to hand out; equals the sum of all stored file lengths
  current_offset: u64,
  // maps (checksum, byte length) -> location of previously stored content
  file_offsets: HashMap<(String, usize), OffsetWithLength>,
}
impl FilesData {
  /// Returns the bytes stored at `offset`, or `None` when the offset does
  /// not line up exactly with a stored file's start and length.
  pub fn file_bytes(&self, offset: OffsetWithLength) -> Option<&[u8]> {
    if offset.len == 0 {
      return Some(&[]);
    }
    // the debug assertions in this method should never happen
    // because it would indicate providing an offset not in the vfs
    let mut count: u64 = 0;
    for file in &self.files {
      // clippy wanted a match
      match count.cmp(&offset.offset) {
        Ordering::Equal => {
          debug_assert_eq!(offset.len, file.len() as u64);
          if offset.len == file.len() as u64 {
            return Some(file);
          } else {
            return None;
          }
        }
        Ordering::Less => {
          count += file.len() as u64;
        }
        Ordering::Greater => {
          // walked past the requested offset without landing on a file
          debug_assert!(false);
          return None;
        }
      }
    }
    debug_assert!(false);
    None
  }
  /// Stores `data`, deduplicating identical content by (checksum, length)
  /// so repeated files share one copy. Returns where the data lives.
  pub fn add_data(&mut self, data: Vec<u8>) -> OffsetWithLength {
    if data.is_empty() {
      return OffsetWithLength { offset: 0, len: 0 };
    }
    let checksum = crate::util::checksum::r#gen(&[&data]);
    match self.file_offsets.entry((checksum, data.len())) {
      Entry::Occupied(occupied_entry) => {
        // identical content already stored; reuse its offset
        let offset_and_len = *occupied_entry.get();
        debug_assert_eq!(data.len() as u64, offset_and_len.len);
        offset_and_len
      }
      Entry::Vacant(vacant_entry) => {
        let offset_and_len = OffsetWithLength {
          offset: self.current_offset,
          len: data.len() as u64,
        };
        vacant_entry.insert(offset_and_len);
        self.current_offset += offset_and_len.len;
        self.files.push(data);
        offset_and_len
      }
    }
  }
}
/// Contents and optional derived payloads for a file being added.
pub struct AddFileDataOptions {
  pub data: Vec<u8>,
  pub mtime: Option<SystemTime>,
  pub maybe_transpiled: Option<Vec<u8>>,
  pub maybe_source_map: Option<Vec<u8>>,
  pub maybe_cjs_export_analysis: Option<Vec<u8>>,
}
/// Incrementally builds a [`BuiltVfs`] from real file system contents.
#[derive(Debug)]
pub struct VfsBuilder {
  executable_root: VirtualDirectory,
  files: FilesData,
  /// The minimum root directory that should be included in the VFS.
  min_root_dir: Option<WindowsSystemRootablePath>,
  case_sensitivity: FileSystemCaseSensitivity,
  // paths skipped by all add operations
  exclude_paths: HashSet<PathBuf>,
}
impl Default for VfsBuilder {
  fn default() -> Self {
    Self::new()
  }
}
impl VfsBuilder {
/// Creates a builder with an empty root and OS-appropriate case
/// sensitivity.
pub fn new() -> Self {
  Self {
    executable_root: VirtualDirectory {
      name: "/".to_string(),
      entries: Default::default(),
    },
    files: Default::default(),
    min_root_dir: Default::default(),
    // This is not exactly correct because file systems on these OSes
    // may be case-sensitive or not based on the directory, but this
    // is a good enough approximation and limitation. In the future,
    // we may want to store this information per directory instead
    // depending on the feedback we get.
    case_sensitivity: if cfg!(windows) || cfg!(target_os = "macos") {
      FileSystemCaseSensitivity::Insensitive
    } else {
      FileSystemCaseSensitivity::Sensitive
    },
    exclude_paths: Default::default(),
  }
}
pub fn case_sensitivity(&self) -> FileSystemCaseSensitivity {
  self.case_sensitivity
}
/// Number of distinct (deduplicated) file contents stored so far.
pub fn files_len(&self) -> usize {
  self.files.files.len()
}
pub fn file_bytes(&self, offset: OffsetWithLength) -> Option<&[u8]> {
  self.files.file_bytes(offset)
}
/// Marks a path to be skipped by all subsequent add operations.
pub fn add_exclude_path(&mut self, path: PathBuf) {
  self.exclude_paths.insert(path);
}
/// Add a directory that might be the minimum root directory
/// of the VFS.
///
/// For example, say the user has a deno.json and specifies an
/// import map in a parent directory. The import map won't be
/// included in the VFS, but its base will meaning we need to
/// tell the VFS builder to include the base of the import map
/// by calling this method.
pub fn add_possible_min_root_dir(&mut self, path: &Path) {
  self.add_dir_raw(path);
  match &self.min_root_dir {
    Some(WindowsSystemRootablePath::WindowSystemRoot) => {
      // already the root dir
    }
    Some(WindowsSystemRootablePath::Path(current_path)) => {
      // shrink the min root to the common ancestor of the current
      // min root and `path`
      let mut common_components = Vec::new();
      for (a, b) in current_path.components().zip(path.components()) {
        if a != b {
          break;
        }
        common_components.push(a);
      }
      if common_components.is_empty() {
        self.min_root_dir =
          Some(WindowsSystemRootablePath::root_for_current_os());
      } else {
        self.min_root_dir = Some(WindowsSystemRootablePath::Path(
          common_components.iter().collect(),
        ));
      }
    }
    None => {
      self.min_root_dir =
        Some(WindowsSystemRootablePath::Path(path.to_path_buf()));
    }
  }
}
/// Recursively adds a directory, first resolving a symlink at `path`.
pub fn add_dir_recursive(&mut self, path: &Path) -> Result<(), AnyError> {
  let target_path = self.resolve_target_path(path)?;
  self.add_dir_recursive_not_symlink(&target_path)
}
/// Recursively adds a directory already known not to be a symlink.
fn add_dir_recursive_not_symlink(
  &mut self,
  path: &Path,
) -> Result<(), AnyError> {
  if self.exclude_paths.contains(path) {
    return Ok(());
  }
  self.add_dir_raw(path);
  // ok, building fs implementation
  #[allow(clippy::disallowed_methods)]
  let read_dir = std::fs::read_dir(path)
    .with_context(|| format!("Reading {}", path.display()))?;
  let mut dir_entries =
    read_dir.into_iter().collect::<Result<Vec<_>, _>>()?;
  dir_entries.sort_by_cached_key(|entry| entry.file_name()); // determinism
  for entry in dir_entries {
    let file_type = entry.file_type()?;
    let path = entry.path();
    self.add_path_with_file_type(&path, file_type)?;
  }
  Ok(())
}
/// Adds a file, directory, or symlink based on its on-disk metadata.
pub fn add_path(&mut self, path: &Path) -> Result<(), AnyError> {
  // ok, building fs implementation
  #[allow(clippy::disallowed_methods)]
  let file_type = path.metadata()?.file_type();
  self.add_path_with_file_type(path, file_type)
}
/// Dispatches on an already-known file type. Symlinks are resolved and
/// their targets added; unresolvable symlinks are warned about and
/// skipped rather than failing the whole build.
fn add_path_with_file_type(
  &mut self,
  path: &Path,
  file_type: std::fs::FileType,
) -> Result<(), AnyError> {
  if self.exclude_paths.contains(path) {
    return Ok(());
  }
  if file_type.is_dir() {
    self.add_dir_recursive_not_symlink(path)
  } else if file_type.is_file() {
    self.add_file_at_path_not_symlink(path)
  } else if file_type.is_symlink() {
    match self.add_symlink(path) {
      Ok(target) => match target {
        SymlinkTarget::File(target) => {
          self.add_file_at_path_not_symlink(&target)
        }
        SymlinkTarget::Dir(target) => {
          self.add_dir_recursive_not_symlink(&target)
        }
      },
      Err(err) => {
        log::warn!(
          "{} Failed resolving symlink. Ignoring.\n Path: {}\n Message: {:#}",
          colors::yellow("Warning"),
          path.display(),
          err
        );
        Ok(())
      }
    }
  } else {
    // ignore
    Ok(())
  }
}
/// Ensures the directory at `path` (and all its ancestors) exists in the
/// VFS, returning the innermost directory.
fn add_dir_raw(&mut self, path: &Path) -> &mut VirtualDirectory {
  log::debug!("Ensuring directory '{}'", path.display());
  debug_assert!(path.is_absolute());
  let mut current_dir = &mut self.executable_root;
  for component in path.components() {
    if matches!(component, std::path::Component::RootDir) {
      continue;
    }
    let name = component.as_os_str().to_string_lossy();
    let index = current_dir.entries.insert_or_modify(
      &name,
      self.case_sensitivity,
      || {
        VfsEntry::Dir(VirtualDirectory {
          name: name.to_string(),
          entries: Default::default(),
        })
      },
      |_| {
        // ignore
      },
    );
    match current_dir.entries.get_mut_by_index(index) {
      Some(VfsEntry::Dir(dir)) => {
        current_dir = dir;
      }
      // insert_or_modify above guarantees a Dir exists at this index
      _ => unreachable!(),
    };
  }
  current_dir
}
pub fn get_system_root_dir_mut(&mut self) -> &mut VirtualDirectory {
  &mut self.executable_root
}
/// Walks to the directory at `path`, returning `None` when any component
/// is missing from the VFS.
pub fn get_dir_mut(&mut self, path: &Path) -> Option<&mut VirtualDirectory> {
  debug_assert!(path.is_absolute());
  let mut current_dir = &mut self.executable_root;
  for component in path.components() {
    if matches!(component, std::path::Component::RootDir) {
      continue;
    }
    let name = component.as_os_str().to_string_lossy();
    let entry = current_dir
      .entries
      .get_mut_by_name(&name, self.case_sensitivity)?;
    match entry {
      VfsEntry::Dir(dir) => {
        current_dir = dir;
      }
      _ => unreachable!("{}", path.display()),
    };
  }
  Some(current_dir)
}
/// Reads the file at `path` from disk (following a symlink) and adds it
/// with no derived payloads.
pub fn add_file_at_path(&mut self, path: &Path) -> Result<(), AnyError> {
  if self.exclude_paths.contains(path) {
    return Ok(());
  }
  let (file_bytes, mtime) = self.read_file_bytes_and_mtime(path)?;
  self.add_file_with_data(
    path,
    AddFileDataOptions {
      data: file_bytes,
      mtime,
      maybe_cjs_export_analysis: None,
      maybe_transpiled: None,
      maybe_source_map: None,
    },
  )
}
/// Like `add_file_at_path`, but for a path already known not to be a
/// symlink.
fn add_file_at_path_not_symlink(
  &mut self,
  path: &Path,
) -> Result<(), AnyError> {
  if self.exclude_paths.contains(path) {
    return Ok(());
  }
  let (file_bytes, mtime) = self.read_file_bytes_and_mtime(path)?;
  self.add_file_with_data_raw(path, file_bytes, mtime)
}
/// Reads a file's contents and modification time from the real file
/// system.
fn read_file_bytes_and_mtime(
  &self,
  path: &Path,
) -> Result<(Vec<u8>, Option<SystemTime>), AnyError> {
  // ok, building fs implementation
  #[allow(clippy::disallowed_methods)]
  {
    let mut file = std::fs::OpenOptions::new()
      .read(true)
      .open(path)
      .with_context(|| format!("Opening {}", path.display()))?;
    let mtime = file.metadata().ok().and_then(|m| m.modified().ok());
    let mut file_bytes = Vec::new();
    file
      .read_to_end(&mut file_bytes)
      .with_context(|| format!("Reading {}", path.display()))?;
    Ok((file_bytes, mtime))
  }
}
/// Adds a file with the given contents, resolving a symlink at `path` to
/// its target first.
pub fn add_file_with_data(
  &mut self,
  path: &Path,
  options: AddFileDataOptions,
) -> Result<(), AnyError> {
  // ok, fs implementation
  #[allow(clippy::disallowed_methods)]
  let metadata = std::fs::symlink_metadata(path).with_context(|| {
    format!("Resolving target path for '{}'", path.display())
  })?;
  if metadata.is_symlink() {
    let target = self.add_symlink(path)?.into_path_buf();
    self.add_file_with_data_raw_options(&target, options)
  } else {
    self.add_file_with_data_raw_options(path, options)
  }
}
/// Adds a file at exactly `path` (no symlink resolution) with no derived
/// payloads.
pub fn add_file_with_data_raw(
  &mut self,
  path: &Path,
  data: Vec<u8>,
  mtime: Option<SystemTime>,
) -> Result<(), AnyError> {
  self.add_file_with_data_raw_options(
    path,
    AddFileDataOptions {
      data,
      mtime,
      maybe_transpiled: None,
      maybe_cjs_export_analysis: None,
      maybe_source_map: None,
    },
  )
}
/// Stores the file's data (deduplicated) and inserts or updates the
/// directory entry pointing at it.
fn add_file_with_data_raw_options(
  &mut self,
  path: &Path,
  options: AddFileDataOptions,
) -> Result<(), AnyError> {
  log::debug!("Adding file '{}'", path.display());
  let case_sensitivity = self.case_sensitivity;
  let is_valid_utf8 = is_valid_utf8(&options.data);
  let offset_and_len = self.files.add_data(options.data);
  let transpiled_offset = options
    .maybe_transpiled
    .map(|data| self.files.add_data(data));
  let source_map_offset = options
    .maybe_source_map
    .map(|data| self.files.add_data(data));
  let cjs_export_analysis_offset = options
    .maybe_cjs_export_analysis
    .map(|data| self.files.add_data(data));
  let dir = self.add_dir_raw(path.parent().unwrap());
  let name = path.file_name().unwrap().to_string_lossy();
  // stored as milliseconds since the unix epoch
  let mtime = options
    .mtime
    .and_then(|mtime| mtime.duration_since(std::time::UNIX_EPOCH).ok())
    .map(|m| m.as_millis());
  dir.entries.insert_or_modify(
    &name,
    case_sensitivity,
    || {
      VfsEntry::File(VirtualFile {
        name: name.to_string(),
        is_valid_utf8,
        offset: offset_and_len,
        transpiled_offset,
        cjs_export_analysis_offset,
        source_map_offset,
        mtime,
      })
    },
    |entry| match entry {
      VfsEntry::File(virtual_file) => {
        virtual_file.offset = offset_and_len;
        // doesn't overwrite to None
        if transpiled_offset.is_some() {
          virtual_file.transpiled_offset = transpiled_offset;
        }
        if source_map_offset.is_some() {
          virtual_file.source_map_offset = source_map_offset;
        }
        if cjs_export_analysis_offset.is_some() {
          virtual_file.cjs_export_analysis_offset =
            cjs_export_analysis_offset;
        }
        virtual_file.mtime = mtime;
      }
      VfsEntry::Dir(_) | VfsEntry::Symlink(_) => unreachable!(),
    },
  );
  Ok(())
}
/// Resolves `path` to its symlink target (registering the symlink in the
/// VFS as a side effect), or returns it unchanged when not a symlink.
fn resolve_target_path(&mut self, path: &Path) -> Result<PathBuf, AnyError> {
  // ok, fs implementation
  #[allow(clippy::disallowed_methods)]
  let metadata = std::fs::symlink_metadata(path).with_context(|| {
    format!("Resolving target path for '{}'", path.display())
  })?;
  if metadata.is_symlink() {
    Ok(self.add_symlink(path)?.into_path_buf())
  } else {
    Ok(path.to_path_buf())
  }
}
/// Adds the symlink at `path` (and any chained symlinks) to the VFS,
/// returning the final resolved target.
pub fn add_symlink(
  &mut self,
  path: &Path,
) -> Result<SymlinkTarget, AnyError> {
  self.add_symlink_inner(path, &mut IndexSet::new())
}
/// Registers the symlink at `path`, then follows chained symlinks to the
/// final target; `visited` tracks targets seen so cycles error out.
fn add_symlink_inner(
  &mut self,
  path: &Path,
  visited: &mut IndexSet<PathBuf>,
) -> Result<SymlinkTarget, AnyError> {
  log::debug!("Adding symlink '{}'", path.display());
  let target = strip_unc_prefix(
    // ok, fs implementation
    #[allow(clippy::disallowed_methods)]
    std::fs::read_link(path)
      .with_context(|| format!("Reading symlink '{}'", path.display()))?,
  );
  let case_sensitivity = self.case_sensitivity;
  // make the link target absolute relative to the symlink's directory
  let target =
    normalize_path(Cow::Owned(path.parent().unwrap().join(&target)));
  let dir = self.add_dir_raw(path.parent().unwrap());
  let name = path.file_name().unwrap().to_string_lossy();
  dir.entries.insert_or_modify(
    &name,
    case_sensitivity,
    || {
      VfsEntry::Symlink(VirtualSymlink {
        name: name.to_string(),
        dest_parts: VirtualSymlinkParts::from_path(&target),
      })
    },
    |_| {
      // ignore previously inserted
    },
  );
  // ok, fs implementation
  #[allow(clippy::disallowed_methods)]
  let target_metadata =
    std::fs::symlink_metadata(&target).with_context(|| {
      format!("Reading symlink target '{}'", target.display())
    })?;
  if target_metadata.is_symlink() {
    if !visited.insert(target.to_path_buf()) {
      // todo: probably don't error in this scenario
      bail!(
        "Circular symlink detected: {} -> {}",
        visited
          .iter()
          .map(|p| p.display().to_string())
          .collect::<Vec<_>>()
          .join(" -> "),
        target.display()
      );
    }
    self.add_symlink_inner(&target, visited)
  } else if target_metadata.is_dir() {
    Ok(SymlinkTarget::Dir(target.into_owned()))
  } else {
    Ok(SymlinkTarget::File(target.into_owned()))
  }
}
/// Adds the CJS export analysis to the provided file.
///
/// Warning: This will panic if the file wasn't properly
/// setup before calling this.
pub fn add_cjs_export_analysis(&mut self, path: &Path, data: Vec<u8>) {
  self.add_data_for_file_or_panic(path, data, |file, offset_with_length| {
    file.cjs_export_analysis_offset = Some(offset_with_length);
  })
}
/// Stores `data` and attaches its offset to the existing file entry at
/// `path` via `update_file`. Panics when the entry is missing or is not
/// a file.
fn add_data_for_file_or_panic(
  &mut self,
  path: &Path,
  data: Vec<u8>,
  update_file: impl FnOnce(&mut VirtualFile, OffsetWithLength),
) {
  let offset_with_length = self.files.add_data(data);
  let case_sensitivity = self.case_sensitivity;
  let dir = self.get_dir_mut(path.parent().unwrap()).unwrap();
  let name = path.file_name().unwrap().to_string_lossy();
  let file = dir
    .entries
    .get_mut_by_name(&name, case_sensitivity)
    .unwrap();
  match file {
    VfsEntry::File(virtual_file) => {
      update_file(virtual_file, offset_with_length);
    }
    VfsEntry::Dir(_) | VfsEntry::Symlink(_) => {
      unreachable!()
    }
  }
}
/// Iterates through all the files in the virtual file system.
pub fn iter_files(
  &self,
) -> impl Iterator<Item = (PathBuf, &VirtualFile)> + '_ {
  // breadth-first traversal starting at the OS root
  FileIterator {
    pending_dirs: VecDeque::from([(
      WindowsSystemRootablePath::root_for_current_os(),
      &self.executable_root,
    )]),
    current_dir_index: 0,
  }
}
/// Finalizes the VFS: collapses chains of single-child directories down
/// to the minimum root (preserving the special global node_modules
/// directory), rewrites symlink destinations relative to the new root,
/// and returns the built tree.
///
/// Note: the source text for this function contained two mis-decoded
/// HTML entities (`&curren` rendered as `¤`) that corrupted
/// `&current_path` into invalid tokens; both are restored here.
pub fn build(self) -> BuiltVfs {
  // drops the leading `parts` components from every symlink destination
  fn strip_prefix_from_symlinks(
    dir: &mut VirtualDirectory,
    parts: &[String],
  ) {
    for entry in dir.entries.iter_mut() {
      match entry {
        VfsEntry::Dir(dir) => {
          strip_prefix_from_symlinks(dir, parts);
        }
        VfsEntry::File(_) => {}
        VfsEntry::Symlink(symlink) => {
          let parts = symlink
            .dest_parts
            .take_parts()
            .into_iter()
            .skip(parts.len())
            .collect();
          symlink.dest_parts.set_parts(parts);
        }
      }
    }
  }
  let mut current_dir = self.executable_root;
  let mut current_path = WindowsSystemRootablePath::root_for_current_os();
  loop {
    if current_dir.entries.len() != 1 {
      break;
    }
    if self.min_root_dir.as_ref() == Some(&current_path) {
      break;
    }
    match current_dir.entries.iter().next().unwrap() {
      VfsEntry::Dir(dir) => {
        if dir.name == DENO_COMPILE_GLOBAL_NODE_MODULES_DIR_NAME {
          // special directory we want to maintain
          break;
        }
        match current_dir.entries.remove(0) {
          VfsEntry::Dir(dir) => {
            current_path =
              WindowsSystemRootablePath::Path(current_path.join(&dir.name));
            current_dir = dir;
          }
          _ => unreachable!(),
        };
      }
      VfsEntry::File(_) | VfsEntry::Symlink(_) => break,
    }
  }
  if let WindowsSystemRootablePath::Path(path) = &current_path {
    strip_prefix_from_symlinks(
      &mut current_dir,
      VirtualSymlinkParts::from_path(path).parts(),
    );
  }
  BuiltVfs {
    root_path: current_path,
    case_sensitivity: self.case_sensitivity,
    entries: current_dir.entries,
    files: self.files.files,
  }
}
}
/// Breadth-first iterator over all files in the VFS; symlinks are skipped.
struct FileIterator<'a> {
  pending_dirs: VecDeque<(WindowsSystemRootablePath, &'a VirtualDirectory)>,
  // index of the next entry to visit within the front pending directory
  current_dir_index: usize,
}
impl<'a> Iterator for FileIterator<'a> {
  type Item = (PathBuf, &'a VirtualFile);
  /// Yields `(path, file)` pairs, queuing subdirectories for later
  /// breadth-first traversal and ignoring symlinks.
  fn next(&mut self) -> Option<Self::Item> {
    while !self.pending_dirs.is_empty() {
      let (dir_path, current_dir) = self.pending_dirs.front()?;
      if let Some(entry) =
        current_dir.entries.get_by_index(self.current_dir_index)
      {
        self.current_dir_index += 1;
        match entry {
          VfsEntry::Dir(virtual_directory) => {
            // queue the subdirectory; it gets visited after this level
            self.pending_dirs.push_back((
              WindowsSystemRootablePath::Path(
                dir_path.join(&virtual_directory.name),
              ),
              virtual_directory,
            ));
          }
          VfsEntry::File(virtual_file) => {
            return Some((dir_path.join(&virtual_file.name), virtual_file));
          }
          VfsEntry::Symlink(_) => {
            // ignore
          }
        }
      } else {
        // finished this directory; move on to the next queued one
        self.pending_dirs.pop_front();
        self.current_dir_index = 0;
      }
    }
    None
  }
}
/// The resolved target of a symlink and whether it is a file or a
/// directory.
#[derive(Debug)]
pub enum SymlinkTarget {
  File(PathBuf),
  Dir(PathBuf),
}
impl SymlinkTarget {
  /// Unwraps the target path, discarding the file/directory distinction.
  pub fn into_path_buf(self) -> PathBuf {
    match self {
      Self::File(path) | Self::Dir(path) => path,
    }
  }
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/snapshot/lib.rs | cli/snapshot/lib.rs | // Copyright 2018-2025 the Deno authors. MIT license.
/// The CLI runtime snapshot baked in at build time from OUT_DIR; `None`
/// when built with the "disable" feature.
#[cfg(not(feature = "disable"))]
pub static CLI_SNAPSHOT: Option<&[u8]> = Some(include_bytes!(concat!(
  env!("OUT_DIR"),
  "/CLI_SNAPSHOT.bin"
)));
#[cfg(feature = "disable")]
pub static CLI_SNAPSHOT: Option<&[u8]> = None;
mod shared;
pub use shared::TS_VERSION;
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/snapshot/build.rs | cli/snapshot/build.rs | // Copyright 2018-2025 the Deno authors. MIT license.
#[cfg(not(feature = "disable"))]
mod shared;
fn main() {
  #[cfg(not(feature = "disable"))]
  {
    // Write the snapshot into OUT_DIR so lib.rs can `include_bytes!` it.
    let o = std::path::PathBuf::from(std::env::var_os("OUT_DIR").unwrap());
    let cli_snapshot_path = o.join("CLI_SNAPSHOT.bin");
    create_cli_snapshot(cli_snapshot_path);
  }
}
/// Builds the runtime snapshot (stamped with the TS/V8 versions and
/// build target) and writes it to `snapshot_path`.
#[cfg(not(feature = "disable"))]
fn create_cli_snapshot(snapshot_path: std::path::PathBuf) {
  use deno_runtime::ops::bootstrap::SnapshotOptions;
  let snapshot_options = SnapshotOptions {
    ts_version: shared::TS_VERSION.to_string(),
    v8_version: deno_runtime::deno_core::v8::VERSION_STRING,
    target: std::env::var("TARGET").unwrap(),
  };
  deno_runtime::snapshot::create_runtime_snapshot(
    snapshot_path,
    snapshot_options,
    vec![],
  );
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/snapshot/shared.rs | cli/snapshot/shared.rs | // Copyright 2018-2025 the Deno authors. MIT license.
pub static TS_VERSION: &str = "5.9.2";
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/tsc/diagnostics.rs | cli/tsc/diagnostics.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::error::Error;
use std::fmt;
use deno_ast::ModuleSpecifier;
use deno_core::serde::Deserialize;
use deno_core::serde::Deserializer;
use deno_core::serde::Serialize;
use deno_core::serde::Serializer;
use deno_core::sourcemap::SourceMap;
use deno_graph::ModuleGraph;
use deno_graph::ResolutionError;
use deno_resolver::graph::enhanced_resolution_error_message;
use deno_terminal::colors;
use crate::graph_util::resolution_error_for_tsc_diagnostic;
const MAX_SOURCE_LINE_LENGTH: usize = 150;
/// Severity category of a TypeScript diagnostic, mirroring
/// `ts.DiagnosticCategory` (serialized to/from its numeric value).
#[derive(Clone, Debug, Eq, PartialEq)]
pub enum DiagnosticCategory {
  Warning,
  Error,
  Suggestion,
  Message,
}
impl fmt::Display for DiagnosticCategory {
  /// Renders the label printed before a diagnostic; only warnings and
  /// errors carry a visible label.
  fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
    let label = match self {
      DiagnosticCategory::Warning => "WARN ",
      DiagnosticCategory::Error => "ERROR ",
      DiagnosticCategory::Suggestion | DiagnosticCategory::Message => "",
    };
    f.write_str(label)
  }
}
impl<'de> Deserialize<'de> for DiagnosticCategory {
  /// Deserializes from the numeric representation emitted by tsc.
  fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
  where
    D: Deserializer<'de>,
  {
    i64::deserialize(deserializer).map(DiagnosticCategory::from)
  }
}
impl Serialize for DiagnosticCategory {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
let value = match self {
DiagnosticCategory::Warning => 0_i32,
DiagnosticCategory::Error => 1_i32,
DiagnosticCategory::Suggestion => 2_i32,
DiagnosticCategory::Message => 3_i32,
};
Serialize::serialize(&value, serializer)
}
}
impl From<i64> for DiagnosticCategory {
fn from(value: i64) -> Self {
match value {
0 => DiagnosticCategory::Warning,
1 => DiagnosticCategory::Error,
2 => DiagnosticCategory::Suggestion,
3 => DiagnosticCategory::Message,
_ => panic!("Unknown value: {value}"),
}
}
}
/// A chain of related diagnostic messages, mirroring tsc's
/// `DiagnosticMessageChain`; nested entries are indented when formatted.
#[derive(Debug, Deserialize, Serialize, Clone, Eq, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct DiagnosticMessageChain {
  pub message_text: String,
  pub category: DiagnosticCategory,
  pub code: i64,
  /// Nested messages rendered one indentation level deeper.
  pub next: Option<Vec<DiagnosticMessageChain>>,
}
impl DiagnosticMessageChain {
  /// Formats this message followed by every nested message, indenting each
  /// nesting level (two spaces per level, since the indent string is
  /// repeated `level * 2` times).
  pub fn format_message(&self, level: usize) -> String {
    let mut s = String::new();
    s.push_str(&" ".repeat(level * 2));
    s.push_str(&self.message_text);
    if let Some(next) = &self.next {
      // Iterate by reference: the previous implementation cloned the whole
      // child vector just to read it, which was an unnecessary deep copy.
      for dm in next {
        s.push('\n');
        s.push_str(&dm.format_message(level + 1));
      }
    }
    s
  }
}
/// A zero-based line/character location within a source file.
#[derive(Debug, Deserialize, Serialize, Clone, Eq, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct Position {
  /// 0-indexed line number
  pub line: u64,
  /// 0-indexed character number
  pub character: u64,
}
impl Position {
  /// Converts a `deno_graph` position (also 0-indexed) into ours.
  pub fn from_deno_graph(deno_graph_position: deno_graph::Position) -> Self {
    let line = deno_graph_position.line as u64;
    let character = deno_graph_position.character as u64;
    Self { line, character }
  }
}
/// A single TypeScript diagnostic as received from the type checker,
/// plus Deno-specific bookkeeping (`original_source_start`,
/// `missing_specifier`).
#[derive(Debug, Deserialize, Serialize, Clone, Eq, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct Diagnostic {
  pub category: DiagnosticCategory,
  pub code: u64,
  pub start: Option<Position>,
  pub end: Option<Position>,
  /// Position of this diagnostic in the original non-mapped source.
  ///
  /// This will exist and be different from the `start` for fast
  /// checked modules where the TypeScript source will differ
  /// from the original source.
  #[serde(skip_serializing)]
  pub original_source_start: Option<Position>,
  pub message_text: Option<String>,
  #[serde(skip_serializing_if = "Option::is_none")]
  pub message_chain: Option<DiagnosticMessageChain>,
  #[serde(skip_serializing_if = "Option::is_none")]
  pub source: Option<String>,
  pub source_line: Option<String>,
  pub file_name: Option<String>,
  /// Secondary diagnostics rendered indented below the primary one.
  #[serde(skip_serializing_if = "Option::is_none")]
  pub related_information: Option<Vec<Diagnostic>>,
  #[serde(skip_serializing_if = "Option::is_none")]
  pub reports_deprecated: Option<bool>,
  #[serde(skip_serializing_if = "Option::is_none")]
  pub reports_unnecessary: Option<bool>,
  /// Any additional fields tsc sends that we don't model explicitly.
  #[serde(flatten)]
  pub other: deno_core::serde_json::Map<String, deno_core::serde_json::Value>,
  /// Set when this diagnostic represents an unresolved module specifier.
  #[serde(skip_serializing_if = "Option::is_none")]
  pub missing_specifier: Option<String>,
}
impl Diagnostic {
  /// Builds a TS2307 ("Cannot find module") diagnostic for a specifier
  /// that could not be resolved, optionally pointing at the import range
  /// and appending an extra hint message.
  pub fn from_missing_error(
    specifier: &str,
    maybe_range: Option<&deno_graph::Range>,
    additional_message: Option<String>,
  ) -> Self {
    Self {
      category: DiagnosticCategory::Error,
      code: 2307,
      start: maybe_range.map(|r| Position::from_deno_graph(r.range.start)),
      end: maybe_range.map(|r| Position::from_deno_graph(r.range.end)),
      original_source_start: None, // will be applied later
      message_text: Some(format!(
        "Cannot find module '{}'.{}{}",
        specifier,
        // Separate the hint from the main message with a space, if present.
        if additional_message.is_none() {
          ""
        } else {
          " "
        },
        additional_message.unwrap_or_default()
      )),
      message_chain: None,
      source: None,
      source_line: None,
      file_name: maybe_range.map(|r| r.specifier.to_string()),
      related_information: None,
      reports_deprecated: None,
      reports_unnecessary: None,
      other: Default::default(),
      missing_specifier: Some(specifier.to_string()),
    }
  }

  /// Converts a graph resolution error into a diagnostic, if it is one
  /// that should be surfaced as a tsc diagnostic. Module-not-found errors
  /// get the standard TS2307 shape; other resolution errors reuse the
  /// enhanced resolver message.
  pub fn maybe_from_resolution_error(error: &ResolutionError) -> Option<Self> {
    let error_ref = resolution_error_for_tsc_diagnostic(error)?;
    if error_ref.is_module_not_found {
      return Some(Self::from_missing_error(
        error_ref.specifier,
        Some(error_ref.range),
        None,
      ));
    }
    Some(Self {
      category: DiagnosticCategory::Error,
      code: 2307,
      start: Some(Position::from_deno_graph(error_ref.range.range.start)),
      end: Some(Position::from_deno_graph(error_ref.range.range.end)),
      original_source_start: None, // will be applied later
      message_text: Some(enhanced_resolution_error_message(error)),
      message_chain: None,
      source: None,
      source_line: None,
      file_name: Some(error_ref.range.specifier.to_string()),
      related_information: None,
      reports_deprecated: None,
      reports_unnecessary: None,
      other: Default::default(),
      missing_specifier: Some(error_ref.specifier.to_string()),
    })
  }

  /// If this diagnostic should be included when it comes from a remote module.
  pub fn include_when_remote(&self) -> bool {
    /// TS6133: value is declared but its value is never read (noUnusedParameters and noUnusedLocals)
    const TS6133: u64 = 6133;
    /// TS4114: This member must have an 'override' modifier because it overrides a member in the base class 'X'.
    const TS4114: u64 = 4114;
    !matches!(self.code, TS6133 | TS4114)
  }

  /// Writes the `TSxxxx [CATEGORY]: ` prefix; suggestions/messages get no
  /// prefix, and codes >= 900001 (Deno-internal) omit the TS code.
  fn fmt_category_and_code(&self, f: &mut fmt::Formatter) -> fmt::Result {
    let category = match self.category {
      DiagnosticCategory::Error => "ERROR",
      DiagnosticCategory::Warning => "WARN",
      _ => "",
    };
    let code = if self.code >= 900001 {
      "".to_string()
    } else {
      colors::bold(format!("TS{} ", self.code)).to_string()
    };
    if !category.is_empty() {
      write!(f, "{code}[{category}]: ")
    } else {
      Ok(())
    }
  }

  /// Writes the `at file:line:col` frame, preferring the original
  /// (pre-fast-check) position when one was mapped.
  fn fmt_frame(&self, f: &mut fmt::Formatter, level: usize) -> fmt::Result {
    if let (Some(file_name), Some(start)) = (
      self.file_name.as_ref(),
      self.original_source_start.as_ref().or(self.start.as_ref()),
    ) {
      write!(
        f,
        "\n{:indent$}    at {}:{}:{}",
        "",
        colors::cyan(file_name),
        // Positions are 0-indexed internally, displayed 1-indexed.
        colors::yellow(&(start.line + 1).to_string()),
        colors::yellow(&(start.character + 1).to_string()),
        indent = level
      )
    } else {
      Ok(())
    }
  }

  /// Writes the message text, preferring the structured message chain
  /// when present.
  fn fmt_message(&self, f: &mut fmt::Formatter, level: usize) -> fmt::Result {
    if let Some(message_chain) = &self.message_chain {
      write!(f, "{}", message_chain.format_message(level))
    } else {
      write!(
        f,
        "{:indent$}{}",
        "",
        self.message_text.as_deref().unwrap_or_default(),
        indent = level,
      )
    }
  }

  /// Writes the offending source line with an underline beneath the
  /// diagnostic's span. Overly long or empty source lines are skipped.
  fn fmt_source_line(
    &self,
    f: &mut fmt::Formatter,
    level: usize,
  ) -> fmt::Result {
    if let (Some(source_line), Some(start), Some(end)) =
      (&self.source_line, &self.start, &self.end)
      && !source_line.is_empty()
      && source_line.len() <= MAX_SOURCE_LINE_LENGTH
    {
      write!(f, "\n{:indent$}{}", "", source_line, indent = level)?;
      let length = if start.line == end.line {
        end.character - start.character
      } else {
        1
      };
      let mut s = String::new();
      // Pad up to the start column, preserving tabs so the underline lines
      // up with the source line. A single forward pass over the chars
      // replaces the previous `chars().nth(i)` loop, which rescanned the
      // string per column (O(n^2)) and panicked if the column exceeded the
      // line's character count.
      for c in source_line.chars().take(start.character as usize) {
        s.push(if c == '\t' { '\t' } else { ' ' });
      }
      // TypeScript always uses `~` when underlining, but v8 always uses `^`.
      // We will use `^` to indicate a single point, or `~` when spanning
      // multiple characters.
      let ch = if length > 1 { '~' } else { '^' };
      for _i in 0..length {
        s.push(ch)
      }
      let underline = if self.is_error() {
        colors::red(&s).to_string()
      } else {
        colors::cyan(&s).to_string()
      };
      write!(f, "\n{:indent$}{}", "", underline, indent = level)?;
    }
    Ok(())
  }

  /// Writes any related diagnostics, indented beneath the primary one.
  fn fmt_related_information(&self, f: &mut fmt::Formatter) -> fmt::Result {
    if let Some(related_information) = self.related_information.as_ref()
      && !related_information.is_empty()
    {
      write!(f, "\n\n")?;
      for info in related_information {
        info.fmt_stack(f, 4)?;
      }
    }
    Ok(())
  }

  /// Renders the full diagnostic (prefix, message, source line, frame) at
  /// the given indentation level.
  fn fmt_stack(&self, f: &mut fmt::Formatter, level: usize) -> fmt::Result {
    self.fmt_category_and_code(f)?;
    self.fmt_message(f, level)?;
    self.fmt_source_line(f, level)?;
    self.fmt_frame(f, level)
  }

  fn is_error(&self) -> bool {
    self.category == DiagnosticCategory::Error
  }
}
impl fmt::Display for Diagnostic {
  // Renders the diagnostic itself, then any related-information
  // diagnostics indented underneath it.
  fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
    self.fmt_stack(f, 0)?;
    self.fmt_related_information(f)
  }
}
/// An ordered collection of diagnostics that is itself usable as an error
/// (its `Display` renders every diagnostic plus an error-count summary).
#[derive(Clone, Debug, Default, Eq, PartialEq, deno_error::JsError)]
#[class(generic)]
pub struct Diagnostics(Vec<Diagnostic>);
impl From<Vec<Diagnostic>> for Diagnostics {
fn from(diagnostics: Vec<Diagnostic>) -> Self {
Diagnostics(diagnostics)
}
}
impl Diagnostics {
  #[cfg(test)]
  pub fn new(diagnostics: Vec<Diagnostic>) -> Self {
    Diagnostics(diagnostics)
  }
  // Logs every warning diagnostic via `log::warn!` and removes it from
  // the collection, leaving only non-warnings behind.
  pub fn emit_warnings(&mut self) {
    self.0.retain(|d| {
      if d.category == DiagnosticCategory::Warning {
        log::warn!("{}\n", d);
        false
      } else {
        true
      }
    });
  }
  pub fn push(&mut self, diagnostic: Diagnostic) {
    self.0.push(diagnostic);
  }
  pub fn extend(&mut self, diagnostic: Diagnostics) {
    self.0.extend(diagnostic.0);
  }
  /// Return a set of diagnostics where only the values where the predicate
  /// returns `true` are included.
  pub fn filter(self, predicate: impl FnMut(&Diagnostic) -> bool) -> Self {
    let diagnostics = self.0.into_iter().filter(predicate).collect();
    Self(diagnostics)
  }
  pub fn retain(&mut self, predicate: impl FnMut(&Diagnostic) -> bool) {
    self.0.retain(predicate);
  }
  // Whether there is at least one diagnostic in the collection.
  pub fn has_diagnostic(&self) -> bool {
    !self.0.is_empty()
  }
  /// Modifies all the diagnostics to have their display positions
  /// modified to point at the original source.
  pub fn apply_fast_check_source_maps(&mut self, graph: &ModuleGraph) {
    // Remaps one diagnostic (and, recursively, its related diagnostics)
    // through the fast-check module's source map, when one exists.
    fn visit_diagnostic(d: &mut Diagnostic, graph: &ModuleGraph) {
      if let Some(specifier) = d
        .file_name
        .as_ref()
        .and_then(|n| ModuleSpecifier::parse(n).ok())
        && let Ok(Some(module)) = graph.try_get_prefer_types(&specifier)
        && let Some(fast_check_module) =
          module.js().and_then(|m| m.fast_check_module())
      {
        // todo(dsherret): use a short lived cache to prevent parsing
        // source maps so often
        if let Ok(source_map) =
          SourceMap::from_slice(fast_check_module.source_map.as_bytes())
          && let Some(start) = d.start.as_mut()
        {
          let maybe_token =
            source_map.lookup_token(start.line as u32, start.character as u32);
          if let Some(token) = maybe_token {
            d.original_source_start = Some(Position {
              line: token.get_src_line() as u64,
              character: token.get_src_col() as u64,
            });
          }
        }
      }
      if let Some(related) = &mut d.related_information {
        for d in related.iter_mut() {
          visit_diagnostic(d, graph);
        }
      }
    }
    for d in &mut self.0 {
      visit_diagnostic(d, graph);
    }
  }
}
impl<'de> Deserialize<'de> for Diagnostics {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
let items: Vec<Diagnostic> = Deserialize::deserialize(deserializer)?;
Ok(Diagnostics(items))
}
}
impl Serialize for Diagnostics {
  /// Serializes transparently as the inner array of diagnostics.
  fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
  where
    S: Serializer,
  {
    self.0.serialize(serializer)
  }
}
impl fmt::Display for Diagnostics {
  /// Renders every diagnostic, and — when there is more than one — a
  /// trailing summary counting all diagnostics as errors.
  fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
    display_diagnostics(f, self)?;
    let count = self.0.len();
    if count > 1 {
      write!(f, "\n\nFound {} errors.", count)?;
    }
    Ok(())
  }
}
/// Writes each diagnostic, separating consecutive ones with a blank line.
fn display_diagnostics(
  f: &mut fmt::Formatter,
  diagnostics: &Diagnostics,
) -> fmt::Result {
  let mut first = true;
  for item in &diagnostics.0 {
    if !first {
      write!(f, "\n\n")?;
    }
    first = false;
    write!(f, "{item}")?;
  }
  Ok(())
}
// Lets `Diagnostics` be returned anywhere a `std::error::Error` is expected.
impl Error for Diagnostics {}
// Unit tests covering deserialization of tsc diagnostics and their
// human-readable rendering (with ANSI color codes stripped).
#[cfg(test)]
mod tests {
  use deno_core::serde_json;
  use deno_core::serde_json::json;
  use test_util::strip_ansi_codes;
  use super::*;
  // Deserializing a mixed payload should preserve exactly which optional
  // fields were present on each diagnostic.
  #[test]
  fn test_de_diagnostics() {
    let value = json!([
      {
        "messageText": "Unknown compiler option 'invalid'.",
        "category": 1,
        "code": 5023
      },
      {
        "start": {
          "line": 0,
          "character": 0
        },
        "end": {
          "line": 0,
          "character": 7
        },
        "fileName": "test.ts",
        "messageText": "Cannot find name 'console'. Do you need to change your target library? Try changing the `lib` compiler option to include 'dom'.",
        "sourceLine": "console.log(\"a\");",
        "category": 1,
        "code": 2584
      },
      {
        "start": {
          "line": 7,
          "character": 0
        },
        "end": {
          "line": 7,
          "character": 7
        },
        "fileName": "test.ts",
        "messageText": "Cannot find name 'foo_Bar'. Did you mean 'foo_bar'?",
        "sourceLine": "foo_Bar();",
        "relatedInformation": [
          {
            "start": {
              "line": 3,
              "character": 9
            },
            "end": {
              "line": 3,
              "character": 16
            },
            "fileName": "test.ts",
            "messageText": "'foo_bar' is declared here.",
            "sourceLine": "function foo_bar() {",
            "category": 3,
            "code": 2728
          }
        ],
        "category": 1,
        "code": 2552
      },
      {
        "start": {
          "line": 18,
          "character": 0
        },
        "end": {
          "line": 18,
          "character": 1
        },
        "fileName": "test.ts",
        "messageChain": {
          "messageText": "Type '{ a: { b: { c(): { d: number; }; }; }; }' is not assignable to type '{ a: { b: { c(): { d: string; }; }; }; }'.",
          "category": 1,
          "code": 2322,
          "next": [
            {
              "messageText": "The types of 'a.b.c().d' are incompatible between these types.",
              "category": 1,
              "code": 2200,
              "next": [
                {
                  "messageText": "Type 'number' is not assignable to type 'string'.",
                  "category": 1,
                  "code": 2322
                }
              ]
            }
          ]
        },
        "sourceLine": "x = y;",
        "code": 2322,
        "category": 1
      }
    ]);
    let diagnostics: Diagnostics =
      serde_json::from_value(value).expect("cannot deserialize");
    assert_eq!(diagnostics.0.len(), 4);
    assert!(diagnostics.0[0].source_line.is_none());
    assert!(diagnostics.0[0].file_name.is_none());
    assert!(diagnostics.0[0].start.is_none());
    assert!(diagnostics.0[0].end.is_none());
    assert!(diagnostics.0[0].message_text.is_some());
    assert!(diagnostics.0[0].message_chain.is_none());
    assert!(diagnostics.0[0].related_information.is_none());
    assert!(diagnostics.0[1].source_line.is_some());
    assert!(diagnostics.0[1].file_name.is_some());
    assert!(diagnostics.0[1].start.is_some());
    assert!(diagnostics.0[1].end.is_some());
    assert!(diagnostics.0[1].message_text.is_some());
    assert!(diagnostics.0[1].message_chain.is_none());
    assert!(diagnostics.0[1].related_information.is_none());
    assert!(diagnostics.0[2].source_line.is_some());
    assert!(diagnostics.0[2].file_name.is_some());
    assert!(diagnostics.0[2].start.is_some());
    assert!(diagnostics.0[2].end.is_some());
    assert!(diagnostics.0[2].message_text.is_some());
    assert!(diagnostics.0[2].message_chain.is_none());
    assert!(diagnostics.0[2].related_information.is_some());
  }
  // A diagnostic with no position/source renders as just the code and
  // message.
  #[test]
  fn test_diagnostics_no_source() {
    let value = json!([
      {
        "messageText": "Unknown compiler option 'invalid'.",
        "category":1,
        "code":5023
      }
    ]);
    let diagnostics: Diagnostics = serde_json::from_value(value).unwrap();
    let actual = diagnostics.to_string();
    assert_eq!(
      strip_ansi_codes(&actual),
      "TS5023 [ERROR]: Unknown compiler option \'invalid\'."
    );
  }
  // A positioned diagnostic renders the source line, underline, and frame.
  #[test]
  fn test_diagnostics_basic() {
    let value = json!([
      {
        "start": {
          "line": 0,
          "character": 0
        },
        "end": {
          "line": 0,
          "character": 7
        },
        "fileName": "test.ts",
        "messageText": "Cannot find name 'console'. Do you need to change your target library? Try changing the `lib` compiler option to include 'dom'.",
        "sourceLine": "console.log(\"a\");",
        "category": 1,
        "code": 2584
      }
    ]);
    let diagnostics: Diagnostics = serde_json::from_value(value).unwrap();
    let actual = diagnostics.to_string();
    assert_eq!(
      strip_ansi_codes(&actual),
      "TS2584 [ERROR]: Cannot find name \'console\'. Do you need to change your target library? Try changing the `lib` compiler option to include \'dom\'.\nconsole.log(\"a\");\n~~~~~~~\n    at test.ts:1:1"
    );
  }
  // Related information is rendered indented beneath the main diagnostic.
  #[test]
  fn test_diagnostics_related_info() {
    let value = json!([
      {
        "start": {
          "line": 7,
          "character": 0
        },
        "end": {
          "line": 7,
          "character": 7
        },
        "fileName": "test.ts",
        "messageText": "Cannot find name 'foo_Bar'. Did you mean 'foo_bar'?",
        "sourceLine": "foo_Bar();",
        "relatedInformation": [
          {
            "start": {
              "line": 3,
              "character": 9
            },
            "end": {
              "line": 3,
              "character": 16
            },
            "fileName": "test.ts",
            "messageText": "'foo_bar' is declared here.",
            "sourceLine": "function foo_bar() {",
            "category": 3,
            "code": 2728
          }
        ],
        "category": 1,
        "code": 2552
      }
    ]);
    let diagnostics: Diagnostics = serde_json::from_value(value).unwrap();
    let actual = diagnostics.to_string();
    assert_eq!(
      strip_ansi_codes(&actual),
      "TS2552 [ERROR]: Cannot find name \'foo_Bar\'. Did you mean \'foo_bar\'?\nfoo_Bar();\n~~~~~~~\n    at test.ts:8:1\n\n    \'foo_bar\' is declared here.\n    function foo_bar() {\n             ~~~~~~~\n        at test.ts:4:10"
    );
  }
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/tsc/go.rs | cli/tsc/go.rs | // Copyright 2018-2025 the Deno authors. MIT license.
mod setup;
mod tsgo_version;
use std::borrow::Cow;
use std::cell::RefCell;
use std::collections::HashMap;
use std::path::Path;
use std::path::PathBuf;
use std::sync::Arc;
use deno_ast::MediaType;
use deno_ast::ModuleSpecifier;
use deno_config::deno_json::CompilerOptions;
use deno_core::serde_json;
use deno_core::serde_json::json;
use deno_graph::ModuleGraph;
use deno_resolver::deno_json::JsxImportSourceConfigResolver;
use deno_typescript_go_client_rust::CallbackHandler;
use deno_typescript_go_client_rust::SyncRpcChannel;
use deno_typescript_go_client_rust::types::GetImpliedNodeFormatForFilePayload;
use deno_typescript_go_client_rust::types::Project;
use deno_typescript_go_client_rust::types::ResolveModuleNamePayload;
use deno_typescript_go_client_rust::types::ResolveTypeReferenceDirectivePayload;
pub use setup::DownloadError;
pub use setup::ensure_tsgo;
use super::Request;
use super::Response;
use crate::args::TypeCheckMode;
/// Serializes a `serde_json::json!` literal directly to a `String`,
/// yielding `Result<String, serde_json::Error>`.
macro_rules! jsons {
  ($($arg:tt)*) => {
    serde_json::to_string(&json!($($arg)*))
  };
}
/// Deserializes a JSON payload string into `T`.
fn deser<T: serde::de::DeserializeOwned>(
  payload: impl AsRef<str>,
) -> Result<T, serde_json::Error> {
  let raw = payload.as_ref();
  serde_json::from_str(raw)
}
// tsgo currently insists on reading a real tsconfig.json and provides no way
// to pass root file names directly. Rather than changing tsgo further, we
// fabricate an in-memory ("synthetic") tsconfig whose `files` field lists the
// root names, and serve it from a fake path instead of disk.
fn synthetic_config(
  config: &CompilerOptions,
  root_names: &[String],
  type_check_mode: TypeCheckMode,
) -> Result<String, serde_json::Error> {
  let mut compiler_options = serde_json::to_value(config)?;
  let obj = compiler_options.as_object_mut().unwrap();
  obj.insert("allowImportingTsExtensions".to_string(), json!(true));
  if type_check_mode != TypeCheckMode::All {
    obj.insert("skipDefaultLibCheck".to_string(), json!(true));
  }
  // Map Deno's "precompile" JSX mode to a mode tsgo understands.
  if let Some(jsx) = obj.get("jsx")
    && jsx.as_str() == Some("precompile")
  {
    obj.insert("jsx".to_string(), json!("react-jsx"));
  }
  obj.insert("allowArbitraryExtensions".to_string(), json!(true));
  let rendered = serde_json::to_string(&json!({
    "compilerOptions": compiler_options,
    "files": root_names,
  }))?;
  log::debug!("synthetic config: {}", rendered);
  Ok(rendered)
}
/// Runs a type-check request against the external tsgo binary, wrapping any
/// failure in [`super::ExecError::Go`].
pub fn exec_request(
  request: Request,
  root_names: Vec<String>,
  root_map: HashMap<String, ModuleSpecifier>,
  remapped_specifiers: HashMap<String, ModuleSpecifier>,
  tsgo_path: &Path,
) -> Result<Response, super::ExecError> {
  let result = exec_request_inner(
    request,
    root_names,
    root_map,
    remapped_specifiers,
    tsgo_path,
  );
  result.map_err(super::ExecError::Go)
}
// Drives the tsgo RPC session: configure -> loadProject -> getDiagnostics,
// then converts the returned diagnostics into our representation.
fn exec_request_inner(
  request: Request,
  root_names: Vec<String>,
  root_map: HashMap<String, ModuleSpecifier>,
  remapped_specifiers: HashMap<String, ModuleSpecifier>,
  tsgo_path: &Path,
) -> Result<Response, ExecError> {
  // The handler answers tsgo's callbacks (file reads, module resolution,
  // package.json lookups) using the module graph and resolver state.
  let handler = Handler::new(
    "/virtual/tsconfig.json".to_string(),
    synthetic_config(request.config.as_ref(), &root_names, request.check_mode)?,
    remapped_specifiers,
    root_map,
    request.initial_cwd,
    request.graph.clone(),
    request.jsx_import_source_config_resolver.clone(),
    request.maybe_npm,
  );
  let callbacks = handler.supported_callbacks();
  let bin_path = tsgo_path;
  let mut channel = SyncRpcChannel::new(bin_path, vec!["--api"], handler)?;
  channel.request_sync(
    "configure",
    jsons!({
      "callbacks": callbacks.iter().collect::<Vec<_>>(),
      "logFile": "",
      "forkContextInfo": {
        "typesNodeIgnorableNames": super::TYPES_NODE_IGNORABLE_NAMES,
        "nodeOnlyGlobalNames": super::NODE_ONLY_GLOBALS,
      },
    })?,
  )?;
  // The project is loaded from the synthetic tsconfig path the handler
  // serves from memory (see `synthetic_config`).
  let project = channel.request_sync(
    "loadProject",
    jsons!({
      "configFileName": "/virtual/tsconfig.json",
    })?,
  )?;
  let project = deser::<Project>(project)?;
  // For non-"all" check modes, restrict diagnostics to the root files;
  // an empty list asks tsgo for diagnostics on the whole project.
  let file_names = if request.check_mode != TypeCheckMode::All {
    root_names
  } else {
    Vec::new()
  };
  let diagnostics = channel.request_sync(
    "getDiagnostics",
    jsons!({
      "project": &project.id,
      "fileNames": file_names,
    })?,
  )?;
  let diagnostics = deser::<
    Vec<deno_typescript_go_client_rust::types::Diagnostic>,
  >(diagnostics)?;
  Ok(Response {
    diagnostics: convert_diagnostics(diagnostics),
    maybe_tsbuildinfo: None,
    ambient_modules: vec![],
    stats: super::Stats::default(),
  })
}
/// Maps tsgo's string category to our diagnostic category. Panics on any
/// other string, which would indicate a protocol mismatch with tsgo.
fn diagnostic_category(category: &str) -> super::DiagnosticCategory {
  use super::DiagnosticCategory as C;
  match category {
    "error" => C::Error,
    "warning" => C::Warning,
    "message" => C::Message,
    "suggestion" => C::Suggestion,
    _ => unreachable!("unexpected diagnostic category: {category}"),
  }
}
/// Rewrites selected tsc messages into Deno-specific guidance (e.g. pointing
/// at `deno.ns` / `deno.unstable` lib options). Messages that don't match a
/// known pattern are returned unchanged.
fn maybe_rewrite_message(message: String, code: u64) -> String {
  if code == 2304 && message.starts_with("Cannot find name 'Deno'") {
    r#"Cannot find name 'Deno'. Do you need to change your target library? Try changing the 'lib' compiler option to include 'deno.ns' or add a triple-slash directive to the top of your entrypoint (main file): /// <reference lib="deno.ns" />"#.to_string()
  } else if code == 2581 {
    r#"Cannot find name '$'. Did you mean to import jQuery? Try adding `import $ from "npm:jquery";`."#.to_string()
  } else if code == 2580 {
    let regex = lazy_regex::regex!(r#"Cannot find name '([^']+)'"#);
    // Fall back to the original message rather than panicking if the
    // subprocess-supplied text doesn't match the expected shape (the
    // previous implementation used `unwrap()` here).
    match regex.captures(&message).and_then(|c| c.get(1)) {
      Some(name) => format!("Cannot find name '{}'.", name.as_str()),
      None => message,
    }
  } else if code == 1203 {
    "Export assignment cannot be used when targeting ECMAScript modules. Consider using 'export default' or another module format instead. This will start erroring in a future version of Deno 2 in order to align with TypeScript.".to_string()
  } else if code == 2339 && message.contains("on type 'typeof Deno'") {
    let regex = lazy_regex::regex!(
      r#"Property '([^']+)' does not exist on type 'typeof Deno'"#
    );
    // Same graceful fallback as above.
    match regex.captures(&message).and_then(|c| c.get(1)) {
      Some(name) => {
        let name = name.as_str();
        format!(
          "Property '{name}' does not exist on type 'typeof Deno'. 'Deno.{name}' is an unstable API. If not, try changing the 'lib' compiler option to include 'deno.unstable' or add a triple-slash directive to the top of your entrypoint (main file): /// <reference lib=\"deno.unstable\" />",
        )
      }
      None => message,
    }
  } else {
    message
  }
}
/// Downgrades TS1203 (export assignment with an ESM target) to a warning;
/// all other codes keep their original category.
fn maybe_remap_category(
  code: u64,
  category: super::DiagnosticCategory,
) -> super::DiagnosticCategory {
  match code {
    1203 => super::DiagnosticCategory::Warning,
    _ => category,
  }
}
// Converts a tsgo-client diagnostic into our diagnostic representation,
// applying message rewrites and category remapping along the way.
// `_diagnostics` (the full batch) is currently unused but kept in the
// signature for recursion over related information.
fn convert_diagnostic(
  diagnostic: deno_typescript_go_client_rust::types::Diagnostic,
  _diagnostics: &[deno_typescript_go_client_rust::types::Diagnostic],
) -> super::Diagnostic {
  // An all-zero range is tsgo's way of saying "no position".
  let (start, end) = if diagnostic.start.line == 0
    && diagnostic.start.character == 0
    && diagnostic.end.line == 0
    && diagnostic.end.character == 0
  {
    (None, None)
  } else {
    (Some(diagnostic.start), Some(diagnostic.end))
  };
  super::Diagnostic {
    category: maybe_remap_category(
      diagnostic.code as u64,
      diagnostic_category(diagnostic.category.as_str()),
    ),
    code: diagnostic.code as u64,
    start: start.map(|s| super::Position {
      line: s.line,
      character: s.character,
    }),
    end: end.map(|e| super::Position {
      line: e.line,
      character: e.character,
    }),
    original_source_start: None,
    message_chain: None,
    message_text: Some(maybe_rewrite_message(
      diagnostic.message,
      diagnostic.code as u64,
    )),
    file_name: Some(diagnostic.file_name),
    missing_specifier: None,
    other: Default::default(),
    // Related diagnostics are converted recursively.
    related_information: if diagnostic.related_information.is_empty() {
      None
    } else {
      Some(
        diagnostic
          .related_information
          .into_iter()
          .map(|d| convert_diagnostic(d, _diagnostics))
          .collect::<Vec<_>>(),
      )
    },
    reports_deprecated: Some(diagnostic.reports_deprecated),
    reports_unnecessary: Some(diagnostic.reports_unnecessary),
    source: None,
    source_line: Some(diagnostic.source_line),
  }
}
/// Whether this diagnostic's code is on Deno's global suppression list.
fn should_ignore_diagnostic(diagnostic: &super::Diagnostic) -> bool {
  super::IGNORED_DIAGNOSTIC_CODES
    .iter()
    .any(|code| *code == diagnostic.code)
}
/// Converts a batch of tsgo diagnostics, dropping any whose code is on the
/// ignore list, and collects the remainder into a `Diagnostics` set.
fn convert_diagnostics(
  diagnostics: Vec<deno_typescript_go_client_rust::types::Diagnostic>,
) -> super::Diagnostics {
  let mut converted = Vec::with_capacity(diagnostics.len());
  for diagnostic in diagnostics.iter() {
    let d = convert_diagnostic(diagnostic.clone(), &diagnostics);
    if !should_ignore_diagnostic(&d) {
      converted.push(d);
    }
  }
  super::diagnostics::Diagnostics::from(converted)
}
/// Callback handler handed to the tsgo RPC channel. All mutable state lives
/// behind a `RefCell` because the callback trait takes `&self`.
struct Handler {
  state: RefCell<HandlerState>,
}
impl Handler {
  // Bundles the synthetic config, specifier remappings, module graph, and
  // resolver state needed to answer tsgo's callbacks.
  #[allow(clippy::too_many_arguments)]
  fn new(
    config_path: String,
    synthetic_config: String,
    remapped_specifiers: HashMap<String, ModuleSpecifier>,
    root_map: HashMap<String, ModuleSpecifier>,
    current_dir: PathBuf,
    graph: Arc<ModuleGraph>,
    jsx_import_source_config_resolver: Arc<JsxImportSourceConfigResolver>,
    maybe_npm: Option<super::RequestNpmState>,
  ) -> Self {
    Self {
      state: RefCell::new(HandlerState {
        config_path,
        synthetic_config,
        remapped_specifiers,
        root_map,
        current_dir,
        graph,
        jsx_import_source_config_resolver,
        maybe_npm,
        // Caches start empty and are populated lazily by callbacks.
        module_kind_map: HashMap::new(),
        load_result_pending: HashMap::new(),
      }),
    }
  }
}
// Resolves the closest package.json for a file path (or file URL) when npm
// support is enabled, returning it serialized for tsgo; answers JSON `null`
// when npm is disabled, the path is unusable, or no package.json is found.
fn get_package_json_scope_if_applicable(
  state: &mut HandlerState,
  payload: String,
) -> Result<String, deno_typescript_go_client_rust::Error> {
  log::debug!("get_package_json_scope_if_applicable: {}", payload);
  if let Some(maybe_npm) = state.maybe_npm.as_ref() {
    let file_path = deser::<String>(&payload)?;
    // The payload may be either a URL or a plain path.
    let file_path = if let Ok(specifier) = ModuleSpecifier::parse(&file_path) {
      deno_path_util::url_to_file_path(&specifier).ok()
    } else {
      Some(PathBuf::from(file_path))
    };
    let Some(file_path) = file_path else {
      return Ok(jsons!(None::<String>)?);
    };
    if let Some(package_json) = maybe_npm
      .package_json_resolver
      .get_closest_package_jsons(&file_path)
      .next()
      .and_then(|r| r.ok())
    {
      let package_directory = package_json.path.parent();
      let contents = serde_json::to_string(&package_json).ok();
      if let Some(contents) = contents {
        return Ok(jsons!({
          "packageDirectory": package_directory,
          "directoryExists": true,
          "contents": contents,
        })?);
      }
    }
  }
  Ok(jsons!(None::<String>)?)
}
/// Appends a `denoRawImport={kind}.ts` marker to the specifier's URL
/// fragment (creating the fragment if absent). If the fragment already
/// contains the marker for this `raw_kind`, the specifier is left unchanged.
fn append_raw_import_fragment(specifier: &mut String, raw_kind: &str) {
  let marker = format!("denoRawImport={}.ts", raw_kind);
  match specifier.find('#') {
    None => {
      // No fragment yet: start one with the marker.
      specifier.push('#');
      specifier.push_str(&marker);
    }
    Some(index) => {
      if !specifier[index..].contains(&marker) {
        specifier.push('&');
        specifier.push_str(&marker);
      }
    }
  }
}
/// Mutable state shared by all tsgo callbacks (held in `Handler`'s
/// `RefCell`).
struct HandlerState {
  /// Fake path at which the synthetic tsconfig is served.
  config_path: String,
  /// In-memory tsconfig contents (see `synthetic_config`).
  synthetic_config: String,
  remapped_specifiers: HashMap<String, ModuleSpecifier>,
  root_map: HashMap<String, ModuleSpecifier>,
  current_dir: PathBuf,
  graph: Arc<ModuleGraph>,
  jsx_import_source_config_resolver: Arc<JsxImportSourceConfigResolver>,
  maybe_npm: Option<super::RequestNpmState>,
  /// Cache of each file's determined module kind (CJS/ESM).
  module_kind_map:
    HashMap<String, deno_typescript_go_client_rust::types::ResolutionMode>,
  /// Load results computed eagerly (e.g. while determining module kind)
  /// and handed out on the next read for the same file.
  load_result_pending: HashMap<String, LoadResult>,
}
impl deno_typescript_go_client_rust::CallbackHandler for Handler {
  // NOTE(review): "loadSourceFile" is handled in `handle_callback` below
  // but is not advertised here — confirm whether tsgo discovers it some
  // other way or the list is out of date.
  fn supported_callbacks(&self) -> &'static [&'static str] {
    &[
      "readFile",
      "resolveJsxImportSource",
      "resolveModuleName",
      "getPackageJsonScopeIfApplicable",
      "getPackageScopeForPath",
      "resolveTypeReferenceDirective",
      "getImpliedNodeFormatForFile",
      "isNodeSourceFile",
    ]
  }
  // Dispatches a callback from tsgo. Payloads and results are JSON strings.
  fn handle_callback(
    &self,
    name: &str,
    payload: String,
  ) -> Result<String, deno_typescript_go_client_rust::Error> {
    let mut state = self.state.borrow_mut();
    match name {
      // Serve the synthetic tsconfig, cached load results, graph modules,
      // then finally fall back to reading from disk.
      "readFile" => {
        log::debug!("readFile: {}", payload);
        let payload = deser::<String>(payload)?;
        if payload == state.config_path {
          Ok(jsons!(&state.synthetic_config)?)
        } else {
          if let Some(load_result) = state.load_result_pending.remove(&payload)
          {
            return Ok(jsons!(load_result.contents)?);
          }
          let result = load_inner(&mut state, &payload).map_err(adhoc)?;
          if let Some(result) = result {
            let contents = result.contents;
            Ok(jsons!(&contents)?)
          } else {
            let path = Path::new(&payload);
            if let Ok(contents) = std::fs::read_to_string(path) {
              Ok(jsons!(&contents)?)
            } else {
              Ok(jsons!(None::<String>)?)
            }
          }
        }
      }
      "loadSourceFile" => {
        let payload = deser::<String>(payload)?;
        log::debug!("loadSourceFile: {}", payload);
        if let Some(load_result) = state.load_result_pending.remove(&payload) {
          Ok(jsons!(&load_result)?)
        } else {
          let result = load_inner(&mut state, &payload).map_err(adhoc)?;
          Ok(jsons!(&result)?)
        }
      }
      // Resolve an import; raw imports (`text`/`bytes` attributes) get a
      // marker fragment and are treated as TypeScript files.
      "resolveModuleName" => {
        let payload = deser::<ResolveModuleNamePayload>(payload)?;
        let import_attribute_type = payload.import_attribute_type.clone();
        let (mut out_name, mut extension) = resolve_name(&mut state, payload)?;
        if let Some(import_attribute_type) = &import_attribute_type
          && matches!(import_attribute_type.as_str(), "text" | "bytes")
        {
          append_raw_import_fragment(
            &mut out_name,
            import_attribute_type.as_str(),
          );
          extension = Some("ts");
        }
        Ok(jsons!({
          "resolvedFileName": out_name,
          "extension": extension,
        })?)
      }
      "getPackageJsonScopeIfApplicable" => {
        log::debug!("getPackageJsonScopeIfApplicable: {}", payload);
        get_package_json_scope_if_applicable(&mut state, payload).inspect(
          |res| log::debug!("getPackageJsonScopeIfApplicable -> {}", res),
        )
      }
      // Same lookup as above, exposed under a second callback name.
      "getPackageScopeForPath" => {
        log::debug!("getPackageScopeForPath: {}", payload);
        get_package_json_scope_if_applicable(&mut state, payload)
          .inspect(|res| log::debug!("getPackageScopeForPath -> {}", res))
      }
      // Type reference directives are resolved like module names.
      "resolveTypeReferenceDirective" => {
        log::debug!("resolveTypeReferenceDirective: {}", payload);
        let payload = deser::<ResolveTypeReferenceDirectivePayload>(payload)?;
        let payload = ResolveModuleNamePayload {
          module_name: payload.type_reference_directive_name,
          containing_file: payload.containing_file,
          resolution_mode: payload.resolution_mode,
          import_attribute_type: None,
        };
        let (out_name, extension) = resolve_name(&mut state, payload)?;
        log::debug!(
          "resolveTypeReferenceDirective: {:?}",
          (&out_name, &extension)
        );
        Ok(jsons!({
          "resolvedFileName": out_name,
          "extension": extension,
          "primary": true,
        })?)
      }
      "getImpliedNodeFormatForFile" => {
        let payload = deser::<GetImpliedNodeFormatForFilePayload>(payload)?;
        log::debug!("getImpliedNodeFormatForFile: {:?}", payload);
        // check if we already determined the module kind from a previous load
        if let Some(module_kind) = state.module_kind_map.get(&payload.file_name)
        {
          log::debug!("getImpliedNodeFormatForFile -> {:?}", module_kind);
          Ok(jsons!(&module_kind)?)
        } else {
          // if not, load the file and determine the module kind
          let load_result =
            load_inner(&mut state, &payload.file_name).map_err(adhoc)?;
          if let Some(load_result) = load_result {
            // store the load result in the pending map to avoid loading the file again
            state
              .load_result_pending
              .insert(payload.file_name.clone(), load_result);
            let module_kind = state
              .module_kind_map
              .get(&payload.file_name)
              .copied()
              .unwrap_or(
                deno_typescript_go_client_rust::types::ResolutionMode::ESM,
              );
            Ok(jsons!(&module_kind)?)
          } else {
            // Unknown files default to ESM.
            Ok(jsons!(
              &deno_typescript_go_client_rust::types::ResolutionMode::ESM
            )?)
          }
        }
      }
      // A file is a "node source file" when it's a node asset or lives
      // inside an npm package.
      "isNodeSourceFile" => {
        let path = deser::<String>(payload)?;
        let state = &*state;
        let result = path.starts_with("asset:///node/")
          || ModuleSpecifier::parse(&path)
            .ok()
            .or_else(|| {
              deno_path_util::resolve_url_or_path(&path, &state.current_dir)
                .ok()
            })
            .and_then(|specifier| {
              state
                .maybe_npm
                .as_ref()
                .map(|n| n.node_resolver.in_npm_package(&specifier))
            })
            .unwrap_or(false);
        Ok(jsons!(result)?)
      }
      // Resolve the configured JSX import source for the referrer's scope.
      "resolveJsxImportSource" => {
        let referrer = deser::<String>(payload)?;
        let state = &*state;
        let referrer = if let Some(remapped_specifier) =
          state.maybe_remapped_specifier(&referrer)
        {
          Some(Cow::Borrowed(remapped_specifier))
        } else {
          deno_path_util::resolve_url_or_path(&referrer, &state.current_dir)
            .ok()
            .map(Cow::Owned)
        };
        let result = referrer.and_then(|referrer| {
          state
            .jsx_import_source_config_resolver
            .for_specifier(&referrer)
            .and_then(|config| config.specifier())
        });
        Ok(jsons!(result.unwrap_or_default())?)
      }
      _ => unreachable!("unknown callback: {name}"),
    }
  }
}
fn adhoc(err: impl std::error::Error) -> deno_typescript_go_client_rust::Error {
deno_typescript_go_client_rust::Error::AdHoc(err.to_string())
}
/// Resolves `payload.module_name` against `payload.containing_file` for the
/// tsgo host, returning the resolved file name and its extension (if known).
fn resolve_name(
  handler: &mut HandlerState,
  payload: ResolveModuleNamePayload,
) -> Result<(String, Option<&'static str>), deno_typescript_go_client_rust::Error>
{
  log::debug!("resolve_name({payload:?})");
  let graph = &handler.graph;
  let maybe_npm = handler.maybe_npm.as_ref();
  // Prefer a remapped form of the referrer when tsc knows it under a
  // different name; otherwise resolve the raw path/URL text.
  let referrer = if let Some(remapped_specifier) =
    handler.maybe_remapped_specifier(&payload.containing_file)
  {
    remapped_specifier.clone()
  } else {
    deno_path_util::resolve_url_or_path(
      &payload.containing_file,
      &handler.current_dir,
    )
    .map_err(adhoc)?
  };
  let referrer_module = graph.get(&referrer);
  let specifier = payload.module_name;
  let result = super::resolve_specifier_for_tsc(
    specifier,
    &referrer,
    graph,
    // Map the client's resolution mode onto the resolver's mode; `None`
    // is treated the same as ESM (import) here.
    match payload.resolution_mode {
      deno_typescript_go_client_rust::types::ResolutionMode::None => {
        super::ResolutionMode::Import
      }
      deno_typescript_go_client_rust::types::ResolutionMode::CommonJS => {
        super::ResolutionMode::Require
      }
      deno_typescript_go_client_rust::types::ResolutionMode::ESM => {
        super::ResolutionMode::Import
      }
    },
    maybe_npm,
    referrer_module,
    &mut handler.remapped_specifiers,
  )
  .map_err(adhoc)?;
  Ok(result)
}
impl HandlerState {
  /// Looks up a specifier that tsc knows under a remapped name, checking the
  /// resolution-time remappings first and falling back to the root map.
  pub fn maybe_remapped_specifier(
    &self,
    specifier: &str,
  ) -> Option<&ModuleSpecifier> {
    match self.remapped_specifiers.get(specifier) {
      Some(remapped) => Some(remapped),
      None => self.root_map.get(specifier),
    }
  }
}
/// Errors that can occur while executing a tsgo type-check request.
#[derive(Debug, thiserror::Error, deno_error::JsError)]
pub enum ExecError {
  #[class(generic)]
  #[error(transparent)]
  SerdeJson(#[from] serde_json::Error),
  #[class(generic)]
  #[error(transparent)]
  TsgoClient(#[from] deno_typescript_go_client_rust::Error),
  #[class(generic)]
  #[error(transparent)]
  PackageJsonLoad(#[from] deno_package_json::PackageJsonLoadError),
  #[class(generic)]
  #[error(transparent)]
  PackageJsonLoadError(#[from] node_resolver::errors::PackageJsonLoadError),
  #[class(generic)]
  #[error(transparent)]
  DownloadError(#[from] DownloadError),
  #[class(generic)]
  #[error(transparent)]
  LoadError(#[from] super::LoadError),
}
/// Serialized result of `load_inner` sent back to the tsgo client.
#[derive(Debug, Clone, serde::Serialize)]
#[serde(rename_all = "camelCase")]
struct LoadResult {
  // The source text of the loaded file.
  contents: String,
  // Script kind value computed by `super::as_ts_script_kind`.
  script_kind: i32,
}
/// Adapts loader output into owned `String`s for the tsgo code path.
impl super::LoadContent for String {
  fn from_static(source: &'static str) -> Self {
    String::from(source)
  }
  fn from_string(source: String) -> Self {
    source
  }
  fn from_arc_str(source: Arc<str>) -> Self {
    String::from(&*source)
  }
}
// Lets the shared tsc load/resolve helpers consult this handler's remappings.
impl super::Mapper for HandlerState {
  fn maybe_remapped_specifier(
    &self,
    specifier: &str,
  ) -> Option<&ModuleSpecifier> {
    self.maybe_remapped_specifier(specifier)
  }
}
/// Loads `load_specifier` for the tsgo host, returning `None` for specifiers
/// that cannot be loaded or are not JS/TS-like sources.
///
/// Side effect: records the determined module kind for the specifier in
/// `state.module_kind_map` so later callbacks (e.g.
/// `getImpliedNodeFormatForFile`) can reuse it without loading again.
fn load_inner(
  state: &mut HandlerState,
  load_specifier: &str,
) -> Result<Option<LoadResult>, ExecError> {
  log::debug!("load_inner: {}", load_specifier);
  let result = super::load_for_tsc(
    load_specifier,
    state.maybe_npm.as_ref(),
    &state.current_dir,
    &state.graph,
    None,
    0,
    state,
  )?;
  let Some(result) = result else {
    log::debug!("load_inner {load_specifier} -> None");
    return Ok(None);
  };
  let is_cjs = result.is_cjs;
  let media_type = result.media_type;
  // Exhaustive on purpose: adding a new MediaType variant forces a decision
  // about whether tsgo should see it.
  match media_type {
    MediaType::JavaScript
    | MediaType::Jsx
    | MediaType::Mjs
    | MediaType::Cjs
    | MediaType::TypeScript
    | MediaType::Mts
    | MediaType::Cts
    | MediaType::Dts
    | MediaType::Dmts
    | MediaType::Dcts
    | MediaType::Json
    | MediaType::Tsx => {}
    // anything you return from here will be treated as a js/ts
    // source file and attempt to be parsed by typescript. so
    // if it's not a js/ts source file, return None.
    MediaType::SourceMap
    | MediaType::Css
    | MediaType::Jsonc
    | MediaType::Json5
    | MediaType::Html
    | MediaType::Sql
    | MediaType::Wasm
    | MediaType::Unknown => return Ok(None),
  }
  let module_kind = get_resolution_mode(is_cjs, media_type);
  let script_kind = super::as_ts_script_kind(media_type);
  log::debug!("load_inner {load_specifier} -> {:?}", module_kind);
  log::trace!("loaded contents ({load_specifier}) -> {:?}", result.data);
  // Cache the module kind for subsequent callbacks.
  state
    .module_kind_map
    .insert(load_specifier.to_string(), module_kind);
  Ok(Some(LoadResult {
    contents: result.data,
    script_kind,
  }))
}
/// Derives the module resolution mode tsgo should assume for a file.
///
/// An explicit CJS determination wins; otherwise the media type decides:
/// `.cjs`/`.cts`/`.d.cts` files are CommonJS, non-JS/TS assets have no
/// resolution mode, and everything else defaults to ESM.
fn get_resolution_mode(
  is_cjs: bool,
  media_type: MediaType,
) -> deno_typescript_go_client_rust::types::ResolutionMode {
  use deno_typescript_go_client_rust::types::ResolutionMode;
  if is_cjs {
    return ResolutionMode::CommonJS;
  }
  match media_type {
    MediaType::Cjs | MediaType::Dcts | MediaType::Cts => {
      ResolutionMode::CommonJS
    }
    MediaType::Css
    | MediaType::Json
    | MediaType::Html
    | MediaType::Sql
    | MediaType::Wasm
    | MediaType::SourceMap
    | MediaType::Unknown => ResolutionMode::None,
    _ => ResolutionMode::ESM,
  }
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/tsc/js.rs | cli/tsc/js.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::borrow::Cow;
use std::collections::HashMap;
use std::path::PathBuf;
use std::rc::Rc;
use std::sync::Arc;
use deno_core::FastString;
use deno_core::JsRuntime;
use deno_core::ModuleSpecifier;
use deno_core::OpState;
use deno_core::RuntimeOptions;
use deno_core::anyhow::Context;
use deno_core::located_script_name;
use deno_core::op2;
use deno_core::serde::Deserialize;
use deno_core::serde::Serialize;
use deno_core::serde_json::json;
use deno_core::url::Url;
use deno_graph::GraphKind;
use deno_graph::ModuleGraph;
use deno_lib::util::hash::FastInsecureHasher;
use deno_lib::worker::create_isolate_create_params;
use deno_path_util::resolve_url_or_path;
use deno_resolver::deno_json::JsxImportSourceConfigResolver;
use node_resolver::ResolutionMode;
use super::ResolveArgs;
use super::ResolveError;
use crate::args::TypeCheckMode;
use crate::tsc::Diagnostics;
use crate::tsc::ExecError;
use crate::tsc::LoadError;
use crate::tsc::Request;
use crate::tsc::RequestNpmState;
use crate::tsc::Response;
use crate::tsc::Stats;
use crate::tsc::get_hash;
/// Op returning the remapped form of `specifier` (if the compiler state has
/// one) as a string.
#[op2]
#[string]
fn op_remap_specifier(
  state: &mut OpState,
  #[string] specifier: &str,
) -> Option<String> {
  let state = state.borrow::<State>();
  let remapped = state.maybe_remapped_specifier(specifier)?;
  Some(remapped.to_string())
}
/// Op returning the names of all built-in TypeScript lib files.
#[op2]
#[serde]
fn op_libs() -> Vec<String> {
  crate::tsc::lib_names()
}
/// Op resolving a batch of specifiers against a base (referrer) specifier.
/// Each entry pairs an "is CJS" flag with the raw specifier text.
#[op2]
#[serde]
fn op_resolve(
  state: &mut OpState,
  #[string] base: &str,
  #[serde] specifiers: Vec<(bool, String)>,
) -> Result<Vec<(String, Option<&'static str>)>, ResolveError> {
  op_resolve_inner(state, ResolveArgs { base, specifiers })
}
/// Constant tables handed to the JS compiler host; mirrors the statics of
/// the same names in the parent module.
#[derive(Debug, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct TscConstants {
  // Mirrors `super::TYPES_NODE_IGNORABLE_NAMES`.
  types_node_ignorable_names: Vec<&'static str>,
  // Mirrors `super::NODE_ONLY_GLOBALS`.
  node_only_globals: Vec<&'static str>,
  // Mirrors `super::IGNORED_DIAGNOSTIC_CODES`.
  ignored_diagnostic_codes: Vec<u64>,
}
impl TscConstants {
  /// Builds the constant set from the statics defined in the parent module.
  pub fn new() -> Self {
    let types_node_ignorable_names = super::TYPES_NODE_IGNORABLE_NAMES.to_vec();
    let node_only_globals = super::NODE_ONLY_GLOBALS.to_vec();
    let ignored_diagnostic_codes =
      super::IGNORED_DIAGNOSTIC_CODES.iter().copied().collect();
    Self {
      types_node_ignorable_names,
      node_only_globals,
      ignored_diagnostic_codes,
    }
  }
}
/// Op exposing the constant tables to the JS compiler host.
#[op2]
#[serde]
fn op_tsc_constants() -> TscConstants {
  TscConstants::new()
}
/// Shared implementation of `op_resolve`: resolves each `(is_cjs, specifier)`
/// pair against `args.base`, returning `(resolved_name, extension)` tuples in
/// the same order as the input.
#[inline]
fn op_resolve_inner(
  state: &mut OpState,
  args: ResolveArgs<'_>,
) -> Result<Vec<(String, Option<&'static str>)>, ResolveError> {
  let state = state.borrow_mut::<State>();
  let mut resolved: Vec<(String, Option<&'static str>)> =
    Vec::with_capacity(args.specifiers.len());
  // Prefer a remapped form of the referrer when tsc knows it under a
  // different name; otherwise resolve the raw path/URL text.
  let referrer = if let Some(remapped_specifier) =
    state.maybe_remapped_specifier(args.base)
  {
    remapped_specifier.clone()
  } else {
    resolve_url_or_path(args.base, &state.current_dir)?
  };
  let referrer_module = state.graph.get(&referrer);
  for (is_cjs, specifier) in args.specifiers {
    let result = super::resolve_specifier_for_tsc(
      specifier,
      &referrer,
      &state.graph,
      if is_cjs {
        ResolutionMode::Require
      } else {
        ResolutionMode::Import
      },
      state.maybe_npm.as_ref(),
      referrer_module,
      &mut state.remapped_specifiers,
    )?;
    resolved.push(result);
  }
  Ok(resolved)
}
/// Op resolving the JSX import source configured for the given referrer.
/// Returns `None` when the referrer cannot be resolved or no JSX import
/// source is configured for it.
#[op2]
#[string]
fn op_resolve_jsx_import_source(
  state: &mut OpState,
  #[string] referrer: &str,
) -> Option<String> {
  let state = state.borrow::<State>();
  let referrer = match state.maybe_remapped_specifier(referrer) {
    Some(remapped) => Cow::Borrowed(remapped),
    None => {
      Cow::Owned(resolve_url_or_path(referrer, &state.current_dir).ok()?)
    }
  };
  let config = state
    .jsx_import_source_config_resolver
    .for_specifier(&referrer)?;
  Some(config.specifier()?.to_string())
}
// Extension wiring for the tsc compiler runtime: registers the ops above,
// seeds the per-request `State`, and adds the compiler's JS sources.
deno_core::extension!(deno_cli_tsc,
  ops = [
    op_create_hash,
    op_emit,
    op_is_node_file,
    op_load,
    op_remap_specifier,
    op_resolve,
    op_resolve_jsx_import_source,
    op_tsc_constants,
    op_respond,
    op_libs,
  ],
  options = {
    request: Request,
    root_map: HashMap<String, Url>,
    remapped_specifiers: HashMap<String, Url>,
  },
  state = |state, options| {
    state.put(State::new(
      options.request.graph,
      options.request.jsx_import_source_config_resolver,
      options.request.hash_data,
      options.request.maybe_npm,
      options.request.maybe_tsbuildinfo,
      options.root_map,
      options.remapped_specifiers,
      std::env::current_dir()
        .context("Unable to get CWD")
        .unwrap(),
    ));
  },
  customizer = |ext: &mut deno_core::Extension| {
    use deno_core::ExtensionFileSource;
    ext.esm_files.to_mut().push(ExtensionFileSource::new_computed("ext:deno_cli_tsc/99_main_compiler.js", crate::tsc::MAIN_COMPILER_SOURCE.as_str().into()));
    ext.esm_files.to_mut().push(ExtensionFileSource::new_computed("ext:deno_cli_tsc/97_ts_host.js", crate::tsc::TS_HOST_SOURCE.as_str().into()));
    ext.esm_files.to_mut().push(ExtensionFileSource::new_computed("ext:deno_cli_tsc/98_lsp.js", crate::tsc::LSP_SOURCE.as_str().into()));
    ext.js_files.to_mut().push(ExtensionFileSource::new_computed("ext:deno_cli_tsc/00_typescript.js", crate::tsc::TYPESCRIPT_SOURCE.as_str().into()));
    ext.esm_entry_point = Some("ext:deno_cli_tsc/99_main_compiler.js");
  }
);
// TODO(bartlomieju): this mechanism is questionable.
// Can't we use something more efficient here?
/// Op invoked by the JS host to hand back diagnostics and stats.
#[op2]
fn op_respond(state: &mut OpState, #[serde] args: RespondArgs) {
  op_respond_inner(state, args)
}
#[inline]
fn op_respond_inner(state: &mut OpState, args: RespondArgs) {
  let state = state.borrow_mut::<State>();
  // Stash the response; `exec_request` reads it after the script finishes.
  state.maybe_response = Some(args);
}
/// Op hashing `text` together with the request-scoped hash seed.
#[op2]
#[string]
fn op_create_hash(s: &mut OpState, #[string] text: &str) -> String {
  op_create_hash_inner(s, text)
}
#[inline]
fn op_create_hash_inner(s: &mut OpState, text: &str) -> String {
  let state = s.borrow_mut::<State>();
  get_hash(text, state.hash_data)
}
/// Arguments for `op_emit`: a file emitted by the TypeScript compiler.
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
struct EmitArgs {
  /// The text data/contents of the file.
  data: String,
  /// The _internal_ filename for the file. This will be used to determine how
  /// the file is cached and stored.
  file_name: String,
}
/// Op receiving a file emitted by the compiler (currently only the
/// incremental `.tsbuildinfo` file is handled — see `op_emit_inner`).
#[op2(fast)]
fn op_emit(
  state: &mut OpState,
  #[string] data: String,
  #[string] file_name: String,
) -> bool {
  op_emit_inner(state, EmitArgs { data, file_name })
}
/// Records an emitted file in the compiler state. Only the incremental build
/// info file is expected; any other target is a programming error (panics in
/// debug builds, silently ignored in release builds).
#[inline]
fn op_emit_inner(state: &mut OpState, args: EmitArgs) -> bool {
  let state = state.borrow_mut::<State>();
  if args.file_name == "internal:///.tsbuildinfo" {
    state.maybe_tsbuildinfo = Some(args.data);
  } else if cfg!(debug_assertions) {
    panic!("Unhandled emit write: {}", args.file_name);
  }
  true
}
/// Op reporting whether `path` should be treated as Node code: either it is
/// inside an npm package or it is one of the bundled `asset:///node/` files.
#[op2(fast)]
fn op_is_node_file(state: &mut OpState, #[string] path: &str) -> bool {
  let state = state.borrow::<State>();
  let Ok(specifier) = ModuleSpecifier::parse(path) else {
    return false;
  };
  let in_npm_package = state
    .maybe_npm
    .as_ref()
    .is_some_and(|npm| npm.node_resolver.in_npm_package(&specifier));
  in_npm_package || specifier.as_str().starts_with("asset:///node/")
}
/// Payload the JS host sends back through `op_respond`.
#[derive(Debug, Deserialize, Eq, PartialEq)]
#[serde(rename_all = "camelCase")]
struct RespondArgs {
  pub diagnostics: Diagnostics,
  pub ambient_modules: Vec<String>,
  pub stats: Stats,
}
/// Adapts loader output into `FastString`s for the JS compiler host.
impl super::LoadContent for FastString {
  fn from_static(source: &'static str) -> Self {
    FastString::from_static(source)
  }
  fn from_string(source: String) -> Self {
    source.into()
  }
  fn from_arc_str(source: Arc<str>) -> Self {
    source.into()
  }
}
/// Result of `op_load`, handed to the JS compiler host.
#[derive(Debug, Serialize)]
#[serde(rename_all = "camelCase")]
struct LoadResponse {
  // The file contents.
  data: FastString,
  // Content-hash version when available (`None` e.g. for the tsbuildinfo).
  version: Option<String>,
  // Script kind value computed by `super::as_ts_script_kind`.
  script_kind: i32,
  is_cjs: bool,
}
/// Op loading the contents of a specifier for the compiler.
#[op2]
#[serde]
fn op_load(
  state: &mut OpState,
  #[string] load_specifier: &str,
) -> Result<Option<LoadResponse>, LoadError> {
  op_load_inner(state, load_specifier)
}
// Lets the shared tsc load/resolve helpers consult this state's remappings.
impl super::Mapper for State {
  fn maybe_remapped_specifier(
    &self,
    specifier: &str,
  ) -> Option<&ModuleSpecifier> {
    self.maybe_remapped_specifier(specifier)
  }
}
/// Shared implementation of `op_load`: loads the specifier via the common
/// tsc loader and repackages the result for the JS host.
fn op_load_inner(
  state: &mut OpState,
  load_specifier: &str,
) -> Result<Option<LoadResponse>, LoadError> {
  let state = state.borrow::<State>();
  let loaded = super::load_for_tsc::<FastString, _>(
    load_specifier,
    state.maybe_npm.as_ref(),
    &state.current_dir,
    &state.graph,
    state.maybe_tsbuildinfo.as_deref(),
    state.hash_data,
    state,
  )?;
  Ok(loaded.map(|res| {
    let script_kind = super::as_ts_script_kind(res.media_type);
    LoadResponse {
      data: res.data,
      version: res.version,
      is_cjs: res.is_cjs,
      script_kind,
    }
  }))
}
/// Runs a type-check request to completion on a fresh `JsRuntime` and
/// returns the diagnostics/stats the compiler reported via `op_respond`.
///
/// # Errors
/// Fails if the compiler script throws (`ExecError::Js`) or finishes without
/// ever calling `op_respond` (`ExecError::ResponseNotSet`).
pub fn exec_request(
  request: Request,
  root_names: Vec<String>,
  root_map: HashMap<String, ModuleSpecifier>,
  remapped_specifiers: HashMap<String, ModuleSpecifier>,
  code_cache: Option<Arc<dyn deno_runtime::code_cache::CodeCache>>,
) -> Result<Response, ExecError> {
  let request_value = json!({
    "config": request.config,
    "debug": request.debug,
    "rootNames": root_names,
    "localOnly": request.check_mode == TypeCheckMode::Local,
  });
  let exec_source = format!("globalThis.exec({request_value})");
  let mut extensions =
    deno_runtime::snapshot_info::get_extensions_in_snapshot();
  extensions.push(deno_cli_tsc::init(request, root_map, remapped_specifiers));
  // Optionally wire up a V8 code cache for the extension scripts.
  let extension_code_cache = code_cache.map(|cache| {
    Rc::new(TscExtCodeCache::new(cache)) as Rc<dyn deno_core::ExtCodeCache>
  });
  let mut runtime = JsRuntime::new(RuntimeOptions {
    extensions,
    create_params: create_isolate_create_params(&crate::sys::CliSys::default()),
    startup_snapshot: deno_snapshots::CLI_SNAPSHOT,
    extension_code_cache,
    ..Default::default()
  });
  runtime
    .execute_script(located_script_name!(), exec_source)
    .map_err(ExecError::Js)?;
  // The script reports its result into `State` before returning.
  let op_state = runtime.op_state();
  let mut op_state = op_state.borrow_mut();
  let state = op_state.take::<State>();
  if let Some(response) = state.maybe_response {
    let diagnostics = response.diagnostics;
    let ambient_modules = response.ambient_modules;
    let maybe_tsbuildinfo = state.maybe_tsbuildinfo;
    let stats = response.stats;
    Ok(Response {
      diagnostics,
      ambient_modules,
      maybe_tsbuildinfo,
      stats,
    })
  } else {
    Err(ExecError::ResponseNotSet)
  }
}
/// `deno_core::ExtCodeCache` adapter over Deno's persistent code cache.
pub struct TscExtCodeCache {
  cache: Arc<dyn deno_runtime::code_cache::CodeCache>,
}
impl TscExtCodeCache {
  /// Wraps the given persistent cache.
  pub fn new(cache: Arc<dyn deno_runtime::code_cache::CodeCache>) -> Self {
    Self { cache }
  }
}
/// Bridges the persistent code cache into deno_core so that the tsc
/// extension scripts can reuse V8 code cache data across runs.
impl deno_core::ExtCodeCache for TscExtCodeCache {
  fn get_code_cache_info(
    &self,
    specifier: &ModuleSpecifier,
    code: &deno_core::ModuleSourceCode,
    esm: bool,
  ) -> deno_core::SourceCodeCacheInfo {
    use deno_runtime::code_cache::CodeCacheType;
    // Key the cache on a Deno-version-scoped hash of the source.
    let code_hash = FastInsecureHasher::new_deno_versioned()
      .write_hashable(code)
      .finish();
    let cache_type = if esm {
      CodeCacheType::EsModule
    } else {
      CodeCacheType::Script
    };
    let data = self
      .cache
      .get_sync(specifier, cache_type, code_hash)
      .map(Cow::from)
      .inspect(|_| {
        log::debug!(
          "V8 code cache hit for Extension module: {specifier}, [{code_hash:?}]"
        );
      });
    deno_core::SourceCodeCacheInfo {
      hash: code_hash,
      data,
    }
  }
  fn code_cache_ready(
    &self,
    specifier: ModuleSpecifier,
    source_hash: u64,
    code_cache: &[u8],
    esm: bool,
  ) {
    use deno_runtime::code_cache::CodeCacheType;
    let cache_type = if esm {
      CodeCacheType::EsModule
    } else {
      CodeCacheType::Script
    };
    log::debug!(
      "Updating V8 code cache for Extension module: {specifier}, [{source_hash:?}]"
    );
    self
      .cache
      .set_sync(specifier, cache_type, source_hash, code_cache);
  }
}
// TODO(bartlomieju): we have similar struct in `tsc.rs` - maybe at least change
// the name of the struct to avoid confusion?
/// Per-request compiler state shared between the ops and `exec_request`.
#[derive(Debug)]
struct State {
  // Seed mixed into content hashes (see `op_create_hash`).
  hash_data: u64,
  graph: Arc<ModuleGraph>,
  jsx_import_source_config_resolver: Arc<JsxImportSourceConfigResolver>,
  // Incremental build info; loaded by `op_load`, updated by `op_emit`.
  maybe_tsbuildinfo: Option<String>,
  // Set by `op_respond`; read by `exec_request` when the script completes.
  maybe_response: Option<RespondArgs>,
  maybe_npm: Option<RequestNpmState>,
  // todo(dsherret): it looks like the remapped_specifiers and
  // root_map could be combined... what is the point of the separation?
  remapped_specifiers: HashMap<String, ModuleSpecifier>,
  root_map: HashMap<String, ModuleSpecifier>,
  current_dir: PathBuf,
}
impl Default for State {
  fn default() -> Self {
    Self {
      hash_data: Default::default(),
      // Manual impl is needed because the graph requires an explicit
      // (empty, `GraphKind::All`) construction.
      graph: Arc::new(ModuleGraph::new(GraphKind::All)),
      jsx_import_source_config_resolver: Default::default(),
      maybe_tsbuildinfo: Default::default(),
      maybe_response: Default::default(),
      maybe_npm: Default::default(),
      remapped_specifiers: Default::default(),
      root_map: Default::default(),
      current_dir: Default::default(),
    }
  }
}
impl State {
  /// Creates the per-request state installed into the op state by the
  /// `deno_cli_tsc` extension initializer.
  #[allow(clippy::too_many_arguments)]
  pub fn new(
    graph: Arc<ModuleGraph>,
    jsx_import_source_config_resolver: Arc<JsxImportSourceConfigResolver>,
    hash_data: u64,
    maybe_npm: Option<RequestNpmState>,
    maybe_tsbuildinfo: Option<String>,
    root_map: HashMap<String, ModuleSpecifier>,
    remapped_specifiers: HashMap<String, ModuleSpecifier>,
    current_dir: PathBuf,
  ) -> Self {
    State {
      hash_data,
      graph,
      jsx_import_source_config_resolver,
      maybe_npm,
      maybe_tsbuildinfo,
      maybe_response: None,
      remapped_specifiers,
      root_map,
      current_dir,
    }
  }
  /// Returns the remapped specifier for `specifier`, preferring entries
  /// added during resolution over the initial root map.
  pub fn maybe_remapped_specifier(
    &self,
    specifier: &str,
  ) -> Option<&ModuleSpecifier> {
    self
      .remapped_specifiers
      .get(specifier)
      .or_else(|| self.root_map.get(specifier))
  }
}
#[cfg(test)]
mod tests {
  use deno_ast::MediaType;
  use deno_core::OpState;
  use deno_core::futures::future;
  use deno_core::parking_lot::Mutex;
  use deno_core::serde_json;
  use deno_error::JsErrorBox;
  use deno_graph::GraphKind;
  use deno_graph::ModuleGraph;
  use deno_runtime::code_cache::CodeCacheType;
  use test_util::PathRef;
  use super::super::Diagnostic;
  use super::super::DiagnosticCategory;
  use super::*;
  use crate::args::CompilerOptions;
  use crate::tsc::MISSING_DEPENDENCY_SPECIFIER;
  use crate::tsc::get_lazily_loaded_asset;
  /// Loader that serves modules from fixture files whose names are derived
  /// from the specifier (scheme separators and slashes flattened).
  #[derive(Debug, Default)]
  pub struct MockLoader {
    pub fixtures: PathRef,
  }
  impl deno_graph::source::Loader for MockLoader {
    fn load(
      &self,
      specifier: &ModuleSpecifier,
      _options: deno_graph::source::LoadOptions,
    ) -> deno_graph::source::LoadFuture {
      // e.g. `https://deno.land/x/a.ts` -> `https_deno.land-x-a.ts`
      let specifier_text = specifier
        .to_string()
        .replace(":///", "_")
        .replace("://", "_")
        .replace('/', "-");
      let source_path = self.fixtures.join(specifier_text);
      let response = source_path
        .read_to_bytes_if_exists()
        .map(|c| {
          Some(deno_graph::source::LoadResponse::Module {
            specifier: specifier.clone(),
            mtime: None,
            maybe_headers: None,
            content: c.into(),
          })
        })
        .map_err(|e| {
          deno_graph::source::LoadError::Other(Arc::new(JsErrorBox::generic(
            e.to_string(),
          )))
        });
      Box::pin(future::ready(response))
    }
  }
  /// Builds an `OpState` containing a `State` whose graph was built from
  /// the `tsc2` fixtures, for exercising the ops directly.
  async fn setup(
    maybe_specifier: Option<ModuleSpecifier>,
    maybe_hash_data: Option<u64>,
    maybe_tsbuildinfo: Option<String>,
  ) -> OpState {
    let specifier = maybe_specifier
      .unwrap_or_else(|| ModuleSpecifier::parse("file:///main.ts").unwrap());
    let hash_data = maybe_hash_data.unwrap_or(0);
    let fixtures = test_util::testdata_path().join("tsc2");
    let loader = MockLoader { fixtures };
    let mut graph = ModuleGraph::new(GraphKind::TypesOnly);
    graph
      .build(vec![specifier], Vec::new(), &loader, Default::default())
      .await;
    let state = State::new(
      Arc::new(graph),
      Default::default(),
      hash_data,
      None,
      maybe_tsbuildinfo,
      HashMap::new(),
      HashMap::new(),
      std::env::current_dir()
        .context("Unable to get CWD")
        .unwrap(),
    );
    let mut op_state = OpState::new(None);
    op_state.put(state);
    op_state
  }
  async fn test_exec(
    specifier: &ModuleSpecifier,
  ) -> Result<Response, ExecError> {
    test_exec_with_cache(specifier, None).await
  }
  /// Runs a full type-check of `specifier` against the `tsc2` fixtures,
  /// optionally with a code cache plugged in.
  async fn test_exec_with_cache(
    specifier: &ModuleSpecifier,
    code_cache: Option<Arc<dyn deno_runtime::code_cache::CodeCache>>,
  ) -> Result<Response, ExecError> {
    let hash_data = 123; // something random
    let fixtures = test_util::testdata_path().join("tsc2");
    let loader = MockLoader { fixtures };
    let mut graph = ModuleGraph::new(GraphKind::TypesOnly);
    graph
      .build(
        vec![specifier.clone()],
        Vec::new(),
        &loader,
        Default::default(),
      )
      .await;
    let config = Arc::new(CompilerOptions::new(json!({
      "allowJs": true,
      "checkJs": false,
      "esModuleInterop": true,
      "emitDecoratorMetadata": false,
      "incremental": true,
      "jsx": "react",
      "jsxFactory": "React.createElement",
      "jsxFragmentFactory": "React.Fragment",
      "lib": ["deno.window"],
      "noEmit": true,
      "outDir": "internal:///",
      "strict": true,
      "target": "esnext",
      "tsBuildInfoFile": "internal:///.tsbuildinfo",
    })));
    let request = Request {
      config,
      debug: false,
      graph: Arc::new(graph),
      jsx_import_source_config_resolver: Default::default(),
      hash_data,
      maybe_npm: None,
      maybe_tsbuildinfo: None,
      root_names: vec![(specifier.clone(), MediaType::TypeScript)],
      check_mode: TypeCheckMode::All,
      initial_cwd: std::env::current_dir().unwrap(),
    };
    crate::tsc::exec(request, code_cache, None)
  }
  #[tokio::test]
  async fn test_create_hash() {
    let mut state = setup(None, Some(123), None).await;
    let actual = op_create_hash_inner(&mut state, "some sort of content");
    assert_eq!(actual, "11905938177474799758");
  }
  #[tokio::test]
  async fn test_hash_url() {
    let specifier = deno_core::resolve_url(
      "data:application/javascript,console.log(\"Hello%20Deno\");",
    )
    .unwrap();
    assert_eq!(
      crate::tsc::hash_url(&specifier, MediaType::JavaScript),
      "data:///d300ea0796bd72b08df10348e0b70514c021f2e45bfe59cec24e12e97cd79c58.js"
    );
  }
  #[tokio::test]
  async fn test_emit_tsbuildinfo() {
    let mut state = setup(None, None, None).await;
    let actual = op_emit_inner(
      &mut state,
      EmitArgs {
        data: "some file content".to_string(),
        file_name: "internal:///.tsbuildinfo".to_string(),
      },
    );
    assert!(actual);
    let state = state.borrow::<State>();
    assert_eq!(
      state.maybe_tsbuildinfo,
      Some("some file content".to_string())
    );
  }
  #[tokio::test]
  async fn test_load() {
    let mut state = setup(
      Some(ModuleSpecifier::parse("https://deno.land/x/mod.ts").unwrap()),
      None,
      Some("some content".to_string()),
    )
    .await;
    let actual =
      op_load_inner(&mut state, "https://deno.land/x/mod.ts").unwrap();
    assert_eq!(
      serde_json::to_value(actual).unwrap(),
      json!({
        "data": "console.log(\"hello deno\");\n",
        "version": "7821807483407828376",
        "scriptKind": 3,
        "isCjs": false,
      })
    );
  }
  #[tokio::test]
  async fn test_load_asset() {
    let mut state = setup(
      Some(ModuleSpecifier::parse("https://deno.land/x/mod.ts").unwrap()),
      None,
      Some("some content".to_string()),
    )
    .await;
    let actual = op_load_inner(&mut state, "asset:///lib.dom.d.ts")
      .expect("should have invoked op")
      .expect("load should have succeeded");
    let expected = get_lazily_loaded_asset("lib.dom.d.ts").unwrap();
    assert_eq!(actual.data.to_string(), expected.to_string());
    assert!(actual.version.is_some());
    assert_eq!(actual.script_kind, 3);
  }
  #[tokio::test]
  async fn test_load_tsbuildinfo() {
    let mut state = setup(
      Some(ModuleSpecifier::parse("https://deno.land/x/mod.ts").unwrap()),
      None,
      Some("some content".to_string()),
    )
    .await;
    let actual = op_load_inner(&mut state, "internal:///.tsbuildinfo")
      .expect("should have invoked op")
      .expect("load should have succeeded");
    assert_eq!(
      serde_json::to_value(actual).unwrap(),
      json!({
        "data": "some content",
        "version": null,
        "scriptKind": 0,
        "isCjs": false,
      })
    );
  }
  #[tokio::test]
  async fn test_load_missing_specifier() {
    let mut state = setup(None, None, None).await;
    let actual = op_load_inner(&mut state, "https://deno.land/x/mod.ts")
      .expect("should have invoked op");
    assert_eq!(serde_json::to_value(actual).unwrap(), json!(null));
  }
  #[tokio::test]
  async fn test_resolve() {
    let mut state = setup(
      Some(ModuleSpecifier::parse("https://deno.land/x/a.ts").unwrap()),
      None,
      None,
    )
    .await;
    let actual = op_resolve_inner(
      &mut state,
      ResolveArgs {
        base: "https://deno.land/x/a.ts",
        specifiers: vec![(false, "./b.ts".to_string())],
      },
    )
    .expect("should have invoked op");
    assert_eq!(
      actual,
      vec![("https://deno.land/x/b.ts".into(), Some(".ts"))]
    );
  }
  #[tokio::test]
  async fn test_resolve_empty() {
    let mut state = setup(
      Some(ModuleSpecifier::parse("https://deno.land/x/a.ts").unwrap()),
      None,
      None,
    )
    .await;
    // Unresolvable specifiers fall back to the missing-dependency stub.
    let actual = op_resolve_inner(
      &mut state,
      ResolveArgs {
        base: "https://deno.land/x/a.ts",
        specifiers: vec![(false, "./bad.ts".to_string())],
      },
    )
    .expect("should have not errored");
    assert_eq!(
      actual,
      vec![(MISSING_DEPENDENCY_SPECIFIER.into(), Some(".d.ts"))]
    );
  }
  #[tokio::test]
  async fn test_respond() {
    let mut state = setup(None, None, None).await;
    let args = serde_json::from_value(json!({
      "diagnostics": [
        {
          "messageText": "Unknown compiler option 'invalid'.",
          "category": 1,
          "code": 5023
        }
      ],
      "stats": [["a", 12]],
      "ambientModules": []
    }))
    .unwrap();
    op_respond_inner(&mut state, args);
    let state = state.borrow::<State>();
    assert_eq!(
      state.maybe_response,
      Some(RespondArgs {
        diagnostics: Diagnostics::new(vec![Diagnostic {
          category: DiagnosticCategory::Error,
          code: 5023,
          start: None,
          end: None,
          original_source_start: None,
          message_text: Some(
            "Unknown compiler option \'invalid\'.".to_string()
          ),
          message_chain: None,
          source: None,
          source_line: None,
          file_name: None,
          related_information: None,
          reports_deprecated: None,
          reports_unnecessary: None,
          other: Default::default(),
          missing_specifier: None,
        }]),
        ambient_modules: vec![],
        stats: Stats(vec![("a".to_string(), 12)])
      })
    );
  }
  #[tokio::test]
  async fn test_exec_basic() {
    let specifier = ModuleSpecifier::parse("https://deno.land/x/a.ts").unwrap();
    let actual = test_exec(&specifier)
      .await
      .expect("exec should not have errored");
    assert!(!actual.diagnostics.has_diagnostic());
    assert!(actual.maybe_tsbuildinfo.is_some());
    assert_eq!(actual.stats.0.len(), 12);
  }
  #[tokio::test]
  async fn test_exec_reexport_dts() {
    let specifier = ModuleSpecifier::parse("file:///reexports.ts").unwrap();
    let actual = test_exec(&specifier)
      .await
      .expect("exec should not have errored");
    assert!(!actual.diagnostics.has_diagnostic());
    assert!(actual.maybe_tsbuildinfo.is_some());
    assert_eq!(actual.stats.0.len(), 12);
  }
  #[tokio::test]
  async fn fix_lib_ref() {
    let specifier = ModuleSpecifier::parse("file:///libref.ts").unwrap();
    let actual = test_exec(&specifier)
      .await
      .expect("exec should not have errored");
    assert!(!actual.diagnostics.has_diagnostic());
  }
  pub type SpecifierWithType = (ModuleSpecifier, CodeCacheType);
  /// In-memory `CodeCache` that records hit/miss counts per specifier.
  #[derive(Default)]
  struct TestExtCodeCache {
    cache: Mutex<HashMap<(SpecifierWithType, u64), Vec<u8>>>,
    hits: Mutex<HashMap<SpecifierWithType, usize>>,
    misses: Mutex<HashMap<SpecifierWithType, usize>>,
  }
  impl deno_runtime::code_cache::CodeCache for TestExtCodeCache {
    fn get_sync(
      &self,
      specifier: &ModuleSpecifier,
      code_cache_type: CodeCacheType,
      source_hash: u64,
    ) -> Option<Vec<u8>> {
      let result = self
        .cache
        .lock()
        .get(&((specifier.clone(), code_cache_type), source_hash))
        .cloned();
      if result.is_some() {
        *self
          .hits
          .lock()
          .entry((specifier.clone(), code_cache_type))
          .or_default() += 1;
      } else {
        *self
          .misses
          .lock()
          .entry((specifier.clone(), code_cache_type))
          .or_default() += 1;
      }
      result
    }
    fn set_sync(
      &self,
      specifier: ModuleSpecifier,
      code_cache_type: CodeCacheType,
      source_hash: u64,
      data: &[u8],
    ) {
      self
        .cache
        .lock()
        .insert(((specifier, code_cache_type), source_hash), data.to_vec());
    }
  }
  #[tokio::test]
  async fn test_exec_code_cache() {
    let code_cache = Arc::new(TestExtCodeCache::default());
    let specifier = ModuleSpecifier::parse("https://deno.land/x/a.ts").unwrap();
    let actual = test_exec_with_cache(&specifier, Some(code_cache.clone()))
      .await
      .expect("exec should not have errored");
    assert!(!actual.diagnostics.has_diagnostic());
    let expect = [
      (
        "ext:deno_cli_tsc/99_main_compiler.js",
        CodeCacheType::EsModule,
      ),
      ("ext:deno_cli_tsc/98_lsp.js", CodeCacheType::EsModule),
      ("ext:deno_cli_tsc/97_ts_host.js", CodeCacheType::EsModule),
      ("ext:deno_cli_tsc/00_typescript.js", CodeCacheType::Script),
    ];
    {
      // First run: everything is a miss and gets populated.
      let mut files = HashMap::new();
      for (((specifier, ty), _), _) in code_cache.cache.lock().iter() {
        let specifier = specifier.to_string();
        if files.contains_key(&specifier) {
          panic!("should have only 1 entry per specifier");
        }
        files.insert(specifier, *ty);
      }
      // 99_main_compiler, 98_lsp, 97_ts_host, 00_typescript
      assert_eq!(files.len(), 4);
      assert_eq!(code_cache.hits.lock().len(), 0);
      assert_eq!(code_cache.misses.lock().len(), 4);
      for (specifier, ty) in &expect {
        assert_eq!(files.get(*specifier), Some(ty));
      }
      code_cache.hits.lock().clear();
      code_cache.misses.lock().clear();
    }
    {
      // Second run: every extension script should hit the cache.
      let _ = test_exec_with_cache(&specifier, Some(code_cache.clone()))
        .await
        .expect("exec should not have errored");
      // 99_main_compiler, 98_lsp, 97_ts_host, 00_typescript
      assert_eq!(code_cache.hits.lock().len(), 4);
      assert_eq!(code_cache.misses.lock().len(), 0);
      for (specifier, ty) in expect {
        let url = ModuleSpecifier::parse(specifier).unwrap();
        assert_eq!(code_cache.hits.lock().get(&(url, ty)), Some(&1));
      }
    }
  }
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/tsc/mod.rs | cli/tsc/mod.rs | // Copyright 2018-2025 the Deno authors. MIT license.
//
mod go;
mod js;
use std::collections::HashMap;
use std::collections::HashSet;
use std::fmt;
use std::io::ErrorKind;
use std::path::Path;
use std::path::PathBuf;
use std::sync::Arc;
use std::sync::LazyLock;
use std::sync::OnceLock;
use deno_ast::MediaType;
use deno_core::ModuleSpecifier;
use deno_core::serde::Deserialize;
use deno_core::serde::Deserializer;
use deno_core::serde::Serialize;
use deno_core::serde::Serializer;
use deno_core::url::Url;
use deno_graph::Module;
use deno_graph::ModuleGraph;
use deno_lib::util::checksum;
use deno_lib::util::hash::FastInsecureHasher;
use deno_resolver::deno_json::JsxImportSourceConfigResolver;
use deno_resolver::npm::ResolvePkgFolderFromDenoReqError;
use deno_resolver::npm::managed::ResolvePkgFolderFromDenoModuleError;
use deno_semver::npm::NpmPackageReqReference;
use indexmap::IndexMap;
use node_resolver::NodeResolutionKind;
use node_resolver::ResolutionMode;
use node_resolver::errors::NodeJsErrorCode;
use node_resolver::errors::NodeJsErrorCoded;
use node_resolver::errors::PackageSubpathFromDenoModuleResolveError;
use node_resolver::resolve_specifier_into_node_modules;
use once_cell::sync::Lazy;
use thiserror::Error;
use crate::args::CompilerOptions;
use crate::args::TypeCheckMode;
use crate::cache::ModuleInfoCache;
use crate::node::CliNodeResolver;
use crate::node::CliPackageJsonResolver;
use crate::npm::CliNpmResolver;
use crate::resolver::CliCjsTracker;
use crate::sys::CliSys;
use crate::util::path::mapped_specifier_for_tsc;
mod diagnostics;
pub use self::diagnostics::Diagnostic;
pub use self::diagnostics::DiagnosticCategory;
pub use self::diagnostics::Diagnostics;
pub use self::diagnostics::Position;
pub use self::go::ensure_tsgo;
pub use self::js::TscConstants;
/// Concatenates the built-in Deno lib `.d.ts` sources into one string,
/// joined by newlines, in the order listed below.
///
/// # Panics
/// Panics if any listed lib is missing from `LAZILY_LOADED_STATIC_ASSETS`
/// (a build-time invariant).
pub fn get_types_declaration_file_text() -> String {
  let lib_names = [
    "deno.ns",
    "deno.console",
    "deno.url",
    "deno.web",
    "deno.fetch",
    "deno.webgpu",
    "deno.websocket",
    "deno.webstorage",
    "deno.canvas",
    "deno.crypto",
    "deno.broadcast_channel",
    "deno.net",
    "deno.shared_globals",
    "deno.cache",
    "deno.window",
    "deno.unstable",
  ];
  let mut sources = Vec::with_capacity(lib_names.len());
  for name in lib_names {
    let file_name = format!("lib.{name}.d.ts");
    let asset = LAZILY_LOADED_STATIC_ASSETS
      .get(file_name.as_str())
      .unwrap();
    sources.push(asset.source.as_str());
  }
  sources.join("\n")
}
// Embeds an asset source, choosing the storage strategy per build config:
// `hmr` builds keep only the path and read the uncompressed file lazily at
// runtime, debug builds embed the uncompressed text directly, and release
// builds embed the zstd-compressed blob produced by build.rs.
macro_rules! maybe_compressed_source {
  ($file: expr) => {{ maybe_compressed_source!(compressed = $file, uncompressed = $file) }};
  (compressed = $comp: expr, uncompressed = $uncomp: expr) => {{
    #[cfg(feature = "hmr")]
    {
      StaticAssetSource::Owned(
        concat!(env!("CARGO_MANIFEST_DIR"), "/", $uncomp),
        std::sync::OnceLock::new(),
      )
    }
    #[cfg(not(feature = "hmr"))]
    {
      #[cfg(debug_assertions)]
      {
        StaticAssetSource::Uncompressed(include_str!(concat!(
          env!("CARGO_MANIFEST_DIR"),
          "/",
          $uncomp
        )))
      }
      #[cfg(not(debug_assertions))]
      {
        StaticAssetSource::Compressed(CompressedSource::new(include_bytes!(
          concat!(env!("OUT_DIR"), "/", $comp, ".zstd")
        )))
      }
    }
  }};
}
// Builds a `(name, StaticAsset)` pair for an asset under `tsc/dts/`.
macro_rules! maybe_compressed_static_asset {
  ($name: expr, $file: expr, $is_lib: literal) => {
    (
      $name,
      StaticAsset {
        is_lib: $is_lib,
        source: maybe_compressed_source!(concat!("tsc/dts/", $file)),
      },
    )
  };
  ($e: expr, $is_lib: literal) => {
    maybe_compressed_static_asset!($e, $e, $is_lib)
  };
}
// Shorthand for `maybe_compressed_static_asset!` with `is_lib: true`.
macro_rules! maybe_compressed_lib {
  ($name: expr, $file: expr) => {
    maybe_compressed_static_asset!($name, $file, true)
  };
  ($e: expr) => {
    maybe_compressed_lib!($e, $e)
  };
}
// Include the auto-generated node type libs macro
include!(concat!(env!("OUT_DIR"), "/node_types.rs"));
/// Where the text of a static asset comes from. Which variant is actually
/// constructed depends on build flags — see `maybe_compressed_source!`.
/// The unused variants in a given configuration are `allow(dead_code)`.
#[derive(Clone)]
pub enum StaticAssetSource {
  // zstd-compressed bytes embedded in the binary (release, non-hmr builds).
  #[cfg_attr(any(debug_assertions, feature = "hmr"), allow(dead_code))]
  Compressed(CompressedSource),
  // Plain text embedded in the binary (debug, non-hmr builds).
  #[allow(dead_code)]
  Uncompressed(&'static str),
  // Absolute file path plus a lazily-filled cache of the file's contents
  // (hmr builds only, so the file can be re-read after edits).
  #[cfg_attr(not(feature = "hmr"), allow(dead_code))]
  Owned(&'static str, std::sync::OnceLock<Arc<str>>),
}
impl StaticAssetSource {
  /// Returns the asset text, decompressing or reading from disk on first
  /// access as needed. Requires `&'static self` so the borrowed text can be
  /// handed out with a `'static` lifetime.
  ///
  /// Panics if an `Owned` path cannot be read (hmr builds only).
  pub fn as_str(&'static self) -> &'static str {
    match self {
      StaticAssetSource::Compressed(compressed_source) => {
        compressed_source.get()
      }
      StaticAssetSource::Uncompressed(src) => src,
      StaticAssetSource::Owned(path, cell) => {
        // Read the file once and cache it for subsequent calls.
        let str =
          cell.get_or_init(|| std::fs::read_to_string(path).unwrap().into());
        str.as_ref()
      }
    }
  }
}
/// A single lazily-loaded static asset (a `.d.ts` file shipped with the CLI).
pub struct StaticAsset {
  // True when this asset is a TypeScript "lib" file that should be listed
  // by `lib_names()`.
  pub is_lib: bool,
  pub source: StaticAssetSource,
}
/// Contains static assets that are not preloaded in the compiler snapshot.
///
/// We lazily load these because putting them in the compiler snapshot will
/// increase memory usage when not used (last time checked by about 0.5MB).
///
/// Keyed by asset name (e.g. `"lib.deno.ns.d.ts"`). Note the insertion order
/// is preserved (IndexMap) and determines iteration order in `lib_names()`.
pub static LAZILY_LOADED_STATIC_ASSETS: Lazy<
  IndexMap<&'static str, StaticAsset>,
> = Lazy::new(|| {
  Vec::from([
    // compressed in build.rs
    // Deno-specific libs: the map key uses `deno.<name>` while the on-disk
    // file uses `deno_<name>` — hence the two-argument form.
    maybe_compressed_lib!("lib.deno.console.d.ts", "lib.deno_console.d.ts"),
    maybe_compressed_lib!("lib.deno.url.d.ts", "lib.deno_url.d.ts"),
    maybe_compressed_lib!("lib.deno.web.d.ts", "lib.deno_web.d.ts"),
    maybe_compressed_lib!("lib.deno.fetch.d.ts", "lib.deno_fetch.d.ts"),
    maybe_compressed_lib!("lib.deno.websocket.d.ts", "lib.deno_websocket.d.ts"),
    maybe_compressed_lib!(
      "lib.deno.webstorage.d.ts",
      "lib.deno_webstorage.d.ts"
    ),
    maybe_compressed_lib!("lib.deno.canvas.d.ts", "lib.deno_canvas.d.ts"),
    maybe_compressed_lib!("lib.deno.crypto.d.ts", "lib.deno_crypto.d.ts"),
    maybe_compressed_lib!(
      "lib.deno.broadcast_channel.d.ts",
      "lib.deno_broadcast_channel.d.ts"
    ),
    maybe_compressed_lib!("lib.deno.net.d.ts", "lib.deno_net.d.ts"),
    maybe_compressed_lib!("lib.deno.cache.d.ts", "lib.deno_cache.d.ts"),
    maybe_compressed_lib!("lib.deno.webgpu.d.ts", "lib.deno_webgpu.d.ts"),
    maybe_compressed_lib!("lib.deno.window.d.ts"),
    maybe_compressed_lib!("lib.deno.worker.d.ts"),
    maybe_compressed_lib!("lib.deno.shared_globals.d.ts"),
    maybe_compressed_lib!("lib.deno.ns.d.ts"),
    maybe_compressed_lib!("lib.deno.unstable.d.ts"),
    // Standard TypeScript libs (key and file name are identical).
    maybe_compressed_lib!("lib.decorators.d.ts"),
    maybe_compressed_lib!("lib.decorators.legacy.d.ts"),
    maybe_compressed_lib!("lib.dom.asynciterable.d.ts"),
    maybe_compressed_lib!("lib.dom.d.ts"),
    maybe_compressed_lib!("lib.dom.extras.d.ts"),
    maybe_compressed_lib!("lib.dom.iterable.d.ts"),
    maybe_compressed_lib!("lib.es2015.collection.d.ts"),
    maybe_compressed_lib!("lib.es2015.core.d.ts"),
    maybe_compressed_lib!("lib.es2015.d.ts"),
    maybe_compressed_lib!("lib.es2015.generator.d.ts"),
    maybe_compressed_lib!("lib.es2015.iterable.d.ts"),
    maybe_compressed_lib!("lib.es2015.promise.d.ts"),
    maybe_compressed_lib!("lib.es2015.proxy.d.ts"),
    maybe_compressed_lib!("lib.es2015.reflect.d.ts"),
    maybe_compressed_lib!("lib.es2015.symbol.d.ts"),
    maybe_compressed_lib!("lib.es2015.symbol.wellknown.d.ts"),
    maybe_compressed_lib!("lib.es2016.array.include.d.ts"),
    maybe_compressed_lib!("lib.es2016.d.ts"),
    maybe_compressed_lib!("lib.es2016.full.d.ts"),
    maybe_compressed_lib!("lib.es2016.intl.d.ts"),
    maybe_compressed_lib!("lib.es2017.arraybuffer.d.ts"),
    maybe_compressed_lib!("lib.es2017.d.ts"),
    maybe_compressed_lib!("lib.es2017.date.d.ts"),
    maybe_compressed_lib!("lib.es2017.full.d.ts"),
    maybe_compressed_lib!("lib.es2017.intl.d.ts"),
    maybe_compressed_lib!("lib.es2017.object.d.ts"),
    maybe_compressed_lib!("lib.es2017.sharedmemory.d.ts"),
    maybe_compressed_lib!("lib.es2017.string.d.ts"),
    maybe_compressed_lib!("lib.es2017.typedarrays.d.ts"),
    maybe_compressed_lib!("lib.es2018.asyncgenerator.d.ts"),
    maybe_compressed_lib!("lib.es2018.asynciterable.d.ts"),
    maybe_compressed_lib!("lib.es2018.d.ts"),
    maybe_compressed_lib!("lib.es2018.full.d.ts"),
    maybe_compressed_lib!("lib.es2018.intl.d.ts"),
    maybe_compressed_lib!("lib.es2018.promise.d.ts"),
    maybe_compressed_lib!("lib.es2018.regexp.d.ts"),
    maybe_compressed_lib!("lib.es2019.array.d.ts"),
    maybe_compressed_lib!("lib.es2019.d.ts"),
    maybe_compressed_lib!("lib.es2019.full.d.ts"),
    maybe_compressed_lib!("lib.es2019.intl.d.ts"),
    maybe_compressed_lib!("lib.es2019.object.d.ts"),
    maybe_compressed_lib!("lib.es2019.string.d.ts"),
    maybe_compressed_lib!("lib.es2019.symbol.d.ts"),
    maybe_compressed_lib!("lib.es2020.bigint.d.ts"),
    maybe_compressed_lib!("lib.es2020.d.ts"),
    maybe_compressed_lib!("lib.es2020.date.d.ts"),
    maybe_compressed_lib!("lib.es2020.full.d.ts"),
    maybe_compressed_lib!("lib.es2020.intl.d.ts"),
    maybe_compressed_lib!("lib.es2020.number.d.ts"),
    maybe_compressed_lib!("lib.es2020.promise.d.ts"),
    maybe_compressed_lib!("lib.es2020.sharedmemory.d.ts"),
    maybe_compressed_lib!("lib.es2020.string.d.ts"),
    maybe_compressed_lib!("lib.es2020.symbol.wellknown.d.ts"),
    maybe_compressed_lib!("lib.es2021.d.ts"),
    maybe_compressed_lib!("lib.es2021.full.d.ts"),
    maybe_compressed_lib!("lib.es2021.intl.d.ts"),
    maybe_compressed_lib!("lib.es2021.promise.d.ts"),
    maybe_compressed_lib!("lib.es2021.string.d.ts"),
    maybe_compressed_lib!("lib.es2021.weakref.d.ts"),
    maybe_compressed_lib!("lib.es2022.array.d.ts"),
    maybe_compressed_lib!("lib.es2022.d.ts"),
    maybe_compressed_lib!("lib.es2022.error.d.ts"),
    maybe_compressed_lib!("lib.es2022.full.d.ts"),
    maybe_compressed_lib!("lib.es2022.intl.d.ts"),
    maybe_compressed_lib!("lib.es2022.object.d.ts"),
    maybe_compressed_lib!("lib.es2022.regexp.d.ts"),
    maybe_compressed_lib!("lib.es2022.string.d.ts"),
    maybe_compressed_lib!("lib.es2023.array.d.ts"),
    maybe_compressed_lib!("lib.es2023.collection.d.ts"),
    maybe_compressed_lib!("lib.es2023.d.ts"),
    maybe_compressed_lib!("lib.es2023.full.d.ts"),
    maybe_compressed_lib!("lib.es2023.intl.d.ts"),
    maybe_compressed_lib!("lib.es2024.arraybuffer.d.ts"),
    maybe_compressed_lib!("lib.es2024.collection.d.ts"),
    maybe_compressed_lib!("lib.es2024.d.ts"),
    maybe_compressed_lib!("lib.es2024.full.d.ts"),
    maybe_compressed_lib!("lib.es2024.object.d.ts"),
    maybe_compressed_lib!("lib.es2024.promise.d.ts"),
    maybe_compressed_lib!("lib.es2024.regexp.d.ts"),
    maybe_compressed_lib!("lib.es2024.sharedmemory.d.ts"),
    maybe_compressed_lib!("lib.es2024.string.d.ts"),
    maybe_compressed_lib!("lib.es5.d.ts"),
    maybe_compressed_lib!("lib.es6.d.ts"),
    maybe_compressed_lib!("lib.esnext.array.d.ts"),
    maybe_compressed_lib!("lib.esnext.collection.d.ts"),
    maybe_compressed_lib!("lib.esnext.d.ts"),
    maybe_compressed_lib!("lib.esnext.decorators.d.ts"),
    maybe_compressed_lib!("lib.esnext.disposable.d.ts"),
    maybe_compressed_lib!("lib.esnext.error.d.ts"),
    maybe_compressed_lib!("lib.esnext.float16.d.ts"),
    maybe_compressed_lib!("lib.esnext.full.d.ts"),
    maybe_compressed_lib!("lib.esnext.intl.d.ts"),
    maybe_compressed_lib!("lib.esnext.iterator.d.ts"),
    maybe_compressed_lib!("lib.esnext.promise.d.ts"),
    maybe_compressed_lib!("lib.esnext.sharedmemory.d.ts"),
    maybe_compressed_lib!("lib.node.d.ts"),
    maybe_compressed_lib!("lib.scripthost.d.ts"),
    maybe_compressed_lib!("lib.webworker.asynciterable.d.ts"),
    maybe_compressed_lib!("lib.webworker.d.ts"),
    maybe_compressed_lib!("lib.webworker.importscripts.d.ts"),
    maybe_compressed_lib!("lib.webworker.iterable.d.ts"),
    (
      // Special file that can be used to inject the @types/node package.
      // This is used for `node:` specifiers.
      "reference_types_node.d.ts",
      StaticAsset {
        is_lib: false,
        source: StaticAssetSource::Uncompressed(
          // causes either the built-in node types to be used or it
          // prefers the @types/node if it exists
          "/// <reference lib=\"node\" />\n/// <reference types=\"npm:@types/node\" />\n",
        ),
      },
    ),
  ])
  .into_iter()
  // Append the auto-generated node type libs (see `node_types.rs` include).
  .chain(node_type_libs!())
  .collect()
});
pub fn lib_names() -> Vec<String> {
let mut out =
Vec::with_capacity(crate::tsc::LAZILY_LOADED_STATIC_ASSETS.len());
for (key, value) in crate::tsc::LAZILY_LOADED_STATIC_ASSETS.iter() {
if !value.is_lib {
continue;
}
let lib = key
.replace("lib.", "")
.replace(".d.ts", "")
.replace("deno_", "deno.");
out.push(lib);
}
out
}
/// A structure representing stats from a type check operation for a graph.
///
/// Stored as ordered `(name, count)` pairs; serialized as a plain array of
/// tuples (see the manual serde impls below).
#[derive(Clone, Debug, Default, Eq, PartialEq)]
pub struct Stats(pub Vec<(String, u32)>);
impl<'de> Deserialize<'de> for Stats {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
let items: Vec<(String, u32)> = Deserialize::deserialize(deserializer)?;
Ok(Stats(items))
}
}
// Serialized transparently as the inner Vec of `(name, count)` tuples.
impl Serialize for Stats {
  fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
  where
    S: Serializer,
  {
    self.0.serialize(serializer)
  }
}
// Human-readable listing of the stats, one indented `key: value` per line.
impl fmt::Display for Stats {
  fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
    writeln!(f, "Compilation statistics:")?;
    // Iterate by reference; the previous implementation cloned the entire
    // Vec (and every String in it) on each format call for no benefit.
    for (key, value) in &self.0 {
      writeln!(f, "  {key}: {value}")?;
    }
    Ok(())
  }
}
/// Retrieves the text of a static asset included in the binary, or `None`
/// when no asset is registered under that name.
fn get_lazily_loaded_asset(asset: &str) -> Option<&'static str> {
  let entry = LAZILY_LOADED_STATIC_ASSETS.get(asset)?;
  Some(entry.source.as_str())
}
/// Hashes the source text with the given extra data when a source is
/// present; `None` passes through.
fn get_maybe_hash(
  maybe_source: Option<&str>,
  hash_data: u64,
) -> Option<String> {
  let source = maybe_source?;
  Some(get_hash(source, hash_data))
}
/// Produces a fast, non-cryptographic hash (as a decimal string) combining
/// the source text with extra discriminating data.
fn get_hash(source: &str, hash_data: u64) -> String {
  let digest = FastInsecureHasher::new_without_deno_version()
    .write_str(source)
    .write_u64(hash_data)
    .finish();
  digest.to_string()
}
/// Hash the URL so it can be sent to `tsc` in a supportable way: the path is
/// replaced by its checksum and a media-type extension is appended so tsc
/// treats the module correctly.
fn hash_url(specifier: &ModuleSpecifier, media_type: MediaType) -> String {
  let path_hash = checksum::r#gen(&[specifier.path().as_bytes()]);
  let scheme = specifier.scheme();
  let ext = media_type.as_ts_extension();
  format!("{scheme}:///{path_hash}{ext}")
}
/// A file emitted by the compiler, together with the specifiers it was
/// produced for and its media type.
#[derive(Debug, Clone, Default, Eq, PartialEq)]
#[allow(dead_code)]
pub struct EmittedFile {
  pub data: String,
  pub maybe_specifiers: Option<Vec<ModuleSpecifier>>,
  pub media_type: MediaType,
}
/// Pairs a specifier with the media type inferred from it. A `None`
/// specifier falls back to the missing-dependency placeholder with a `.d.ts`
/// media type so tsc still has something to load.
pub fn into_specifier_and_media_type(
  specifier: Option<ModuleSpecifier>,
) -> (ModuleSpecifier, MediaType) {
  if let Some(specifier) = specifier {
    let media_type = MediaType::from_specifier(&specifier);
    return (specifier, media_type);
  }
  (
    Url::parse(MISSING_DEPENDENCY_SPECIFIER).unwrap(),
    MediaType::Dts,
  )
}
/// CJS detection for type checking: wraps the regular CJS tracker with the
/// module info cache so ESM/script analysis results can be reused.
#[derive(Debug)]
pub struct TypeCheckingCjsTracker {
  cjs_tracker: Arc<CliCjsTracker>,
  module_info_cache: Arc<ModuleInfoCache>,
}
impl TypeCheckingCjsTracker {
  pub fn new(
    cjs_tracker: Arc<CliCjsTracker>,
    module_info_cache: Arc<ModuleInfoCache>,
  ) -> Self {
    Self {
      cjs_tracker,
      module_info_cache,
    }
  }

  /// Whether the module should be treated as CommonJS for type checking.
  ///
  /// Tries, in order:
  /// 1. analyze the source to find out if it is a script (vs. ESM) and ask
  ///    the tracker with that knowledge;
  /// 2. if either step fails, fall back to the tracker's best guess from the
  ///    specifier/media type alone, defaulting to `false` (ESM) on error.
  pub fn is_cjs(
    &self,
    specifier: &ModuleSpecifier,
    media_type: MediaType,
    code: &Arc<str>,
  ) -> bool {
    let maybe_is_script = self
      .module_info_cache
      .as_module_analyzer()
      .analyze_sync(specifier, media_type, code)
      .ok()
      .map(|info| info.is_script);
    maybe_is_script
      .and_then(|is_script| {
        self
          .cjs_tracker
          .is_cjs_with_known_is_script(specifier, media_type, is_script)
          .ok()
      })
      .unwrap_or_else(|| {
        // analysis failed — fall back to specifier/media-type heuristics
        self
          .cjs_tracker
          .is_maybe_cjs(specifier, media_type)
          .unwrap_or(false)
      })
  }

  /// Delegates to the underlying tracker when the caller already knows
  /// whether the module is a script.
  pub fn is_cjs_with_known_is_script(
    &self,
    specifier: &ModuleSpecifier,
    media_type: MediaType,
    is_script: bool,
  ) -> Result<bool, node_resolver::errors::PackageJsonLoadError> {
    self
      .cjs_tracker
      .is_cjs_with_known_is_script(specifier, media_type, is_script)
  }
}
/// npm-related state handed to tsc ops; present only when the graph uses
/// npm packages.
#[derive(Debug)]
pub struct RequestNpmState {
  pub cjs_tracker: Arc<TypeCheckingCjsTracker>,
  pub node_resolver: Arc<CliNodeResolver>,
  pub npm_resolver: CliNpmResolver,
  pub package_json_resolver: Arc<CliPackageJsonResolver>,
}
/// A structure representing a request to be sent to the tsc runtime.
#[derive(Debug)]
pub struct Request {
  /// The TypeScript compiler options which will be serialized and sent to
  /// tsc.
  pub config: Arc<CompilerOptions>,
  /// Indicates to the tsc runtime if debug logging should occur.
  pub debug: bool,
  pub graph: Arc<ModuleGraph>,
  pub jsx_import_source_config_resolver: Arc<JsxImportSourceConfigResolver>,
  // Extra data mixed into source hashes (e.g. to invalidate caches).
  pub hash_data: u64,
  // `Some` when the graph involves npm packages.
  pub maybe_npm: Option<RequestNpmState>,
  // Previously cached incremental build info, if any.
  pub maybe_tsbuildinfo: Option<String>,
  /// A vector of strings that represent the root/entry point modules for the
  /// program.
  pub root_names: Vec<(ModuleSpecifier, MediaType)>,
  pub check_mode: TypeCheckMode,
  pub initial_cwd: PathBuf,
}
/// The result of a tsc exec request.
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct Response {
  /// Any diagnostics that have been returned from the checker.
  pub diagnostics: Diagnostics,
  /// If there was any build info associated with the exec request.
  pub maybe_tsbuildinfo: Option<String>,
  // Ambient module names declared in the program.
  pub ambient_modules: Vec<String>,
  /// Statistics from the check.
  pub stats: Stats,
}
/// Maps a media type to TypeScript's `ts.ScriptKind` numeric value:
/// 1 = JS, 2 = JSX, 3 = TS, 4 = TSX, 6 = JSON, 0 = Unknown.
pub fn as_ts_script_kind(media_type: MediaType) -> i32 {
  match media_type {
    MediaType::JavaScript | MediaType::Mjs | MediaType::Cjs => 1,
    MediaType::Jsx => 2,
    MediaType::TypeScript
    | MediaType::Mts
    | MediaType::Cts
    | MediaType::Dts
    | MediaType::Dmts
    | MediaType::Dcts => 3,
    MediaType::Tsx => 4,
    MediaType::Json => 6,
    // Everything tsc has no script kind for.
    MediaType::SourceMap
    | MediaType::Css
    | MediaType::Html
    | MediaType::Jsonc
    | MediaType::Json5
    | MediaType::Sql
    | MediaType::Wasm
    | MediaType::Unknown => 0,
  }
}
/// Placeholder specifier used in place of a dependency that could not be
/// resolved, so tsc is still given a loadable module reference.
pub const MISSING_DEPENDENCY_SPECIFIER: &str =
  "internal:///missing_dependency.d.ts";
/// Errors that can occur while loading a module's source for tsc
/// (`op_load` and friends).
#[derive(Debug, Error, deno_error::JsError)]
pub enum LoadError {
  #[class(generic)]
  #[error("Unable to load {path}: {error}")]
  LoadFromNodeModule { path: String, error: std::io::Error },
  #[class(inherit)]
  #[error("{0}")]
  ResolveUrlOrPathError(#[from] deno_path_util::ResolveUrlOrPathError),
  #[class(inherit)]
  #[error("Error converting a string module specifier for \"op_resolve\": {0}")]
  ModuleResolution(#[from] deno_core::ModuleResolutionError),
  #[class(inherit)]
  #[error("{0}")]
  ClosestPkgJson(#[from] node_resolver::errors::PackageJsonLoadError),
}
/// For a raw-import specifier (see `get_specifier_raw_import`), returns a
/// synthesized declaration-file source describing the imported value's
/// type; `None` when the specifier is not a raw import.
pub fn load_raw_import_source(specifier: &Url) -> Option<&'static str> {
  match get_specifier_raw_import(specifier)? {
    RawImportKind::Bytes => {
      Some("const data: Uint8Array<ArrayBuffer>;\nexport default data;\n")
    }
    RawImportKind::Text => {
      Some("export const data: string;\nexport default data;\n")
    }
  }
}
/// The kind of a raw import (`import ... with { type: "bytes" | "text" }`),
/// as encoded in the specifier fragment.
enum RawImportKind {
  Bytes,
  Text,
}
/// We store the raw import kind in the fragment of the Url
/// like `#denoRawImport=text`. This is necessary because
/// TypeScript can't handle different modules at the same
/// specifier.
fn get_specifier_raw_import(specifier: &Url) -> Option<RawImportKind> {
  // this is purposefully relaxed about matching in order to keep the
  // code less complex. If someone is doing something to cause this to
  // incorrectly match then they most likely deserve the bug they sought.
  let fragment = specifier.fragment()?;
  let (_, value) = fragment.split_once("denoRawImport=")?;
  if value.starts_with("text") {
    Some(RawImportKind::Text)
  } else if value.starts_with("bytes") {
    Some(RawImportKind::Bytes)
  } else {
    None
  }
}
/// Errors that can occur while resolving specifiers for tsc (`op_resolve`).
#[derive(Debug, Error, deno_error::JsError)]
pub enum ResolveError {
  #[class(inherit)]
  #[error("Error converting a string module specifier for \"op_resolve\": {0}")]
  ModuleResolution(#[from] deno_core::ModuleResolutionError),
  #[class(inherit)]
  #[error(transparent)]
  FilePathToUrl(#[from] deno_path_util::PathToUrlError),
  #[class(inherit)]
  #[error("{0}")]
  PackageSubpathResolve(PackageSubpathFromDenoModuleResolveError),
  #[class(inherit)]
  #[error("{0}")]
  ResolveUrlOrPathError(#[from] deno_path_util::ResolveUrlOrPathError),
  #[class(inherit)]
  #[error("{0}")]
  ResolvePkgFolderFromDenoModule(#[from] ResolvePkgFolderFromDenoModuleError),
  #[class(inherit)]
  #[error("{0}")]
  ResolveNonGraphSpecifierTypes(#[from] ResolveNonGraphSpecifierTypesError),
  #[class(inherit)]
  #[error("{0}")]
  ResolvePkgFolderFromDenoReq(#[from] ResolvePkgFolderFromDenoReqError),
}
/// Arguments passed from tsc's `op_resolve`.
#[derive(Debug, Deserialize, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct ResolveArgs<'a> {
  /// The base specifier that the supplied specifier strings should be resolved
  /// relative to.
  pub base: &'a str,
  /// A list of specifiers that should be resolved.
  /// (is_cjs: bool, raw_specifier: String)
  pub specifiers: Vec<(bool, String)>,
}
/// Resolves the type-checking specifier and media type for a module that is
/// (expected to be) in the module graph.
///
/// Returns `Ok(None)` when the specifier is absent from the graph or has no
/// types to offer (e.g. built-in `node:` modules).
fn resolve_graph_specifier_types(
  specifier: &ModuleSpecifier,
  referrer: &ModuleSpecifier,
  resolution_mode: ResolutionMode,
  graph: &ModuleGraph,
  maybe_npm: Option<&RequestNpmState>,
) -> Result<Option<(ModuleSpecifier, MediaType)>, ResolveError> {
  let maybe_module = match graph.try_get(specifier) {
    Ok(Some(module)) => Some(module),
    Ok(None) => None,
    Err(err) => match err.as_kind() {
      // an unsupported media type still has a usable specifier/media type
      // for tsc's purposes, so surface it instead of erroring
      deno_graph::ModuleErrorKind::UnsupportedMediaType {
        specifier,
        media_type,
        ..
      } => {
        return Ok(Some((specifier.clone(), *media_type)));
      }
      _ => None,
    },
  };
  // follow the types reference directive, which may be pointing at an npm package
  let maybe_module = match maybe_module {
    Some(Module::Js(module)) => {
      let maybe_types_dep = module
        .maybe_types_dependency
        .as_ref()
        .map(|d| &d.dependency);
      match maybe_types_dep.and_then(|d| d.maybe_specifier()) {
        Some(specifier) => graph.get(specifier),
        _ => maybe_module,
      }
    }
    maybe_module => maybe_module,
  };
  // now get the types from the resolved module
  match maybe_module {
    Some(Module::Js(module)) => {
      Ok(Some((module.specifier.clone(), module.media_type)))
    }
    Some(Module::Json(module)) => {
      Ok(Some((module.specifier.clone(), module.media_type)))
    }
    Some(Module::Wasm(module)) => {
      // Wasm modules are surfaced to tsc via a generated .d.mts interface
      Ok(Some((module.specifier.clone(), MediaType::Dmts)))
    }
    Some(Module::Npm(_)) => {
      if let Some(npm) = maybe_npm
        && let Ok(req_ref) = NpmPackageReqReference::from_specifier(specifier)
      {
        let package_folder = npm
          .npm_resolver
          .resolve_pkg_folder_from_deno_module_req(req_ref.req(), referrer)?;
        let res_result =
          npm.node_resolver.resolve_package_subpath_from_deno_module(
            &package_folder,
            req_ref.sub_path(),
            Some(referrer),
            resolution_mode,
            NodeResolutionKind::Types,
          );
        // not-found errors fall back to the missing-dependency placeholder
        // (via `into_specifier_and_media_type(None)`); anything else is fatal
        let maybe_url = match res_result {
          Ok(path_or_url) => Some(path_or_url.into_url()?),
          Err(err) => match err.code() {
            NodeJsErrorCode::ERR_MODULE_NOT_FOUND
            | NodeJsErrorCode::ERR_TYPES_NOT_FOUND => None,
            _ => return Err(ResolveError::PackageSubpathResolve(err)),
          },
        };
        Ok(Some(into_specifier_and_media_type(maybe_url)))
      } else {
        Ok(None)
      }
    }
    Some(Module::External(module)) => {
      // we currently only use "External" for when the module is in an npm package
      Ok(maybe_npm.map(|_| {
        let specifier = resolve_specifier_into_node_modules(
          &CliSys::default(),
          &module.specifier,
        );
        into_specifier_and_media_type(Some(specifier))
      }))
    }
    Some(Module::Node(_)) | None => Ok(None),
  }
}
/// Errors from `resolve_non_graph_specifier_types`.
#[derive(Debug, Error, deno_error::JsError)]
pub enum ResolveNonGraphSpecifierTypesError {
  #[class(inherit)]
  #[error(transparent)]
  FilePathToUrl(#[from] deno_path_util::PathToUrlError),
  #[class(inherit)]
  #[error(transparent)]
  ResolvePkgFolderFromDenoReq(#[from] ResolvePkgFolderFromDenoReqError),
  #[class(inherit)]
  #[error(transparent)]
  PackageSubpathResolve(#[from] PackageSubpathFromDenoModuleResolveError),
}
/// Resolves types for a specifier that is NOT part of the module graph.
/// Only npm-backed resolution is supported: either node resolution when the
/// referrer is itself inside an npm package, or direct `npm:` requirement
/// resolution otherwise. Returns `Ok(None)` when npm isn't in play.
fn resolve_non_graph_specifier_types(
  raw_specifier: &str,
  referrer: &ModuleSpecifier,
  resolution_mode: ResolutionMode,
  maybe_npm: Option<&RequestNpmState>,
) -> Result<
  Option<(ModuleSpecifier, MediaType)>,
  ResolveNonGraphSpecifierTypesError,
> {
  let npm = match maybe_npm {
    Some(npm) => npm,
    None => return Ok(None), // we only support non-graph types for npm packages
  };
  let node_resolver = &npm.node_resolver;
  if node_resolver.in_npm_package(referrer) {
    // we're in an npm package, so use node resolution
    Ok(Some(into_specifier_and_media_type(
      node_resolver
        .resolve(
          raw_specifier,
          referrer,
          resolution_mode,
          NodeResolutionKind::Types,
        )
        .and_then(|res| res.into_url())
        // resolution failure falls back to the missing-dependency placeholder
        .ok(),
    )))
  } else {
    match NpmPackageReqReference::from_str(raw_specifier) {
      Ok(npm_req_ref) => {
        debug_assert_eq!(resolution_mode, ResolutionMode::Import);
        // This could occur when resolving npm:@types/node when it is
        // injected and not part of the graph
        let package_folder =
          npm.npm_resolver.resolve_pkg_folder_from_deno_module_req(
            npm_req_ref.req(),
            referrer,
          )?;
        let res_result = node_resolver
          .resolve_package_subpath_from_deno_module(
            &package_folder,
            npm_req_ref.sub_path(),
            Some(referrer),
            resolution_mode,
            NodeResolutionKind::Types,
          );
        // not-found errors degrade to the placeholder; others propagate
        let maybe_url = match res_result {
          Ok(url_or_path) => Some(url_or_path.into_url()?),
          Err(err) => match err.code() {
            NodeJsErrorCode::ERR_MODULE_NOT_FOUND
            | NodeJsErrorCode::ERR_TYPES_NOT_FOUND => None,
            _ => return Err(err.into()),
          },
        };
        Ok(Some(into_specifier_and_media_type(maybe_url)))
      }
      _ => Ok(None),
    }
  }
}
/// Errors from executing a type-check request (JS tsc or tsgo backend).
#[derive(Debug, Error, deno_error::JsError)]
pub enum ExecError {
  #[class(generic)]
  #[error("The response for the exec request was not set.")]
  ResponseNotSet,
  #[class(inherit)]
  #[error(transparent)]
  Js(Box<deno_core::error::JsError>),
  #[class(inherit)]
  #[error(transparent)]
  Go(#[from] go::ExecError),
}
/// A zstd-compressed embedded source with a lazily-populated cache of the
/// decompressed text, so decompression happens at most once.
#[derive(Clone)]
pub(crate) struct CompressedSource {
  // Length-prefixed zstd blob (see `decompress_source` for the format).
  bytes: &'static [u8],
  uncompressed: OnceLock<Arc<str>>,
}
impl CompressedSource {
  // Only constructed in release non-hmr builds (see
  // `maybe_compressed_source!`), hence the dead_code allowance elsewhere.
  #[cfg_attr(any(debug_assertions, feature = "hmr"), allow(dead_code))]
  pub(crate) const fn new(bytes: &'static [u8]) -> Self {
    Self {
      bytes,
      uncompressed: OnceLock::new(),
    }
  }

  /// Returns the decompressed text, decompressing on first access.
  pub(crate) fn get(&self) -> &str {
    self
      .uncompressed
      .get_or_init(|| decompress_source(self.bytes))
      .as_ref()
  }
}
// The JS sources that make up the tsc runtime, each embedded compressed,
// uncompressed, or path-referenced depending on the build (see
// `maybe_compressed_source!`).
pub(crate) static MAIN_COMPILER_SOURCE: StaticAssetSource =
  maybe_compressed_source!("tsc/99_main_compiler.js");
pub(crate) static LSP_SOURCE: StaticAssetSource =
  maybe_compressed_source!("tsc/98_lsp.js");
pub(crate) static TS_HOST_SOURCE: StaticAssetSource =
  maybe_compressed_source!("tsc/97_ts_host.js");
pub(crate) static TYPESCRIPT_SOURCE: StaticAssetSource =
  maybe_compressed_source!("tsc/00_typescript.js");
/// Decompresses an embedded source blob produced by build.rs: a 4-byte
/// little-endian uncompressed length followed by zstd data. Panics on a
/// malformed blob, which would indicate a broken build.
pub(crate) fn decompress_source(contents: &[u8]) -> Arc<str> {
  let (len_prefix, compressed) = contents.split_at(4);
  let expected_len = u32::from_le_bytes(len_prefix.try_into().unwrap());
  let bytes =
    zstd::bulk::decompress(compressed, expected_len as usize).unwrap();
  String::from_utf8(bytes).unwrap().into()
}
/// Execute a request on the supplied snapshot, returning a response which
/// contains information, like any emitted files, diagnostics, statistics and
/// optionally an updated TypeScript build info.
///
/// Dispatches to the tsgo backend when `maybe_tsgo_path` is provided,
/// otherwise to the snapshot-based JS tsc (`code_cache` only applies to the
/// JS path).
#[allow(clippy::result_large_err)]
pub fn exec(
  request: Request,
  code_cache: Option<Arc<dyn deno_runtime::code_cache::CodeCache>>,
  maybe_tsgo_path: Option<&Path>,
) -> Result<Response, ExecError> {
  // tsc cannot handle root specifiers that don't have one of the "acceptable"
  // extensions. Therefore, we have to check the root modules against their
  // extensions and remap any that are unacceptable to tsc and add them to the
  // op state so when requested, we can remap to the original specifier.
  let mut root_map = HashMap::new();
  let mut remapped_specifiers = HashMap::new();
  log::debug!("exec request, root_names: {:?}", request.root_names);
  let root_names: Vec<String> = request
    .root_names
    .iter()
    .map(|(s, mt)| match s.scheme() {
      // data/blob URLs are replaced with a hashed internal specifier
      "data" | "blob" => {
        let specifier_str = hash_url(s, *mt);
        remapped_specifiers.insert(specifier_str.clone(), s.clone());
        specifier_str
      }
      // "file" if tsgo => {
      //   let specifier_str = s.to_string();
      //   let out = specifier_str.strip_prefix("file://").unwrap().to_string();
      //   remapped_specifiers.insert(out.to_string(), s.clone());
      //   out
      // }
      _ => {
        if let Some(new_specifier) = mapped_specifier_for_tsc(s, *mt) {
          root_map.insert(new_specifier.clone(), s.clone());
          new_specifier
        } else {
          s.to_string()
        }
      }
    })
    .collect();
  if let Some(tsgo_path) = maybe_tsgo_path {
    go::exec_request(
      request,
      root_names,
      root_map,
      remapped_specifiers,
      tsgo_path,
    )
  } else {
    js::exec_request(
      request,
      root_names,
      root_map,
      remapped_specifiers,
      code_cache,
    )
  }
}
pub fn resolve_specifier_for_tsc(
specifier: String,
referrer: &ModuleSpecifier,
graph: &ModuleGraph,
resolution_mode: ResolutionMode,
maybe_npm: Option<&RequestNpmState>,
referrer_module: Option<&Module>,
remapped_specifiers: &mut HashMap<String, ModuleSpecifier>,
) -> Result<(String, Option<&'static str>), ResolveError> {
if specifier.starts_with("node:") {
return Ok((
MISSING_DEPENDENCY_SPECIFIER.to_string(),
Some(MediaType::Dts.as_ts_extension()),
));
}
if specifier.starts_with("asset:///") {
let ext = MediaType::from_str(&specifier).as_ts_extension();
return Ok((specifier, Some(ext)));
}
let resolved_dep = referrer_module
.and_then(|m| match m {
Module::Js(m) => m.dependencies_prefer_fast_check().get(&specifier),
Module::Json(_) => None,
Module::Wasm(m) => m.dependencies.get(&specifier),
Module::Npm(_) | Module::Node(_) | Module::External(_) => None,
})
.and_then(|d| d.maybe_type.ok().or_else(|| d.maybe_code.ok()));
let maybe_result = match resolved_dep {
Some(deno_graph::ResolutionResolved { specifier, .. }) => {
resolve_graph_specifier_types(
specifier,
referrer,
// we could get this from the resolved dep, but for now assume
// the value resolved in TypeScript is better
resolution_mode,
graph,
maybe_npm,
)?
}
_ => {
match resolve_non_graph_specifier_types(
&specifier,
referrer,
resolution_mode,
maybe_npm,
) {
Ok(maybe_result) => maybe_result,
Err(
err
@ ResolveNonGraphSpecifierTypesError::ResolvePkgFolderFromDenoReq(
ResolvePkgFolderFromDenoReqError::Managed(_),
),
) => {
// it's most likely requesting the jsxImportSource, which isn't loaded
// into the graph when not using jsx, so just ignore this error
if specifier.ends_with("/jsx-runtime")
// ignore in order to support attempt to load when it doesn't exist
|| specifier == "npm:@types/node"
{
None
} else {
return Err(err.into());
}
}
Err(err) => return Err(err.into()),
}
}
};
let result = match maybe_result {
Some((specifier, media_type)) => {
let specifier_str = match specifier.scheme() {
"data" | "blob" => {
let specifier_str = hash_url(&specifier, media_type);
remapped_specifiers.insert(specifier_str.clone(), specifier);
specifier_str
}
_ => {
if let Some(specifier_str) =
mapped_specifier_for_tsc(&specifier, media_type)
{
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | true |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/tsc/go/setup.rs | cli/tsc/go/setup.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::path::Path;
use std::path::PathBuf;
use std::sync::Arc;
use std::sync::OnceLock;
use deno_core::error::AnyError;
use deno_error::JsErrorBox;
use sha2::Digest;
use super::tsgo_version;
use crate::cache::DenoDir;
use crate::http_util::HttpClientProvider;
/// Builds the release-archive URL for the pinned typescript-go version and
/// the given platform string (e.g. `"linux-x64"`).
fn get_download_url(platform: &str) -> String {
  let base = tsgo_version::DOWNLOAD_BASE_URL;
  let version = tsgo_version::VERSION;
  format!("{base}/typescript-go-{version}-{platform}.zip")
}
/// Everything that can go wrong while locating, downloading, verifying, or
/// unpacking the typescript-go binary.
#[derive(Debug, thiserror::Error, deno_error::JsError)]
#[class(generic)]
pub enum DownloadError {
  #[error("unsupported platform for typescript-go: {0}")]
  UnsupportedPlatform(String),
  #[error("invalid download url: {0}")]
  InvalidDownloadUrl(String, #[source] deno_core::url::ParseError),
  #[error("failed to unpack typescript-go: {0}")]
  UnpackFailed(#[source] AnyError),
  #[error("failed to rename or copy typescript-go from {0} to {1}: {2}")]
  RenameOrCopyFailed(String, String, #[source] std::io::Error),
  #[error("failed to write zip file to {0}: {1}")]
  WriteZipFailed(String, #[source] std::io::Error),
  #[error("failed to download typescript-go: {0}")]
  DownloadFailed(#[source] crate::http_util::DownloadError),
  #[error("{0}")]
  HttpClient(#[source] JsErrorBox),
  #[error("failed to create temp directory: {0}")]
  CreateTempDirFailed(#[source] std::io::Error),
  #[error("hash mismatch: expected {0}, got {1}")]
  HashMismatch(String, String),
  // Raised when DENO_TSGO_PATH points at a nonexistent file.
  #[error("binary not found: {0}")]
  BinaryNotFound(String),
}
/// Checks the downloaded archive's SHA-256 digest against the hash pinned in
/// `tsgo_version` for the given platform.
///
/// Panics on an unknown platform string or a non-sha256 pin — both are
/// programmer errors (the pins are also validated at compile time).
fn verify_hash(platform: &str, data: &[u8]) -> Result<(), DownloadError> {
  let pinned = match platform {
    "windows-x64" => tsgo_version::HASHES.windows_x64,
    "macos-x64" => tsgo_version::HASHES.macos_x64,
    "macos-arm64" => tsgo_version::HASHES.macos_arm64,
    "linux-x64" => tsgo_version::HASHES.linux_x64,
    "linux-arm64" => tsgo_version::HASHES.linux_arm64,
    _ => unreachable!(),
  };
  // Pins look like "sha256:<hex>".
  let (algorithm, expected) = pinned.split_once(':').unwrap();
  if algorithm != "sha256" {
    panic!("Hash algorithm is not sha256");
  }
  let digest = sha2::Sha256::digest(data);
  let actual = faster_hex::hex_string(&digest);
  if actual == expected {
    Ok(())
  } else {
    Err(DownloadError::HashMismatch(expected.to_string(), actual))
  }
}
/// Ensures the typescript-go binary is available locally, downloading and
/// verifying it on first use, and returns its path.
///
/// Resolution order:
/// 1. a previously-resolved path cached in a process-wide `OnceLock`;
/// 2. the `DENO_TSGO_PATH` env var (errors if it points nowhere);
/// 3. an already-downloaded binary under the deno dir's download folder;
/// 4. download the platform archive, verify its pinned sha256, unpack into a
///    temp dir, and move it into place.
pub async fn ensure_tsgo(
  deno_dir: &DenoDir,
  http_client_provider: Arc<HttpClientProvider>,
) -> Result<&'static PathBuf, DownloadError> {
  static TSGO_PATH: OnceLock<PathBuf> = OnceLock::new();
  if let Some(bin_path) = TSGO_PATH.get() {
    return Ok(bin_path);
  }
  if let Ok(tsgo_path) = std::env::var("DENO_TSGO_PATH") {
    let tsgo_path = Path::new(&tsgo_path);
    if tsgo_path.exists() {
      return Ok(TSGO_PATH.get_or_init(|| PathBuf::from(tsgo_path)));
    } else {
      return Err(DownloadError::BinaryNotFound(
        tsgo_path.to_string_lossy().into_owned(),
      ));
    }
  }
  let platform = match (std::env::consts::OS, std::env::consts::ARCH) {
    ("windows", "x86_64") => "windows-x64",
    ("macos", "x86_64") => "macos-x64",
    ("macos", "aarch64") => "macos-arm64",
    ("linux", "x86_64") => "linux-x64",
    ("linux", "aarch64") => "linux-arm64",
    _ => {
      return Err(DownloadError::UnsupportedPlatform(format!(
        "{} {}",
        std::env::consts::OS,
        std::env::consts::ARCH
      )));
    }
  };
  // Version-stamped folder so upgrades trigger a fresh download.
  let folder_path = deno_dir
    .dl_folder_path()
    .join(format!("tsgo-{}", tsgo_version::VERSION));
  let bin_path = folder_path.join(format!(
    "tsgo-{}{}",
    platform,
    if cfg!(windows) { ".exe" } else { "" }
  ));
  if bin_path.exists() {
    return Ok(TSGO_PATH.get_or_init(|| bin_path));
  }
  std::fs::create_dir_all(&folder_path)
    .map_err(DownloadError::CreateTempDirFailed)?;
  let client = http_client_provider
    .get_or_create()
    .map_err(DownloadError::HttpClient)?;
  let download_url = get_download_url(platform);
  log::debug!("Downloading tsgo from {}", download_url);
  // Download + unpack into a temp dir first; only the final rename/copy
  // publishes the binary at its expected path.
  let temp = tempfile::tempdir().map_err(DownloadError::CreateTempDirFailed)?;
  let path = temp.path().join("tsgo.zip");
  log::debug!("Downloading tsgo to {}", path.display());
  let data = client
    .download(
      deno_core::url::Url::parse(&download_url)
        .map_err(|e| DownloadError::InvalidDownloadUrl(download_url, e))?,
    )
    .await
    .map_err(DownloadError::DownloadFailed)?;
  // Verify before anything is written to the final location.
  verify_hash(platform, &data)?;
  std::fs::write(&path, &data).map_err(|e| {
    DownloadError::WriteZipFailed(path.display().to_string(), e)
  })?;
  log::debug!(
    "Unpacking tsgo from {} to {}",
    path.display(),
    temp.path().display()
  );
  let unpacked_path =
    crate::util::archive::unpack_into_dir(crate::util::archive::UnpackArgs {
      exe_name: "tsgo",
      archive_name: "tsgo.zip",
      archive_data: &data,
      is_windows: cfg!(windows),
      dest_path: temp.path(),
    })
    .map_err(DownloadError::UnpackFailed)?;
  // Rename can fail across filesystems (temp dir vs. deno dir); fall back
  // to a copy in that case.
  std::fs::rename(&unpacked_path, &bin_path)
    .or_else(|_| std::fs::copy(&unpacked_path, &bin_path).map(|_| ()))
    .map_err(|e| {
      DownloadError::RenameOrCopyFailed(
        unpacked_path.to_string_lossy().into_owned(),
        bin_path.to_string_lossy().into_owned(),
        e,
      )
    })?;
  Ok(TSGO_PATH.get_or_init(|| bin_path))
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/tsc/go/tsgo_version.rs | cli/tsc/go/tsgo_version.rs | // Copyright 2018-2025 the Deno authors. MIT license.
// This file is auto-generated by tools/update_tsgo.ts
// DO NOT EDIT THIS FILE MANUALLY
// NOTE(review): this file is auto-generated by tools/update_tsgo.ts; any
// comment changes here should really be made in that generator's template.
/// The pinned `sha256:<hex>` archive hashes for each supported platform.
pub struct Hashes {
  pub windows_x64: &'static str,
  pub macos_x64: &'static str,
  pub macos_arm64: &'static str,
  pub linux_x64: &'static str,
  pub linux_arm64: &'static str,
}
impl Hashes {
  /// All pinned hashes, used by the compile-time prefix check below.
  pub const fn all(&self) -> [&'static str; 5] {
    [
      self.windows_x64,
      self.macos_x64,
      self.macos_arm64,
      self.linux_x64,
      self.linux_arm64,
    ]
  }
}
// Pinned typescript-go release data (auto-generated by tools/update_tsgo.ts).
pub const VERSION: &str = "0.1.13";
pub const DOWNLOAD_BASE_URL: &str =
  "https://github.com/denoland/typescript-go/releases/download/v0.1.13";
pub const HASHES: Hashes = Hashes {
  windows_x64: "sha256:0d884056a42af3943a2d9b17eb862f199a5b763d132506036205853ffd499142",
  macos_x64: "sha256:5d34443bd0af95debbc9edbfea1ae6d45d78af26a6da1fe5f547b5824b63c2d0",
  macos_arm64: "sha256:e93c671629e6929cb0b3ebad2a8834641b77e4953c3038a50b29daeb2f36e070",
  linux_x64: "sha256:dd29cd82d8f89bc70d23011e9cc687d7d4d9631ef395ffeac43f605ab8409255",
  linux_arm64: "sha256:bf6380027b63e3758cdd29be7d35daaa02371ce60e285b7da3d37a2c3f950fc0",
};
// Compile-time assertion that every pinned hash begins with the literal
// bytes "sha256" — `verify_hash` in setup.rs relies on this prefix. Written
// as index loops because `const` evaluation cannot use iterators.
const _: () = {
  let sha256 = "sha256".as_bytes();
  let mut i = 0;
  let hashes = HASHES.all();
  while i < hashes.len() {
    let hash = hashes[i].as_bytes();
    let mut j = 0;
    while j < 6 {
      if hash[j] != sha256[j] {
        panic!("Hash algorithm is not sha256");
      }
      j += 1;
    }
    i += 1;
  }
};
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/standalone/mod.rs | cli/standalone/mod.rs | // Copyright 2018-2025 the Deno authors. MIT license.
pub mod binary;
mod virtual_fs;
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/standalone/binary.rs | cli/standalone/binary.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::borrow::Cow;
use std::cell::Cell;
use std::collections::HashMap;
use std::collections::VecDeque;
use std::env;
use std::ffi::OsString;
use std::fs;
use std::fs::File;
use std::path::Component;
use std::path::Path;
use std::path::PathBuf;
use capacity_builder::BytesAppendable;
use deno_ast::MediaType;
use deno_ast::ModuleKind;
use deno_ast::ModuleSpecifier;
use deno_cache_dir::CACHE_PERM;
use deno_core::anyhow::Context;
use deno_core::anyhow::bail;
use deno_core::error::AnyError;
use deno_core::serde_json;
use deno_core::url::Url;
use deno_graph::ModuleGraph;
use deno_lib::args::CaData;
use deno_lib::args::UnstableConfig;
use deno_lib::shared::ReleaseChannel;
use deno_lib::standalone::binary::CjsExportAnalysisEntry;
use deno_lib::standalone::binary::MAGIC_BYTES;
use deno_lib::standalone::binary::Metadata;
use deno_lib::standalone::binary::NodeModules;
use deno_lib::standalone::binary::RemoteModuleEntry;
use deno_lib::standalone::binary::SerializedResolverWorkspaceJsrPackage;
use deno_lib::standalone::binary::SerializedWorkspaceResolver;
use deno_lib::standalone::binary::SerializedWorkspaceResolverImportMap;
use deno_lib::standalone::binary::SpecifierDataStore;
use deno_lib::standalone::binary::SpecifierId;
use deno_lib::standalone::virtual_fs::BuiltVfs;
use deno_lib::standalone::virtual_fs::DENO_COMPILE_GLOBAL_NODE_MODULES_DIR_NAME;
use deno_lib::standalone::virtual_fs::VfsBuilder;
use deno_lib::standalone::virtual_fs::VfsEntry;
use deno_lib::standalone::virtual_fs::VirtualDirectory;
use deno_lib::standalone::virtual_fs::VirtualDirectoryEntries;
use deno_lib::standalone::virtual_fs::WindowsSystemRootablePath;
use deno_lib::util::hash::FastInsecureHasher;
use deno_lib::util::text_encoding::is_valid_utf8;
use deno_lib::util::v8::construct_v8_flags;
use deno_lib::version::DENO_VERSION_INFO;
use deno_npm::NpmSystemInfo;
use deno_npm::resolution::SerializedNpmResolutionSnapshot;
use deno_npm::resolution::ValidSerializedNpmResolutionSnapshot;
use deno_path_util::fs::atomic_write_file_with_retries;
use deno_path_util::url_from_directory_path;
use deno_path_util::url_to_file_path;
use deno_resolver::file_fetcher::FetchLocalOptions;
use deno_resolver::file_fetcher::FetchOptions;
use deno_resolver::file_fetcher::FetchPermissionsOptionRef;
use deno_resolver::workspace::WorkspaceResolver;
use deno_semver::npm::NpmPackageReqReference;
use indexmap::IndexMap;
use node_resolver::analyze::ResolvedCjsAnalysis;
use super::virtual_fs::output_vfs;
use crate::args::CliOptions;
use crate::args::CompileFlags;
use crate::args::get_default_v8_flags;
use crate::cache::DenoDir;
use crate::file_fetcher::CliFileFetcher;
use crate::http_util::HttpClientProvider;
use crate::module_loader::CliEmitter;
use crate::node::CliCjsModuleExportAnalyzer;
use crate::npm::CliNpmResolver;
use crate::resolver::CliCjsTracker;
use crate::sys::CliSys;
use crate::util::archive;
use crate::util::progress_bar::ProgressBar;
use crate::util::progress_bar::ProgressBarStyle;
/// A URL that can be designated as the base for relative URLs.
///
/// After creation, this URL may be used to get the key for a
/// module in the binary.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum StandaloneRelativeFileBaseUrl<'a> {
  /// No usable base directory (Windows system root), so file
  /// specifiers are keyed by their absolute path instead.
  WindowsSystemRoot,
  /// A concrete `file:` directory URL that descendant file
  /// specifiers are made relative to.
  Path(&'a Url),
}
impl StandaloneRelativeFileBaseUrl<'_> {
  /// Gets the module map key of the provided specifier.
  ///
  /// * Descendant file specifiers will be made relative to the base.
  /// * Non-descendant file specifiers will stay as-is (absolute).
  /// * Non-file specifiers will stay as-is.
  pub fn specifier_key<'b>(&self, target: &'b Url) -> Cow<'b, str> {
    // Only file: URLs are ever relativized.
    if target.scheme() != "file" {
      return Cow::Borrowed(target.as_str());
    }
    let Self::Path(base) = self else {
      // No base directory available — key by the absolute path.
      return Cow::Borrowed(target.path());
    };
    let Some(relative) = base.make_relative(target) else {
      return Cow::Borrowed(target.as_str());
    };
    // This is not a great scenario to have because it means that the
    // specifier is outside the vfs and could cause the binary to act
    // strangely. If you encounter this, the fix is to add more paths
    // to the vfs builder by calling `add_possible_min_root_dir`.
    debug_assert!(
      !relative.starts_with("../"),
      "{} -> {} ({})",
      base.as_str(),
      target.as_str(),
      relative,
    );
    Cow::Owned(relative)
  }
}
/// Interns specifiers, assigning each distinct URL a sequential
/// `SpecifierId` (insertion order is preserved by the `IndexMap`).
struct SpecifierStore<'a> {
  data: IndexMap<&'a Url, SpecifierId>,
}
impl<'a> SpecifierStore<'a> {
  /// Creates a store pre-sized for `capacity` specifiers.
  pub fn with_capacity(capacity: usize) -> Self {
    Self {
      data: IndexMap::with_capacity(capacity),
    }
  }

  /// Returns the id already assigned to `specifier`, interning it
  /// with the next sequential id when it hasn't been seen before.
  pub fn get_or_add(&mut self, specifier: &'a Url) -> SpecifierId {
    // Ids are handed out in insertion order, so the next free id is
    // always the current number of entries.
    let next_id = SpecifierId::new(self.data.len() as u32);
    *self.data.entry(specifier).or_insert(next_id)
  }

  /// Converts every interned specifier into its module-map key
  /// relative to `base_url`, ready for the binary data section.
  pub fn for_serialization(
    self,
    base_url: &StandaloneRelativeFileBaseUrl<'a>,
  ) -> SpecifierStoreForSerialization<'a> {
    let data = self
      .data
      .into_iter()
      .map(|(specifier, id)| (base_url.specifier_key(specifier), id))
      .collect();
    SpecifierStoreForSerialization { data }
  }
}
/// Specifier table with keys already relativized to the VFS root,
/// in the form written into the binary data section.
struct SpecifierStoreForSerialization<'a> {
  data: Vec<(Cow<'a, str>, SpecifierId)>,
}
impl<'a> BytesAppendable<'a> for &'a SpecifierStoreForSerialization<'a> {
  /// Writes the specifier table in its binary wire format: a u32
  /// little-endian entry count, then per entry a u32 LE key length,
  /// the UTF-8 key bytes, and the serialized id. NOTE(review): the
  /// field order here is presumably mirrored by a deserializer in
  /// deno_lib — keep it stable; confirm before changing.
  fn append_to_builder<TBytes: capacity_builder::BytesType>(
    self,
    builder: &mut capacity_builder::BytesBuilder<'a, TBytes>,
  ) {
    builder.append_le(self.data.len() as u32);
    for (specifier_str, id) in &self.data {
      builder.append_le(specifier_str.len() as u32);
      builder.append(specifier_str.as_ref());
      builder.append(*id);
    }
  }
}
/// Returns whether the file at `exe_path` looks like a compiled
/// standalone binary (an ELF, PE, or Mach-O executable image).
/// Unreadable paths are treated as "not a standalone binary".
pub fn is_standalone_binary(exe_path: &Path) -> bool {
  match std::fs::read(exe_path) {
    Ok(data) => {
      // NOTE(review): an empty file is also treated as a standalone
      // binary here — presumably intentional; confirm with callers.
      data.is_empty()
        || libsui::utils::is_elf(&data)
        || libsui::utils::is_pe(&data)
        || libsui::utils::is_macho(&data)
    }
    Err(_) => false,
  }
}
/// Inputs for [`DenoCompileBinaryWriter::write_bin`].
pub struct WriteBinOptions<'a> {
  /// Open handle for the executable file being produced.
  pub writer: File,
  /// File name used only for user-facing output (e.g. the VFS summary).
  pub display_output_filename: &'a str,
  /// Fully-resolved module graph to embed in the binary.
  pub graph: &'a ModuleGraph,
  /// Main module the produced binary will execute.
  pub entrypoint: &'a ModuleSpecifier,
  /// Extra files to bundle into the virtual file system.
  pub include_paths: &'a [ModuleSpecifier],
  /// Paths excluded from the virtual file system.
  pub exclude_paths: Vec<PathBuf>,
  pub compile_flags: &'a CompileFlags,
}
/// Produces `deno compile` executables by combining a downloaded
/// denort base binary with a serialized data section containing the
/// module graph, npm packages, and a virtual file system.
pub struct DenoCompileBinaryWriter<'a> {
  cjs_module_export_analyzer: &'a CliCjsModuleExportAnalyzer,
  cjs_tracker: &'a CliCjsTracker,
  cli_options: &'a CliOptions,
  deno_dir: &'a DenoDir,
  emitter: &'a CliEmitter,
  file_fetcher: &'a CliFileFetcher,
  http_client_provider: &'a HttpClientProvider,
  npm_resolver: &'a CliNpmResolver,
  workspace_resolver: &'a WorkspaceResolver<CliSys>,
  // Target system to resolve npm packages for (may differ from the
  // host when cross-compiling).
  npm_system_info: NpmSystemInfo,
}
impl<'a> DenoCompileBinaryWriter<'a> {
  /// Creates a writer borrowing the CLI services it needs; no work is
  /// performed until [`Self::write_bin`] is called.
  #[allow(clippy::too_many_arguments)]
  pub fn new(
    cjs_module_export_analyzer: &'a CliCjsModuleExportAnalyzer,
    cjs_tracker: &'a CliCjsTracker,
    cli_options: &'a CliOptions,
    deno_dir: &'a DenoDir,
    emitter: &'a CliEmitter,
    file_fetcher: &'a CliFileFetcher,
    http_client_provider: &'a HttpClientProvider,
    npm_resolver: &'a CliNpmResolver,
    workspace_resolver: &'a WorkspaceResolver<CliSys>,
    npm_system_info: NpmSystemInfo,
  ) -> Self {
    Self {
      cjs_module_export_analyzer,
      cjs_tracker,
      cli_options,
      deno_dir,
      emitter,
      file_fetcher,
      http_client_provider,
      npm_resolver,
      workspace_resolver,
      npm_system_info,
    }
  }
  /// Builds the standalone executable: fetches the denort base binary
  /// for the compile target, applies Windows-only flags, then appends
  /// the serialized program data.
  ///
  /// # Errors
  /// Fails when a Windows-only flag (`--no-terminal`, `--icon`) is
  /// used for a non-Windows target, or when any download, analysis,
  /// or serialization step fails.
  pub async fn write_bin(
    &self,
    options: WriteBinOptions<'_>,
  ) -> Result<(), AnyError> {
    // Select base binary based on target
    let mut original_binary =
      self.get_base_binary(options.compile_flags).await?;
    if options.compile_flags.no_terminal {
      let target = options.compile_flags.resolve_target();
      if !target.contains("windows") {
        bail!(
          "The `--no-terminal` flag is only available when targeting Windows (current: {})",
          target,
        )
      }
      // Mark the PE as a GUI application so no console window opens.
      set_windows_binary_to_gui(&mut original_binary)
        .context("Setting windows binary to GUI.")?;
    }
    if options.compile_flags.icon.is_some() {
      let target = options.compile_flags.resolve_target();
      if !target.contains("windows") {
        bail!(
          "The `--icon` flag is only available when targeting Windows (current: {})",
          target,
        );
      }
      // NOTE(review): only validated here — the icon itself is
      // presumably applied later in the write path; confirm.
    }
    self.write_standalone_binary(options, original_binary).await
  }
  /// Obtains the denort base binary for the compile target,
  /// preferring in order: a developer override path, a previously
  /// downloaded archive cached in the deno dir, and finally a fresh
  /// download. The archive is unpacked into a temporary directory,
  /// read into memory, and the temporary directory removed.
  async fn get_base_binary(
    &self,
    compile_flags: &CompileFlags,
  ) -> Result<Vec<u8>, AnyError> {
    // Used for testing.
    //
    // Phase 2 of the 'min sized' deno compile RFC talks
    // about adding this as a flag.
    if let Some(path) = get_dev_binary_path() {
      log::debug!("Resolved denort: {}", path.to_string_lossy());
      return std::fs::read(&path).with_context(|| {
        format!("Could not find denort at '{}'", path.to_string_lossy())
      });
    }
    let target = compile_flags.resolve_target();
    let binary_name = format!("denort-{target}.zip");
    // Canary builds are addressed by git hash, releases by version.
    let binary_path_suffix = match DENO_VERSION_INFO.release_channel {
      ReleaseChannel::Canary => {
        format!("canary/{}/{}", DENO_VERSION_INFO.git_hash, binary_name)
      }
      _ => {
        format!("release/v{}/{}", DENO_VERSION_INFO.deno, binary_name)
      }
    };
    let download_directory = self.deno_dir.dl_folder_path();
    let binary_path = download_directory.join(&binary_path_suffix);
    log::debug!("Resolved denort: {}", binary_path.display());
    let read_file = |path: &Path| -> Result<Vec<u8>, AnyError> {
      std::fs::read(path).with_context(|| format!("Reading {}", path.display()))
    };
    // Use the cached archive when present; otherwise download (which
    // also writes it into the cache for next time).
    let archive_data = if binary_path.exists() {
      read_file(&binary_path)?
    } else {
      self
        .download_base_binary(&binary_path, &binary_path_suffix)
        .await
        .context("Setting up base binary.")?
    };
    let temp_dir = tempfile::TempDir::new()?;
    let base_binary_path = archive::unpack_into_dir(archive::UnpackArgs {
      exe_name: "denort",
      archive_name: &binary_name,
      archive_data: &archive_data,
      is_windows: target.contains("windows"),
      dest_path: temp_dir.path(),
    })?;
    let base_binary = read_file(&base_binary_path)?;
    drop(temp_dir); // delete the temp dir
    Ok(base_binary)
  }
  /// Downloads the base binary archive from dl.deno.land with a
  /// progress bar and retries, caches it at `output_path` (written
  /// atomically so a concurrent process never observes a partial
  /// file), and returns the archive bytes.
  async fn download_base_binary(
    &self,
    output_path: &Path,
    binary_path_suffix: &str,
  ) -> Result<Vec<u8>, AnyError> {
    let download_url = format!("https://dl.deno.land/{binary_path_suffix}");
    let response = {
      let progress_bars = ProgressBar::new(ProgressBarStyle::DownloadBars);
      let progress = progress_bars.update(&download_url);
      self
        .http_client_provider
        .get_or_create()?
        .download_with_progress_and_retries(
          download_url.parse()?,
          &Default::default(),
          &progress,
        )
        .await?
    };
    let bytes = response
      .into_bytes()
      .with_context(|| format!("Failed downloading '{}'", download_url))?;
    let create_dir_all = |dir: &Path| {
      std::fs::create_dir_all(dir)
        .with_context(|| format!("Creating {}", dir.display()))
    };
    create_dir_all(output_path.parent().unwrap())?;
    atomic_write_file_with_retries(
      &CliSys::default(),
      output_path,
      &bytes,
      CACHE_PERM,
    )
    .with_context(|| format!("Writing {}", output_path.display()))?;
    Ok(bytes)
  }
  /// This function creates a standalone deno binary by appending a
  /// serialized data section and magic trailer to the provided denort
  /// base binary (`original_bin`).
  ///
  /// Pipeline: build the virtual file system (npm packages, included
  /// paths, local modules with transpiled sources and CJS export
  /// analysis), collect remote modules and redirects, assemble the
  /// runtime `Metadata`, serialize everything into one data section,
  /// and write the final executable to `options.writer`.
  #[allow(clippy::too_many_arguments)]
  async fn write_standalone_binary(
    &self,
    options: WriteBinOptions<'_>,
    original_bin: Vec<u8>,
  ) -> Result<(), AnyError> {
    let WriteBinOptions {
      writer,
      display_output_filename,
      graph,
      entrypoint,
      include_paths,
      exclude_paths,
      compile_flags,
    } = options;
    // Resolve certificate authority data so it can be embedded.
    let ca_data = match self.cli_options.ca_data() {
      Some(CaData::File(ca_file)) => Some(
        std::fs::read(ca_file).with_context(|| format!("Reading {ca_file}"))?,
      ),
      Some(CaData::Bytes(bytes)) => Some(bytes.clone()),
      None => None,
    };
    let mut vfs = VfsBuilder::new();
    for path in exclude_paths {
      vfs.add_exclude_path(path);
    }
    // Snapshot npm resolution (subset to only the referenced packages
    // when lazy caching is enabled) and copy packages into the vfs.
    let npm_snapshot = match &self.npm_resolver {
      CliNpmResolver::Managed(managed) => {
        if graph.npm_packages.is_empty() {
          None
        } else {
          let snapshot = managed.resolution().snapshot();
          let snapshot = if self.cli_options.unstable_npm_lazy_caching() {
            let reqs = graph
              .specifiers()
              .filter_map(|(s, _)| {
                NpmPackageReqReference::from_specifier(s)
                  .ok()
                  .map(|req_ref| req_ref.into_inner().req)
              })
              .collect::<Vec<_>>();
            snapshot.subset(&reqs)
          } else {
            snapshot
          }
          .as_valid_serialized_for_system(&self.npm_system_info);
          if !snapshot.as_serialized().packages.is_empty() {
            self
              .fill_npm_vfs(&mut vfs, Some(&snapshot))
              .context("Building npm vfs.")?;
            Some(snapshot)
          } else {
            None
          }
        }
      }
      CliNpmResolver::Byonm(_) => {
        self.fill_npm_vfs(&mut vfs, None)?;
        None
      }
    };
    for include_file in include_paths {
      let path = deno_path_util::url_to_file_path(include_file)?;
      vfs
        .add_path(&path)
        .with_context(|| format!("Including {}", path.display()))?;
    }
    let specifiers_count = graph.specifiers_count();
    let mut specifier_store = SpecifierStore::with_capacity(specifiers_count);
    let mut remote_modules_store =
      SpecifierDataStore::with_capacity(specifiers_count);
    let mut asset_module_urls = graph.asset_module_urls();
    // todo(dsherret): transpile and analyze CJS in parallel
    for module in graph.modules() {
      if module.specifier().scheme() == "data" {
        continue; // don't store data urls as an entry as they're in the code
      }
      let mut maybe_source_map = None;
      let mut maybe_transpiled = None;
      let mut maybe_cjs_analysis = None;
      let (maybe_original_source, media_type) = match module {
        deno_graph::Module::Js(m) => {
          let specifier = &m.specifier;
          let original_bytes = match m.source.try_get_original_bytes() {
            Some(bytes) => bytes,
            None => self.load_asset_bypass_permissions(specifier).await?.source,
          };
          // Record export analysis for modules that might be CJS so
          // the runtime doesn't need to re-analyze them.
          if self.cjs_tracker.is_maybe_cjs(specifier, m.media_type)? {
            if self.cjs_tracker.is_cjs_with_known_is_script(
              specifier,
              m.media_type,
              m.is_script,
            )? {
              let cjs_analysis = self
                .cjs_module_export_analyzer
                .analyze_all_exports(
                  module.specifier(),
                  Some(Cow::Borrowed(m.source.text.as_ref())),
                )
                .await?;
              maybe_cjs_analysis = Some(match cjs_analysis {
                ResolvedCjsAnalysis::Esm(_) => CjsExportAnalysisEntry::Esm,
                ResolvedCjsAnalysis::Cjs(exports) => {
                  CjsExportAnalysisEntry::Cjs(
                    exports.into_iter().collect::<Vec<_>>(),
                  )
                }
              });
            } else {
              maybe_cjs_analysis = Some(CjsExportAnalysisEntry::Esm);
            }
          }
          if m.media_type.is_emittable() {
            let module_kind = match maybe_cjs_analysis.as_ref() {
              Some(CjsExportAnalysisEntry::Cjs(_)) => ModuleKind::Cjs,
              _ => ModuleKind::Esm,
            };
            let (source, source_map) =
              self.emitter.emit_source_for_deno_compile(
                &m.specifier,
                m.media_type,
                module_kind,
                &m.source.text,
              )?;
            // Only store a transpiled copy when it actually differs
            // from the original source.
            if source != m.source.text.as_ref() {
              maybe_source_map = Some(source_map.into_bytes());
              maybe_transpiled = Some(source.into_bytes());
            }
          }
          (Some(original_bytes), m.media_type)
        }
        deno_graph::Module::Json(m) => {
          let original_bytes = match m.source.try_get_original_bytes() {
            Some(bytes) => bytes,
            None => {
              self
                .load_asset_bypass_permissions(&m.specifier)
                .await?
                .source
            }
          };
          (Some(original_bytes), m.media_type)
        }
        deno_graph::Module::Wasm(m) => {
          (Some(m.source.clone()), MediaType::Wasm)
        }
        deno_graph::Module::Npm(_)
        | deno_graph::Module::Node(_)
        | deno_graph::Module::External(_) => (None, MediaType::Unknown),
      };
      // Local files go into the vfs; remote modules into their own
      // data store keyed by interned specifier id.
      if let Some(original_source) = maybe_original_source {
        asset_module_urls.swap_remove(module.specifier());
        let maybe_cjs_export_analysis = maybe_cjs_analysis
          .as_ref()
          .map(bincode::serialize)
          .transpose()?;
        if module.specifier().scheme() == "file" {
          let file_path = deno_path_util::url_to_file_path(module.specifier())?;
          vfs
            .add_file_with_data(
              &file_path,
              deno_lib::standalone::virtual_fs::AddFileDataOptions {
                data: original_source.to_vec(),
                maybe_transpiled,
                maybe_source_map,
                maybe_cjs_export_analysis,
                mtime: file_path
                  .metadata()
                  .ok()
                  .and_then(|m| m.modified().ok()),
              },
            )
            .with_context(|| {
              format!("Failed adding '{}'", file_path.display())
            })?;
        } else {
          let specifier_id = specifier_store.get_or_add(module.specifier());
          remote_modules_store.add(
            specifier_id,
            RemoteModuleEntry {
              media_type,
              is_valid_utf8: is_valid_utf8(&original_source),
              data: Cow::Owned(original_source.to_vec()),
              maybe_transpiled: maybe_transpiled.map(Cow::Owned),
              maybe_source_map: maybe_source_map.map(Cow::Owned),
              maybe_cjs_export_analysis: maybe_cjs_export_analysis
                .map(Cow::Owned),
            },
          );
        }
      }
    }
    // Any asset urls not already covered by the module loop above.
    for url in asset_module_urls {
      if graph.try_get(url).is_err() {
        // skip because there was an error loading this module
        continue;
      }
      match url.scheme() {
        "file" => {
          let file_path = deno_path_util::url_to_file_path(url)?;
          vfs.add_file_at_path(&file_path)?;
        }
        "http" | "https" => {
          let specifier_id = specifier_store.get_or_add(url);
          if !remote_modules_store.contains(specifier_id) {
            // it's ok to bypass permissions here because we verified the module
            // loaded successfully in the graph
            let file = self.load_asset_bypass_permissions(url).await?;
            remote_modules_store.add(
              specifier_id,
              RemoteModuleEntry {
                media_type: MediaType::from_specifier_and_headers(
                  &file.url,
                  file.maybe_headers.as_ref(),
                ),
                is_valid_utf8: is_valid_utf8(&file.source),
                data: Cow::Owned(file.source.to_vec()),
                maybe_cjs_export_analysis: None,
                maybe_source_map: None,
                maybe_transpiled: None,
              },
            );
          }
        }
        _ => {}
      }
    }
    let mut redirects_store =
      SpecifierDataStore::with_capacity(graph.redirects.len());
    for (from, to) in &graph.redirects {
      redirects_store.add(
        specifier_store.get_or_add(from),
        specifier_store.get_or_add(to),
      );
    }
    if let Some(import_map) = self.workspace_resolver.maybe_import_map()
      && let Ok(file_path) = url_to_file_path(import_map.base_url())
      && let Some(import_map_parent_dir) = file_path.parent()
    {
      // tell the vfs about the import map's parent directory in case it
      // falls outside what the root of where the VFS will be based
      vfs.add_possible_min_root_dir(import_map_parent_dir);
    }
    if let Some(node_modules_dir) = self.npm_resolver.root_node_modules_path() {
      // ensure the vfs doesn't go below the node_modules directory's parent
      if let Some(parent) = node_modules_dir.parent() {
        vfs.add_possible_min_root_dir(parent);
      }
    }
    // do CJS export analysis on all the files in the VFS
    // todo(dsherret): analyze cjs in parallel
    let mut to_add = Vec::new();
    for (file_path, file) in vfs.iter_files() {
      if file.cjs_export_analysis_offset.is_some() {
        continue; // already analyzed
      }
      let specifier = deno_path_util::url_from_file_path(&file_path)?;
      let media_type = MediaType::from_specifier(&specifier);
      if self.cjs_tracker.is_maybe_cjs(&specifier, media_type)? {
        let maybe_source = vfs
          .file_bytes(file.offset)
          .map(|text| String::from_utf8_lossy(text));
        let cjs_analysis_result = self
          .cjs_module_export_analyzer
          .analyze_all_exports(&specifier, maybe_source)
          .await;
        let analysis = match cjs_analysis_result {
          Ok(ResolvedCjsAnalysis::Esm(_)) => CjsExportAnalysisEntry::Esm,
          Ok(ResolvedCjsAnalysis::Cjs(exports)) => {
            CjsExportAnalysisEntry::Cjs(exports.into_iter().collect::<Vec<_>>())
          }
          Err(err) => {
            // Analysis failures are embedded (not fatal) so the error
            // can be surfaced at runtime if the module is loaded.
            log::debug!(
              "Had cjs export analysis error for '{}': {}",
              specifier,
              err
            );
            CjsExportAnalysisEntry::Error(err.to_string())
          }
        };
        to_add.push((file_path, bincode::serialize(&analysis)?));
      }
    }
    // Applied after the iteration to avoid mutating while iterating.
    for (file_path, analysis) in to_add {
      vfs.add_cjs_export_analysis(&file_path, analysis);
    }
    let vfs = self.build_vfs_consolidating_global_npm_cache(vfs);
    let root_dir_url = match &vfs.root_path {
      WindowsSystemRootablePath::Path(dir) => {
        Some(url_from_directory_path(dir)?)
      }
      WindowsSystemRootablePath::WindowSystemRoot => None,
    };
    let root_dir_url = match &root_dir_url {
      Some(url) => StandaloneRelativeFileBaseUrl::Path(url),
      None => StandaloneRelativeFileBaseUrl::WindowsSystemRoot,
    };
    // Hash all module sources (keyed relative to the root) so the code
    // cache is invalidated whenever any embedded source changes.
    let code_cache_key = if self.cli_options.code_cache_enabled() {
      let mut hasher = FastInsecureHasher::new_deno_versioned();
      for module in graph.modules() {
        if let Some(source) = module.source() {
          hasher
            .write(root_dir_url.specifier_key(module.specifier()).as_bytes());
          hasher.write(source.as_bytes());
        }
      }
      Some(hasher.finish())
    } else {
      None
    };
    let node_modules = match &self.npm_resolver {
      CliNpmResolver::Managed(_) => {
        npm_snapshot.as_ref().map(|_| NodeModules::Managed {
          node_modules_dir: self.npm_resolver.root_node_modules_path().map(
            |path| {
              root_dir_url
                .specifier_key(
                  &ModuleSpecifier::from_directory_path(path).unwrap(),
                )
                .into_owned()
            },
          ),
        })
      }
      CliNpmResolver::Byonm(resolver) => Some(NodeModules::Byonm {
        root_node_modules_dir: resolver.root_node_modules_path().map(
          |node_modules_dir| {
            root_dir_url
              .specifier_key(
                &ModuleSpecifier::from_directory_path(node_modules_dir)
                  .unwrap(),
              )
              .into_owned()
          },
        ),
      }),
    };
    // Env files are baked into the executable, so warn the user that
    // their contents become part of the distributed artifact.
    let env_vars_from_env_file = match self.cli_options.env_file_name() {
      Some(env_filenames) => {
        let mut aggregated_env_vars = IndexMap::new();
        for env_filename in env_filenames.iter().rev() {
          log::info!(
            "{} Environment variables from the file \"{}\" were embedded in the generated executable file",
            crate::colors::yellow("Warning"),
            env_filename
          );
          let env_vars = get_file_env_vars(env_filename.to_string())?;
          aggregated_env_vars.extend(env_vars);
        }
        aggregated_env_vars
      }
      None => Default::default(),
    };
    output_vfs(&vfs, display_output_filename);
    let preload_modules = self
      .cli_options
      .preload_modules()?
      .into_iter()
      .map(|s| root_dir_url.specifier_key(&s).into_owned())
      .collect::<Vec<_>>();
    let require_modules = self
      .cli_options
      .require_modules()?
      .into_iter()
      .map(|s| root_dir_url.specifier_key(&s).into_owned())
      .collect::<Vec<_>>();
    let metadata = Metadata {
      argv: compile_flags.args.clone(),
      seed: self.cli_options.seed(),
      code_cache_key,
      location: self.cli_options.location_flag().clone(),
      permissions: self.cli_options.permissions_options()?,
      v8_flags: construct_v8_flags(
        &get_default_v8_flags(),
        self.cli_options.v8_flags(),
        vec![],
      ),
      unsafely_ignore_certificate_errors: self
        .cli_options
        .unsafely_ignore_certificate_errors()
        .clone(),
      log_level: self.cli_options.log_level(),
      ca_stores: self.cli_options.ca_stores().clone(),
      ca_data,
      env_vars_from_env_file,
      entrypoint_key: root_dir_url.specifier_key(entrypoint).into_owned(),
      preload_modules,
      require_modules,
      workspace_resolver: SerializedWorkspaceResolver {
        import_map: self.workspace_resolver.maybe_import_map().map(|i| {
          SerializedWorkspaceResolverImportMap {
            specifier: if i.base_url().scheme() == "file" {
              root_dir_url.specifier_key(i.base_url()).into_owned()
            } else {
              // just make a remote url local
              "deno.json".to_string()
            },
            json: i.to_json(),
          }
        }),
        jsr_pkgs: self
          .workspace_resolver
          .jsr_packages()
          .iter()
          .map(|pkg| SerializedResolverWorkspaceJsrPackage {
            relative_base: root_dir_url.specifier_key(&pkg.base).into_owned(),
            name: pkg.name.clone(),
            version: pkg.version.clone(),
            exports: pkg.exports.clone(),
          })
          .collect(),
        package_jsons: self
          .workspace_resolver
          .package_jsons()
          .map(|pkg_json| {
            (
              root_dir_url
                .specifier_key(&pkg_json.specifier())
                .into_owned(),
              serde_json::to_value(pkg_json).unwrap(),
            )
          })
          .collect(),
        pkg_json_resolution: self.workspace_resolver.pkg_json_dep_resolution(),
      },
      node_modules,
      unstable_config: UnstableConfig {
        legacy_flag_enabled: false,
        bare_node_builtins: self.cli_options.unstable_bare_node_builtins(),
        detect_cjs: self.cli_options.unstable_detect_cjs(),
        features: self
          .cli_options
          .unstable_features()
          .into_iter()
          .map(|s| s.to_string())
          .collect(),
        lazy_dynamic_imports: self.cli_options.unstable_lazy_dynamic_imports(),
        npm_lazy_caching: self.cli_options.unstable_npm_lazy_caching(),
        raw_imports: self.cli_options.unstable_raw_imports(),
        sloppy_imports: self.cli_options.unstable_sloppy_imports(),
        tsgo: self.cli_options.unstable_tsgo(),
      },
      otel_config: self.cli_options.otel_config(),
      vfs_case_sensitivity: vfs.case_sensitivity,
    };
    let (data_section_bytes, section_sizes) = serialize_binary_data_section(
      &metadata,
      npm_snapshot.map(|s| s.into_serialized()),
      &specifier_store.for_serialization(&root_dir_url),
      &redirects_store,
      &remote_modules_store,
      &vfs,
    )
    .context("Serializing binary data section.")?;
    log::info!(
      "\n{} {}",
      crate::colors::bold("Files:"),
      crate::util::display::human_size(section_sizes.vfs as f64)
    );
    log::info!(
      "{} {}",
      crate::colors::bold("Metadata:"),
      crate::util::display::human_size(section_sizes.metadata as f64)
    );
    log::info!(
      "{} {}\n",
      crate::colors::bold("Remote modules:"),
      crate::util::display::human_size(section_sizes.remote_modules as f64)
    );
    write_binary_bytes(writer, original_bin, data_section_bytes, compile_flags)
      .context("Writing binary bytes")
  }
async fn load_asset_bypass_permissions(
&self,
specifier: &ModuleSpecifier,
) -> Result<
deno_cache_dir::file_fetcher::File,
deno_resolver::file_fetcher::FetchError,
> {
self
.file_fetcher
.fetch_with_options(
specifier,
FetchPermissionsOptionRef::AllowAll,
FetchOptions {
local: FetchLocalOptions {
include_mtime: false,
},
maybe_auth: None,
maybe_accept: None,
maybe_cache_setting: Some(
&deno_cache_dir::file_fetcher::CacheSetting::Use,
),
},
)
.await
}
fn fill_npm_vfs(
&self,
builder: &mut VfsBuilder,
snapshot: Option<&ValidSerializedNpmResolutionSnapshot>,
) -> Result<(), AnyError> {
fn maybe_warn_different_system(system_info: &NpmSystemInfo) {
if system_info != &NpmSystemInfo::default() {
log::warn!(
"{} The node_modules directory may be incompatible with the target system.",
crate::colors::yellow("Warning")
);
}
}
match &self.npm_resolver {
CliNpmResolver::Managed(npm_resolver) => {
if let Some(node_modules_path) = npm_resolver.root_node_modules_path() {
maybe_warn_different_system(&self.npm_system_info);
builder.add_dir_recursive(node_modules_path)?;
Ok(())
} else {
let snapshot = snapshot.unwrap();
// we'll flatten to remove any custom registries later
let mut packages =
snapshot.as_serialized().packages.iter().collect::<Vec<_>>();
packages.sort_by(|a, b| a.id.cmp(&b.id)); // determinism
let current_system = NpmSystemInfo::default();
for package in packages {
let folder =
npm_resolver.resolve_pkg_folder_from_pkg_id(&package.id)?;
if !package.system.matches_system(¤t_system)
&& !folder.exists()
{
log::warn!(
"{} Ignoring 'npm:{}' because it was not present on the current system.",
crate::colors::yellow("Warning"),
package.id
);
} else {
builder.add_dir_recursive(&folder)?;
}
}
Ok(())
}
}
CliNpmResolver::Byonm(_) => {
maybe_warn_different_system(&self.npm_system_info);
for pkg_json in self.cli_options.workspace().package_jsons() {
builder.add_file_at_path(&pkg_json.path)?;
}
// traverse and add all the node_modules directories in the workspace
let mut pending_dirs = VecDeque::new();
pending_dirs.push_back(
self
.cli_options
.workspace()
.root_dir_url()
.to_file_path()
.unwrap(),
);
while let Some(pending_dir) = pending_dirs.pop_front() {
let Ok(entries) = fs::read_dir(&pending_dir) else {
// Don't bother surfacing this error as it might be an error
// like "access denied". In this case, just skip over it.
continue;
};
let mut entries = entries.filter_map(|e| e.ok()).collect::<Vec<_>>();
entries.sort_by_cached_key(|entry| entry.file_name()); // determinism
for entry in entries {
let path = entry.path();
if !path.is_dir() {
continue;
}
if path.ends_with("node_modules") {
builder.add_dir_recursive(&path)?;
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | true |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/standalone/virtual_fs.rs | cli/standalone/virtual_fs.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::borrow::Cow;
use std::collections::HashSet;
use std::path::PathBuf;
use deno_lib::standalone::virtual_fs::BuiltVfs;
use deno_lib::standalone::virtual_fs::DENO_COMPILE_GLOBAL_NODE_MODULES_DIR_NAME;
use deno_lib::standalone::virtual_fs::OffsetWithLength;
use deno_lib::standalone::virtual_fs::VfsEntry;
use deno_lib::standalone::virtual_fs::VirtualDirectory;
use deno_lib::standalone::virtual_fs::VirtualDirectoryEntries;
use deno_lib::standalone::virtual_fs::VirtualFile;
use deno_lib::standalone::virtual_fs::VirtualSymlinkParts;
use deno_lib::standalone::virtual_fs::WindowsSystemRootablePath;
use deno_resolver::display::DisplayTreeNode;
use crate::util::display::human_size;
/// Logs a tree view of the embedded VFS contents at info level.
pub fn output_vfs(vfs: &BuiltVfs, executable_name: &str) {
  // Skip the (potentially expensive) tree computation when the output
  // would be discarded or there is nothing to show.
  if !log::log_enabled!(log::Level::Info) || vfs.entries.is_empty() {
    return;
  }
  let display_tree = vfs_as_display_tree(vfs, executable_name);
  let mut text = String::new();
  // unwrap ok because writing into a String cannot fail
  display_tree.print(&mut text).unwrap();
  log::info!("\n{}\n", deno_terminal::colors::bold("Embedded Files"));
  log::info!("{}", text.trim());
}
fn vfs_as_display_tree(
vfs: &BuiltVfs,
executable_name: &str,
) -> DisplayTreeNode {
/// The VFS only stores duplicate files once, so track that and display
/// it to the user so that it's not confusing.
#[derive(Debug, Default, Copy, Clone)]
struct Size {
unique: u64,
total: u64,
}
impl std::ops::Add for Size {
type Output = Self;
fn add(self, other: Self) -> Self {
Self {
unique: self.unique + other.unique,
total: self.total + other.total,
}
}
}
impl std::iter::Sum for Size {
fn sum<I: Iterator<Item = Self>>(iter: I) -> Self {
iter.fold(Self::default(), std::ops::Add::add)
}
}
enum EntryOutput<'a> {
All(Size),
Subset(Vec<DirEntryOutput<'a>>),
File(Size),
Symlink(&'a VirtualSymlinkParts),
}
impl EntryOutput<'_> {
pub fn size(&self) -> Size {
match self {
EntryOutput::All(size) => *size,
EntryOutput::Subset(children) => {
children.iter().map(|c| c.output.size()).sum()
}
EntryOutput::File(size) => *size,
EntryOutput::Symlink(_) => Size {
unique: 0,
total: 0,
},
}
}
}
impl EntryOutput<'_> {
pub fn as_display_tree(&self, name: String) -> DisplayTreeNode {
fn format_size(size: Size) -> String {
if size.unique == size.total {
human_size(size.unique as f64)
} else {
format!(
"{}{}",
human_size(size.total as f64),
deno_terminal::colors::gray(format!(
" - {} unique",
human_size(size.unique as f64)
))
)
}
}
DisplayTreeNode {
text: match self {
EntryOutput::All(size) => {
format!("{}/* ({})", name, format_size(*size))
}
EntryOutput::Subset(children) => {
let size = children.iter().map(|c| c.output.size()).sum::<Size>();
format!("{} ({})", name, format_size(size))
}
EntryOutput::File(size) => {
format!("{} ({})", name, format_size(*size))
}
EntryOutput::Symlink(parts) => {
format!("{} --> {}", name, parts.display())
}
},
children: match self {
EntryOutput::All(_) => Vec::new(),
EntryOutput::Subset(children) => children
.iter()
.map(|entry| entry.output.as_display_tree(entry.name.to_string()))
.collect(),
EntryOutput::File(_) => Vec::new(),
EntryOutput::Symlink(_) => Vec::new(),
},
}
}
}
pub struct DirEntryOutput<'a> {
name: Cow<'a, str>,
output: EntryOutput<'a>,
}
impl DirEntryOutput<'_> {
/// Collapses leaf nodes so they don't take up so much space when being
/// displayed.
///
/// We only want to collapse leafs so that nodes of the same depth have
/// the same indentation.
pub fn collapse_leaf_nodes(&mut self) {
let EntryOutput::Subset(vec) = &mut self.output else {
return;
};
for dir_entry in vec.iter_mut() {
dir_entry.collapse_leaf_nodes();
}
if vec.len() != 1 {
return;
}
let child = &mut vec[0];
let child_name = &child.name;
match &mut child.output {
EntryOutput::All(size) => {
self.name = Cow::Owned(format!("{}/{}", self.name, child_name));
self.output = EntryOutput::All(*size);
}
EntryOutput::Subset(children) => {
if children.is_empty() {
self.name = Cow::Owned(format!("{}/{}", self.name, child_name));
self.output = EntryOutput::Subset(vec![]);
}
}
EntryOutput::File(size) => {
self.name = Cow::Owned(format!("{}/{}", self.name, child_name));
self.output = EntryOutput::File(*size);
}
EntryOutput::Symlink(parts) => {
let new_name = format!("{}/{}", self.name, child_name);
self.output = EntryOutput::Symlink(parts);
self.name = Cow::Owned(new_name);
}
}
}
}
/// Computes the [`Size`] contribution of one virtual file.
///
/// `seen_offsets` remembers which data offsets were already attributed to
/// some file during the traversal, so data shared between files counts
/// toward `total` every time but toward `unique` only once.
fn file_size(file: &VirtualFile, seen_offsets: &mut HashSet<u64>) -> Size {
  // Folds a single data offset into the running size. Zero-length offsets
  // are dummy entries used for empty files and are skipped entirely so
  // they never pollute the seen-offset set.
  fn add_offset_to_size(
    offset: OffsetWithLength,
    size: &mut Size,
    seen_offsets: &mut HashSet<u64>,
  ) {
    if offset.len == 0 {
      return;
    }
    size.total += offset.len;
    if seen_offsets.insert(offset.offset) {
      // first time this data is seen anywhere in the VFS
      size.unique += offset.len;
    }
  }

  let mut size = Size::default();
  let all_offsets = std::iter::once(Some(file.offset)).chain([
    file.transpiled_offset,
    file.source_map_offset,
    file.cjs_export_analysis_offset,
  ]);
  for offset in all_offsets.flatten() {
    add_offset_to_size(offset, &mut size, seen_offsets);
  }
  size
}
/// Recursively sums the sizes of every file beneath `dir`.
///
/// Symlinks contribute nothing here; their targets are counted wherever
/// they actually reside.
fn dir_size(dir: &VirtualDirectory, seen_offsets: &mut HashSet<u64>) -> Size {
  let mut accumulated = Size::default();
  for entry in dir.entries.iter() {
    accumulated = match entry {
      VfsEntry::Dir(sub_dir) => accumulated + dir_size(sub_dir, seen_offsets),
      VfsEntry::File(file) => accumulated + file_size(file, seen_offsets),
      // intentionally ignored (see doc comment)
      VfsEntry::Symlink(_) => accumulated,
    };
  }
  accumulated
}
/// Renders the global `node_modules` cache directory with a fixed depth so
/// the output stays readable:
/// `.deno_compile_node_modules/localhost/<package_name>/<version>/*`.
fn show_global_node_modules_dir<'a>(
  vfs_dir: &'a VirtualDirectory,
  seen_offsets: &mut HashSet<u64>,
) -> Vec<DirEntryOutput<'a>> {
  // Lists the direct children of `dir`; sub-directories recurse until
  // `depth` reaches 1, at which point they are summarized as `All`.
  fn dir_children<'a>(
    dir: &'a VirtualDirectory,
    depth: usize,
    seen_offsets: &mut HashSet<u64>,
  ) -> Vec<DirEntryOutput<'a>> {
    let mut children = Vec::with_capacity(dir.entries.len());
    for entry in dir.entries.iter() {
      let output = match entry {
        VfsEntry::Dir(sub_dir) => {
          if depth == 1 {
            // depth exhausted: collapse the whole subtree into a summary
            EntryOutput::All(dir_size(sub_dir, seen_offsets))
          } else {
            EntryOutput::Subset(dir_children(sub_dir, depth - 1, seen_offsets))
          }
        }
        VfsEntry::File(file) => EntryOutput::File(file_size(file, seen_offsets)),
        VfsEntry::Symlink(symlink) => {
          EntryOutput::Symlink(&symlink.dest_parts)
        }
      };
      children.push(DirEntryOutput {
        name: Cow::Borrowed(entry.name()),
        output,
      });
    }
    children
  }

  // three named levels below the cache root, then `*`
  dir_children(vfs_dir, 3, seen_offsets)
}
/// Produces a display entry for every child of `entries`, resolving each
/// child's on-disk path against `dir_path`.
fn include_all_entries<'a>(
  dir_path: &WindowsSystemRootablePath,
  entries: &'a VirtualDirectoryEntries,
  seen_offsets: &mut HashSet<u64>,
) -> Vec<DirEntryOutput<'a>> {
  let mut outputs = Vec::new();
  for entry in entries.iter() {
    let name = entry.name();
    outputs.push(DirEntryOutput {
      name: Cow::Borrowed(name),
      output: analyze_entry(dir_path.join(name), entry, seen_offsets),
    });
  }
  outputs
}
/// Produces the display output for a single VFS entry, recursing into
/// directories via [`analyze_dir`].
fn analyze_entry<'a>(
  path: PathBuf,
  entry: &'a VfsEntry,
  seen_offsets: &mut HashSet<u64>,
) -> EntryOutput<'a> {
  match entry {
    VfsEntry::File(file) => EntryOutput::File(file_size(file, seen_offsets)),
    VfsEntry::Symlink(symlink) => EntryOutput::Symlink(&symlink.dest_parts),
    VfsEntry::Dir(dir) => analyze_dir(path, dir, seen_offsets),
  }
}
/// Produces the display output for a directory: a collapsed summary
/// (`All`) when every real on-disk entry was included and no child needs
/// its own listing, otherwise an explicit `Subset` of entries.
///
/// The global `node_modules` cache directory always gets the special
/// depth-limited rendering from [`show_global_node_modules_dir`].
fn analyze_dir<'a>(
  dir: PathBuf,
  vfs_dir: &'a VirtualDirectory,
  seen_offsets: &mut HashSet<u64>,
) -> EntryOutput<'a> {
  if vfs_dir.name == DENO_COMPILE_GLOBAL_NODE_MODULES_DIR_NAME {
    return EntryOutput::Subset(show_global_node_modules_dir(
      vfs_dir,
      seen_offsets,
    ));
  }
  // Count entries actually present on disk; when the VFS holds the same
  // number, the whole real directory was included.
  let real_entry_count = std::fs::read_dir(&dir)
    .ok()
    .map(|entries| entries.flat_map(|e| e.ok()).count())
    .unwrap_or(0);
  if real_entry_count == vfs_dir.entries.len() {
    let children = vfs_dir
      .entries
      .iter()
      .map(|entry| DirEntryOutput {
        name: Cow::Borrowed(entry.name()),
        output: analyze_entry(dir.join(entry.name()), entry, seen_offsets),
      })
      .collect::<Vec<_>>();
    if children
      .iter()
      .all(|c| !matches!(c.output, EntryOutput::Subset { .. }))
    {
      // no child needed an explicit listing, so summarize the whole dir
      EntryOutput::All(children.iter().map(|c| c.output.size()).sum())
    } else {
      EntryOutput::Subset(children)
    }
  } else {
    // NOTE: a second DENO_COMPILE_GLOBAL_NODE_MODULES_DIR_NAME check that
    // used to live here was unreachable (the early return above already
    // handles that case) and has been removed.
    EntryOutput::Subset(include_all_entries(
      &WindowsSystemRootablePath::Path(dir),
      &vfs_dir.entries,
      seen_offsets,
    ))
  }
}
// always include all the entries for the root directory, otherwise the
// user might not have context about what's being shown
let mut seen_offsets = HashSet::with_capacity(vfs.files.len());
let mut child_entries =
include_all_entries(&vfs.root_path, &vfs.entries, &mut seen_offsets);
for child_entry in &mut child_entries {
child_entry.collapse_leaf_nodes();
}
DisplayTreeNode {
text: deno_terminal::colors::italic(executable_name).to_string(),
children: child_entries
.iter()
.map(|entry| entry.output.as_display_tree(entry.name.to_string()))
.collect(),
}
}
#[cfg(test)]
mod test {
  use console_static_text::ansi::strip_ansi_codes;
  use deno_lib::standalone::virtual_fs::VfsBuilder;
  use test_util::TempDir;
  use super::*;
  // Builds a VFS containing a fully-included dir ("a"), a partially
  // included dir ("b"), and a dir containing a symlink ("c"), then checks
  // the rendered display tree against the expected text.
  #[test]
  fn test_vfs_as_display_tree() {
    let temp_dir = TempDir::new();
    temp_dir.write("root.txt", "");
    temp_dir.create_dir_all("a");
    temp_dir.write("a/a.txt", "data");
    temp_dir.write("a/b.txt", "other data");
    temp_dir.create_dir_all("b");
    temp_dir.write("b/a.txt", "");
    temp_dir.write("b/b.txt", "");
    temp_dir.create_dir_all("c");
    temp_dir.write("c/a.txt", "contents");
    temp_dir.symlink_file("c/a.txt", "c/b.txt");
    assert_eq!(temp_dir.read_to_string("c/b.txt"), "contents"); // ensure the symlink works
    let mut vfs_builder = VfsBuilder::new();
    // full dir
    vfs_builder
      .add_dir_recursive(temp_dir.path().join("a").as_path())
      .unwrap();
    // part of the dir
    vfs_builder
      .add_file_at_path(temp_dir.path().join("b/a.txt").as_path())
      .unwrap();
    // symlink
    vfs_builder
      .add_dir_recursive(temp_dir.path().join("c").as_path())
      .unwrap();
    temp_dir.write("c/c.txt", ""); // write an extra file so it shows the whole directory
    let node = vfs_as_display_tree(&vfs_builder.build(), "executable");
    let mut text = String::new();
    node.print(&mut text).unwrap();
    assert_eq!(
      strip_ansi_codes(&text),
      r#"executable
├── a/* (14B)
├── b/a.txt (0B)
└─┬ c (8B)
  ├── a.txt (8B)
  └── b.txt --> c/a.txt
"#
    );
  }
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/ops/bench.rs | cli/ops/bench.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering;
use deno_core::ModuleSpecifier;
use deno_core::OpState;
use deno_core::op2;
use deno_core::v8;
use deno_error::JsErrorBox;
use deno_runtime::deno_permissions::ChildPermissionsArg;
use deno_runtime::deno_permissions::PermissionsContainer;
use deno_runtime::deno_web::StartTime;
use tokio::sync::mpsc::UnboundedSender;
use uuid::Uuid;
use crate::tools::bench::BenchDescription;
use crate::tools::bench::BenchEvent;
// Holds every registered bench (its description plus the JS function to
// invoke), accumulated while the bench file's top level executes.
#[derive(Default)]
pub(crate) struct BenchContainer(
  pub Vec<(BenchDescription, v8::Global<v8::Function>)>,
);
// Registers the bench-related ops and seeds the op state with the event
// sender (for reporting to the bench runner) and an empty bench container.
deno_core::extension!(deno_bench,
  ops = [
    op_pledge_test_permissions,
    op_restore_test_permissions,
    op_register_bench,
    op_bench_get_origin,
    op_dispatch_bench_event,
    op_bench_now,
  ],
  options = {
    sender: UnboundedSender<BenchEvent>,
  },
  state = |state, options| {
    state.put(options.sender);
    state.put(BenchContainer::default());
  },
);
/// Returns the module specifier of the bench module currently executing.
#[op2]
#[string]
fn op_bench_get_origin(state: &mut OpState) -> String {
  let specifier = state.borrow::<ModuleSpecifier>();
  specifier.to_string()
}
// Parent permissions saved by a pledge, plus the token required to restore
// them via `op_restore_test_permissions`.
#[derive(Clone)]
struct PermissionsHolder(Uuid, PermissionsContainer);
/// Swaps the worker's permission set for a child set derived from `args`,
/// saving the parent set so it can be restored later.
///
/// Returns a token that must be passed to `op_restore_test_permissions`.
///
/// # Panics
/// Panics if a previous pledge has not been restored yet.
#[op2(stack_trace)]
#[serde]
pub fn op_pledge_test_permissions(
  state: &mut OpState,
  #[serde] args: ChildPermissionsArg,
) -> Result<Uuid, deno_runtime::deno_permissions::ChildPermissionError> {
  let token = Uuid::new_v4();
  let parent_permissions = state.borrow_mut::<PermissionsContainer>();
  let worker_permissions = parent_permissions.create_child_permissions(args)?;
  // clone so the parent set can be stashed while the child set takes over
  let parent_permissions = parent_permissions.clone();
  if state.try_take::<PermissionsHolder>().is_some() {
    panic!("pledge test permissions called before restoring previous pledge");
  }
  state.put::<PermissionsHolder>(PermissionsHolder(token, parent_permissions));
  // NOTE: This call overrides current permission set for the worker
  state.put::<PermissionsContainer>(worker_permissions);
  Ok(token)
}
/// Restores the parent permissions saved by `op_pledge_test_permissions`.
///
/// Errors when no pledge is outstanding.
///
/// # Panics
/// Panics when `token` does not match the token from the pledge.
#[op2]
pub fn op_restore_test_permissions(
  state: &mut OpState,
  #[serde] token: Uuid,
) -> Result<(), JsErrorBox> {
  let Some(holder) = state.try_take::<PermissionsHolder>() else {
    return Err(JsErrorBox::generic("no permissions to restore"));
  };
  if token != holder.0 {
    panic!("restore test permissions token does not match the stored token");
  }
  state.put::<PermissionsContainer>(holder.1);
  Ok(())
}
// Monotonically increasing id shared by all benches registered in this process.
static NEXT_ID: AtomicUsize = AtomicUsize::new(0);
/// Registers a bench function and writes its numeric id into `ret_buf` as a
/// little-endian u32 (an out-buffer avoids a serde return value).
///
/// The bench runner is notified via a `BenchEvent::Register` message.
#[allow(clippy::too_many_arguments)]
#[op2]
fn op_register_bench(
  state: &mut OpState,
  #[global] function: v8::Global<v8::Function>,
  #[string] name: String,
  baseline: bool,
  #[string] group: Option<String>,
  ignore: bool,
  only: bool,
  warmup: bool,
  #[buffer] ret_buf: &mut [u8],
) -> Result<(), JsErrorBox> {
  // the id is returned through this 4-byte out-buffer
  if ret_buf.len() != 4 {
    return Err(JsErrorBox::type_error(format!(
      "Invalid ret_buf length: {}",
      ret_buf.len()
    )));
  }
  let id = NEXT_ID.fetch_add(1, Ordering::SeqCst);
  let origin = state.borrow::<ModuleSpecifier>().to_string();
  let description = BenchDescription {
    id,
    name,
    origin: origin.clone(),
    baseline,
    group,
    ignore,
    only,
    warmup,
  };
  state
    .borrow_mut::<BenchContainer>()
    .0
    .push((description.clone(), function));
  // best-effort: the runner may already have shut down
  let sender = state.borrow::<UnboundedSender<BenchEvent>>().clone();
  sender.send(BenchEvent::Register(description)).ok();
  ret_buf.copy_from_slice(&(id as u32).to_le_bytes());
  Ok(())
}
/// Forwards a bench event from JS to the Rust-side event channel.
///
/// # Panics
/// Panics if JS sends anything other than an output event.
#[op2]
fn op_dispatch_bench_event(state: &mut OpState, #[serde] event: BenchEvent) {
  if !matches!(event, BenchEvent::Output(_)) {
    panic!("Only output events are expected from JS.");
  }
  let sender = state.borrow::<UnboundedSender<BenchEvent>>().clone();
  sender.send(event).ok();
}
/// Returns nanoseconds elapsed since the runtime's start time.
/// Errors only if the nanosecond count no longer fits in a u64.
#[op2(fast)]
#[number]
fn op_bench_now(state: &mut OpState) -> Result<u64, std::num::TryFromIntError> {
  let elapsed_ns = state.borrow::<StartTime>().elapsed().as_nanos();
  u64::try_from(elapsed_ns)
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/ops/lint.rs | cli/ops/lint.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use deno_ast::MediaType;
use deno_ast::ModuleSpecifier;
use deno_ast::ParseDiagnostic;
use deno_ast::SourceRange;
use deno_ast::SourceTextInfo;
use deno_ast::SourceTextProvider;
use deno_core::OpState;
use deno_core::op2;
use deno_lint::diagnostic::LintDiagnostic;
use deno_lint::diagnostic::LintDiagnosticDetails;
use deno_lint::diagnostic::LintDiagnosticRange;
use deno_lint::diagnostic::LintDocsUrl;
use deno_lint::diagnostic::LintFix;
use deno_lint::diagnostic::LintFixChange;
use tokio_util::sync::CancellationToken;
use crate::tools::lint;
use crate::tools::lint::PluginLogger;
use crate::util::text_encoding::Utf16Map;
// Registers the lint-plugin ops and replaces `op_print` so plugin
// `console.*` output goes through the plugin logger.
deno_core::extension!(
  deno_lint_ext,
  ops = [
    op_lint_create_serialized_ast,
    op_lint_report,
    op_lint_get_source,
    op_is_cancelled
  ],
  options = {
    logger: PluginLogger,
  },
  // TODO(bartlomieju): this should only be done,
  // if not in the "test worker".
  middleware = |op| match op.name {
    "op_print" => op_print(),
    _ => op,
  },
  state = |state, options| {
    state.put(options.logger);
    state.put(LintPluginContainer::default());
  },
);
// Test-worker variant: only the AST/cancellation ops, no logger and no
// `op_print` override.
deno_core::extension!(
  deno_lint_ext_for_test,
  ops = [op_lint_create_serialized_ast, op_is_cancelled],
  state = |state| {
    state.put(LintPluginContainer::default());
  },
);
/// Per-file state shared between the lint-plugin ops: the file being linted,
/// the diagnostics reported so far, and a cancellation token.
#[derive(Default)]
pub struct LintPluginContainer {
  /// Diagnostics accumulated for the current file.
  pub diagnostics: Vec<LintDiagnostic>,
  /// Text of the current file; `None` until `set_info_for_file` runs.
  pub source_text_info: Option<SourceTextInfo>,
  /// UTF-16 <-> UTF-8 offset mapping for the current file.
  pub utf_16_map: Option<Utf16Map>,
  /// Specifier of the current file.
  pub specifier: Option<ModuleSpecifier>,
  /// Cancels in-flight linting (e.g. when the LSP re-lints).
  pub token: CancellationToken,
}
impl LintPluginContainer {
  /// Prepares the container for linting a new file: stores the specifier,
  /// text info and UTF-16 mapping, clears diagnostics from the previous
  /// file, and installs the cancellation token (a fresh, never-cancelled
  /// one when `maybe_token` is `None`).
  pub fn set_info_for_file(
    &mut self,
    specifier: ModuleSpecifier,
    source_text_info: SourceTextInfo,
    utf16_map: Utf16Map,
    maybe_token: Option<CancellationToken>,
  ) {
    self.specifier = Some(specifier);
    self.utf_16_map = Some(utf16_map);
    self.source_text_info = Some(source_text_info);
    self.diagnostics.clear();
    self.token = maybe_token.unwrap_or_default();
  }
  /// Records one diagnostic reported by a plugin.
  ///
  /// Positions arrive as UTF-16 offsets (JS string indices) and are mapped
  /// to UTF-8 source ranges; offsets past the end of the file produce
  /// [`LintReportError::IncorrectRange`].
  ///
  /// NOTE(review): assumes `set_info_for_file` ran first — the `unwrap`s
  /// on the stored file info panic otherwise.
  fn report(
    &mut self,
    id: String,
    message: String,
    hint: Option<String>,
    start_utf16: usize,
    end_utf16: usize,
    raw_fixes: Vec<LintReportFix>,
  ) -> Result<(), LintReportError> {
    fn out_of_range_err(
      map: &Utf16Map,
      start_utf16: usize,
      end_utf16: usize,
    ) -> LintReportError {
      LintReportError::IncorrectRange {
        start: start_utf16,
        end: end_utf16,
        source_end: map.text_content_length_utf16().into(),
      }
    }
    // Maps a UTF-16 [start, end) pair to a UTF-8 SourceRange, offset by the
    // file's start position.
    fn utf16_to_utf8_range(
      utf16_map: &Utf16Map,
      source_text_info: &SourceTextInfo,
      start_utf16: usize,
      end_utf16: usize,
    ) -> Result<SourceRange, LintReportError> {
      let Some(start) =
        utf16_map.utf16_to_utf8_offset((start_utf16 as u32).into())
      else {
        return Err(out_of_range_err(utf16_map, start_utf16, end_utf16));
      };
      let Some(end) = utf16_map.utf16_to_utf8_offset((end_utf16 as u32).into())
      else {
        return Err(out_of_range_err(utf16_map, start_utf16, end_utf16));
      };
      let start_pos = source_text_info.start_pos();
      Ok(SourceRange::new(
        start_pos + start.into(),
        start_pos + end.into(),
      ))
    }
    let source_text_info = self.source_text_info.as_ref().unwrap();
    let utf16_map = self.utf_16_map.as_ref().unwrap();
    let specifier = self.specifier.clone().unwrap();
    let diagnostic_range =
      utf16_to_utf8_range(utf16_map, source_text_info, start_utf16, end_utf16)?;
    let range = LintDiagnosticRange {
      range: diagnostic_range,
      description: None,
      text_info: source_text_info.clone(),
    };
    // convert each raw fix's range too; any bad range fails the whole report
    let changes = raw_fixes
      .into_iter()
      .map(|fix| {
        let fix_range = utf16_to_utf8_range(
          utf16_map,
          source_text_info,
          fix.range.0,
          fix.range.1,
        )?;
        Ok(LintFixChange {
          new_text: fix.text.into(),
          range: fix_range,
        })
      })
      .collect::<Result<Vec<LintFixChange>, LintReportError>>()?;
    let mut fixes = vec![];
    if !changes.is_empty() {
      fixes.push(LintFix {
        changes,
        description: format!("Fix this {} problem", id).into(),
      });
    }
    let lint_diagnostic = LintDiagnostic {
      specifier,
      range: Some(range),
      details: LintDiagnosticDetails {
        message,
        code: id,
        hint,
        fixes,
        // TODO(bartlomieju): allow plugins to actually specify custom url for docs
        custom_docs_url: LintDocsUrl::None,
        info: vec![],
      },
    };
    self.diagnostics.push(lint_diagnostic);
    Ok(())
  }
}
/// Replacement for `op_print` that routes plugin `console.*` output to the
/// plugin logger instead of the process's stdout/stderr.
#[op2(fast)]
pub fn op_print(state: &mut OpState, #[string] msg: &str, is_err: bool) {
  let logger = state.borrow::<PluginLogger>();
  match is_err {
    true => logger.error(msg),
    false => logger.log(msg),
  }
}
/// Whether the current lint run has been cancelled.
#[op2(fast)]
fn op_is_cancelled(state: &mut OpState) -> bool {
  state.borrow::<LintPluginContainer>().token.is_cancelled()
}
/// Errors produced by `op_lint_create_serialized_ast`.
#[derive(Debug, thiserror::Error, deno_error::JsError)]
pub enum LintError {
  /// Failed to resolve the current working directory.
  #[class(inherit)]
  #[error(transparent)]
  Io(#[from] std::io::Error),
  /// The source failed to parse.
  #[class(inherit)]
  #[error(transparent)]
  ParseDiagnostic(#[from] ParseDiagnostic),
  /// The resolved file path could not be turned into a URL.
  #[class(type)]
  #[error("Failed to parse path as URL: {0}")]
  PathParse(std::path::PathBuf),
}
/// Parses `source` as a program and returns the serialized AST buffer
/// consumed by lint plugins.
///
/// `file_name` is resolved against the current working directory to build
/// a file URL, which also determines the media type used for parsing.
#[op2]
#[buffer]
#[allow(clippy::result_large_err)]
fn op_lint_create_serialized_ast(
  #[string] file_name: &str,
  #[string] source: String,
) -> Result<Vec<u8>, LintError> {
  let text = deno_ast::strip_bom(source);
  let absolute_path = std::env::current_dir()?.join(file_name);
  let specifier = ModuleSpecifier::from_file_path(&absolute_path)
    .map_err(|_| LintError::PathParse(absolute_path))?;
  let media_type = MediaType::from_specifier(&specifier);
  let parsed = deno_ast::parse_program(deno_ast::ParseParams {
    specifier,
    text: text.into(),
    media_type,
    capture_tokens: false,
    scope_analysis: false,
    maybe_syntax: None,
  })?;
  let utf16_map = Utf16Map::new(parsed.text().as_ref());
  Ok(lint::serialize_ast_to_buffer(&parsed, &utf16_map))
}
/// A fix suggested by a plugin: replacement text plus the UTF-16
/// [start, end) range it applies to.
#[derive(serde::Deserialize)]
struct LintReportFix {
  text: String,
  range: (usize, usize),
}
/// Errors produced by `op_lint_report`.
#[derive(Debug, thiserror::Error, deno_error::JsError)]
pub enum LintReportError {
  /// A reported UTF-16 range falls outside the source text.
  #[class(type)]
  #[error(
    "Invalid range [{start}, {end}], the source has a range of [0, {source_end}]"
  )]
  IncorrectRange {
    start: usize,
    end: usize,
    source_end: u32,
  },
}
/// Records a diagnostic reported by a plugin; positions are UTF-16 offsets.
#[op2]
fn op_lint_report(
  state: &mut OpState,
  #[string] id: String,
  #[string] message: String,
  #[string] hint: Option<String>,
  #[smi] start_utf16: usize,
  #[smi] end_utf16: usize,
  #[serde] fix: Vec<LintReportFix>,
) -> Result<(), LintReportError> {
  state
    .borrow_mut::<LintPluginContainer>()
    .report(id, message, hint, start_utf16, end_utf16, fix)
}
/// Returns the full text of the file currently being linted.
///
/// # Panics
/// Panics if no file info has been set via `set_info_for_file`.
#[op2]
#[string]
fn op_lint_get_source(state: &mut OpState) -> String {
  let container = state.borrow_mut::<LintPluginContainer>();
  let text_info = container.source_text_info.as_ref().unwrap();
  text_info.text_str().to_string()
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/ops/deploy.rs | cli/ops/deploy.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use deno_core::op2;
use deno_error::JsErrorBox;
use crate::tools::deploy::get_token_entry;
// Registers the ops that manage the Deno Deploy token in the OS keyring.
deno_core::extension!(
  deno_deploy,
  ops = [
    op_deploy_token_get,
    op_deploy_token_set,
    op_deploy_token_delete,
  ],
);
/// Reads the stored Deno Deploy token from the OS keyring.
/// Returns `Ok(None)` when no token has been stored yet.
#[op2]
#[string]
pub fn op_deploy_token_get() -> Result<Option<String>, JsErrorBox> {
  let entry =
    get_token_entry().map_err(|e| JsErrorBox::type_error(e.to_string()))?;
  match entry.get_password() {
    Ok(password) => Ok(Some(password)),
    Err(keyring::Error::NoEntry) => Ok(None),
    Err(e) => Err(JsErrorBox::type_error(e.to_string())),
  }
}
/// Stores the Deno Deploy token in the OS keyring.
#[op2(fast)]
#[string]
pub fn op_deploy_token_set(#[string] s: &str) -> Result<(), JsErrorBox> {
  let entry =
    get_token_entry().map_err(|e| JsErrorBox::type_error(e.to_string()))?;
  entry
    .set_password(s)
    .map_err(|e| JsErrorBox::type_error(e.to_string()))
}
/// Deletes the stored Deno Deploy token from the OS keyring.
#[op2(fast)]
#[string]
pub fn op_deploy_token_delete() -> Result<(), JsErrorBox> {
  let entry =
    get_token_entry().map_err(|e| JsErrorBox::type_error(e.to_string()))?;
  entry
    .delete_credential()
    .map_err(|e| JsErrorBox::type_error(e.to_string()))
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/ops/testing.rs | cli/ops/testing.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering;
use deno_core::ModuleSpecifier;
use deno_core::OpState;
use deno_core::op2;
use deno_core::v8;
use deno_error::JsErrorBox;
use deno_runtime::deno_permissions::ChildPermissionsArg;
use deno_runtime::deno_permissions::PermissionsContainer;
use uuid::Uuid;
use crate::tools::test::TestContainer;
use crate::tools::test::TestDescription;
use crate::tools::test::TestEvent;
use crate::tools::test::TestEventSender;
use crate::tools::test::TestFailure;
use crate::tools::test::TestLocation;
use crate::tools::test::TestStepDescription;
use crate::tools::test::TestStepResult;
// Registers the test-related ops and seeds the op state with the test event
// sender and an empty test container.
deno_core::extension!(deno_test,
  ops = [
    op_pledge_test_permissions,
    op_restore_test_permissions,
    op_register_test,
    op_register_test_step,
    op_register_test_hook,
    op_test_get_origin,
    op_test_event_step_wait,
    op_test_event_step_result_ok,
    op_test_event_step_result_ignored,
    op_test_event_step_result_failed,
  ],
  options = {
    sender: TestEventSender,
  },
  state = |state, options| {
    state.put(options.sender);
    state.put(TestContainer::default());
  },
);
// Parent permissions saved by a pledge, plus the token required to restore
// them via `op_restore_test_permissions`.
#[derive(Clone)]
struct PermissionsHolder(Uuid, PermissionsContainer);
/// Swaps the worker's permission set for a child set derived from `args`,
/// saving the parent set so it can be restored later.
///
/// Returns a token that must be passed to `op_restore_test_permissions`.
///
/// # Panics
/// Panics if a previous pledge has not been restored yet.
#[op2(stack_trace)]
#[serde]
pub fn op_pledge_test_permissions(
  state: &mut OpState,
  #[serde] args: ChildPermissionsArg,
) -> Result<Uuid, deno_runtime::deno_permissions::ChildPermissionError> {
  let token = Uuid::new_v4();
  let parent_permissions = state.borrow_mut::<PermissionsContainer>();
  let worker_permissions = parent_permissions.create_child_permissions(args)?;
  // clone so the parent set can be stashed while the child set takes over
  let parent_permissions = parent_permissions.clone();
  if state.try_take::<PermissionsHolder>().is_some() {
    panic!("pledge test permissions called before restoring previous pledge");
  }
  state.put::<PermissionsHolder>(PermissionsHolder(token, parent_permissions));
  // NOTE: This call overrides current permission set for the worker
  state.put::<PermissionsContainer>(worker_permissions);
  Ok(token)
}
/// Restores the parent permissions saved by `op_pledge_test_permissions`.
///
/// Errors when no pledge is outstanding.
///
/// # Panics
/// Panics when `token` does not match the token from the pledge.
#[op2]
pub fn op_restore_test_permissions(
  state: &mut OpState,
  #[serde] token: Uuid,
) -> Result<(), JsErrorBox> {
  let Some(holder) = state.try_take::<PermissionsHolder>() else {
    return Err(JsErrorBox::generic("no permissions to restore"));
  };
  if token != holder.0 {
    panic!("restore test permissions token does not match the stored token");
  }
  state.put::<PermissionsContainer>(holder.1);
  Ok(())
}
// Monotonically increasing id shared by all tests/steps registered in this process.
static NEXT_ID: AtomicUsize = AtomicUsize::new(0);
/// Registers a test function and writes its numeric id into `ret_buf` as a
/// little-endian u32 (an out-buffer avoids a serde return value).
#[allow(clippy::too_many_arguments)]
#[op2]
fn op_register_test(
  state: &mut OpState,
  #[global] function: v8::Global<v8::Function>,
  #[string] name: String,
  ignore: bool,
  only: bool,
  sanitize_ops: bool,
  sanitize_resources: bool,
  #[string] file_name: String,
  #[smi] line_number: u32,
  #[smi] column_number: u32,
  #[buffer] ret_buf: &mut [u8],
) -> Result<(), JsErrorBox> {
  // the id is returned through this 4-byte out-buffer
  if ret_buf.len() != 4 {
    return Err(JsErrorBox::type_error(format!(
      "Invalid ret_buf length: {}",
      ret_buf.len()
    )));
  }
  let id = NEXT_ID.fetch_add(1, Ordering::SeqCst);
  let origin = state.borrow::<ModuleSpecifier>().to_string();
  let description = TestDescription {
    id,
    name,
    ignore,
    only,
    sanitize_ops,
    sanitize_resources,
    origin: origin.clone(),
    location: TestLocation {
      file_name,
      line_number,
      column_number,
    },
  };
  state
    .borrow_mut::<TestContainer>()
    .register(description, function);
  ret_buf.copy_from_slice(&(id as u32).to_le_bytes());
  Ok(())
}
/// Registers a lifecycle hook (e.g. before/after) with the test container.
#[op2]
fn op_register_test_hook(
  state: &mut OpState,
  #[string] hook_type: String,
  #[global] function: v8::Global<v8::Function>,
) -> Result<(), JsErrorBox> {
  state
    .borrow_mut::<TestContainer>()
    .register_hook(hook_type, function);
  Ok(())
}
/// Returns the module specifier of the test module currently executing.
#[op2]
#[string]
fn op_test_get_origin(state: &mut OpState) -> String {
  let specifier = state.borrow::<ModuleSpecifier>();
  specifier.to_string()
}
/// Registers a test step and returns its id. Unlike tests, the runner is
/// notified immediately via a `TestEvent::StepRegister` message.
#[op2(fast)]
#[smi]
#[allow(clippy::too_many_arguments)]
fn op_register_test_step(
  state: &mut OpState,
  #[string] name: String,
  #[string] file_name: String,
  #[smi] line_number: u32,
  #[smi] column_number: u32,
  #[smi] level: usize,
  #[smi] parent_id: usize,
  #[smi] root_id: usize,
  #[string] root_name: String,
) -> usize {
  let id = NEXT_ID.fetch_add(1, Ordering::SeqCst);
  let origin = state.borrow::<ModuleSpecifier>().to_string();
  let description = TestStepDescription {
    id,
    name,
    origin: origin.clone(),
    location: TestLocation {
      file_name,
      line_number,
      column_number,
    },
    level,
    parent_id,
    root_id,
    root_name,
  };
  // best-effort: the runner may already have shut down
  let sender = state.borrow_mut::<TestEventSender>();
  sender.send(TestEvent::StepRegister(description)).ok();
  id
}
/// Notifies the test runner that step `id` has started running.
#[op2(fast)]
fn op_test_event_step_wait(state: &mut OpState, #[smi] id: usize) {
  state
    .borrow_mut::<TestEventSender>()
    .send(TestEvent::StepWait(id))
    .ok();
}
/// Reports step `id` as passed, with its duration.
#[op2(fast)]
fn op_test_event_step_result_ok(
  state: &mut OpState,
  #[smi] id: usize,
  #[smi] duration: u64,
) {
  let event = TestEvent::StepResult(id, TestStepResult::Ok, duration);
  state.borrow_mut::<TestEventSender>().send(event).ok();
}
/// Reports step `id` as ignored, with its duration.
#[op2(fast)]
fn op_test_event_step_result_ignored(
  state: &mut OpState,
  #[smi] id: usize,
  #[smi] duration: u64,
) {
  let event = TestEvent::StepResult(id, TestStepResult::Ignored, duration);
  state.borrow_mut::<TestEventSender>().send(event).ok();
}
/// Reports step `id` as failed with the given failure details and duration.
#[op2]
fn op_test_event_step_result_failed(
  state: &mut OpState,
  #[smi] id: usize,
  #[serde] failure: TestFailure,
  #[smi] duration: u64,
) {
  let event =
    TestEvent::StepResult(id, TestStepResult::Failed(failure), duration);
  state.borrow_mut::<TestEventSender>().send(event).ok();
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/ops/mod.rs | cli/ops/mod.rs | // Copyright 2018-2025 the Deno authors. MIT license.
pub mod bench;
pub mod deploy;
pub mod jupyter;
pub mod lint;
pub mod testing;
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/ops/jupyter.rs | cli/ops/jupyter.rs | // Copyright 2018-2025 the Deno authors. MIT license.
// NOTE(bartlomieju): unfortunately it appears that clippy is broken
// and can't allow a single line ignore for `await_holding_lock`.
#![allow(clippy::await_holding_lock)]
use std::cell::RefCell;
use std::rc::Rc;
use std::sync::Arc;
use deno_core::OpState;
use deno_core::error::AnyError;
use deno_core::op2;
use deno_core::parking_lot::Mutex;
use deno_core::serde_json;
use deno_error::JsErrorBox;
use jupyter_runtime::InputRequest;
use jupyter_runtime::JupyterMessage;
use jupyter_runtime::JupyterMessageContent;
use jupyter_runtime::KernelIoPubConnection;
use jupyter_runtime::StreamContent;
use tokio::sync::mpsc;
use crate::tools::jupyter::server::StdinConnectionProxy;
// Registers the Jupyter kernel ops and replaces `op_print` so stdout/stderr
// output is forwarded to the notebook instead of the process stdio.
deno_core::extension!(deno_jupyter,
  ops = [
    op_jupyter_broadcast,
    op_jupyter_input,
    op_jupyter_create_png_from_texture,
    op_jupyter_get_buffer,
  ],
  options = {
    sender: mpsc::UnboundedSender<StreamContent>,
  },
  middleware = |op| match op.name {
    "op_print" => op_print(),
    _ => op,
  },
  state = |state, options| {
    state.put(options.sender);
  },
);
// Test variant of the extension: same ops, but without the `op_print`
// override so test output still reaches the process stdio.
deno_core::extension!(deno_jupyter_for_test,
  ops = [
    op_jupyter_broadcast,
    op_jupyter_input,
    op_jupyter_create_png_from_texture,
    op_jupyter_get_buffer,
  ],
  options = {
    sender: mpsc::UnboundedSender<StreamContent>,
  },
  state = |state, options| {
    state.put(options.sender);
  },
);
/// Requests input from the Jupyter frontend (the `input()`/password prompt).
///
/// Returns `None` when input is not possible: no execute request is in
/// flight, the frontend disallowed stdin, the stdin channel is closed, or
/// the reply is not an input reply.
#[op2]
#[string]
pub fn op_jupyter_input(
  state: &mut OpState,
  #[string] prompt: String,
  is_password: bool,
) -> Option<String> {
  let (last_execution_request, stdin_connection_proxy) = {
    (
      state.borrow::<Arc<Mutex<Option<JupyterMessage>>>>().clone(),
      state.borrow::<Arc<Mutex<StdinConnectionProxy>>>().clone(),
    )
  };
  let maybe_last_request = last_execution_request.lock().clone();
  if let Some(last_request) = maybe_last_request {
    let JupyterMessageContent::ExecuteRequest(msg) = &last_request.content
    else {
      return None;
    };
    // the frontend must have opted in to stdin requests
    if !msg.allow_stdin {
      return None;
    }
    let content = InputRequest {
      prompt,
      password: is_password,
    };
    // parent the request to the execute request so the frontend can route it
    let msg = JupyterMessage::new(content, Some(&last_request));
    let Ok(()) = stdin_connection_proxy.lock().tx.send(msg) else {
      return None;
    };
    // Need to spawn a separate thread here, because `blocking_recv()` can't
    // be used from the Tokio runtime context.
    let join_handle = std::thread::spawn(move || {
      stdin_connection_proxy.lock().rx.blocking_recv()
    });
    let Ok(Some(response)) = join_handle.join() else {
      return None;
    };
    let JupyterMessageContent::InputReply(msg) = response.content else {
      return None;
    };
    return Some(msg.value);
  }
  None
}
/// Errors produced by `op_jupyter_broadcast`.
#[derive(Debug, thiserror::Error, deno_error::JsError)]
pub enum JupyterBroadcastError {
  /// The message content could not be deserialized for the given type.
  #[class(inherit)]
  #[error(transparent)]
  SerdeJson(serde_json::Error),
  /// Sending on the iopub (ZeroMQ) connection failed.
  #[class(generic)]
  #[error(transparent)]
  ZeroMq(AnyError),
}
/// Broadcasts a message from JS (`Deno.jupyter.broadcast`) on the kernel's
/// iopub channel, parented to the execute request currently in flight.
///
/// No-ops when there is no in-flight execute request to parent to.
///
/// NOTE(review): the parking_lot mutex guard on the iopub connection is held
/// across the `.await` — covered by the file-level
/// `#![allow(clippy::await_holding_lock)]`.
#[op2(async)]
pub async fn op_jupyter_broadcast(
  state: Rc<RefCell<OpState>>,
  #[string] message_type: String,
  #[serde] content: serde_json::Value,
  #[serde] metadata: serde_json::Value,
  #[serde] buffers: Vec<deno_core::JsBuffer>,
) -> Result<(), JupyterBroadcastError> {
  let (iopub_connection, last_execution_request) = {
    let s = state.borrow();
    (
      s.borrow::<Arc<Mutex<KernelIoPubConnection>>>().clone(),
      s.borrow::<Arc<Mutex<Option<JupyterMessage>>>>().clone(),
    )
  };
  let maybe_last_request = last_execution_request.lock().clone();
  if let Some(last_request) = maybe_last_request {
    let content = JupyterMessageContent::from_type_and_content(
      &message_type,
      content.clone(),
    )
    .map_err(|err| {
      log::error!(
        "Error deserializing content from jupyter.broadcast, message_type: {}:\n\n{}\n\n{}",
        &message_type,
        content,
        err
      );
      JupyterBroadcastError::SerdeJson(err)
    })?;
    let jupyter_message = JupyterMessage::new(content, Some(&last_request))
      .with_metadata(metadata)
      .with_buffers(buffers.into_iter().map(|b| b.to_vec().into()).collect());
    iopub_connection
      .lock()
      .send(jupyter_message)
      .await
      .map_err(JupyterBroadcastError::ZeroMq)?;
  }
  Ok(())
}
/// Replacement for `op_print` that forwards stdout/stderr text to the
/// Jupyter stream channel instead of the process's own stdio.
#[op2(fast)]
pub fn op_print(state: &mut OpState, #[string] msg: &str, is_err: bool) {
  let sender = state.borrow_mut::<mpsc::UnboundedSender<StreamContent>>();
  let send_result = if is_err {
    sender.send(StreamContent::stderr(msg))
  } else {
    sender.send(StreamContent::stdout(msg))
  };
  if let Err(err) = send_result {
    if is_err {
      log::error!("Failed to send stderr message: {}", err);
    } else {
      log::error!("Failed to send stdout message: {}", err);
    }
  }
}
/// Reads back a WebGPU texture and encodes it as a base64 PNG string for
/// notebook display.
///
/// Steps: copy texture -> GPU staging buffer (with rows padded to
/// `COPY_BYTES_PER_ROW_ALIGNMENT`), map the buffer, strip the row padding,
/// PNG-encode, base64-encode. Only 4-channel 8-bit-per-channel formats are
/// supported.
#[op2]
#[string]
pub fn op_jupyter_create_png_from_texture(
  #[cppgc] texture: &deno_runtime::deno_webgpu::texture::GPUTexture,
) -> Result<String, JsErrorBox> {
  use deno_runtime::deno_canvas::image::ExtendedColorType;
  use deno_runtime::deno_canvas::image::ImageEncoder;
  use deno_runtime::deno_webgpu::error::GPUError;
  use deno_runtime::deno_webgpu::*;
  use texture::GPUTextureFormat;
  // We only support the 8 bit per pixel formats with 4 channels
  // as such a pixel has 4 bytes
  const BYTES_PER_PIXEL: u32 = 4;
  // buffer-to-texture copies require bytes_per_row aligned to
  // COPY_BYTES_PER_ROW_ALIGNMENT, so pad each row up to that boundary
  let unpadded_bytes_per_row = texture.size.width * BYTES_PER_PIXEL;
  let padded_bytes_per_row_padding = (wgpu_types::COPY_BYTES_PER_ROW_ALIGNMENT
    - (unpadded_bytes_per_row % wgpu_types::COPY_BYTES_PER_ROW_ALIGNMENT))
    % wgpu_types::COPY_BYTES_PER_ROW_ALIGNMENT;
  let padded_bytes_per_row =
    unpadded_bytes_per_row + padded_bytes_per_row_padding;
  // staging buffer the texture contents are copied into for CPU readback
  let (buffer, maybe_err) = texture.instance.device_create_buffer(
    texture.device_id,
    &wgpu_types::BufferDescriptor {
      label: None,
      size: (padded_bytes_per_row * texture.size.height) as _,
      usage: wgpu_types::BufferUsages::MAP_READ
        | wgpu_types::BufferUsages::COPY_DST,
      mapped_at_creation: false,
    },
    None,
  );
  if let Some(maybe_err) = maybe_err {
    return Err(JsErrorBox::from_err::<GPUError>(maybe_err.into()));
  }
  let (command_encoder, maybe_err) =
    texture.instance.device_create_command_encoder(
      texture.device_id,
      &wgpu_types::CommandEncoderDescriptor { label: None },
      None,
    );
  if let Some(maybe_err) = maybe_err {
    return Err(JsErrorBox::from_err::<GPUError>(maybe_err.into()));
  }
  // record the texture -> buffer copy
  texture
    .instance
    .command_encoder_copy_texture_to_buffer(
      command_encoder,
      &wgpu_types::TexelCopyTextureInfo {
        texture: texture.id,
        mip_level: 0,
        origin: Default::default(),
        aspect: Default::default(),
      },
      &wgpu_types::TexelCopyBufferInfo {
        buffer,
        layout: wgpu_types::TexelCopyBufferLayout {
          offset: 0,
          bytes_per_row: Some(padded_bytes_per_row),
          rows_per_image: None,
        },
      },
      &texture.size,
    )
    .map_err(|e| JsErrorBox::from_err::<GPUError>(e.into()))?;
  let (command_buffer, maybe_err) = texture.instance.command_encoder_finish(
    command_encoder,
    &wgpu_types::CommandBufferDescriptor { label: None },
    None,
  );
  if let Some((_, maybe_err)) = maybe_err {
    return Err(JsErrorBox::from_err::<GPUError>(maybe_err.into()));
  }
  let maybe_err = texture
    .instance
    .queue_submit(texture.queue_id, &[command_buffer])
    .err();
  if let Some((_, maybe_err)) = maybe_err {
    return Err(JsErrorBox::from_err::<GPUError>(maybe_err.into()));
  }
  // request a read mapping, then block until the submission completes
  let index = texture
    .instance
    .buffer_map_async(
      buffer,
      0,
      None,
      wgpu_core::resource::BufferMapOperation {
        host: wgpu_core::device::HostMap::Read,
        callback: None,
      },
    )
    .map_err(|e| JsErrorBox::from_err::<GPUError>(e.into()))?;
  texture
    .instance
    .device_poll(
      texture.device_id,
      wgpu_types::PollType::Wait {
        submission_index: Some(index),
        timeout: None,
      },
    )
    .unwrap();
  let (slice_pointer, range_size) = texture
    .instance
    .buffer_get_mapped_range(buffer, 0, None)
    .map_err(|e| JsErrorBox::from_err::<GPUError>(e.into()))?;
  let data = {
    // SAFETY: creating a slice from pointer and length provided by wgpu and
    // then dropping it before unmapping
    let slice = unsafe {
      std::slice::from_raw_parts(slice_pointer.as_ptr(), range_size as usize)
    };
    // strip the per-row alignment padding while copying out
    let mut unpadded =
      Vec::with_capacity((unpadded_bytes_per_row * texture.size.height) as _);
    for i in 0..texture.size.height {
      unpadded.extend_from_slice(
        &slice[((i * padded_bytes_per_row) as usize)
          ..(((i + 1) * padded_bytes_per_row) as usize)]
          [..(unpadded_bytes_per_row as usize)],
      );
    }
    unpadded
  };
  let color_type = match texture.format {
    GPUTextureFormat::Rgba8unorm => ExtendedColorType::Rgba8,
    GPUTextureFormat::Rgba8unormSrgb => ExtendedColorType::Rgba8,
    GPUTextureFormat::Rgba8snorm => ExtendedColorType::Rgba8,
    GPUTextureFormat::Rgba8uint => ExtendedColorType::Rgba8,
    GPUTextureFormat::Rgba8sint => ExtendedColorType::Rgba8,
    GPUTextureFormat::Bgra8unorm => ExtendedColorType::Bgra8,
    GPUTextureFormat::Bgra8unormSrgb => ExtendedColorType::Bgra8,
    _ => {
      return Err(JsErrorBox::type_error(format!(
        "Unsupported texture format '{}'",
        texture.format.as_str()
      )));
    }
  };
  let mut out: Vec<u8> = vec![];
  let img =
    deno_runtime::deno_canvas::image::codecs::png::PngEncoder::new(&mut out);
  img
    .write_image(&data, texture.size.width, texture.size.height, color_type)
    .map_err(|e| JsErrorBox::type_error(e.to_string()))?;
  texture
    .instance
    .buffer_unmap(buffer)
    .map_err(|e| JsErrorBox::from_err::<GPUError>(e.into()))?;
  texture.instance.buffer_drop(buffer);
  Ok(deno_runtime::deno_web::forgiving_base64_encode(&out))
}
#[op2]
#[serde]
pub fn op_jupyter_get_buffer(
#[cppgc] buffer: &deno_runtime::deno_webgpu::buffer::GPUBuffer,
) -> Result<Vec<u8>, deno_runtime::deno_webgpu::error::GPUError> {
use deno_runtime::deno_webgpu::*;
let index = buffer.instance.buffer_map_async(
buffer.id,
0,
None,
wgpu_core::resource::BufferMapOperation {
host: wgpu_core::device::HostMap::Read,
callback: None,
},
)?;
buffer
.instance
.device_poll(
buffer.device,
wgpu_types::PollType::Wait {
submission_index: Some(index),
timeout: None,
},
)
.unwrap();
let (slice_pointer, range_size) = buffer
.instance
.buffer_get_mapped_range(buffer.id, 0, None)?;
let data = {
// SAFETY: creating a slice from pointer and length provided by wgpu and
// then dropping it before unmapping
let slice = unsafe {
std::slice::from_raw_parts(slice_pointer.as_ptr(), range_size as usize)
};
slice.to_vec()
};
buffer.instance.buffer_unmap(buffer.id)?;
Ok(data)
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/args/flags.rs | cli/args/flags.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::borrow::Cow;
use std::collections::HashSet;
use std::env;
use std::ffi::OsString;
use std::net::SocketAddr;
use std::num::NonZeroU8;
use std::num::NonZeroU32;
use std::num::NonZeroUsize;
use std::path::Path;
use std::path::PathBuf;
use std::str::FromStr;
use std::sync::Arc;
use std::sync::LazyLock;
use clap::Arg;
use clap::ArgAction;
use clap::ArgMatches;
use clap::ColorChoice;
use clap::Command;
use clap::ValueHint;
use clap::builder::FalseyValueParser;
use clap::builder::styling::AnsiColor;
use clap::error::ErrorKind;
use clap::value_parser;
use clap_complete::CompletionCandidate;
use clap_complete::engine::SubcommandCandidates;
use clap_complete::env::EnvCompleter;
use clap_complete::env::Shells;
use color_print::cstr;
use deno_bundle_runtime::BundleFormat;
use deno_bundle_runtime::BundlePlatform;
use deno_bundle_runtime::PackageHandling;
use deno_bundle_runtime::SourceMapType;
use deno_config::deno_json::NewestDependencyDate;
use deno_config::deno_json::NodeModulesDirMode;
use deno_config::glob::FilePatterns;
use deno_config::glob::PathOrPatternSet;
use deno_core::anyhow::Context;
use deno_core::anyhow::bail;
use deno_core::error::AnyError;
use deno_core::serde_json;
use deno_core::url::Url;
use deno_graph::GraphKind;
use deno_lib::args::CaData;
use deno_lib::args::UnstableConfig;
use deno_lib::version::DENO_VERSION_INFO;
use deno_npm::NpmSystemInfo;
use deno_npm_installer::PackagesAllowedScripts;
use deno_path_util::normalize_path;
use deno_path_util::resolve_url_or_path;
use deno_path_util::url_to_file_path;
use deno_runtime::UnstableFeatureKind;
use deno_runtime::deno_permissions::SysDescriptor;
use deno_semver::jsr::JsrDepPackageReq;
use deno_semver::package::PackageKind;
use deno_telemetry::OtelConfig;
use deno_telemetry::OtelConsoleConfig;
use deno_telemetry::OtelPropagators;
use log::Level;
use log::debug;
use serde::Deserialize;
use serde::Serialize;
use super::flags_net;
use crate::util::fs::canonicalize_path;
#[derive(Clone, Debug, Default, Eq, PartialEq)]
pub enum ConfigFlag {
#[default]
Discover,
Path(String),
Disabled,
}
#[derive(Clone, Debug, Default, Eq, PartialEq)]
pub struct FileFlags {
pub ignore: Vec<String>,
pub include: Vec<String>,
}
impl FileFlags {
pub fn as_file_patterns(
&self,
base: &Path,
) -> Result<FilePatterns, AnyError> {
Ok(FilePatterns {
include: if self.include.is_empty() {
None
} else {
Some(PathOrPatternSet::from_include_relative_path_or_patterns(
base,
&self.include,
)?)
},
exclude: PathOrPatternSet::from_exclude_relative_path_or_patterns(
base,
&self.ignore,
)?,
base: base.to_path_buf(),
})
}
}
#[derive(Clone, Debug, Copy, Eq, PartialEq)]
pub enum DefaultRegistry {
Npm,
Jsr,
}
#[derive(Clone, Debug, Default, Eq, PartialEq)]
pub struct AddFlags {
pub packages: Vec<String>,
pub dev: bool,
pub default_registry: Option<DefaultRegistry>,
pub lockfile_only: bool,
}
#[derive(Clone, Debug, Default, Eq, PartialEq)]
pub struct AuditFlags {
pub severity: String,
pub ignore_registry_errors: bool,
pub ignore_unfixable: bool,
pub dev: bool,
pub prod: bool,
pub optional: bool,
pub ignore: Vec<String>,
pub socket: bool,
}
#[derive(Clone, Debug, Default, Eq, PartialEq)]
pub struct RemoveFlags {
pub packages: Vec<String>,
pub lockfile_only: bool,
}
#[derive(Clone, Debug, Default, Eq, PartialEq)]
pub struct BenchFlags {
pub files: FileFlags,
pub filter: Option<String>,
pub json: bool,
pub no_run: bool,
pub permit_no_files: bool,
pub watch: Option<WatchFlags>,
}
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct CacheFlags {
pub files: Vec<String>,
}
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct CheckFlags {
pub files: Vec<String>,
pub doc: bool,
pub doc_only: bool,
}
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct CompileFlags {
pub source_file: String,
pub output: Option<String>,
pub args: Vec<String>,
pub target: Option<String>,
pub no_terminal: bool,
pub icon: Option<String>,
pub include: Vec<String>,
pub exclude: Vec<String>,
pub eszip: bool,
}
impl CompileFlags {
pub fn resolve_target(&self) -> String {
self
.target
.clone()
.unwrap_or_else(|| env!("TARGET").to_string())
}
}
#[derive(Clone)]
pub enum CompletionsFlags {
Static(Box<[u8]>),
Dynamic(Arc<dyn Fn() -> Result<(), AnyError> + Send + Sync + 'static>),
}
impl PartialEq for CompletionsFlags {
fn eq(&self, other: &Self) -> bool {
match (self, other) {
(Self::Static(l0), Self::Static(r0)) => l0 == r0,
(Self::Dynamic(l0), Self::Dynamic(r0)) => Arc::ptr_eq(l0, r0),
_ => false,
}
}
}
impl Eq for CompletionsFlags {}
impl std::fmt::Debug for CompletionsFlags {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Self::Static(arg0) => f.debug_tuple("Static").field(arg0).finish(),
Self::Dynamic(_) => f.debug_tuple("Dynamic").finish(),
}
}
}
#[derive(Clone, Debug, Eq, PartialEq, Default)]
pub enum CoverageType {
#[default]
Summary,
Detailed,
Lcov,
Html,
}
#[derive(Clone, Debug, Eq, PartialEq, Default)]
pub struct CoverageFlags {
pub files: FileFlags,
pub output: Option<String>,
pub include: Vec<String>,
pub exclude: Vec<String>,
pub r#type: CoverageType,
}
#[derive(Clone, Debug, Eq, PartialEq, Default)]
pub struct DeployFlags {
pub sandbox: bool,
}
#[derive(Clone, Debug, Eq, PartialEq, Default)]
pub enum DocSourceFileFlag {
#[default]
Builtin,
Paths(Vec<String>),
}
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct DocHtmlFlag {
pub name: Option<String>,
pub category_docs_path: Option<String>,
pub symbol_redirect_map_path: Option<String>,
pub default_symbol_map_path: Option<String>,
pub strip_trailing_html: bool,
pub output: String,
}
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct DocFlags {
pub private: bool,
pub json: bool,
pub lint: bool,
pub html: Option<DocHtmlFlag>,
pub source_files: DocSourceFileFlag,
pub filter: Option<String>,
}
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct EvalFlags {
pub print: bool,
pub code: String,
}
#[derive(Clone, Default, Debug, Eq, PartialEq)]
pub struct FmtFlags {
pub check: bool,
pub files: FileFlags,
pub permit_no_files: bool,
pub use_tabs: Option<bool>,
pub line_width: Option<NonZeroU32>,
pub indent_width: Option<NonZeroU8>,
pub single_quote: Option<bool>,
pub prose_wrap: Option<String>,
pub no_semicolons: Option<bool>,
pub watch: Option<WatchFlags>,
pub unstable_component: bool,
pub unstable_sql: bool,
}
impl FmtFlags {
pub fn is_stdin(&self) -> bool {
let args = &self.files.include;
args.len() == 1 && args[0] == "-"
}
}
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct InitFlags {
pub package: Option<String>,
pub package_args: Vec<String>,
pub dir: Option<String>,
pub lib: bool,
pub serve: bool,
pub empty: bool,
pub yes: bool,
}
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct InfoFlags {
pub json: bool,
pub file: Option<String>,
}
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct InstallFlagsGlobal {
pub module_urls: Vec<String>,
pub args: Vec<String>,
pub name: Option<String>,
pub root: Option<String>,
pub force: bool,
}
#[derive(Clone, Debug, Eq, PartialEq)]
pub enum InstallFlags {
Local(InstallFlagsLocal),
Global(InstallFlagsGlobal),
}
#[derive(Clone, Debug, Eq, PartialEq)]
pub enum InstallFlagsLocal {
Add(AddFlags),
TopLevel(InstallTopLevelFlags),
Entrypoints(InstallEntrypointsFlags),
}
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct InstallTopLevelFlags {
pub lockfile_only: bool,
}
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct InstallEntrypointsFlags {
pub entrypoints: Vec<String>,
pub lockfile_only: bool,
}
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct JSONReferenceFlags {
pub json: deno_core::serde_json::Value,
}
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct JupyterFlags {
pub install: bool,
pub name: Option<String>,
pub display: Option<String>,
pub kernel: bool,
pub conn_file: Option<String>,
pub force: bool,
}
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct UninstallFlagsGlobal {
pub name: String,
pub root: Option<String>,
}
#[derive(Clone, Debug, Eq, PartialEq)]
pub enum UninstallKind {
Local(RemoveFlags),
Global(UninstallFlagsGlobal),
}
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct UninstallFlags {
pub kind: UninstallKind,
}
#[derive(Clone, Debug, Default, Eq, PartialEq)]
pub struct LintFlags {
pub files: FileFlags,
pub rules: bool,
pub fix: bool,
pub maybe_rules_tags: Option<Vec<String>>,
pub maybe_rules_include: Option<Vec<String>>,
pub maybe_rules_exclude: Option<Vec<String>>,
pub permit_no_files: bool,
pub json: bool,
pub compact: bool,
pub watch: Option<WatchFlags>,
}
impl LintFlags {
pub fn is_stdin(&self) -> bool {
let args = &self.files.include;
args.len() == 1 && args[0] == "-"
}
}
#[derive(Clone, Debug, Eq, PartialEq, Default)]
pub struct ReplFlags {
pub eval_files: Option<Vec<String>>,
pub eval: Option<String>,
pub is_default_command: bool,
pub json: bool,
}
#[derive(Clone, Debug, Eq, PartialEq, Default)]
pub struct RunFlags {
pub script: String,
pub watch: Option<WatchFlagsWithPaths>,
pub bare: bool,
pub coverage_dir: Option<String>,
pub print_task_list: bool,
}
impl RunFlags {
#[cfg(test)]
pub fn new_default(script: String) -> Self {
Self {
script,
watch: None,
bare: false,
coverage_dir: None,
print_task_list: false,
}
}
pub fn is_stdin(&self) -> bool {
self.script == "-"
}
}
#[derive(Clone, Debug, Eq, PartialEq, Default)]
pub enum DenoXShimName {
#[default]
Dx,
Denox,
Dnx,
Other(String),
}
impl DenoXShimName {
pub fn name(&self) -> &str {
match self {
Self::Dx => "dx",
Self::Denox => "denox",
Self::Dnx => "dnx",
Self::Other(name) => name,
}
}
}
#[derive(Clone, Debug, Eq, PartialEq)]
pub enum XFlagsKind {
InstallAlias(DenoXShimName),
Command(XCommandFlags),
Print,
}
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct XCommandFlags {
pub yes: bool,
pub command: String,
}
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct XFlags {
pub kind: XFlagsKind,
}
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct ServeFlags {
pub script: String,
pub watch: Option<WatchFlagsWithPaths>,
pub port: u16,
pub host: String,
pub parallel: bool,
pub open_site: bool,
}
impl ServeFlags {
#[cfg(test)]
pub fn new_default(script: String, port: u16, host: &str) -> Self {
Self {
script,
watch: None,
port,
host: host.to_owned(),
parallel: false,
open_site: false,
}
}
}
pub enum WatchFlagsRef<'a> {
Watch(&'a WatchFlags),
WithPaths(&'a WatchFlagsWithPaths),
}
#[derive(Clone, Default, Debug, Eq, PartialEq)]
pub struct WatchFlags {
pub hmr: bool,
pub no_clear_screen: bool,
pub exclude: Vec<String>,
}
#[derive(Clone, Default, Debug, Eq, PartialEq)]
pub struct WatchFlagsWithPaths {
pub hmr: bool,
pub paths: Vec<String>,
pub no_clear_screen: bool,
pub exclude: Vec<String>,
}
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct TaskFlags {
pub cwd: Option<String>,
pub task: Option<String>,
pub is_run: bool,
pub recursive: bool,
pub filter: Option<String>,
pub eval: bool,
}
#[derive(Clone, Copy, Debug, Default, Eq, PartialEq)]
pub enum TestReporterConfig {
#[default]
Pretty,
Dot,
Junit,
Tap,
}
#[derive(Clone, Debug, Default, Eq, PartialEq)]
pub struct TestFlags {
pub doc: bool,
pub no_run: bool,
pub coverage_dir: Option<String>,
pub coverage_raw_data_only: bool,
pub clean: bool,
pub fail_fast: Option<NonZeroUsize>,
pub files: FileFlags,
pub parallel: bool,
pub permit_no_files: bool,
pub filter: Option<String>,
pub shuffle: Option<u64>,
pub trace_leaks: bool,
pub watch: Option<WatchFlagsWithPaths>,
pub reporter: TestReporterConfig,
pub junit_path: Option<String>,
pub hide_stacktraces: bool,
}
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct UpgradeFlags {
pub dry_run: bool,
pub force: bool,
pub release_candidate: bool,
pub canary: bool,
pub version: Option<String>,
pub output: Option<String>,
pub version_or_hash_or_channel: Option<String>,
}
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct PublishFlags {
pub token: Option<String>,
pub dry_run: bool,
pub allow_slow_types: bool,
pub allow_dirty: bool,
pub no_provenance: bool,
pub set_version: Option<String>,
}
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct HelpFlags {
pub help: clap::builder::StyledStr,
}
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct CleanFlags {
pub except_paths: Vec<String>,
pub dry_run: bool,
}
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct BundleFlags {
pub entrypoints: Vec<String>,
pub output_path: Option<String>,
pub output_dir: Option<String>,
pub external: Vec<String>,
pub format: BundleFormat,
pub minify: bool,
pub code_splitting: bool,
pub inline_imports: bool,
pub packages: PackageHandling,
pub sourcemap: Option<SourceMapType>,
pub platform: BundlePlatform,
pub watch: bool,
}
#[derive(Clone, Debug, Eq, PartialEq)]
pub enum DenoSubcommand {
Add(AddFlags),
Audit(AuditFlags),
ApproveScripts(ApproveScriptsFlags),
Remove(RemoveFlags),
Bench(BenchFlags),
Bundle(BundleFlags),
Cache(CacheFlags),
Check(CheckFlags),
Clean(CleanFlags),
Compile(CompileFlags),
Completions(CompletionsFlags),
Coverage(CoverageFlags),
Deploy(DeployFlags),
Doc(DocFlags),
Eval(EvalFlags),
Fmt(FmtFlags),
Init(InitFlags),
Info(InfoFlags),
Install(InstallFlags),
JSONReference(JSONReferenceFlags),
Jupyter(JupyterFlags),
Uninstall(UninstallFlags),
Lsp,
Lint(LintFlags),
Repl(ReplFlags),
Run(RunFlags),
Serve(ServeFlags),
Task(TaskFlags),
Test(TestFlags),
Outdated(OutdatedFlags),
Types,
Upgrade(UpgradeFlags),
Vendor,
Publish(PublishFlags),
Help(HelpFlags),
X(XFlags),
}
impl DenoSubcommand {
pub fn watch_flags(&self) -> Option<WatchFlagsRef<'_>> {
match self {
Self::Run(RunFlags {
watch: Some(flags), ..
})
| Self::Test(TestFlags {
watch: Some(flags), ..
}) => Some(WatchFlagsRef::WithPaths(flags)),
Self::Bench(BenchFlags {
watch: Some(flags), ..
})
| Self::Lint(LintFlags {
watch: Some(flags), ..
})
| Self::Fmt(FmtFlags {
watch: Some(flags), ..
}) => Some(WatchFlagsRef::Watch(flags)),
_ => None,
}
}
}
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum OutdatedKind {
Update {
latest: bool,
interactive: bool,
lockfile_only: bool,
},
PrintOutdated {
compatible: bool,
},
}
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct OutdatedFlags {
pub filters: Vec<String>,
pub recursive: bool,
pub kind: OutdatedKind,
}
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct ApproveScriptsFlags {
pub lockfile_only: bool,
pub packages: Vec<String>,
}
impl DenoSubcommand {
pub fn is_run(&self) -> bool {
matches!(self, Self::Run(_))
}
// Returns `true` if the subcommand depends on testing infrastructure.
pub fn needs_test(&self) -> bool {
matches!(
self,
Self::Test(_)
| Self::Jupyter(_)
| Self::Repl(_)
| Self::Bench(_)
| Self::Lint(_)
| Self::Lsp
)
}
pub fn npm_system_info(&self) -> NpmSystemInfo {
match self {
DenoSubcommand::Compile(CompileFlags {
target: Some(target),
..
}) => {
// the values of NpmSystemInfo align with the possible values for the
// `arch` and `platform` fields of Node.js' `process` global:
// https://nodejs.org/api/process.html
match target.as_str() {
"aarch64-apple-darwin" => NpmSystemInfo {
os: "darwin".into(),
cpu: "arm64".into(),
},
"aarch64-unknown-linux-gnu" => NpmSystemInfo {
os: "linux".into(),
cpu: "arm64".into(),
},
"x86_64-apple-darwin" => NpmSystemInfo {
os: "darwin".into(),
cpu: "x64".into(),
},
"x86_64-unknown-linux-gnu" => NpmSystemInfo {
os: "linux".into(),
cpu: "x64".into(),
},
"x86_64-pc-windows-msvc" => NpmSystemInfo {
os: "win32".into(),
cpu: "x64".into(),
},
value => {
log::warn!(
concat!(
"Not implemented npm system info for target '{}'. Using current ",
"system default. This may impact architecture specific dependencies."
),
value,
);
NpmSystemInfo::default()
}
}
}
_ => {
let arch = std::env::var_os("DENO_INSTALL_ARCH");
if let Some(var) = arch.as_ref().and_then(|s| s.to_str()) {
NpmSystemInfo::from_rust(std::env::consts::OS, var)
} else {
NpmSystemInfo::default()
}
}
}
}
}
impl Default for DenoSubcommand {
fn default() -> DenoSubcommand {
DenoSubcommand::Repl(ReplFlags {
eval_files: None,
eval: None,
is_default_command: true,
json: false,
})
}
}
#[derive(Debug, Clone, Copy, Eq, PartialEq, Default)]
pub enum TypeCheckMode {
/// Type-check all modules.
All,
/// Skip type-checking of all modules. The default value for "deno run" and
/// several other subcommands.
#[default]
None,
/// Only type-check local modules. The default value for "deno test" and
/// several other subcommands.
Local,
}
impl TypeCheckMode {
/// Gets if type checking will occur under this mode.
pub fn is_true(&self) -> bool {
match self {
Self::None => false,
Self::Local | Self::All => true,
}
}
/// Gets the corresponding module `GraphKind` that should be created
/// for the current `TypeCheckMode`.
pub fn as_graph_kind(&self) -> GraphKind {
match self.is_true() {
true => GraphKind::All,
false => GraphKind::CodeOnly,
}
}
}
fn minutes_duration_or_date_parser(
s: &str,
) -> Result<NewestDependencyDate, clap::Error> {
deno_config::parse_minutes_duration_or_date(&sys_traits::impls::RealSys, s)
.map_err(|e| clap::Error::raw(clap::error::ErrorKind::InvalidValue, e))
}
fn parse_packages_allowed_scripts(s: &str) -> Result<String, AnyError> {
if !s.starts_with("npm:") {
bail!(
"Invalid package for --allow-scripts: '{}'. An 'npm:' specifier is required",
s
);
} else {
Ok(s.into())
}
}
#[derive(Clone, Debug, Eq, PartialEq, Default)]
pub struct InternalFlags {
/// Used when the language server is configured with an
/// explicit cache option.
pub cache_path: Option<PathBuf>,
/// Override the path to use for the node_modules directory.
pub root_node_modules_dir_override: Option<PathBuf>,
/// Only reads to the lockfile instead of writing to it.
pub lockfile_skip_write: bool,
}
#[derive(Clone, Debug, Eq, PartialEq, Default)]
pub struct Flags {
pub initial_cwd: Option<PathBuf>,
/// Vector of CLI arguments - these are user script arguments, all Deno
/// specific flags are removed.
pub argv: Vec<String>,
pub subcommand: DenoSubcommand,
pub frozen_lockfile: Option<bool>,
pub ca_stores: Option<Vec<String>>,
pub ca_data: Option<CaData>,
pub cache_blocklist: Vec<String>,
pub cached_only: bool,
pub type_check_mode: TypeCheckMode,
pub config_flag: ConfigFlag,
pub node_modules_dir: Option<NodeModulesDirMode>,
pub vendor: Option<bool>,
pub enable_op_summary_metrics: bool,
pub enable_testing_features: bool,
pub ext: Option<String>,
/// Flags that aren't exposed in the CLI, but are used internally.
pub internal: InternalFlags,
pub ignore: Vec<String>,
pub import_map_path: Option<String>,
pub env_file: Option<Vec<String>>,
pub inspect_brk: Option<SocketAddr>,
pub inspect_wait: Option<SocketAddr>,
pub inspect: Option<SocketAddr>,
pub location: Option<Url>,
pub lock: Option<String>,
pub log_level: Option<Level>,
pub minimum_dependency_age: Option<NewestDependencyDate>,
pub no_remote: bool,
pub no_lock: bool,
pub no_npm: bool,
pub reload: bool,
pub seed: Option<u64>,
pub trace_ops: Option<Vec<String>>,
pub unstable_config: UnstableConfig,
pub unsafely_ignore_certificate_errors: Option<Vec<String>>,
pub v8_flags: Vec<String>,
pub code_cache_enabled: bool,
pub permissions: PermissionFlags,
pub allow_scripts: PackagesAllowedScripts,
pub permission_set: Option<String>,
pub eszip: bool,
pub node_conditions: Vec<String>,
pub preload: Vec<String>,
pub require: Vec<String>,
pub tunnel: bool,
}
#[derive(Clone, Debug, Eq, PartialEq, Default, Serialize, Deserialize)]
pub struct PermissionFlags {
pub allow_all: bool,
pub allow_env: Option<Vec<String>>,
pub deny_env: Option<Vec<String>>,
pub ignore_env: Option<Vec<String>>,
pub allow_ffi: Option<Vec<String>>,
pub deny_ffi: Option<Vec<String>>,
pub allow_net: Option<Vec<String>>,
pub deny_net: Option<Vec<String>>,
pub allow_read: Option<Vec<String>>,
pub deny_read: Option<Vec<String>>,
pub ignore_read: Option<Vec<String>>,
pub allow_run: Option<Vec<String>>,
pub deny_run: Option<Vec<String>>,
pub allow_sys: Option<Vec<String>>,
pub deny_sys: Option<Vec<String>>,
pub allow_write: Option<Vec<String>>,
pub deny_write: Option<Vec<String>>,
pub no_prompt: bool,
pub allow_import: Option<Vec<String>>,
pub deny_import: Option<Vec<String>>,
}
impl PermissionFlags {
pub fn has_permission(&self) -> bool {
self.allow_all
|| self.allow_env.is_some()
|| self.deny_env.is_some()
|| self.ignore_env.is_some()
|| self.allow_ffi.is_some()
|| self.deny_ffi.is_some()
|| self.allow_net.is_some()
|| self.deny_net.is_some()
|| self.allow_read.is_some()
|| self.deny_read.is_some()
|| self.ignore_read.is_some()
|| self.allow_run.is_some()
|| self.deny_run.is_some()
|| self.allow_sys.is_some()
|| self.deny_sys.is_some()
|| self.allow_write.is_some()
|| self.deny_write.is_some()
|| self.allow_import.is_some()
|| self.deny_import.is_some()
}
}
fn join_paths(allowlist: &[String], d: &str) -> String {
allowlist
.iter()
.map(|path| path.to_string())
.collect::<Vec<String>>()
.join(d)
}
impl Flags {
/// Return list of permission arguments that are equivalent
/// to the ones used to create `self`.
pub fn to_permission_args(&self) -> Vec<String> {
let mut args = vec![];
if self.permissions.allow_all {
args.push("--allow-all".to_string());
return args;
}
match &self.permissions.allow_read {
Some(read_allowlist) if read_allowlist.is_empty() => {
args.push("--allow-read".to_string());
}
Some(read_allowlist) => {
let s = format!("--allow-read={}", join_paths(read_allowlist, ","));
args.push(s);
}
_ => {}
}
match &self.permissions.deny_read {
Some(read_denylist) if read_denylist.is_empty() => {
args.push("--deny-read".to_string());
}
Some(read_denylist) => {
let s = format!("--deny-read={}", join_paths(read_denylist, ","));
args.push(s);
}
_ => {}
}
match &self.permissions.allow_write {
Some(write_allowlist) if write_allowlist.is_empty() => {
args.push("--allow-write".to_string());
}
Some(write_allowlist) => {
let s = format!("--allow-write={}", join_paths(write_allowlist, ","));
args.push(s);
}
_ => {}
}
match &self.permissions.deny_write {
Some(write_denylist) if write_denylist.is_empty() => {
args.push("--deny-write".to_string());
}
Some(write_denylist) => {
let s = format!("--deny-write={}", join_paths(write_denylist, ","));
args.push(s);
}
_ => {}
}
match &self.permissions.allow_net {
Some(net_allowlist) if net_allowlist.is_empty() => {
args.push("--allow-net".to_string());
}
Some(net_allowlist) => {
let s = format!("--allow-net={}", net_allowlist.join(","));
args.push(s);
}
_ => {}
}
match &self.permissions.deny_net {
Some(net_denylist) if net_denylist.is_empty() => {
args.push("--deny-net".to_string());
}
Some(net_denylist) => {
let s = format!("--deny-net={}", net_denylist.join(","));
args.push(s);
}
_ => {}
}
match &self.unsafely_ignore_certificate_errors {
Some(ic_allowlist) if ic_allowlist.is_empty() => {
args.push("--unsafely-ignore-certificate-errors".to_string());
}
Some(ic_allowlist) => {
let s = format!(
"--unsafely-ignore-certificate-errors={}",
ic_allowlist.join(",")
);
args.push(s);
}
_ => {}
}
match &self.permissions.allow_env {
Some(env_allowlist) if env_allowlist.is_empty() => {
args.push("--allow-env".to_string());
}
Some(env_allowlist) => {
let s = format!("--allow-env={}", env_allowlist.join(","));
args.push(s);
}
_ => {}
}
match &self.permissions.deny_env {
Some(env_denylist) if env_denylist.is_empty() => {
args.push("--deny-env".to_string());
}
Some(env_denylist) => {
let s = format!("--deny-env={}", env_denylist.join(","));
args.push(s);
}
_ => {}
}
match &self.permissions.ignore_env {
Some(ignorelist) if ignorelist.is_empty() => {
args.push("--ignore-env".to_string());
}
Some(ignorelist) => {
let s = format!("--ignore-env={}", ignorelist.join(","));
args.push(s);
}
_ => {}
}
match &self.permissions.ignore_read {
Some(ignorelist) if ignorelist.is_empty() => {
args.push("--ignore-read".to_string());
}
Some(ignorelist) => {
let s = format!("--ignore-read={}", ignorelist.join(","));
args.push(s);
}
_ => {}
}
match &self.permissions.allow_run {
Some(run_allowlist) if run_allowlist.is_empty() => {
args.push("--allow-run".to_string());
}
Some(run_allowlist) => {
let s = format!("--allow-run={}", run_allowlist.join(","));
args.push(s);
}
_ => {}
}
match &self.permissions.deny_run {
Some(run_denylist) if run_denylist.is_empty() => {
args.push("--deny-run".to_string());
}
Some(run_denylist) => {
let s = format!("--deny-run={}", run_denylist.join(","));
args.push(s);
}
_ => {}
}
match &self.permissions.allow_sys {
Some(sys_allowlist) if sys_allowlist.is_empty() => {
args.push("--allow-sys".to_string());
}
Some(sys_allowlist) => {
let s = format!("--allow-sys={}", sys_allowlist.join(","));
args.push(s)
}
_ => {}
}
match &self.permissions.deny_sys {
Some(sys_denylist) if sys_denylist.is_empty() => {
args.push("--deny-sys".to_string());
}
Some(sys_denylist) => {
let s = format!("--deny-sys={}", sys_denylist.join(","));
args.push(s)
}
_ => {}
}
match &self.permissions.allow_ffi {
Some(ffi_allowlist) if ffi_allowlist.is_empty() => {
args.push("--allow-ffi".to_string());
}
Some(ffi_allowlist) => {
let s = format!("--allow-ffi={}", join_paths(ffi_allowlist, ","));
args.push(s);
}
_ => {}
}
match &self.permissions.deny_ffi {
Some(ffi_denylist) if ffi_denylist.is_empty() => {
args.push("--deny-ffi".to_string());
}
Some(ffi_denylist) => {
let s = format!("--deny-ffi={}", join_paths(ffi_denylist, ","));
args.push(s);
}
_ => {}
}
match &self.permissions.allow_import {
Some(allowlist) if allowlist.is_empty() => {
args.push("--allow-import".to_string());
}
Some(allowlist) => {
let s = format!("--allow-import={}", allowlist.join(","));
args.push(s);
}
_ => {}
}
match &self.permissions.deny_import {
Some(denylist) if denylist.is_empty() => {
args.push("--deny-import".to_string());
}
Some(denylist) => {
let s = format!("--deny-import={}", denylist.join(","));
args.push(s);
}
_ => {}
}
args
}
pub fn no_legacy_abort(&self) -> bool {
self
.unstable_config
.features
.contains(&String::from("no-legacy-abort"))
}
pub fn otel_config(&self) -> OtelConfig {
let otel_var = |name| match std::env::var(name) {
Ok(s) if s.eq_ignore_ascii_case("true") => Some(true),
Ok(s) if s.eq_ignore_ascii_case("false") => Some(false),
Ok(_) => {
log::warn!(
"'{name}' env var value not recognized, only 'true' and 'false' are accepted"
);
None
}
Err(_) => None,
};
let disabled = otel_var("OTEL_SDK_DISABLED").unwrap_or(false);
let default = !disabled && otel_var("OTEL_DENO").unwrap_or(false);
let propagators = if default {
if let Ok(propagators) = std::env::var("OTEL_PROPAGATORS") {
propagators
.split(',')
.filter_map(|p| match p.trim() {
"tracecontext" => Some(OtelPropagators::TraceContext),
"baggage" => Some(OtelPropagators::Baggage),
_ => None,
})
.collect()
} else {
HashSet::from([OtelPropagators::TraceContext, OtelPropagators::Baggage])
}
} else {
HashSet::default()
};
OtelConfig {
tracing_enabled: !disabled
&& otel_var("OTEL_DENO_TRACING").unwrap_or(default),
metrics_enabled: !disabled
&& otel_var("OTEL_DENO_METRICS").unwrap_or(default),
propagators,
console: match std::env::var("OTEL_DENO_CONSOLE").as_deref() {
Ok(_) if disabled => OtelConsoleConfig::Ignore,
Ok("ignore") => OtelConsoleConfig::Ignore,
Ok("capture") => OtelConsoleConfig::Capture,
Ok("replace") => OtelConsoleConfig::Replace,
res => {
if res.is_ok() {
log::warn!("'OTEL_DENO_CONSOLE' env var value not recognized, only 'ignore', 'capture', or 'replace' are accepted");
}
if default {
OtelConsoleConfig::Capture
} else {
OtelConsoleConfig::Ignore
}
}
},
deterministic_prefix: std::env::var("DENO_UNSTABLE_OTEL_DETERMINISTIC")
.as_deref()
.map(u8::from_str)
.map(|x| match x {
Ok(x) => Some(x),
Err(_) => {
log::warn!("'DENO_UNSTABLE_OTEL_DETERMINISTIC' env var value not recognized, only integers are accepted");
None
}
})
.ok()
.flatten(),
}
}
/// Extract the paths the config file should be discovered from.
///
/// Returns `None` if the config file should not be auto-discovered.
pub fn config_path_args(&self, current_dir: &Path) -> Option<Vec<PathBuf>> {
fn resolve_multiple_files(
files_or_dirs: &[String],
current_dir: &Path,
) -> Vec<PathBuf> {
let mut seen = HashSet::with_capacity(files_or_dirs.len());
let result = files_or_dirs
.iter()
.filter_map(|p| {
let path = normalize_path(Cow::Owned(current_dir.join(p)));
if seen.insert(path.clone()) {
Some(path.into_owned())
} else {
None
}
})
.collect::<Vec<_>>();
if result.is_empty() {
vec![current_dir.to_path_buf()]
} else {
result
}
}
fn resolve_single_folder_path(
arg: &str,
current_dir: &Path,
maybe_resolve_directory: impl FnOnce(PathBuf) -> Option<PathBuf>,
) -> Option<PathBuf> {
if let Ok(module_specifier) = resolve_url_or_path(arg, current_dir) {
if module_specifier.scheme() == "file"
|| module_specifier.scheme() == "npm"
{
if let Ok(p) = url_to_file_path(&module_specifier) {
maybe_resolve_directory(p)
} else {
Some(current_dir.to_path_buf())
}
} else {
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | true |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/args/mod.rs | cli/args/mod.rs | // Copyright 2018-2025 the Deno authors. MIT license.
mod flags;
mod flags_net;
use std::borrow::Cow;
use std::collections::HashMap;
use std::env;
use std::net::SocketAddr;
use std::num::NonZeroUsize;
use std::path::Path;
use std::path::PathBuf;
use std::sync::Arc;
use deno_ast::MediaType;
use deno_ast::ModuleSpecifier;
use deno_cache_dir::file_fetcher::CacheSetting;
pub use deno_config::deno_json::BenchConfig;
pub use deno_config::deno_json::CompilerOptions;
pub use deno_config::deno_json::ConfigFile;
use deno_config::deno_json::FmtConfig;
pub use deno_config::deno_json::FmtOptionsConfig;
pub use deno_config::deno_json::LintRulesConfig;
use deno_config::deno_json::NodeModulesDirMode;
use deno_config::deno_json::PermissionConfigValue;
use deno_config::deno_json::PermissionsObjectWithBase;
pub use deno_config::deno_json::ProseWrap;
use deno_config::deno_json::TestConfig;
pub use deno_config::glob::FilePatterns;
pub use deno_config::workspace::TsTypeLib;
use deno_config::workspace::Workspace;
use deno_config::workspace::WorkspaceDirLintConfig;
use deno_config::workspace::WorkspaceDirectory;
use deno_config::workspace::WorkspaceDirectoryRc;
use deno_config::workspace::WorkspaceLintConfig;
use deno_core::anyhow::bail;
use deno_core::error::AnyError;
use deno_core::url::Url;
use deno_graph::GraphKind;
use deno_lib::args::CaData;
use deno_lib::args::has_flag_env_var;
use deno_lib::args::npm_pkg_req_ref_to_binary_command;
use deno_lib::args::npm_process_state;
use deno_lib::version::DENO_VERSION_INFO;
use deno_lib::worker::StorageKeyResolver;
use deno_npm::NpmSystemInfo;
use deno_npm_installer::LifecycleScriptsConfig;
use deno_npm_installer::graph::NpmCachingStrategy;
use deno_path_util::resolve_url_or_path;
use deno_resolver::factory::resolve_jsr_url;
use deno_runtime::deno_node::ops::ipc::ChildIpcSerialization;
use deno_runtime::deno_permissions::AllowRunDescriptor;
use deno_runtime::deno_permissions::PathDescriptor;
use deno_runtime::deno_permissions::PermissionsOptions;
use deno_runtime::inspector_server::InspectorServer;
use deno_semver::StackString;
use deno_semver::npm::NpmPackageReqReference;
use deno_telemetry::OtelConfig;
use deno_terminal::colors;
pub use flags::*;
use once_cell::sync::Lazy;
use thiserror::Error;
use crate::sys::CliSys;
pub type CliLockfile = deno_resolver::lockfile::LockfileLock<CliSys>;

/// Returns the base URL of the JSR registry, resolved once on first use.
pub fn jsr_url() -> &'static Url {
  static URL: Lazy<Url> = Lazy::new(|| resolve_jsr_url(&CliSys::default()));
  &URL
}

/// Returns the JSR management API URL (the registry URL with an `api/`
/// path), resolved once on first use.
pub fn jsr_api_url() -> &'static Url {
  static API_URL: Lazy<Url> = Lazy::new(|| {
    let mut api_url = jsr_url().clone();
    api_url.set_path("api/");
    api_url
  });
  &API_URL
}
/// Bench options that apply to the whole workspace; resolved purely from
/// CLI flags (not from per-member config files).
pub struct WorkspaceBenchOptions {
pub filter: Option<String>,
pub json: bool,
pub no_run: bool,
pub permit_no_files: bool,
}
impl WorkspaceBenchOptions {
/// Copies the workspace-wide bench settings out of the parsed CLI flags.
pub fn resolve(bench_flags: &BenchFlags) -> Self {
Self {
filter: bench_flags.filter.clone(),
json: bench_flags.json,
no_run: bench_flags.no_run,
permit_no_files: bench_flags.permit_no_files,
}
}
}
/// Per-workspace-member bench options resolved from that member's config.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct BenchOptions {
pub files: FilePatterns,
}
impl BenchOptions {
/// Resolves per-member bench options from the member's config file.
pub fn resolve(bench_config: BenchConfig, _bench_flags: &BenchFlags) -> Self {
// this is the same, but keeping the same pattern as everywhere else for the future
Self {
files: bench_config.files,
}
}
}
/// Unstable formatter features enabled via config `unstable` entries or the
/// matching `--unstable-*` CLI flags.
#[derive(Clone, Debug, Default, PartialEq, Eq, Hash)]
pub struct UnstableFmtOptions {
pub component: bool,
pub sql: bool,
}
/// Fully-resolved formatter options for one workspace member.
#[derive(Clone, Debug)]
pub struct FmtOptions {
pub options: FmtOptionsConfig,
pub unstable: UnstableFmtOptions,
pub files: FilePatterns,
}
impl Default for FmtOptions {
fn default() -> Self {
Self::new_with_base(PathBuf::from("/"))
}
}
impl FmtOptions {
pub fn new_with_base(base: PathBuf) -> Self {
Self {
options: FmtOptionsConfig::default(),
unstable: Default::default(),
files: FilePatterns::new_with_base(base),
}
}
pub fn resolve(
fmt_config: FmtConfig,
unstable: UnstableFmtOptions,
fmt_flags: &FmtFlags,
) -> Self {
Self {
options: resolve_fmt_options(fmt_flags, fmt_config.options),
unstable: UnstableFmtOptions {
component: unstable.component || fmt_flags.unstable_component,
sql: unstable.sql || fmt_flags.unstable_sql,
},
files: fmt_config.files,
}
}
}
/// Applies formatter CLI flag overrides on top of the config-file options.
/// Any flag that was not passed leaves the corresponding config value intact.
fn resolve_fmt_options(
  fmt_flags: &FmtFlags,
  mut options: FmtOptionsConfig,
) -> FmtOptionsConfig {
  options.use_tabs = fmt_flags.use_tabs.or(options.use_tabs);
  options.line_width =
    fmt_flags.line_width.map(|w| w.get()).or(options.line_width);
  options.indent_width =
    fmt_flags.indent_width.map(|w| w.get()).or(options.indent_width);
  options.single_quote = fmt_flags.single_quote.or(options.single_quote);
  if let Some(prose_wrap) = fmt_flags.prose_wrap.as_deref() {
    options.prose_wrap = Some(match prose_wrap {
      "always" => ProseWrap::Always,
      "never" => ProseWrap::Never,
      "preserve" => ProseWrap::Preserve,
      // validators in `flags.rs` makes other values unreachable
      _ => unreachable!(),
    });
  }
  // `--no-semicolons` is stored inverted in the config value.
  options.semi_colons = fmt_flags
    .no_semicolons
    .map(|no_semis| !no_semis)
    .or(options.semi_colons);
  options
}
/// Test options that apply to the whole workspace; resolved purely from
/// CLI flags (not from per-member config files).
#[derive(Clone, Debug)]
pub struct WorkspaceTestOptions {
pub doc: bool,
pub no_run: bool,
pub fail_fast: Option<NonZeroUsize>,
pub permit_no_files: bool,
pub filter: Option<String>,
pub shuffle: Option<u64>,
pub concurrent_jobs: NonZeroUsize,
pub trace_leaks: bool,
pub reporter: TestReporterConfig,
pub junit_path: Option<String>,
pub hide_stacktraces: bool,
}
impl WorkspaceTestOptions {
/// Copies the workspace-wide test settings out of the parsed CLI flags.
pub fn resolve(test_flags: &TestFlags) -> Self {
Self {
permit_no_files: test_flags.permit_no_files,
// `--parallel` is translated into a concrete job count elsewhere.
concurrent_jobs: parallelism_count(test_flags.parallel),
doc: test_flags.doc,
fail_fast: test_flags.fail_fast,
filter: test_flags.filter.clone(),
no_run: test_flags.no_run,
shuffle: test_flags.shuffle,
trace_leaks: test_flags.trace_leaks,
reporter: test_flags.reporter,
junit_path: test_flags.junit_path.clone(),
hide_stacktraces: test_flags.hide_stacktraces,
}
}
}
/// Per-workspace-member test options resolved from that member's config.
#[derive(Debug, Clone)]
pub struct TestOptions {
pub files: FilePatterns,
}
impl TestOptions {
pub fn resolve(test_config: TestConfig, _test_flags: &TestFlags) -> Self {
// this is the same, but keeping the same pattern as everywhere else for the future
Self {
files: test_config.files,
}
}
}
/// Output format for the lint reporter.
#[derive(Clone, Copy, Default, Debug)]
pub enum LintReporterKind {
#[default]
Pretty,
Json,
Compact,
}
/// Lint options that apply to the whole workspace.
#[derive(Clone, Debug)]
pub struct WorkspaceLintOptions {
pub reporter_kind: LintReporterKind,
}
impl WorkspaceLintOptions {
  /// Picks the lint reporter. CLI flags (`--json`, `--compact`) take
  /// precedence over the config file's `report` setting; an unknown config
  /// value is an error, and the default reporter is "pretty".
  pub fn resolve(
    lint_config: &WorkspaceLintConfig,
    lint_flags: &LintFlags,
  ) -> Result<Self, AnyError> {
    let flag_kind = if lint_flags.json {
      Some(LintReporterKind::Json)
    } else if lint_flags.compact {
      Some(LintReporterKind::Compact)
    } else {
      None
    };
    let reporter_kind = match flag_kind {
      Some(kind) => kind,
      // No flag given: fall back to the config file, then the default.
      None => match lint_config.report.as_deref() {
        Some("json") => LintReporterKind::Json,
        Some("compact") => LintReporterKind::Compact,
        Some("pretty") => LintReporterKind::Pretty,
        Some(_) => bail!("Invalid lint report type in config file"),
        None => LintReporterKind::default(),
      },
    };
    Ok(Self { reporter_kind })
  }
}
/// Per-workspace-member lint options resolved from config and CLI flags.
#[derive(Clone, Debug)]
pub struct LintOptions {
pub rules: LintRulesConfig,
pub files: FilePatterns,
pub fix: bool,
pub plugins: Vec<Url>,
}
impl Default for LintOptions {
fn default() -> Self {
Self::new_with_base(PathBuf::from("/"))
}
}
impl LintOptions {
pub fn new_with_base(base: PathBuf) -> Self {
Self {
rules: Default::default(),
files: FilePatterns::new_with_base(base),
fix: false,
plugins: vec![],
}
}
pub fn resolve(
lint_config: WorkspaceDirLintConfig,
lint_flags: &LintFlags,
) -> Result<Self, AnyError> {
let rules = resolve_lint_rules_options(
lint_config.rules,
lint_flags.maybe_rules_tags.clone(),
lint_flags.maybe_rules_include.clone(),
lint_flags.maybe_rules_exclude.clone(),
);
let mut plugins = lint_config.plugins;
plugins.sort_unstable();
Ok(Self {
files: lint_config.files,
rules,
fix: lint_flags.fix,
plugins,
})
}
}
/// Merges rule selection from the config file with CLI flags.
///
/// CLI flags take precedence over the config file, i.e. if there's
/// `rules.include` in the config file and a `--rules-include` CLI flag,
/// only the flag value is taken into account.
fn resolve_lint_rules_options(
  config_rules: LintRulesConfig,
  maybe_rules_tags: Option<Vec<String>>,
  maybe_rules_include: Option<Vec<String>>,
  maybe_rules_exclude: Option<Vec<String>>,
) -> LintRulesConfig {
  LintRulesConfig {
    exclude: maybe_rules_exclude.or(config_rules.exclude),
    include: maybe_rules_include.or(config_rules.include),
    tags: maybe_rules_tags.or(config_rules.tags),
  }
}
/// Resolves a "bare" main-module specifier (e.g. `deno run foo`) through the
/// workspace resolver, delegating npm/package.json resolutions to the node
/// resolver.
pub struct WorkspaceMainModuleResolver {
workspace_resolver: Arc<deno_resolver::workspace::WorkspaceResolver<CliSys>>,
node_resolver: Arc<crate::node::CliNodeResolver>,
}
impl WorkspaceMainModuleResolver {
pub fn new(
workspace_resolver: Arc<
deno_resolver::workspace::WorkspaceResolver<CliSys>,
>,
node_resolver: Arc<crate::node::CliNodeResolver>,
) -> Self {
Self {
workspace_resolver,
node_resolver,
}
}
}
impl WorkspaceMainModuleResolver {
/// Resolves `specifier` relative to `cwd` via the workspace resolver and
/// maps every resolution variant to a concrete URL.
///
/// Returns an error when the workspace resolution itself fails, when a
/// package.json dependency entry failed to parse, or for unsupported
/// `jsr:` requirements in package.json.
fn resolve_main_module(
&self,
specifier: &str,
cwd: &Url,
) -> Result<Url, AnyError> {
let resolution = self.workspace_resolver.resolve(
specifier,
cwd,
deno_resolver::workspace::ResolutionKind::Execution,
)?;
let url = match resolution {
// These variants already carry the final specifier.
deno_resolver::workspace::MappedResolution::Normal {
specifier, ..
} => specifier,
deno_resolver::workspace::MappedResolution::WorkspaceJsrPackage {
specifier,
..
} => specifier,
// Workspace npm package: resolve the subpath inside the package dir.
deno_resolver::workspace::MappedResolution::WorkspaceNpmPackage {
target_pkg_json,
sub_path,
..
} => self
.node_resolver
.resolve_package_subpath_from_deno_module(
target_pkg_json.clone().dir_path(),
sub_path.as_deref(),
Some(cwd),
node_resolver::ResolutionMode::Import,
node_resolver::NodeResolutionKind::Execution,
)?
.into_url()?,
// Bare specifier that matched a package.json dependency entry.
deno_resolver::workspace::MappedResolution::PackageJson {
sub_path,
dep_result,
alias,
..
} => {
// Surface any error recorded while parsing the dependency entry.
let result = dep_result
.as_ref()
.map_err(|e| deno_core::anyhow::anyhow!("{e}"))?;
match result {
// `file:` dependency: resolve relative to the cwd on disk.
deno_package_json::PackageJsonDepValue::File(file) => {
let cwd_path = deno_path_util::url_to_file_path(cwd)?;
deno_path_util::resolve_path(file, &cwd_path)?
}
// Registry requirement: synthesize an `npm:` specifier.
deno_package_json::PackageJsonDepValue::Req(package_req) => {
ModuleSpecifier::parse(&format!(
"npm:{}{}",
package_req,
sub_path.map(|s| format!("/{}", s)).unwrap_or_default()
))?
}
// `workspace:` dependency: locate the matching member folder
// and resolve the subpath within it.
deno_package_json::PackageJsonDepValue::Workspace(version_req) => {
let pkg_folder = self
.workspace_resolver
.resolve_workspace_pkg_json_folder_for_pkg_json_dep(
alias,
version_req,
)?;
self
.node_resolver
.resolve_package_subpath_from_deno_module(
pkg_folder,
sub_path.as_deref(),
Some(cwd),
node_resolver::ResolutionMode::Import,
node_resolver::NodeResolutionKind::Execution,
)?
.into_url()?
}
deno_package_json::PackageJsonDepValue::JsrReq(_) => {
return Err(
deno_resolver::DenoResolveErrorKind::UnsupportedPackageJsonJsrReq
.into_box()
.into(),
);
}
}
}
// Resolution via package.json `imports` (per the resolver's naming —
// presumably `#...` style specifiers; confirm in deno_resolver).
deno_resolver::workspace::MappedResolution::PackageJsonImport {
pkg_json,
} => self
.node_resolver
.resolve_package_import(
specifier,
Some(&node_resolver::UrlOrPathRef::from_url(cwd)),
Some(pkg_json),
node_resolver::ResolutionMode::Import,
node_resolver::NodeResolutionKind::Execution,
)?
.into_url()?,
};
Ok(url)
}
}
/// Holds the resolved options of many sources used by subcommands
/// and provides some helper function for creating common objects.
#[derive(Debug)]
pub struct CliOptions {
// the source of the options is a detail the rest of the
// application need not concern itself with, so keep these private
flags: Arc<Flags>,
initial_cwd: PathBuf,
// Lazily-resolved main module; caches the first resolution result
// (including errors) so repeated lookups are stable.
main_module_cell: std::sync::OnceLock<Result<ModuleSpecifier, AnyError>>,
pub start_dir: Arc<WorkspaceDirectory>,
}
impl CliOptions {
#[allow(clippy::too_many_arguments)]
/// Constructs the options, warning loudly when TLS certificate
/// validation has been disabled via
/// `--unsafely-ignore-certificate-errors`.
pub fn new(
  flags: Arc<Flags>,
  initial_cwd: PathBuf,
  start_dir: Arc<WorkspaceDirectory>,
) -> Result<Self, AnyError> {
  if let Some(insecure_allowlist) =
    flags.unsafely_ignore_certificate_errors.as_ref()
  {
    let domains = match insecure_allowlist.is_empty() {
      true => "for all hostnames".to_string(),
      false => format!("for: {}", insecure_allowlist.join(", ")),
    };
    let msg =
      format!("DANGER: TLS certificate validation is disabled {}", domains);
    log::error!("{}", colors::yellow(msg));
  }
  Ok(Self {
    flags,
    initial_cwd,
    main_module_cell: std::sync::OnceLock::new(),
    start_dir,
  })
}
/// Like `new`, but first logs any workspace configuration diagnostics.
pub fn from_flags(
flags: Arc<Flags>,
initial_cwd: PathBuf,
start_dir: Arc<WorkspaceDirectory>,
) -> Result<Self, AnyError> {
for diagnostic in start_dir.workspace.diagnostics() {
log::warn!("{} {}", colors::yellow("Warning"), diagnostic);
}
log::debug!("Finished config loading.");
Self::new(flags, initial_cwd, start_dir)
}
/// The working directory captured at startup.
#[inline(always)]
pub fn initial_cwd(&self) -> &Path {
&self.initial_cwd
}
#[inline(always)]
pub fn workspace(&self) -> &Arc<Workspace> {
&self.start_dir.workspace
}
/// Which parts of the module graph the current subcommand needs
/// (e.g. `check` only needs type information).
pub fn graph_kind(&self) -> GraphKind {
match self.sub_command() {
DenoSubcommand::Add(_) => GraphKind::All,
DenoSubcommand::Cache(_) => GraphKind::All,
DenoSubcommand::Check(_) => GraphKind::TypesOnly,
DenoSubcommand::Install(InstallFlags::Local(_)) => GraphKind::All,
_ => self.type_check_mode().as_graph_kind(),
}
}
pub fn ts_type_lib_window(&self) -> TsTypeLib {
TsTypeLib::DenoWindow
}
pub fn ts_type_lib_worker(&self) -> TsTypeLib {
TsTypeLib::DenoWorker
}
/// Cache behavior derived from `--cached-only` and `--reload[=...]`;
/// defaults to using the cache when present.
pub fn cache_setting(&self) -> CacheSetting {
if self.flags.cached_only {
CacheSetting::Only
} else if !self.flags.cache_blocklist.is_empty() {
CacheSetting::ReloadSome(self.flags.cache_blocklist.clone())
} else if self.flags.reload {
CacheSetting::ReloadAll
} else {
CacheSetting::Use
}
}
pub fn npm_system_info(&self) -> NpmSystemInfo {
self.sub_command().npm_system_info()
}
/// Resolve the specifier for a specified import map.
///
/// This will NOT include the config file if it
/// happens to be an import map.
pub fn resolve_specified_import_map_specifier(
&self,
) -> Result<Option<ModuleSpecifier>, ImportMapSpecifierResolveError> {
resolve_import_map_specifier(
self.flags.import_map_path.as_deref(),
self.workspace().root_deno_json().map(|c| c.as_ref()),
&self.initial_cwd,
)
}
/// Reads Node IPC channel configuration from `NODE_CHANNEL_FD` /
/// `NODE_CHANNEL_SERIALIZATION_MODE`, removing both variables so child
/// processes do not inherit them. Returns `Some((fd, serialization))`
/// when a channel fd is configured.
///
/// NOTE(review): an invalid serialization mode errors even when
/// `NODE_CHANNEL_FD` is unset — confirm this strictness is intended.
pub fn node_ipc_init(
&self,
) -> Result<Option<(i64, ChildIpcSerialization)>, AnyError> {
let maybe_node_channel_fd = std::env::var("NODE_CHANNEL_FD").ok();
let maybe_node_channel_serialization = if let Ok(serialization) =
std::env::var("NODE_CHANNEL_SERIALIZATION_MODE")
{
Some(serialization.parse::<ChildIpcSerialization>()?)
} else {
None
};
if let Some(node_channel_fd) = maybe_node_channel_fd {
// Remove so that child processes don't inherit this environment variables.
#[allow(clippy::undocumented_unsafe_blocks)]
unsafe {
std::env::remove_var("NODE_CHANNEL_FD");
std::env::remove_var("NODE_CHANNEL_SERIALIZATION_MODE");
}
let node_channel_fd = node_channel_fd.parse::<i64>()?;
Ok(Some((
node_channel_fd,
maybe_node_channel_serialization.unwrap_or(ChildIpcSerialization::Json),
)))
} else {
Ok(None)
}
}
/// Port to serve on; only present for the `serve` subcommand.
pub fn serve_port(&self) -> Option<u16> {
if let DenoSubcommand::Serve(flags) = self.sub_command() {
Some(flags.port)
} else {
None
}
}
/// Host to serve on; only present for the `serve` subcommand.
pub fn serve_host(&self) -> Option<String> {
if let DenoSubcommand::Serve(flags) = self.sub_command() {
Some(flags.host.clone())
} else {
None
}
}
pub fn eszip(&self) -> bool {
self.flags.eszip
}
pub fn node_conditions(&self) -> &[String] {
self.flags.node_conditions.as_ref()
}
pub fn otel_config(&self) -> OtelConfig {
self.flags.otel_config()
}
pub fn no_legacy_abort(&self) -> bool {
self.flags.no_legacy_abort()
}
pub fn env_file_name(&self) -> Option<&Vec<String>> {
self.flags.env_file.as_ref()
}
/// Resolves the `--preload` specifiers relative to the initial cwd.
pub fn preload_modules(&self) -> Result<Vec<ModuleSpecifier>, AnyError> {
if self.flags.preload.is_empty() {
return Ok(vec![]);
}
let mut modules = Vec::with_capacity(self.flags.preload.len());
for preload_specifier in self.flags.preload.iter() {
modules.push(resolve_url_or_path(preload_specifier, self.initial_cwd())?);
}
Ok(modules)
}
/// Resolves the `--require` specifiers relative to the initial cwd.
pub fn require_modules(&self) -> Result<Vec<ModuleSpecifier>, AnyError> {
if self.flags.require.is_empty() {
return Ok(vec![]);
}
let mut require = Vec::with_capacity(self.flags.require.len());
for require_specifier in self.flags.require.iter() {
require.push(resolve_url_or_path(require_specifier, self.initial_cwd())?);
}
Ok(require)
}
/// If `raw_specifier` is "bare" (no leading `.` and not an absolute path),
/// tries the workspace-aware resolver first and falls back to
/// `default_resolve` on failure or when no resolver was supplied.
fn resolve_main_module_with_resolver_if_bare(
&self,
raw_specifier: &str,
resolver: Option<&WorkspaceMainModuleResolver>,
default_resolve: impl Fn() -> Result<ModuleSpecifier, AnyError>,
) -> Result<ModuleSpecifier, AnyError> {
match resolver {
Some(resolver)
if !raw_specifier.starts_with('.')
&& !Path::new(raw_specifier).is_absolute() =>
{
let cwd = deno_path_util::url_from_directory_path(self.initial_cwd())?;
resolver
.resolve_main_module(raw_specifier, &cwd)
.or_else(|_| default_resolve())
}
_ => default_resolve(),
}
}
/// Resolves the main module for the current subcommand.
///
/// The result is computed once and memoized in `main_module_cell`;
/// subsequent calls (with any resolver) return the first result.
pub fn resolve_main_module_with_resolver(
&self,
resolver: Option<&WorkspaceMainModuleResolver>,
) -> Result<&ModuleSpecifier, AnyError> {
self
.main_module_cell
.get_or_init(|| {
Ok(match &self.flags.subcommand {
DenoSubcommand::Compile(compile_flags) => {
resolve_url_or_path(&compile_flags.source_file, self.initial_cwd())?
}
// Eval/repl/stdin use synthetic module names since there is no
// real file on disk.
DenoSubcommand::Eval(_) => {
let specifier = format!(
"./$deno$eval.{}",
self.flags.ext.as_deref().unwrap_or("mts")
);
deno_path_util::resolve_path(&specifier, self.initial_cwd())?
}
DenoSubcommand::Repl(_) => deno_path_util::resolve_path(
"./$deno$repl.mts",
self.initial_cwd(),
)?,
DenoSubcommand::Run(run_flags) => {
if run_flags.is_stdin() {
let specifier = format!(
"./$deno$stdin.{}",
self.flags.ext.as_deref().unwrap_or("mts")
);
deno_path_util::resolve_path(&specifier, self.initial_cwd())?
} else {
let default_resolve = || {
let url =
resolve_url_or_path(&run_flags.script, self.initial_cwd())?;
// When launched as a Node main (npm binary), a file URL of
// unknown media type may be a Node binary entrypoint.
if self.is_node_main()
&& url.scheme() == "file"
&& MediaType::from_specifier(&url) == MediaType::Unknown
{
Ok::<_, AnyError>(
try_resolve_node_binary_main_entrypoint(
&run_flags.script,
self.initial_cwd(),
)?
.unwrap_or(url),
)
} else {
Ok(url)
}
};
self.resolve_main_module_with_resolver_if_bare(
&run_flags.script,
resolver,
default_resolve,
)?
}
}
DenoSubcommand::Serve(run_flags) => self
.resolve_main_module_with_resolver_if_bare(
&run_flags.script,
resolver,
|| {
resolve_url_or_path(&run_flags.script, self.initial_cwd())
.map_err(|e| e.into())
},
)?,
_ => {
bail!("No main module.")
}
})
})
.as_ref()
.map_err(|err| deno_core::anyhow::anyhow!("{}", err))
}
/// Resolves the main module without a workspace resolver.
pub fn resolve_main_module(&self) -> Result<&ModuleSpecifier, AnyError> {
self.resolve_main_module_with_resolver(None)
}
/// When `--ext` is passed, returns a content-type header override for the
/// main module so it is interpreted with the requested media type;
/// otherwise returns an empty map.
pub fn resolve_file_header_overrides(
  &self,
) -> HashMap<ModuleSpecifier, HashMap<String, String>> {
  let maybe_main_specifier = self.resolve_main_module().ok();
  let maybe_content_type = self.flags.ext.as_ref().and_then(|ext| {
    MediaType::from_filename(&format!("file.{}", ext)).as_content_type()
  });
  match (maybe_main_specifier, maybe_content_type) {
    (Some(main_specifier), Some(content_type)) => {
      let headers =
        HashMap::from([("content-type".to_string(), content_type.to_string())]);
      HashMap::from([(main_specifier.clone(), headers)])
    }
    _ => HashMap::default(),
  }
}
/// Determines how the web storage key is derived: `--location` wins, then
/// the config file URL, otherwise the main module is used.
pub fn resolve_storage_key_resolver(&self) -> StorageKeyResolver {
if let Some(location) = &self.flags.location {
StorageKeyResolver::from_flag(location)
} else if let Some(deno_json) = self.start_dir.member_or_root_deno_json() {
StorageKeyResolver::from_config_file_url(&deno_json.specifier)
} else {
StorageKeyResolver::new_use_main_module()
}
}
// If the main module should be treated as being in an npm package.
// This is triggered via a secret environment variable which is used
// for functionality like child_process.fork. Users should NOT depend
// on this functionality.
pub fn is_node_main(&self) -> bool {
npm_process_state(&CliSys::default()).is_some()
}
/// Gets the explicitly specified NodeModulesDir setting.
///
/// Use `WorkspaceFactory.node_modules_dir_mode()` to get the resolved value.
pub fn specified_node_modules_dir(
&self,
) -> Result<
Option<NodeModulesDirMode>,
deno_config::deno_json::NodeModulesDirParseError,
> {
// The CLI flag overrides whatever the workspace config declares.
if let Some(flag) = self.flags.node_modules_dir {
return Ok(Some(flag));
}
self.workspace().node_modules_dir()
}
pub fn vendor_dir_path(&self) -> Option<&PathBuf> {
self.workspace().vendor_dir_path()
}
/// Creates an inspector server when any of `--inspect`, `--inspect-brk`
/// or `--inspect-wait` was passed; returns `Ok(None)` otherwise.
pub fn resolve_inspector_server(
  &self,
) -> Result<Option<InspectorServer>, AnyError> {
  let maybe_host = self
    .flags
    .inspect
    .or(self.flags.inspect_brk)
    .or(self.flags.inspect_wait);
  match maybe_host {
    Some(host) => Ok(Some(InspectorServer::new(
      host,
      DENO_VERSION_INFO.user_agent,
    )?)),
    None => Ok(None),
  }
}
/// Resolves formatter options per workspace member matched by the CLI
/// file patterns.
pub fn resolve_fmt_options_for_members(
  &self,
  fmt_flags: &FmtFlags,
) -> Result<Vec<(WorkspaceDirectoryRc, FmtOptions)>, AnyError> {
  let cli_arg_patterns =
    fmt_flags.files.as_file_patterns(self.initial_cwd())?;
  let member_configs = self
    .workspace()
    .resolve_fmt_config_for_members(&cli_arg_patterns)?;
  let unstable = self.resolve_config_unstable_fmt_options();
  Ok(
    member_configs
      .into_iter()
      .map(|(ctx, config)| {
        let options = FmtOptions::resolve(config, unstable.clone(), fmt_flags);
        (ctx, options)
      })
      .collect(),
  )
}

/// Unstable formatter features enabled via the workspace `unstable` config.
pub fn resolve_config_unstable_fmt_options(&self) -> UnstableFmtOptions {
  UnstableFmtOptions {
    component: self.workspace().has_unstable("fmt-component"),
    sql: self.workspace().has_unstable("fmt-sql"),
  }
}
/// Resolves workspace-wide lint options (reporter kind).
pub fn resolve_workspace_lint_options(
&self,
lint_flags: &LintFlags,
) -> Result<WorkspaceLintOptions, AnyError> {
let lint_config = self.workspace().to_lint_config()?;
WorkspaceLintOptions::resolve(&lint_config, lint_flags)
}
/// Resolves lint options per workspace member matched by the CLI patterns.
pub fn resolve_lint_options_for_members(
&self,
lint_flags: &LintFlags,
) -> Result<Vec<(WorkspaceDirectoryRc, LintOptions)>, AnyError> {
let cli_arg_patterns =
lint_flags.files.as_file_patterns(self.initial_cwd())?;
let member_configs = self
.workspace()
.resolve_lint_config_for_members(&cli_arg_patterns)?;
let mut result = Vec::with_capacity(member_configs.len());
for (ctx, config) in member_configs {
let options = LintOptions::resolve(config, lint_flags)?;
result.push((ctx, options));
}
Ok(result)
}
/// Resolves workspace-wide test options (purely from CLI flags).
pub fn resolve_workspace_test_options(
&self,
test_flags: &TestFlags,
) -> WorkspaceTestOptions {
WorkspaceTestOptions::resolve(test_flags)
}
/// Resolves test options per workspace member matched by the CLI patterns.
pub fn resolve_test_options_for_members(
&self,
test_flags: &TestFlags,
) -> Result<Vec<(WorkspaceDirectoryRc, TestOptions)>, AnyError> {
let cli_arg_patterns =
test_flags.files.as_file_patterns(self.initial_cwd())?;
let workspace_dir_configs = self
.workspace()
.resolve_test_config_for_members(&cli_arg_patterns)?;
let mut result = Vec::with_capacity(workspace_dir_configs.len());
for (member_dir, config) in workspace_dir_configs {
let options = TestOptions::resolve(config, test_flags);
result.push((member_dir, options));
}
Ok(result)
}
/// Resolves workspace-wide bench options (purely from CLI flags).
pub fn resolve_workspace_bench_options(
&self,
bench_flags: &BenchFlags,
) -> WorkspaceBenchOptions {
WorkspaceBenchOptions::resolve(bench_flags)
}
/// Resolves bench options per workspace member matched by the CLI patterns.
pub fn resolve_bench_options_for_members(
&self,
bench_flags: &BenchFlags,
) -> Result<Vec<(WorkspaceDirectoryRc, BenchOptions)>, AnyError> {
let cli_arg_patterns =
bench_flags.files.as_file_patterns(self.initial_cwd())?;
let workspace_dir_configs = self
.workspace()
.resolve_bench_config_for_members(&cli_arg_patterns)?;
let mut result = Vec::with_capacity(workspace_dir_configs.len());
for (member_dir, config) in workspace_dir_configs {
let options = BenchOptions::resolve(config, bench_flags);
result.push((member_dir, options));
}
Ok(result)
}
/// Vector of user script CLI arguments.
pub fn argv(&self) -> &Vec<String> {
&self.flags.argv
}
pub fn ca_data(&self) -> &Option<CaData> {
&self.flags.ca_data
}
pub fn ca_stores(&self) -> &Option<Vec<String>> {
&self.flags.ca_stores
}
/// Coverage output directory for `test`/`run`: the flag wins, then the
/// `DENO_COVERAGE_DIR` env var; relative paths are joined to the cwd.
pub fn coverage_dir(&self) -> Option<PathBuf> {
match &self.flags.subcommand {
DenoSubcommand::Test(test) => test
.coverage_dir
.as_ref()
.map(|dir| self.initial_cwd.join(dir))
.or_else(|| env::var_os("DENO_COVERAGE_DIR").map(PathBuf::from)),
DenoSubcommand::Run(flags) => flags
.coverage_dir
.as_ref()
.map(|dir| self.initial_cwd.join(dir))
.or_else(|| env::var_os("DENO_COVERAGE_DIR").map(PathBuf::from)),
_ => None,
}
}
/// Op summary metrics are forced on for test/repl/jupyter runs.
pub fn enable_op_summary_metrics(&self) -> bool {
self.flags.enable_op_summary_metrics
|| matches!(
self.flags.subcommand,
DenoSubcommand::Test(_)
| DenoSubcommand::Repl(_)
| DenoSubcommand::Jupyter(_)
)
}
pub fn enable_testing_features(&self) -> bool {
self.flags.enable_testing_features
}
pub fn ext_flag(&self) -> &Option<String> {
&self.flags.ext
}
/// Whether hot module replacement is enabled for the current `run` or
/// `serve` invocation (requires watch mode).
pub fn has_hmr(&self) -> bool {
  match &self.flags.subcommand {
    DenoSubcommand::Run(RunFlags {
      watch: Some(WatchFlagsWithPaths { hmr, .. }),
      ..
    })
    | DenoSubcommand::Serve(ServeFlags {
      watch: Some(WatchFlagsWithPaths { hmr, .. }),
      ..
    }) => *hmr,
    _ => false,
  }
}
/// If the --inspect or --inspect-brk flags are used.
pub fn is_inspecting(&self) -> bool {
self.flags.inspect.is_some()
|| self.flags.inspect_brk.is_some()
|| self.flags.inspect_wait.is_some()
}
pub fn inspect_brk(&self) -> Option<SocketAddr> {
self.flags.inspect_brk
}
pub fn inspect_wait(&self) -> Option<SocketAddr> {
self.flags.inspect_wait
}
pub fn log_level(&self) -> Option<log::Level> {
self.flags.log_level
}
/// True when logging is restricted to errors (e.g. `--quiet`).
pub fn is_quiet(&self) -> bool {
self
.log_level()
.map(|l| l == log::Level::Error)
.unwrap_or(false)
}
pub fn location_flag(&self) -> &Option<Url> {
&self.flags.location
}
pub fn no_remote(&self) -> bool {
self.flags.no_remote
}
/// Permission options for the start directory.
pub fn permissions_options(&self) -> Result<PermissionsOptions, AnyError> {
self.permissions_options_for_dir(&self.start_dir)
}
/// Permission options for a specific workspace directory: config-file
/// permissions merged with CLI flags, then import permissions and (for
/// `serve`) the serve-specific additions.
pub fn permissions_options_for_dir(
&self,
dir: &WorkspaceDirectory,
) -> Result<PermissionsOptions, AnyError> {
let config_permissions = self.resolve_config_permissions_for_dir(dir)?;
let mut permissions_options = flags_to_permissions_options(
&self.flags.permissions,
config_permissions,
)?;
self.augment_import_permissions(&mut permissions_options);
if let DenoSubcommand::Serve(serve_flags) = &self.flags.subcommand {
augment_permissions_with_serve_flags(
&mut permissions_options,
serve_flags,
)?;
}
Ok(permissions_options)
}
fn resolve_config_permissions_for_dir<'a>(
&self,
dir: &'a WorkspaceDirectory,
) -> Result<Option<&'a PermissionsObjectWithBase>, AnyError> {
let config_permissions = if let Some(name) = &self.flags.permission_set {
if name.is_empty() {
let maybe_subcommand_permissions = match &self.flags.subcommand {
DenoSubcommand::Bench(_) => dir.to_bench_permissions_config()?,
DenoSubcommand::Compile(_) => dir.to_compile_permissions_config()?,
DenoSubcommand::Test(_) => dir.to_test_permissions_config()?,
_ => None,
};
match maybe_subcommand_permissions {
Some(permissions) => Some(permissions),
// do not error when the default set doesn't exist in order
// to allow providing `-P` unconditionally
None => dir.to_permissions_config()?.sets.get("default"),
}
} else {
Some(dir.to_permissions_config()?.get(name)?)
}
} else {
if !self.flags.has_permission() {
let set_config_permission_name = match &self.flags.subcommand {
DenoSubcommand::Bench(_) => dir
.to_bench_permissions_config()?
.filter(|permissions| !permissions.permissions.is_empty())
.map(|permissions| ("Bench", &permissions.base)),
DenoSubcommand::Compile(_) => dir
.to_compile_permissions_config()?
.filter(|permissions| !permissions.permissions.is_empty())
.map(|permissions| ("Compile", &permissions.base)),
DenoSubcommand::Test(_) => dir
.to_test_permissions_config()?
.filter(|permissions| !permissions.permissions.is_empty())
.map(|permissions| ("Test", &permissions.base)),
_ => None,
};
if let Some((name, config_file_url)) = set_config_permission_name {
// prevent people from wasting time wondering why benches/tests are failing
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | true |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/cli/args/flags_net.rs | cli/args/flags_net.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::net::IpAddr;
use std::str::FromStr;
use deno_core::url::Url;
use deno_runtime::deno_permissions::NetDescriptor;
#[derive(Debug, PartialEq, Eq)]
pub struct ParsePortError(String);

#[derive(Debug, PartialEq, Eq)]
pub struct BarePort(u16);

impl FromStr for BarePort {
  type Err = ParsePortError;

  /// Parses a "bare port" of the form `:NNNN` — a leading colon followed
  /// by a `u16` port number.
  fn from_str(s: &str) -> Result<BarePort, ParsePortError> {
    match s.strip_prefix(':') {
      Some(port) => port
        .parse::<u16>()
        .map(BarePort)
        .map_err(|e| ParsePortError(e.to_string())),
      None => Err(ParsePortError(
        "Bare Port doesn't start with ':'".to_string(),
      )),
    }
  }
}
/// clap validator for net allowlist entries. Accepts anything that parses
/// as a URL authority, a bare IP address, a bare `:port`, or a full net
/// descriptor list entry.
pub fn validator(host_and_port: &str) -> Result<String, String> {
  let is_valid = Url::parse(&format!("internal://{host_and_port}")).is_ok()
    || host_and_port.parse::<IpAddr>().is_ok()
    || host_and_port.parse::<BarePort>().is_ok()
    || NetDescriptor::parse_for_list(host_and_port).is_ok();
  if is_valid {
    Ok(host_and_port.to_string())
  } else {
    Err(format!("Bad host:port pair: {host_and_port}"))
  }
}
/// Expands "bare port" paths (eg. ":8080") into full paths with hosts. It
/// expands to such paths into 3 paths with following hosts: `0.0.0.0:port`,
/// `127.0.0.1:port` and `localhost:port`.
pub fn parse(paths: Vec<String>) -> clap::error::Result<Vec<String>> {
let mut out: Vec<String> = vec![];
for host_and_port in paths.into_iter() {
if let Ok(port) = host_and_port.parse::<BarePort>() {
// we got bare port, let's add default hosts
for host in ["0.0.0.0", "127.0.0.1", "localhost"].iter() {
out.push(format!("{}:{}", host, port.0));
}
} else {
NetDescriptor::parse_for_list(&host_and_port).map_err(|e| {
clap::Error::raw(clap::error::ErrorKind::InvalidValue, e.to_string())
})?;
out.push(host_and_port)
}
}
Ok(out)
}
#[cfg(test)]
mod bare_port_tests {
use super::BarePort;
use super::ParsePortError;
// Well-formed bare port.
#[test]
fn bare_port_parsed() {
let expected = BarePort(8080);
let actual = ":8080".parse::<BarePort>();
assert_eq!(actual, Ok(expected));
}
// Missing leading colon.
#[test]
fn bare_port_parse_error1() {
let expected =
ParsePortError("Bare Port doesn't start with ':'".to_string());
let actual = "8080".parse::<BarePort>();
assert_eq!(actual, Err(expected));
}
// Port out of u16 range.
#[test]
fn bare_port_parse_error2() {
let actual = ":65536".parse::<BarePort>();
assert!(actual.is_err());
}
// Non-numeric suffix after the port digits.
#[test]
fn bare_port_parse_error3() {
let actual = ":14u16".parse::<BarePort>();
assert!(actual.is_err());
}
// Plain word, no colon at all.
#[test]
fn bare_port_parse_error4() {
let actual = "Deno".parse::<BarePort>();
assert!(actual.is_err());
}
// host:port is not a *bare* port.
#[test]
fn bare_port_parse_error5() {
let actual = "deno.land:8080".parse::<BarePort>();
assert!(actual.is_err());
}
}
#[cfg(test)]
mod tests {
use super::parse;
// Creates vector of strings, Vec<String>
macro_rules! svec {
($($x:expr),*) => (vec![$($x.to_string()),*]);
}
// Entries that are already full host/IP/CIDR forms pass through unchanged.
// NOTE(review): "999.0.88.1:80" is not a valid IPv4 address but still
// passes — presumably accepted as a hostname; confirm against
// NetDescriptor parsing rules.
#[test]
fn parse_net_args_() {
let entries = svec![
"deno.land",
"deno.land:80",
"*.deno.land",
"[::]",
"[::1]",
"127.0.0.1",
"[::1]",
"1.2.3.4:5678",
"0.0.0.0:5678",
"127.0.0.1:5678",
"[::]:5678",
"[::1]:5678",
"localhost:5678",
"[::1]:8080",
"[::]:8000",
"[::1]:8000",
"localhost:8000",
"0.0.0.0:4545",
"127.0.0.1:4545",
"999.0.88.1:80",
"127.0.0.0/24",
"192.168.1.0/24",
"10.0.0.0/8"
];
let expected = svec![
"deno.land",
"deno.land:80",
"*.deno.land",
"[::]",
"[::1]",
"127.0.0.1",
"[::1]",
"1.2.3.4:5678",
"0.0.0.0:5678",
"127.0.0.1:5678",
"[::]:5678",
"[::1]:5678",
"localhost:5678",
"[::1]:8080",
"[::]:8000",
"[::1]:8000",
"localhost:8000",
"0.0.0.0:4545",
"127.0.0.1:4545",
"999.0.88.1:80",
"127.0.0.0/24",
"192.168.1.0/24",
"10.0.0.0/8"
];
let actual = parse(entries).unwrap();
assert_eq!(actual, expected);
}
// A bare port expands to the three default hosts.
#[test]
fn parse_net_args_expansion() {
let entries = svec![":8080"];
let expected = svec!["0.0.0.0:8080", "127.0.0.1:8080", "localhost:8080"];
let actual = parse(entries).unwrap();
assert_eq!(actual, expected);
}
// IPv6 literals must be bracketed.
#[test]
fn parse_net_args_ipv6() {
let entries = svec!["[::1]", "[::]:5678", "[::1]:5678"];
let expected = svec!["[::1]", "[::]:5678", "[::1]:5678"];
let actual = parse(entries).unwrap();
assert_eq!(actual, expected);
}
// Unbracketed or malformed IPv6 forms are rejected.
#[test]
fn parse_net_args_ipv6_error1() {
let entries = svec![":::"];
assert!(parse(entries).is_err());
}
#[test]
fn parse_net_args_ipv6_error2() {
let entries = svec!["::1"];
assert!(parse(entries).is_err());
}
#[test]
fn parse_net_args_ipv6_error3() {
let entries = svec!["::"];
assert!(parse(entries).is_err());
}
#[test]
fn parse_net_args_ipv6_error4() {
let entries = svec!["::cafe"];
assert!(parse(entries).is_err());
}
#[test]
fn parse_net_args_ipv6_error5() {
let entries = svec!["1::1"];
assert!(parse(entries).is_err());
}
#[test]
fn parse_net_args_ipv6_error6() {
let entries = svec!["0123:4567:890a:bcde:fg::"];
assert!(parse(entries).is_err());
}
#[test]
fn parse_net_args_ipv6_error7() {
let entries = svec!["[::q]:8080"];
assert!(parse(entries).is_err());
}
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/bench_util/lib.rs | bench_util/lib.rs | // Copyright 2018-2025 the Deno authors. MIT license.
mod js_runtime;
mod profiling;
pub use bencher;
pub use js_runtime::*;
pub use profiling::*; // Exports bench_or_profile! macro
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/bench_util/profiling.rs | bench_util/profiling.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use bencher::DynBenchFn;
use bencher::StaticBenchFn;
use bencher::TestDescAndFn;
use bencher::TestOpts;
pub fn is_profiling() -> bool {
std::env::var("PROFILING").is_ok()
}
#[macro_export]
// Tweaked and copied from https://github.com/bluss/bencher/blob/master/macros.rs
macro_rules! bench_or_profile {
($($group_name:path),+) => {
fn main() {
use $crate::bencher::TestOpts;
use $crate::bencher::run_tests_console;
let mut test_opts = TestOpts::default();
// check to see if we should filter:
if let Some(arg) = ::std::env::args().skip(1).find(|arg| *arg != "--bench") {
test_opts.filter = Some(arg);
}
let mut benches = Vec::new();
$(
benches.extend($group_name());
)+
if $crate::is_profiling() {
// Run profiling
$crate::run_profiles(&test_opts, benches);
} else {
// Run benches
run_tests_console(&test_opts, benches).unwrap();
}
}
};
($($group_name:path,)+) => {
bench_or_profile!($($group_name),+);
};
}
#[allow(clippy::print_stdout)]
pub fn run_profiles(opts: &TestOpts, tests: Vec<TestDescAndFn>) {
let tests = filter_tests(opts, tests);
// let decs = tests.iter().map(|t| t.desc.clone()).collect();
println!();
for b in tests {
println!("Profiling {}", b.desc.name);
run_profile(b);
}
println!();
}
fn run_profile(test: TestDescAndFn) {
match test.testfn {
DynBenchFn(bencher) => {
bencher::bench::run_once(|harness| bencher.run(harness));
}
StaticBenchFn(benchfn) => {
bencher::bench::run_once(benchfn);
}
};
}
// Copied from https://github.com/bluss/bencher/blob/master/lib.rs
fn filter_tests(
opts: &TestOpts,
tests: Vec<TestDescAndFn>,
) -> Vec<TestDescAndFn> {
let mut filtered = tests;
// Remove tests that don't match the test filter
filtered = match opts.filter {
None => filtered,
Some(ref filter) => filtered
.into_iter()
.filter(|test| test.desc.name.contains(&filter[..]))
.collect(),
};
// Sort the tests alphabetically
filtered.sort_by(|t1, t2| t1.desc.name.cmp(&t2.desc.name));
filtered
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/bench_util/js_runtime.rs | bench_util/js_runtime.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use bencher::Bencher;
use deno_core::Extension;
use deno_core::JsRuntime;
use deno_core::PollEventLoopOptions;
use deno_core::RuntimeOptions;
use deno_core::v8;
use crate::profiling::is_profiling;
pub fn create_js_runtime(setup: impl FnOnce() -> Vec<Extension>) -> JsRuntime {
JsRuntime::new(RuntimeOptions {
extensions: setup(),
module_loader: None,
..Default::default()
})
}
fn loop_code(iters: u64, src: &str) -> String {
format!(r#"for(let i=0; i < {iters}; i++) {{ {src} }}"#,)
}
#[derive(Copy, Clone)]
pub struct BenchOptions {
pub benching_inner: u64,
pub profiling_inner: u64,
pub profiling_outer: u64,
}
impl Default for BenchOptions {
fn default() -> Self {
Self {
benching_inner: 1_000,
profiling_inner: 1_000,
profiling_outer: 10_000,
}
}
}
pub fn bench_js_sync(
b: &mut Bencher,
src: &str,
setup: impl FnOnce() -> Vec<Extension>,
) {
bench_js_sync_with(b, src, setup, Default::default())
}
pub fn bench_js_sync_with(
b: &mut Bencher,
src: &str,
setup: impl FnOnce() -> Vec<Extension>,
opts: BenchOptions,
) {
let mut runtime = create_js_runtime(setup);
deno_core::scope!(scope, runtime);
// Increase JS iterations if profiling for nicer flamegraphs
let inner_iters = if is_profiling() {
opts.profiling_inner * opts.profiling_outer
} else {
opts.benching_inner
};
// Looped code
let looped_src = loop_code(inner_iters, src);
let code = v8::String::new(scope, looped_src.as_ref()).unwrap();
let script = v8::Script::compile(scope, code, None).unwrap();
// Run once if profiling, otherwise regular bench loop
if is_profiling() {
script.run(scope).unwrap();
} else {
b.iter(|| {
script.run(scope).unwrap();
});
}
}
pub fn bench_js_async(
b: &mut Bencher,
src: &str,
setup: impl FnOnce() -> Vec<Extension>,
) {
bench_js_async_with(b, src, setup, Default::default())
}
pub fn bench_js_async_with(
b: &mut Bencher,
src: &str,
setup: impl FnOnce() -> Vec<Extension>,
opts: BenchOptions,
) {
let mut runtime = create_js_runtime(setup);
let tokio_runtime = tokio::runtime::Builder::new_current_thread()
.enable_all()
.build()
.unwrap();
// Looped code
let inner_iters = if is_profiling() {
opts.profiling_inner
} else {
opts.benching_inner
};
let looped = loop_code(inner_iters, src);
// Get a &'static str by leaking -- this is fine because it's benchmarking code
let src = Box::leak(looped.into_boxed_str());
if is_profiling() {
for _ in 0..opts.profiling_outer {
tokio_runtime.block_on(inner_async(src, &mut runtime));
}
} else {
b.iter(|| {
tokio_runtime.block_on(inner_async(src, &mut runtime));
});
}
}
async fn inner_async(src: &'static str, runtime: &mut JsRuntime) {
runtime.execute_script("inner_loop", src).unwrap();
runtime
.run_event_loop(PollEventLoopOptions::default())
.await
.unwrap();
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/bench_util/benches/utf8.rs | bench_util/benches/utf8.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use deno_bench_util::BenchOptions;
use deno_bench_util::bench_js_sync_with;
use deno_bench_util::bench_or_profile;
use deno_bench_util::bencher::Bencher;
use deno_bench_util::bencher::benchmark_group;
use deno_core::Extension;
fn setup() -> Vec<Extension> {
deno_core::extension!(
bench_setup,
js = ["ext:bench_setup/setup.js" = {
source = r#"
const hello = "hello world\n";
const hello1k = hello.repeat(1e3);
const hello1m = hello.repeat(1e6);
const helloEncoded = Deno.core.encode(hello);
const hello1kEncoded = Deno.core.encode(hello1k);
const hello1mEncoded = Deno.core.encode(hello1m);
"#
}]
);
vec![bench_setup::init()]
}
fn bench_utf8_encode_12_b(b: &mut Bencher) {
bench_js_sync_with(
b,
r#"Deno.core.encode(hello);"#,
setup,
BenchOptions {
benching_inner: 1,
..Default::default()
},
);
}
fn bench_utf8_encode_12_kb(b: &mut Bencher) {
bench_js_sync_with(
b,
r#"Deno.core.encode(hello1k);"#,
setup,
BenchOptions {
benching_inner: 1,
..Default::default()
},
);
}
fn bench_utf8_encode_12_mb(b: &mut Bencher) {
bench_js_sync_with(
b,
r#"Deno.core.encode(hello1m);"#,
setup,
BenchOptions {
benching_inner: 1,
profiling_inner: 10,
profiling_outer: 10,
},
);
}
fn bench_utf8_decode_12_b(b: &mut Bencher) {
bench_js_sync_with(
b,
r#"Deno.core.decode(helloEncoded);"#,
setup,
BenchOptions {
benching_inner: 1,
..Default::default()
},
);
}
fn bench_utf8_decode_12_kb(b: &mut Bencher) {
bench_js_sync_with(
b,
r#"Deno.core.decode(hello1kEncoded);"#,
setup,
BenchOptions {
benching_inner: 1,
..Default::default()
},
);
}
fn bench_utf8_decode_12_mb(b: &mut Bencher) {
bench_js_sync_with(
b,
r#"Deno.core.decode(hello1mEncoded);"#,
setup,
BenchOptions {
benching_inner: 1,
profiling_inner: 10,
profiling_outer: 10,
},
);
}
benchmark_group!(
benches,
bench_utf8_encode_12_b,
bench_utf8_encode_12_kb,
bench_utf8_encode_12_mb,
bench_utf8_decode_12_b,
bench_utf8_decode_12_kb,
bench_utf8_decode_12_mb,
);
bench_or_profile!(benches);
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/libs/maybe_sync/lib.rs | libs/maybe_sync/lib.rs | // Copyright 2018-2025 the Deno authors. MIT license.
pub use inner::*;
#[cfg(feature = "sync")]
mod inner {
#![allow(clippy::disallowed_types)]
pub use core::marker::Send as MaybeSend;
pub use core::marker::Sync as MaybeSync;
pub use std::sync::Arc as MaybeArc;
pub use std::sync::OnceLock as MaybeOnceLock;
pub use dashmap::DashMap as MaybeDashMap;
pub use dashmap::DashSet as MaybeDashSet;
}
#[cfg(not(feature = "sync"))]
mod inner {
pub use std::cell::OnceCell as MaybeOnceLock;
use std::cell::Ref;
use std::cell::RefCell;
use std::collections::HashMap;
use std::hash::BuildHasher;
use std::hash::Hash;
use std::hash::RandomState;
pub use std::rc::Rc as MaybeArc;
pub trait MaybeSync {}
impl<T> MaybeSync for T where T: ?Sized {}
pub trait MaybeSend {}
impl<T> MaybeSend for T where T: ?Sized {}
// Wrapper struct that exposes a subset of `DashMap` API.
#[derive(Debug)]
pub struct MaybeDashMap<K, V, S = RandomState>(RefCell<HashMap<K, V, S>>);
impl<K, V, S> Default for MaybeDashMap<K, V, S>
where
K: Eq + Hash,
S: Default + BuildHasher + Clone,
{
fn default() -> Self {
Self(RefCell::new(Default::default()))
}
}
impl<K: Eq + Hash, V, S: BuildHasher> MaybeDashMap<K, V, S> {
pub fn get<'a, Q: Eq + Hash + ?Sized>(
&'a self,
key: &Q,
) -> Option<Ref<'a, V>>
where
K: std::borrow::Borrow<Q>,
{
Ref::filter_map(self.0.borrow(), |map| map.get(key)).ok()
}
pub fn insert(&self, key: K, value: V) -> Option<V> {
let mut inner = self.0.borrow_mut();
inner.insert(key, value)
}
pub fn clear(&self) {
self.0.borrow_mut().clear();
}
pub fn remove(&self, key: &K) -> Option<(K, V)> {
self.0.borrow_mut().remove_entry(key)
}
#[allow(clippy::len_without_is_empty)]
pub fn len(&self) -> usize {
self.0.borrow().len()
}
}
// Wrapper struct that exposes a subset of `DashMap` API.
#[derive(Debug)]
pub struct MaybeDashSet<V, S = RandomState>(
RefCell<std::collections::HashSet<V, S>>,
);
impl<V, S> Default for MaybeDashSet<V, S>
where
V: Eq + Hash,
S: Default + BuildHasher + Clone,
{
fn default() -> Self {
Self(RefCell::new(Default::default()))
}
}
impl<V: Eq + Hash, S: BuildHasher> MaybeDashSet<V, S> {
pub fn insert(&self, value: V) -> bool {
let mut inner = self.0.borrow_mut();
inner.insert(value)
}
}
}
#[allow(clippy::disallowed_types)]
#[inline]
pub fn new_rc<T>(value: T) -> MaybeArc<T> {
MaybeArc::new(value)
}
#[allow(clippy::disallowed_types)]
#[inline]
pub fn new_arc<T>(value: T) -> std::sync::Arc<T> {
std::sync::Arc::new(value)
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/libs/typescript_go_client/src/lib.rs | libs/typescript_go_client/src/lib.rs | // Copyright 2018-2025 the Deno authors. MIT license.
// Partially extracted / adapted from https://github.com/microsoft/libsyncrpc
// Copyright 2024 Microsoft Corporation. MIT license.
pub mod connection;
pub mod types;
use std::collections::HashSet;
use std::ffi::OsStr;
use std::io::BufReader;
use std::io::BufWriter;
use std::process::Child;
use std::process::ChildStdin;
use std::process::ChildStdout;
use connection::RpcConnection;
#[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("Failed to spawn process: {0}")]
ProcessSpawn(#[source] std::io::Error),
#[error("Failed to kill process: {0}")]
ProcessKill(#[source] std::io::Error),
#[error("Error in RPC connection: {0}")]
RpcConnection(#[source] std::io::Error),
#[error("Error encoding {obj} as {ty}: {source}")]
Encoding {
obj: &'static str,
ty: &'static str,
source: Box<Error>,
},
#[error("Error decoding UTF-8: {0}")]
Utf8(#[source] std::string::FromUtf8Error),
#[error("Invalid message type: {0}")]
InvalidMessageType(MessageType),
#[error("{0}")]
AdHoc(String),
#[error("serde json error: {0}")]
Json(#[from] serde_json::Error),
}
impl Error {
pub fn from_reason<S: Into<String>>(reason: S) -> Self {
Self::AdHoc(reason.into())
}
}
pub trait CallbackHandler {
fn supported_callbacks(&self) -> &'static [&'static str];
fn handle_callback(
&self,
name: &str,
payload: String,
) -> Result<String, Error>;
}
/// A synchronous RPC channel that allows JavaScript to synchronously call out
/// to a child process and get a response over a line-based protocol,
/// including handling of JavaScript-side callbacks before the call completes.
///
/// #### Protocol
///
/// Requests follow a MessagePack-based "tuple"/array protocol with 3 items:
/// `(<type>, <name>, <payload>)`. All items are binary arrays of 8-bit
/// integers, including the `<type>` and `<name>`, to avoid unnecessary
/// encoding/decoding at the protocol level.
///
/// For specific message types and their corresponding protocol behavior, please
/// see `MessageType` below.
pub struct SyncRpcChannel<T> {
child: Child,
conn: RpcConnection<BufReader<ChildStdout>, BufWriter<ChildStdin>>,
callback_handler: T,
supported_callbacks: HashSet<&'static str>,
}
impl<T: CallbackHandler> SyncRpcChannel<T> {
/// Constructs a new `SyncRpcChannel` by spawning a child process with the
/// given `exe` executable, and a given set of `args`.
pub fn new<I, S>(
exe: impl AsRef<OsStr>,
args: I,
callback_handler: T,
) -> Result<Self, Error>
where
I: IntoIterator<Item = S>,
S: AsRef<OsStr>,
{
let mut child = std::process::Command::new(exe)
.stdin(std::process::Stdio::piped())
.stdout(std::process::Stdio::piped())
.stderr(std::process::Stdio::inherit())
.args(args)
.spawn()
.map_err(Error::ProcessSpawn)?;
let supported_callbacks = callback_handler.supported_callbacks();
Ok(Self {
conn: RpcConnection::new(
BufReader::new(child.stdout.take().expect("Where did ChildStdout go?")),
BufWriter::new(child.stdin.take().expect("Where did ChildStdin go?")),
)
.map_err(Error::RpcConnection)?,
supported_callbacks: supported_callbacks.iter().copied().collect(),
callback_handler,
child,
})
}
/// Send a request to the child process and wait for a response. The method
/// will not return, synchronously, until a response is received or an error
/// occurs.
///
/// This method will take care of encoding and decoding the binary payload to
/// and from a JS string automatically and suitable for smaller payloads.
pub fn request_sync(
&mut self,
method: &str,
payload: String,
) -> Result<String, Error> {
self
.request_bytes_sync(method, payload.as_bytes())
.and_then(|arr| {
String::from_utf8((&arr[..]).into()).map_err(|e| Error::Encoding {
obj: "response",
ty: "string",
source: Box::new(Error::Utf8(e)),
})
})
}
/// Send a request to the child process and wait for a response. The method
/// will not return, synchronously, until a response is received or an error
/// occurs.
///
/// Unlike `requestSync`, this method will not do any of its own encoding or
/// decoding of payload data. Everything will be as sent/received through the
/// underlying protocol.
pub fn request_bytes_sync(
&mut self,
method: &str,
payload: &[u8],
) -> Result<Vec<u8>, Error> {
log::trace!("request_bytes_sync: {method}");
let method_bytes = method.as_bytes();
self
.conn
.write(MessageType::Request as u8, method_bytes, payload)
.map_err(Error::RpcConnection)?;
loop {
let (ty, name, payload) =
self.conn.read().map_err(Error::RpcConnection)?;
match ty.try_into().map_err(Error::from_reason)? {
MessageType::Response => {
if name == method_bytes {
return Ok(payload);
} else {
let name = String::from_utf8_lossy(&name);
return Err(Error::from_reason(format!(
"name mismatch for response: expected `{method}`, got `{name}`"
)));
}
}
MessageType::Error => {
return Err(Error::RpcConnection(self.conn.create_error(
&String::from_utf8_lossy(&name),
payload,
method,
)));
}
MessageType::Call => {
self.handle_call(&String::from_utf8_lossy(&name), payload)?;
}
_ => {
return Err(Error::from_reason(format!(
"Invalid message type from child: {ty:?}"
)));
}
}
}
}
// Closes the channel, terminating its underlying process.
pub fn close(&mut self) -> Result<(), Error> {
self.child.kill().map_err(Error::ProcessKill)?;
Ok(())
}
// Helper method to handle callback calls
fn handle_call(&mut self, name: &str, payload: Vec<u8>) -> Result<(), Error> {
if !self.supported_callbacks.contains(name) {
self.conn.write(MessageType::CallError as u8, name.as_bytes(), format!("unknown callback: `{name}`. Please make sure to register it on the JavaScript side before invoking it.").as_bytes())
.map_err(Error::RpcConnection)?;
return Err(Error::from_reason(format!(
"no callback named `{name}` found"
)));
}
let res = self
.callback_handler
.handle_callback(name, String::from_utf8(payload).map_err(Error::Utf8)?);
match res {
Ok(res) => {
self
.conn
.write(
MessageType::CallResponse as u8,
name.as_bytes(),
res.as_bytes(),
)
.map_err(Error::RpcConnection)?;
}
Err(e) => {
self
.conn
.write(
MessageType::CallError as u8,
name.as_bytes(),
format!("{e}").trim().as_bytes(),
)
.map_err(Error::RpcConnection)?;
return Err(Error::from_reason(format!(
"Error calling callback `{name}`: {}",
e
)));
}
}
Ok(())
}
}
/// Messages types exchanged between the channel and its child. All messages
/// have an associated `<name>` and `<payload>`, which will both be arrays of
/// 8-bit integers (`Uint8Array`s).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[repr(u8)]
pub enum MessageType {
// --- Sent by channel---
/// A request to the child with the given raw byte `<payload>`, with
/// `<name>` as the method name. The child may send back any number of
/// `MessageType.Call` messages and must then close the request with either a
/// `MessageType.Response`, or a `MessageType.Error`. message.
Request = 1,
/// A response to a `MessageType.Call` message that the child previously sent.
/// The `<payload>` is the return value from invoking the JavaScript callback
/// associated with it. If the callback errors, `MessageType.CallError` will
/// be sent to the child.
CallResponse,
/// Informs the child that an error occurred. The `<payload>` will be the
/// binary representation of the stringified error, as UTF-8 bytes, not
/// necessarily in JSON format. The method linked to this message will also
/// throw an error after sending this message to its child and terminate the
/// request call.
CallError,
// --- Sent by child ---
/// A response to a request that the call was for. `<name>` MUST match the
/// `MessageType.Request` message's `<name>` argument.
Response,
/// A response that denotes some error occurred while processing the request
/// on the child side. The `<payload>` will simply be the binary
/// representation of the stringified error, as UTF-8 bytes, not necessarily
/// in JSON format. The method associated with this call will also throw an
/// error after receiving this message from the child.
Error,
/// A request to invoke a pre-registered JavaScript callback (see
/// `SyncRpcChannel#registerCallback`). `<name>` is the name of the callback,
/// and `<payload>` is an encoded UTF-8 string that the callback will be
/// called with. The child should then listen for `MessageType.CallResponse`
/// and `MessageType.CallError` messages.
Call,
// NOTE: Do NOT put any variants below this one, always add them _before_ it.
// See comment in TryFrom impl, and remove this when `variant_count` stabilizes.
_UnusedPlaceholderVariant,
// NOTHING SHOULD GO BELOW HERE
}
impl std::fmt::Display for MessageType {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
MessageType::Request => write!(f, "MessageType::Request"),
MessageType::CallResponse => write!(f, "MessageType::CallResponse"),
MessageType::CallError => write!(f, "MessageType::CallError"),
MessageType::Response => write!(f, "MessageType::Response"),
MessageType::Error => write!(f, "MessageType::Error"),
MessageType::Call => write!(f, "MessageType::Call"),
MessageType::_UnusedPlaceholderVariant => {
write!(f, "MessageType::_UnusedPlaceholderVariant")
}
}
}
}
impl TryFrom<u8> for MessageType {
type Error = String;
fn try_from(
value: u8,
) -> std::result::Result<Self, <MessageType as TryFrom<u8>>::Error> {
// TODO: change to the following line when `variant_count` stabilizes
// (https://github.com/rust-lang/rust/issues/73662) and remove `_UnusedPlaceholderVariant`
//
// if (1..=std::mem::variant_count::<MessageType>()) {
if (1..(MessageType::_UnusedPlaceholderVariant as u8)).contains(&value) {
// SAFETY: This is safe as long as the above holds true. It'll be fully
// safe once `variant_count` stabilizes.
Ok(unsafe { std::mem::transmute::<u8, MessageType>(value) })
} else {
Err(format!("Invalid message type: {value}"))
}
}
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/libs/typescript_go_client/src/connection.rs | libs/typescript_go_client/src/connection.rs | // Copyright 2018-2025 the Deno authors. MIT license.
// Partially extracted / adapted from https://github.com/microsoft/libsyncrpc
// Copyright 2024 Microsoft Corporation. MIT license.
use std::io::BufRead;
use std::io::Result;
use std::io::Write;
use std::io::{self};
/// Lower-level wrapper around RPC-related messaging and process management.
pub struct RpcConnection<R: BufRead, W: Write> {
reader: R,
writer: W,
}
impl<R: BufRead, W: Write> RpcConnection<R, W> {
pub fn new(reader: R, writer: W) -> Result<Self> {
Ok(Self { reader, writer })
}
pub fn write(&mut self, ty: u8, name: &[u8], payload: &[u8]) -> Result<()> {
let w = &mut self.writer;
rmp::encode::write_array_len(w, 3)?;
rmp::encode::write_u8(w, ty)?;
rmp::encode::write_bin(w, name)?;
rmp::encode::write_bin(w, payload)?;
w.flush()?;
Ok(())
}
pub fn read(&mut self) -> Result<(u8, Vec<u8>, Vec<u8>)> {
let r = &mut self.reader;
assert_eq!(
rmp::decode::read_array_len(r).map_err(to_io)?,
3,
"Message components must be a valid 3-part messagepack array."
);
Ok((
rmp::decode::read_int(r).map_err(to_io)?,
self.read_bin()?,
self.read_bin()?,
))
}
fn read_bin(&mut self) -> Result<Vec<u8>> {
let r = &mut self.reader;
let payload_len = rmp::decode::read_bin_len(r).map_err(to_io)?;
let mut payload = vec![0u8; payload_len as usize];
r.read_exact(&mut payload)?;
Ok(payload)
}
// Helper method to create an error
pub fn create_error(
&self,
name: &str,
payload: Vec<u8>,
expected_method: &str,
) -> io::Error {
if name == expected_method {
let payload = match String::from_utf8(payload) {
Ok(payload) => payload,
Err(err) => return io::Error::other(format!("{err}")),
};
io::Error::other(payload)
} else {
io::Error::other(format!(
"name mismatch for response: expected `{expected_method}`, got `{name}`"
))
}
}
}
fn to_io<T: std::error::Error>(err: T) -> io::Error {
io::Error::other(format!("{err}"))
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/libs/typescript_go_client/src/types.rs | libs/typescript_go_client/src/types.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::marker::PhantomData;
use indexmap::IndexMap;
#[derive(serde::Deserialize, Debug, Clone, Copy)]
#[serde(rename_all = "camelCase")]
pub struct Position {
pub line: u64,
pub character: u64,
}
#[derive(serde::Deserialize, Debug, Clone)]
#[serde(rename_all = "camelCase")]
pub struct Diagnostic {
pub file_name: String,
pub start: Position,
pub end: Position,
pub start_pos: u32,
pub end_pos: u32,
pub code: u32,
pub category: String,
pub message: String,
pub message_chain: Vec<Diagnostic>,
pub related_information: Vec<Diagnostic>,
pub reports_unnecessary: bool,
pub reports_deprecated: bool,
pub skipped_on_no_emit: bool,
pub source_line: String,
}
pub type DiagnosticId = u32;
#[derive(
serde_repr::Deserialize_repr, serde_repr::Serialize_repr, Debug, Clone, Copy,
)]
#[repr(u32)]
pub enum ResolutionMode {
None = 0,
CommonJS = 1,
ESM = 99,
}
#[derive(serde::Deserialize, Debug, Clone)]
#[serde(rename_all = "camelCase")]
pub struct ResolveModuleNamePayload {
pub module_name: String,
pub containing_file: String,
pub resolution_mode: ResolutionMode,
pub import_attribute_type: Option<String>,
// redirected_reference: Handle<Project>,
}
#[derive(serde::Deserialize, Debug)]
#[serde(rename_all = "camelCase")]
pub struct ResolveTypeReferenceDirectivePayload {
pub type_reference_directive_name: String,
pub containing_file: String,
pub resolution_mode: ResolutionMode,
}
#[derive(serde::Deserialize, serde::Serialize)]
#[serde(from = "String", into = "String")]
pub struct Handle<T> {
pub id: String,
#[serde(skip)]
_phantom: PhantomData<T>,
}
impl<T> std::fmt::Debug for Handle<T> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_tuple("Handle").field(&self.id).finish()
}
}
impl<T> From<Handle<T>> for String {
fn from(value: Handle<T>) -> Self {
value.id
}
}
impl<T> Clone for Handle<T> {
fn clone(&self) -> Self {
Self {
id: self.id.clone(),
_phantom: PhantomData,
}
}
}
impl<T> From<String> for Handle<T> {
fn from(id: String) -> Self {
Self {
id,
_phantom: PhantomData,
}
}
}
#[derive(serde::Deserialize, Debug, Clone)]
#[serde(rename_all = "camelCase")]
pub struct Project {
pub id: Handle<Self>,
pub config_file_name: String,
pub root_files: Vec<String>,
pub compiler_options: IndexMap<String, serde_json::Value>,
}
#[derive(
Debug, Clone, serde_repr::Deserialize_repr, serde_repr::Serialize_repr,
)]
#[repr(u32)]
pub enum ModuleKind {
None = 0,
CommonJS = 1,
AMD = 2,
UMD = 3,
System = 4,
ES2015 = 5,
ES2020 = 6,
ES2022 = 7,
ESNext = 99,
Node16 = 100,
Node18 = 101,
NodeNext = 199,
Preserve = 200,
}
#[derive(serde::Deserialize, Debug)]
#[serde(rename_all = "camelCase")]
pub struct GetImpliedNodeFormatForFilePayload {
pub file_name: String,
pub package_json_type: String,
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/libs/npm_installer/local.rs | libs/npm_installer/local.rs | // Copyright 2018-2025 the Deno authors. MIT license.
//! Code for local node_modules resolution.
use std::borrow::Cow;
use std::cell::RefCell;
use std::cmp::Ordering;
use std::collections::BTreeMap;
use std::collections::BTreeSet;
use std::collections::HashMap;
use std::collections::HashSet;
use std::collections::hash_map::Entry;
use std::path::Path;
use std::path::PathBuf;
use std::rc::Rc;
use std::sync::Arc;
use anyhow::Error as AnyError;
use async_trait::async_trait;
use deno_error::JsErrorBox;
use deno_npm::NpmResolutionPackage;
use deno_npm::NpmSystemInfo;
use deno_npm::resolution::NpmResolutionSnapshot;
use deno_npm_cache::NpmCache;
use deno_npm_cache::NpmCacheHttpClient;
use deno_npm_cache::NpmCacheSys;
use deno_npm_cache::TarballCache;
use deno_npm_cache::hard_link_file;
use deno_path_util::fs::atomic_write_file_with_retries;
use deno_resolver::npm::get_package_folder_id_folder_name;
use deno_resolver::npm::managed::NpmResolutionCell;
use deno_semver::StackString;
use deno_semver::package::PackageNv;
use deno_terminal::colors;
use futures::FutureExt;
use futures::StreamExt;
use futures::stream::FuturesUnordered;
use parking_lot::Mutex;
use serde::Deserialize;
use serde::Serialize;
use sys_traits::FsDirEntry;
use sys_traits::FsMetadata;
use sys_traits::FsOpen;
use sys_traits::FsWrite;
use crate::BinEntries;
use crate::CachedNpmPackageExtraInfoProvider;
use crate::ExpectedExtraInfo;
use crate::LifecycleScriptsConfig;
use crate::NpmPackageExtraInfoProvider;
use crate::NpmPackageFsInstaller;
use crate::PackageCaching;
use crate::Reporter;
use crate::bin_entries::EntrySetupOutcome;
use crate::bin_entries::SetupBinEntrySys;
use crate::flag::LaxSingleProcessFsFlag;
use crate::flag::LaxSingleProcessFsFlagSys;
use crate::fs::CloneDirRecursiveSys;
use crate::fs::clone_dir_recursive;
use crate::fs::symlink_dir;
use crate::lifecycle_scripts::LifecycleScripts;
use crate::lifecycle_scripts::LifecycleScriptsExecutor;
use crate::lifecycle_scripts::LifecycleScriptsExecutorOptions;
use crate::lifecycle_scripts::LifecycleScriptsStrategy;
use crate::lifecycle_scripts::has_lifecycle_scripts;
use crate::lifecycle_scripts::is_running_lifecycle_script;
use crate::package_json::NpmInstallDepsProvider;
use crate::process_state::NpmProcessState;
#[sys_traits::auto_impl]
pub trait LocalNpmInstallSys:
NpmCacheSys
+ CloneDirRecursiveSys
+ SetupBinEntrySys
+ LaxSingleProcessFsFlagSys
+ sys_traits::EnvVar
+ sys_traits::FsSymlinkDir
+ sys_traits::FsCreateJunction
{
}
/// Resolver that creates a local node_modules directory
/// and resolves packages from it.
pub struct LocalNpmPackageInstaller<
THttpClient: NpmCacheHttpClient,
TReporter: Reporter,
TSys: LocalNpmInstallSys,
> {
lifecycle_scripts_executor: Arc<dyn LifecycleScriptsExecutor>,
npm_cache: Arc<NpmCache<TSys>>,
npm_install_deps_provider: Arc<NpmInstallDepsProvider>,
npm_package_extra_info_provider: Arc<NpmPackageExtraInfoProvider>,
reporter: TReporter,
resolution: Arc<NpmResolutionCell>,
sys: TSys,
tarball_cache: Arc<TarballCache<THttpClient, TSys>>,
lifecycle_scripts_config: Arc<LifecycleScriptsConfig>,
root_node_modules_path: PathBuf,
system_info: NpmSystemInfo,
install_reporter: Option<Arc<dyn crate::InstallReporter>>,
}
impl<
THttpClient: NpmCacheHttpClient,
TReporter: Reporter,
TSys: LocalNpmInstallSys,
> std::fmt::Debug for LocalNpmPackageInstaller<THttpClient, TReporter, TSys>
{
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("LocalNpmPackageInstaller")
.field("npm_cache", &self.npm_cache)
.field("npm_install_deps_provider", &self.npm_install_deps_provider)
.field("reporter", &self.reporter)
.field("resolution", &self.resolution)
.field("sys", &self.sys)
.field("tarball_cache", &self.tarball_cache)
.field("lifecycle_scripts_config", &self.lifecycle_scripts_config)
.field("root_node_modules_path", &self.root_node_modules_path)
.field("system_info", &self.system_info)
.finish()
}
}
struct InitializingGuard {
nv: PackageNv,
install_reporter: Arc<dyn crate::InstallReporter>,
}
impl Drop for InitializingGuard {
fn drop(&mut self) {
self.install_reporter.initialized(&self.nv);
}
}
impl<
THttpClient: NpmCacheHttpClient,
TReporter: Reporter,
TSys: LocalNpmInstallSys,
> LocalNpmPackageInstaller<THttpClient, TReporter, TSys>
{
#[allow(clippy::too_many_arguments)]
pub fn new(
lifecycle_scripts_executor: Arc<dyn LifecycleScriptsExecutor>,
npm_cache: Arc<NpmCache<TSys>>,
npm_package_extra_info_provider: Arc<NpmPackageExtraInfoProvider>,
npm_install_deps_provider: Arc<NpmInstallDepsProvider>,
reporter: TReporter,
resolution: Arc<NpmResolutionCell>,
sys: TSys,
tarball_cache: Arc<TarballCache<THttpClient, TSys>>,
node_modules_folder: PathBuf,
lifecycle_scripts: Arc<LifecycleScriptsConfig>,
system_info: NpmSystemInfo,
install_reporter: Option<Arc<dyn crate::InstallReporter>>,
) -> Self {
Self {
lifecycle_scripts_executor,
npm_cache,
npm_install_deps_provider,
npm_package_extra_info_provider,
reporter,
resolution,
tarball_cache,
sys,
lifecycle_scripts_config: lifecycle_scripts,
root_node_modules_path: node_modules_folder,
system_info,
install_reporter,
}
}
/// Synchronizes the on-disk `node_modules` directory with the resolved npm
/// `snapshot`.
///
/// High-level flow (mirrors the numbered step comments below): extract
/// packages into `node_modules/.deno`, apply patch/copy packages, wire up
/// the symlink graph, set up `.bin` entries and workspace links, report
/// deprecations, and finally run any allowed lifecycle scripts.
///
/// Holds a lax cross-process file lock (`.deno/.deno.lock`) for the whole
/// setup so concurrent deno processes don't clobber each other's work.
/// Returns early (no-op) when there is nothing to install or when we are
/// ourselves running inside a lifecycle script.
async fn sync_resolution_with_fs(
  &self,
  snapshot: &NpmResolutionSnapshot,
) -> Result<(), SyncResolutionWithFsError> {
  if snapshot.is_empty()
    && self.npm_install_deps_provider.local_pkgs().is_empty()
  {
    return Ok(()); // don't create the directory
  }

  // don't set up node_modules (and more importantly try to acquire the file lock)
  // if we're running as part of a lifecycle script
  if is_running_lifecycle_script(&self.sys) {
    return Ok(());
  }

  let deno_local_registry_dir = self.root_node_modules_path.join(".deno");
  let deno_node_modules_dir = deno_local_registry_dir.join("node_modules");
  self
    .sys
    .fs_create_dir_all(&deno_node_modules_dir)
    .map_err(|source| SyncResolutionWithFsError::Creating {
      path: deno_node_modules_dir.to_path_buf(),
      source,
    })?;
  let bin_node_modules_dir_path = self.root_node_modules_path.join(".bin");
  self
    .sys
    .fs_create_dir_all(&bin_node_modules_dir_path)
    .map_err(|source| SyncResolutionWithFsError::Creating {
      path: bin_node_modules_dir_path.to_path_buf(),
      source,
    })?;

  let single_process_lock = LaxSingleProcessFsFlag::lock(
    &self.sys,
    deno_local_registry_dir.join(".deno.lock"),
    &self.reporter,
    // similar message used by cargo build
    "waiting for file lock on node_modules directory",
  )
  .await;

  // load this after we get the directory lock
  let mut setup_cache = LocalSetupCache::load(
    self.sys.clone(),
    deno_local_registry_dir.join(".setup-cache.bin"),
  );

  let pb_clear_guard = self.reporter.clear_guard(); // prevent flickering

  // 1. Write all the packages out the .deno directory.
  //
  // Copy (hardlink in future) <global_registry_cache>/<package_id>/ to
  // node_modules/.deno/<package_folder_id_folder_name>/node_modules/<package_name>
  let package_partitions =
    snapshot.all_system_packages_partitioned(&self.system_info);
  let mut cache_futures = FuturesUnordered::new();
  let mut newest_packages_by_name: HashMap<
    &StackString,
    &NpmResolutionPackage,
  > = HashMap::with_capacity(package_partitions.packages.len());
  let bin_entries = Rc::new(RefCell::new(BinEntries::new(&self.sys)));
  let lifecycle_scripts = Rc::new(RefCell::new(LifecycleScripts::new(
    &self.sys,
    &self.lifecycle_scripts_config,
    LocalLifecycleScripts {
      sys: &self.sys,
      deno_local_registry_dir: &deno_local_registry_dir,
      install_reporter: self.install_reporter.clone(),
    },
  )));
  let packages_with_deprecation_warnings = Arc::new(Mutex::new(Vec::new()));

  // dist-tags (e.g. "latest") requested per package; written into the
  // `.initialized` marker so tag-only changes are detected below
  let mut package_tags: HashMap<&PackageNv, BTreeSet<&str>> = HashMap::new();
  for (package_req, package_nv) in snapshot.package_reqs() {
    if let Some(tag) = package_req.version_req.tag() {
      package_tags.entry(package_nv).or_default().insert(tag);
    }
  }

  let extra_info_provider = Arc::new(CachedNpmPackageExtraInfoProvider::new(
    self.npm_package_extra_info_provider.clone(),
  ));

  for package in &package_partitions.packages {
    // track the newest version seen per package name (used for steps 5-7)
    if let Some(current_pkg) =
      newest_packages_by_name.get_mut(&package.id.nv.name)
    {
      if current_pkg.id.nv.cmp(&package.id.nv) == Ordering::Less {
        *current_pkg = package;
      }
    } else {
      newest_packages_by_name.insert(&package.id.nv.name, package);
    };

    let package_folder_name = get_package_folder_id_folder_name(
      &package.get_package_cache_folder_id(),
    );
    let folder_path = deno_local_registry_dir.join(&package_folder_name);
    // comma-separated sorted list of dist-tags for this package
    let tags = package_tags
      .get(&package.id.nv)
      .map(|tags| {
        capacity_builder::StringBuilder::<String>::build(|builder| {
          for (i, tag) in tags.iter().enumerate() {
            if i > 0 {
              builder.append(',')
            }
            builder.append(*tag);
          }
        })
        .unwrap()
      })
      .unwrap_or_default();
    enum PackageFolderState {
      UpToDate,
      Uninitialized,
      TagsOutdated,
    }
    let initialized_file = folder_path.join(".initialized");
    // the `.initialized` marker stores the tag list; compare it to decide
    // whether the folder needs (re-)extraction or just a marker refresh
    let package_state = if tags.is_empty() {
      if self.sys.fs_exists_no_err(&initialized_file) {
        PackageFolderState::UpToDate
      } else {
        PackageFolderState::Uninitialized
      }
    } else {
      self
        .sys
        .fs_read_to_string(&initialized_file)
        .map(|s| {
          if s != tags {
            PackageFolderState::TagsOutdated
          } else {
            PackageFolderState::UpToDate
          }
        })
        .unwrap_or(PackageFolderState::Uninitialized)
    };
    if !self
      .npm_cache
      .cache_setting()
      .should_use_for_npm_package(&package.id.nv.name)
      || matches!(package_state, PackageFolderState::Uninitialized)
    {
      if let Some(dist) = &package.dist {
        // cache bust the dep from the dep setup cache so the symlinks
        // are forced to be recreated
        setup_cache.remove_dep(&package_folder_name);

        let folder_path = folder_path.clone();
        let packages_with_deprecation_warnings =
          packages_with_deprecation_warnings.clone();
        let extra_info_provider = extra_info_provider.clone();
        let lifecycle_scripts = lifecycle_scripts.clone();
        let bin_entries_to_setup = bin_entries.clone();
        let install_reporter = self.install_reporter.clone();
        cache_futures.push(
          async move {
            self
              .tarball_cache
              .ensure_package(&package.id.nv, dist)
              .await
              .map_err(JsErrorBox::from_err)?;
            let pb_guard =
              self.reporter.on_initializing(&package.id.nv.to_string());
            let _initialization_guard =
              install_reporter.as_ref().map(|install_reporter| {
                install_reporter.initializing(&package.id.nv);
                InitializingGuard {
                  nv: package.id.nv.clone(),
                  install_reporter: install_reporter.clone(),
                }
              });
            let sub_node_modules = folder_path.join("node_modules");
            let package_path = join_package_name(
              Cow::Owned(sub_node_modules),
              &package.id.nv.name,
            );
            let cache_folder =
              self.npm_cache.package_folder_for_nv(&package.id.nv);
            // copy the package out of the global cache on a blocking thread
            let handle = crate::rt::spawn_blocking({
              let package_path = package_path.clone();
              let sys = self.sys.clone();
              move || {
                clone_dir_recursive(&sys, &cache_folder, &package_path)?;
                // write out a file that indicates this folder has been initialized
                write_initialized_file(&sys, &initialized_file, &tags)?;
                Ok::<_, SyncResolutionWithFsError>(())
              }
            });
            // only fetch "extra" info when it's actually needed and not
            // already part of the resolution
            let extra_fut = if (package.has_bin
              || package.has_scripts
              || package.is_deprecated)
              && package.extra.is_none()
            {
              extra_info_provider
                .get_package_extra_info(
                  &package.id.nv,
                  &package_path,
                  ExpectedExtraInfo::from_package(package),
                )
                .boxed_local()
            } else {
              std::future::ready(Ok(
                package.extra.clone().unwrap_or_default(),
              ))
              .boxed_local()
            };
            // run the folder copy and the extra-info lookup concurrently
            let (result, extra) =
              futures::future::join(handle, extra_fut).await;
            result
              .map_err(JsErrorBox::from_err)?
              .map_err(JsErrorBox::from_err)?;
            let extra = extra.map_err(JsErrorBox::from_err)?;
            if package.has_bin {
              bin_entries_to_setup.borrow_mut().add(
                package,
                &extra,
                package_path.to_path_buf(),
              );
            }
            if package.has_scripts {
              lifecycle_scripts.borrow_mut().add(
                package,
                &extra,
                package_path.into(),
              );
            }
            if package.is_deprecated
              && let Some(deprecated) = &extra.deprecated
            {
              packages_with_deprecation_warnings
                .lock()
                .push((package.id.nv.clone(), deprecated.clone()));
            }
            // finally stop showing the progress bar
            drop(pb_guard); // explicit for clarity
            Ok::<_, JsErrorBox>(())
          }
          .boxed_local(),
        );
      }
    } else {
      if matches!(package_state, PackageFolderState::TagsOutdated) {
        // folder contents are fine; only refresh the tag marker
        write_initialized_file(&self.sys, &initialized_file, &tags)?;
      }

      if package.has_bin || package.has_scripts {
        let bin_entries_to_setup = bin_entries.clone();
        let lifecycle_scripts = lifecycle_scripts.clone();
        let extra_info_provider = extra_info_provider.clone();
        let sub_node_modules = folder_path.join("node_modules");
        let package_path = join_package_name(
          Cow::Owned(sub_node_modules),
          &package.id.nv.name,
        );
        cache_futures.push(
          async move {
            let extra = extra_info_provider
              .get_package_extra_info(
                &package.id.nv,
                &package_path,
                ExpectedExtraInfo::from_package(package),
              )
              .await
              .map_err(JsErrorBox::from_err)?;
            if package.has_bin {
              bin_entries_to_setup.borrow_mut().add(
                package,
                &extra,
                package_path.to_path_buf(),
              );
            }
            if package.has_scripts {
              lifecycle_scripts.borrow_mut().add(
                package,
                &extra,
                package_path.into(),
              );
            }
            Ok(())
          }
          .boxed_local(),
        );
      }
    }
  }

  // Wait for all npm package installations to complete before applying patches
  // This prevents race conditions where npm packages could overwrite patch files
  while let Some(result) = cache_futures.next().await {
    result?; // surface the first error
  }

  // 2. Setup the patch packages
  for patch_pkg in self.npm_install_deps_provider.patch_pkgs() {
    // there might be multiple ids per package due to peer dep copy packages
    for id in snapshot.package_ids_for_nv(&patch_pkg.nv) {
      let package = snapshot.package_from_id(id).unwrap();
      let package_folder_name = get_package_folder_id_folder_name(
        &package.get_package_cache_folder_id(),
      );
      // node_modules/.deno/<package_folder_id_folder_name>/node_modules/<package_name> -> local package folder
      let target = join_package_name(
        Cow::Owned(
          deno_local_registry_dir
            .join(&package_folder_name)
            .join("node_modules"),
        ),
        &patch_pkg.nv.name,
      );
      cache_futures.push(
        async move {
          let from_path = patch_pkg.target_dir.clone();
          let sys = self.sys.clone();
          crate::rt::spawn_blocking({
            move || {
              clone_dir_recursive_except_node_modules_child(
                &sys, &from_path, &target,
              )
            }
          })
          .await
          .map_err(JsErrorBox::from_err)?
          .map_err(JsErrorBox::from_err)?;
          Ok::<_, JsErrorBox>(())
        }
        .boxed_local(),
      );
    }
  }

  // copy packages copy from the main packages, so wait
  // until these are all done
  while let Some(result) = cache_futures.next().await {
    result?; // surface the first error
  }

  // 3. Create any "copy" packages, which are used for peer dependencies
  for package in &package_partitions.copy_packages {
    let package_cache_folder_id = package.get_package_cache_folder_id();
    let destination_path = deno_local_registry_dir
      .join(get_package_folder_id_folder_name(&package_cache_folder_id));
    let initialized_file = destination_path.join(".initialized");
    if !self.sys.fs_exists_no_err(&initialized_file) {
      let sub_node_modules = destination_path.join("node_modules");
      let package_path =
        join_package_name(Cow::Owned(sub_node_modules), &package.id.nv.name);
      // source is the non-copy ("count 0") folder of the same package
      let source_path = join_package_name(
        Cow::Owned(
          deno_local_registry_dir
            .join(get_package_folder_id_folder_name(
              &package_cache_folder_id.with_no_count(),
            ))
            .join("node_modules"),
        ),
        &package.id.nv.name,
      );
      cache_futures.push(
        async move {
          let sys = self.sys.clone();
          crate::rt::spawn_blocking(move || {
            clone_dir_recursive(&sys, &source_path, &package_path)
              .map_err(JsErrorBox::from_err)?;
            // write out a file that indicates this folder has been initialized
            create_initialized_file(&sys, &initialized_file)?;
            Ok::<_, JsErrorBox>(())
          })
          .await
          .map_err(JsErrorBox::from_err)?
          .map_err(JsErrorBox::from_err)?;
          Ok::<_, JsErrorBox>(())
        }
        .boxed_local(),
      );
    }
  }

  while let Some(result) = cache_futures.next().await {
    result?; // surface the first error
  }

  // 4. Symlink all the dependencies into the .deno directory.
  //
  // Symlink node_modules/.deno/<package_id>/node_modules/<dep_name> to
  // node_modules/.deno/<dep_id>/node_modules/<dep_package_name>
  for package in package_partitions.iter_all() {
    let package_folder_name = get_package_folder_id_folder_name(
      &package.get_package_cache_folder_id(),
    );
    let sub_node_modules = deno_local_registry_dir
      .join(&package_folder_name)
      .join("node_modules");
    let mut dep_setup_cache = setup_cache.with_dep(&package_folder_name);
    for (name, dep_id) in &package.dependencies {
      let dep = snapshot.package_from_id(dep_id).unwrap();
      if package.optional_dependencies.contains(name)
        && !dep.system.matches_system(&self.system_info)
      {
        continue; // this isn't a dependency for the current system
      }
      let dep_cache_folder_id = dep.get_package_cache_folder_id();
      let dep_folder_name =
        get_package_folder_id_folder_name(&dep_cache_folder_id);
      if package.dist.is_none()
        || dep_setup_cache.insert(name, &dep_folder_name)
      {
        let dep_folder_path = join_package_name(
          Cow::Owned(
            deno_local_registry_dir
              .join(dep_folder_name)
              .join("node_modules"),
          ),
          &dep_id.nv.name,
        );
        symlink_package_dir(
          &self.sys,
          &dep_folder_path,
          &join_package_name(Cow::Borrowed(&sub_node_modules), name),
        )?;
      }
    }
  }

  // names already linked into the root node_modules folder
  let mut found_names: HashMap<&StackString, &PackageNv> = HashMap::new();

  // set of node_modules in workspace packages that we've already ensured exist
  let mut existing_child_node_modules_dirs: HashSet<PathBuf> = HashSet::new();

  // 5. Create symlinks for package json dependencies
  {
    for remote in self.npm_install_deps_provider.remote_pkgs() {
      let remote_pkg = match snapshot.resolve_pkg_from_pkg_req(&remote.req) {
        Ok(remote_pkg) => remote_pkg,
        _ => {
          if remote.req.version_req.tag().is_some() {
            // couldn't find a match, and `resolve_best_package_id`
            // panics if you give it a tag
            continue;
          } else {
            match snapshot.resolve_best_package_id(
              &remote.req.name,
              &remote.req.version_req,
            ) {
              Some(remote_id) => {
                snapshot.package_from_id(&remote_id).unwrap()
              }
              _ => {
                continue; // skip, package not found
              }
            }
          }
        }
      };
      let Some(remote_alias) = &remote.alias else {
        continue;
      };
      let alias_clashes = remote.req.name != *remote_alias
        && newest_packages_by_name.contains_key(remote_alias);
      let install_in_child = {
        // we'll install in the child if the alias is taken by another package, or
        // if there's already a package with the same name but different version
        // linked into the root
        match found_names.entry(remote_alias) {
          Entry::Occupied(nv) => {
            // alias to a different package (in case of duplicate aliases)
            // or the version doesn't match the version in the root node_modules
            alias_clashes || &remote_pkg.id.nv != *nv.get()
          }
          Entry::Vacant(entry) => {
            entry.insert(&remote_pkg.id.nv);
            alias_clashes
          }
        }
      };
      let target_folder_name = get_package_folder_id_folder_name(
        &remote_pkg.get_package_cache_folder_id(),
      );
      let local_registry_package_path = join_package_name(
        Cow::Owned(
          deno_local_registry_dir
            .join(&target_folder_name)
            .join("node_modules"),
        ),
        &remote_pkg.id.nv.name,
      );
      if install_in_child {
        // symlink the dep into the package's child node_modules folder
        let dest_node_modules = remote.base_dir.join("node_modules");
        if !existing_child_node_modules_dirs.contains(&dest_node_modules) {
          self.sys.fs_create_dir_all(&dest_node_modules).map_err(
            |source| SyncResolutionWithFsError::Creating {
              path: dest_node_modules.clone(),
              source,
            },
          )?;
          existing_child_node_modules_dirs.insert(dest_node_modules.clone());
        }
        let mut dest_path = dest_node_modules;
        dest_path.push(remote_alias);

        symlink_package_dir(
          &self.sys,
          &local_registry_package_path,
          &dest_path,
        )?;
      } else {
        // symlink the package into `node_modules/<alias>`
        if setup_cache
          .insert_root_symlink(&remote_pkg.id.nv.name, &target_folder_name)
        {
          symlink_package_dir(
            &self.sys,
            &local_registry_package_path,
            &join_package_name(
              Cow::Borrowed(&self.root_node_modules_path),
              remote_alias,
            ),
          )?;
        }
      }
    }
  }

  // 6. Create symlinks for the remaining top level packages in the node_modules folder.
  // (These may be present if they are not in the package.json dependencies)
  // Symlink node_modules/.deno/<package_id>/node_modules/<package_name> to
  // node_modules/<package_name>
  let mut ids = snapshot
    .top_level_packages()
    .filter(|f| !found_names.contains_key(&f.nv.name))
    .collect::<Vec<_>>();
  ids.sort_by(|a, b| b.cmp(a)); // create determinism and only include the latest version
  for id in ids {
    match found_names.entry(&id.nv.name) {
      Entry::Occupied(_) => {
        continue; // skip, already handled
      }
      Entry::Vacant(entry) => {
        entry.insert(&id.nv);
      }
    }
    let package = snapshot.package_from_id(id).unwrap();
    let target_folder_name = get_package_folder_id_folder_name(
      &package.get_package_cache_folder_id(),
    );
    if setup_cache.insert_root_symlink(&id.nv.name, &target_folder_name) {
      let local_registry_package_path = join_package_name(
        Cow::Owned(
          deno_local_registry_dir
            .join(target_folder_name)
            .join("node_modules"),
        ),
        &id.nv.name,
      );

      symlink_package_dir(
        &self.sys,
        &local_registry_package_path,
        &join_package_name(
          Cow::Borrowed(&self.root_node_modules_path),
          &id.nv.name,
        ),
      )?;
    }
  }

  // 7. Create a node_modules/.deno/node_modules/<package-name> directory with
  // the remaining packages
  for package in newest_packages_by_name.values() {
    match found_names.entry(&package.id.nv.name) {
      Entry::Occupied(_) => {
        continue; // skip, already handled
      }
      Entry::Vacant(entry) => {
        entry.insert(&package.id.nv);
      }
    }

    let target_folder_name = get_package_folder_id_folder_name(
      &package.get_package_cache_folder_id(),
    );
    if setup_cache
      .insert_deno_symlink(&package.id.nv.name, &target_folder_name)
    {
      let local_registry_package_path = join_package_name(
        Cow::Owned(
          deno_local_registry_dir
            .join(target_folder_name)
            .join("node_modules"),
        ),
        &package.id.nv.name,
      );

      symlink_package_dir(
        &self.sys,
        &local_registry_package_path,
        &join_package_name(
          Cow::Borrowed(&deno_node_modules_dir),
          &package.id.nv.name,
        ),
      )?;
    }
  }

  // 8. Set up `node_modules/.bin` entries for packages that need it.
  {
    let bin_entries = match Rc::try_unwrap(bin_entries) {
      Ok(bin_entries) => bin_entries.into_inner(),
      Err(_) => panic!("Should have sole ref to rc."),
    };
    bin_entries.finish(
      snapshot,
      &bin_node_modules_dir_path,
      |setup_outcome| {
        let lifecycle_scripts = lifecycle_scripts.borrow();
        match setup_outcome {
          EntrySetupOutcome::MissingEntrypoint {
            package,
            package_path,
            extra,
            ..
          } if has_lifecycle_scripts(&self.sys, extra, package_path)
            && lifecycle_scripts.can_run_scripts(&package.id.nv)
            && !lifecycle_scripts.has_run_scripts(package) =>
          {
            // ignore, it might get fixed when the lifecycle scripts run.
            // if not, we'll warn then
          }
          outcome => outcome.warn_if_failed(),
        }
      },
    )?;
  }

  // 9. Create symlinks for the workspace packages
  {
    // todo(dsherret): this is not exactly correct because it should
    // install correctly for a workspace (potentially in sub directories),
    // but this is good enough for a first pass
    for pkg in self.npm_install_deps_provider.local_pkgs() {
      let Some(pkg_alias) = &pkg.alias else {
        continue;
      };
      symlink_package_dir(
        &self.sys,
        &pkg.target_dir,
        &self.root_node_modules_path.join(pkg_alias),
      )?;
    }
  }

  // report any deprecation warnings collected during extraction
  {
    let packages_with_deprecation_warnings =
      packages_with_deprecation_warnings.lock();
    if !packages_with_deprecation_warnings.is_empty() {
      use std::fmt::Write;
      let mut output = String::new();
      let _ = writeln!(
        &mut output,
        "{} The following packages are deprecated:",
        colors::yellow("Warning")
      );
      let len = packages_with_deprecation_warnings.len();
      for (idx, (package_nv, msg)) in
        packages_with_deprecation_warnings.iter().enumerate()
      {
        if idx != len - 1 {
          let _ = writeln!(
            &mut output,
            "┠─ {}",
            colors::gray(format!("npm:{:?} ({})", package_nv, msg))
          );
        } else {
          let _ = write!(
            &mut output,
            "┖─ {}",
            colors::gray(format!("npm:{:?} ({})", package_nv, msg))
          );
        }
      }
      if let Some(install_reporter) = &self.install_reporter {
        install_reporter.deprecated_message(output);
      } else {
        log::warn!("{}", output);
      }
    }
  }

  // take the accumulated lifecycle scripts out of the shared RefCell so
  // they can be consumed below
  let lifecycle_scripts = std::mem::replace(
    &mut *lifecycle_scripts.borrow_mut(),
    LifecycleScripts::new(
      &self.sys,
      &self.lifecycle_scripts_config,
      LocalLifecycleScripts {
        sys: &self.sys,
        deno_local_registry_dir: &deno_local_registry_dir,
        install_reporter: self.install_reporter.clone(),
      },
    ),
  );
  lifecycle_scripts.warn_not_run_scripts()?;

  let packages_with_scripts = lifecycle_scripts.packages_with_scripts();
  if !packages_with_scripts.is_empty() {
    let process_state = NpmProcessState::new_local(
      snapshot.as_valid_serialized(),
      &self.root_node_modules_path,
    )
    .as_serialized();
    self
      .lifecycle_scripts_executor
      .execute(LifecycleScriptsExecutorOptions {
        init_cwd: &self.lifecycle_scripts_config.initial_cwd,
        process_state: process_state.as_str(),
        root_node_modules_dir_path: &self.root_node_modules_path,
        on_ran_pkg_scripts: &|pkg| {
          create_initialized_file(
            &self.sys,
            &ran_scripts_file(&deno_local_registry_dir, pkg),
          )
        },
        snapshot,
        system_packages: &package_partitions.packages,
        packages_with_scripts,
        extra_info_provider: &extra_info_provider,
      })
      .await
      .map_err(SyncResolutionWithFsError::LifecycleScripts)?
  }

  setup_cache.save();
  drop(single_process_lock);
  drop(pb_clear_guard);

  Ok(())
}
}
#[async_trait(?Send)]
impl<
  THttpClient: NpmCacheHttpClient,
  TReporter: Reporter,
  TSys: LocalNpmInstallSys,
> NpmPackageFsInstaller
  for LocalNpmPackageInstaller<THttpClient, TReporter, TSys>
{
  /// Materializes the requested portion of the resolution into the local
  /// `node_modules` directory.
  async fn cache_packages<'a>(
    &self,
    caching: PackageCaching<'a>,
  ) -> Result<(), JsErrorBox> {
    // Narrow the snapshot when only a subset of requirements was requested.
    let snapshot = if let PackageCaching::Only(reqs) = caching {
      self.resolution.subset(&reqs)
    } else {
      self.resolution.snapshot()
    };
    let sync_result = self.sync_resolution_with_fs(&snapshot).await;
    sync_result.map_err(JsErrorBox::from_err)
  }
}
#[derive(Debug, thiserror::Error, deno_error::JsError)]
pub enum SyncResolutionWithFsError {
#[class(inherit)]
#[error("Creating '{path}'")]
Creating {
path: PathBuf,
#[source]
#[inherit]
source: std::io::Error,
},
#[class(inherit)]
#[error("Copying '{from}' to '{to}'")]
Copying {
from: PathBuf,
to: PathBuf,
#[source]
#[inherit]
source: std::io::Error,
},
#[class(inherit)]
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | true |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/libs/npm_installer/global.rs | libs/npm_installer/global.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::borrow::Cow;
use std::path::Path;
use std::path::PathBuf;
use std::sync::Arc;
use deno_error::JsErrorBox;
use deno_npm::NpmResolutionPackage;
use deno_npm::NpmSystemInfo;
use deno_npm_cache::NpmCache;
use deno_npm_cache::NpmCacheHttpClient;
use deno_npm_cache::NpmCacheSys;
use deno_npm_cache::TarballCache;
use deno_resolver::npm::managed::NpmResolutionCell;
use deno_terminal::colors;
use futures::StreamExt;
use futures::stream::FuturesUnordered;
use sys_traits::OpenOptions;
use crate::LifecycleScriptsConfig;
use crate::NpmPackageFsInstaller;
use crate::PackageCaching;
use crate::lifecycle_scripts::LifecycleScripts;
use crate::lifecycle_scripts::LifecycleScriptsStrategy;
/// Resolves packages from the global npm cache.
pub struct GlobalNpmPackageInstaller<
  THttpClient: NpmCacheHttpClient,
  TSys: NpmCacheSys,
> {
  // global npm cache on disk
  cache: Arc<NpmCache<TSys>>,
  // downloads/extracts package tarballs into the cache
  tarball_cache: Arc<TarballCache<THttpClient, TSys>>,
  // filesystem/system abstraction
  sys: TSys,
  // the resolved dependency graph to install from
  resolution: Arc<NpmResolutionCell>,
  // configuration for npm lifecycle scripts (not run for the global cache)
  lifecycle_scripts: Arc<LifecycleScriptsConfig>,
  // os/cpu used to filter system-specific packages
  system_info: NpmSystemInfo,
  // optional hook for surfacing install progress/warnings
  install_reporter: Option<Arc<dyn crate::InstallReporter>>,
}
impl<THttpClient: NpmCacheHttpClient, TSys: NpmCacheSys> std::fmt::Debug
  for GlobalNpmPackageInstaller<THttpClient, TSys>
{
  // Manual impl because `sys` and `install_reporter` are intentionally not
  // printed (the reporter is a trait object). Use `finish_non_exhaustive`
  // so the output signals that some fields were omitted rather than
  // implying an exhaustive listing.
  fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
    f.debug_struct("GlobalNpmPackageInstaller")
      .field("cache", &self.cache)
      .field("tarball_cache", &self.tarball_cache)
      .field("resolution", &self.resolution)
      .field("lifecycle_scripts", &self.lifecycle_scripts)
      .field("system_info", &self.system_info)
      .finish_non_exhaustive()
  }
}
impl<THttpClient: NpmCacheHttpClient, TSys: NpmCacheSys>
GlobalNpmPackageInstaller<THttpClient, TSys>
{
pub fn new(
cache: Arc<NpmCache<TSys>>,
tarball_cache: Arc<TarballCache<THttpClient, TSys>>,
sys: TSys,
resolution: Arc<NpmResolutionCell>,
lifecycle_scripts: Arc<LifecycleScriptsConfig>,
system_info: NpmSystemInfo,
install_reporter: Option<Arc<dyn crate::InstallReporter>>,
) -> Self {
Self {
cache,
tarball_cache,
sys,
resolution,
lifecycle_scripts,
system_info,
install_reporter,
}
}
async fn cache_packages(
&self,
packages: &[NpmResolutionPackage],
) -> Result<(), deno_npm_cache::EnsurePackageError> {
let mut futures_unordered = FuturesUnordered::new();
for package in packages {
if let Some(dist) = &package.dist {
futures_unordered.push(async move {
self
.tarball_cache
.ensure_package(&package.id.nv, dist)
.await
});
}
}
while let Some(result) = futures_unordered.next().await {
// surface the first error
result?;
}
Ok(())
}
}
#[async_trait::async_trait(?Send)]
impl<THttpClient: NpmCacheHttpClient, TSys: NpmCacheSys> NpmPackageFsInstaller
  for GlobalNpmPackageInstaller<THttpClient, TSys>
{
  /// Downloads the requested packages into the global cache, materializes
  /// peer-dependency "copy" folders, and warns about packages whose
  /// lifecycle scripts will not be run (scripts are never executed when
  /// using the global cache).
  async fn cache_packages<'a>(
    &self,
    caching: PackageCaching<'a>,
  ) -> Result<(), JsErrorBox> {
    // narrow the package set to the current system (os/cpu)
    let package_partitions = match caching {
      PackageCaching::All => self
        .resolution
        .all_system_packages_partitioned(&self.system_info),
      PackageCaching::Only(reqs) => self
        .resolution
        .subset(&reqs)
        .all_system_packages_partitioned(&self.system_info),
    };
    self
      .cache_packages(&package_partitions.packages)
      .await
      .map_err(JsErrorBox::from_err)?;

    // create the copy package folders
    for copy in package_partitions.copy_packages {
      self
        .cache
        .ensure_copy_package(&copy.get_package_cache_folder_id())
        .map_err(JsErrorBox::from_err)?;
    }

    let mut lifecycle_scripts = LifecycleScripts::new(
      &self.sys,
      &self.lifecycle_scripts,
      GlobalLifecycleScripts::new(
        self.cache.as_ref(),
        &self.sys,
        &self.lifecycle_scripts.root_dir,
        self.install_reporter.clone(),
      ),
    );

    // For the global cache, we don't run scripts so we just care that there _are_
    // scripts. Kind of hacky, but avoids fetching the "extra" info from the registry.
    let extra = deno_npm::NpmPackageExtraInfo {
      deprecated: None,
      bin: None,
      scripts: [("postinstall".into(), "".into())].into_iter().collect(),
    };

    for package in &package_partitions.packages {
      if package.has_scripts {
        let package_folder = self.cache.package_folder_for_nv(&package.id.nv);
        lifecycle_scripts.add(package, &extra, Cow::Borrowed(&package_folder));
      }
    }
    lifecycle_scripts
      .warn_not_run_scripts()
      .map_err(JsErrorBox::from_err)?;
    Ok(())
  }
}
/// Lifecycle-scripts strategy for the global cache: scripts are never run,
/// only warned about, with warning state tracked via marker files in the
/// cached package folders.
struct GlobalLifecycleScripts<'a, TSys: NpmCacheSys> {
  // global npm cache (used to locate per-package folders)
  cache: &'a NpmCache<TSys>,
  // filesystem abstraction
  sys: &'a TSys,
  // hash of the workspace root dir, scoping warning markers per project
  path_hash: u64,
  // optional hook to route warnings through a reporter instead of the log
  install_reporter: Option<Arc<dyn crate::InstallReporter>>,
}
impl<'a, TSys: NpmCacheSys> GlobalLifecycleScripts<'a, TSys> {
  /// Builds a strategy scoped to `root_dir`; the root path is hashed so
  /// warning marker files are per-project.
  fn new(
    cache: &'a NpmCache<TSys>,
    sys: &'a TSys,
    root_dir: &Path,
    install_reporter: Option<Arc<dyn crate::InstallReporter>>,
  ) -> Self {
    use std::hash::Hasher;
    let mut root_dir_hasher = twox_hash::XxHash64::default();
    root_dir_hasher.write(root_dir.to_string_lossy().as_bytes());
    Self {
      cache,
      sys,
      path_hash: root_dir_hasher.finish(),
      install_reporter,
    }
  }

  /// Path of the marker file recording that we already warned about this
  /// package's scripts for the current workspace root.
  fn warned_scripts_file(&self, package: &NpmResolutionPackage) -> PathBuf {
    let package_dir = self.cache.package_folder_for_nv(&package.id.nv);
    package_dir.join(format!(".scripts-warned-{}", self.path_hash))
  }
}
impl<TSys: NpmCacheSys> LifecycleScriptsStrategy
  for GlobalLifecycleScripts<'_, TSys>
{
  // Scripts are never executed against the shared global cache.
  fn can_run_scripts(&self) -> bool {
    false
  }

  /// Builds a single multi-line warning listing every package whose build
  /// scripts were ignored, then either hands it to the install reporter
  /// (which also receives a callback to persist the "warned" markers) or
  /// logs it directly and writes the markers itself.
  fn warn_on_scripts_not_run(
    &self,
    packages: &[(&NpmResolutionPackage, PathBuf)],
  ) -> std::result::Result<(), std::io::Error> {
    use std::fmt::Write;
    use std::writeln;
    let mut output = String::new();
    _ = writeln!(
      &mut output,
      "{} {}",
      colors::yellow("╭"),
      colors::yellow_bold("Warning")
    );
    _ = writeln!(&mut output, "{}", colors::yellow("│"));
    _ = writeln!(
      &mut output,
      "{} Ignored build scripts for packages:",
      colors::yellow("│"),
    );
    for (package, _) in packages {
      _ = writeln!(
        &mut output,
        "{} {}",
        colors::yellow("│"),
        colors::italic(format!("npm:{}", package.id.nv))
      );
    }
    _ = writeln!(&mut output, "{}", colors::yellow("│"));
    _ = writeln!(
      &mut output,
      "{} Lifecycle scripts are only supported when using a `node_modules` directory.",
      colors::yellow("│")
    );
    _ = writeln!(
      &mut output,
      "{} Enable it in your deno config file:",
      colors::yellow("│")
    );
    _ = writeln!(
      &mut output,
      "{} {}",
      colors::yellow("│"),
      colors::bold("\"nodeModulesDir\": \"auto\"")
    );
    _ = write!(&mut output, "{}", colors::yellow("╰─"));
    if let Some(install_reporter) = &self.install_reporter {
      let paths = packages
        .iter()
        .map(|(package, _)| self.warned_scripts_file(package))
        .collect::<Vec<_>>();
      install_reporter.scripts_not_run_warning(
        crate::lifecycle_scripts::LifecycleScriptsWarning::new(
          output,
          // deferred marker writing: the reporter decides when/whether to
          // create the ".scripts-warned-*" files
          Box::new(move |sys| {
            for path in paths {
              let _ignore_err =
                sys.fs_open_boxed(&path, &OpenOptions::new_write());
            }
          }),
        ),
      );
    } else {
      log::warn!("{}", output);
      // create (touch) the marker files so we don't warn again
      for (package, _) in packages {
        let _ignore_err = self.sys.fs_open(
          self.warned_scripts_file(package),
          &OpenOptions::new_write(),
        );
      }
    }
    Ok(())
  }

  // A package counts as "warned" once its marker file exists.
  fn has_warned(&self, package: &NpmResolutionPackage) -> bool {
    self.sys.fs_exists_no_err(self.warned_scripts_file(package))
  }

  // Scripts never run here, so nothing has ever "run".
  fn has_run(&self, _package: &NpmResolutionPackage) -> bool {
    false
  }
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
denoland/deno | https://github.com/denoland/deno/blob/7222e85d435b977de1ab810db067b86f29e6444f/libs/npm_installer/lib.rs | libs/npm_installer/lib.rs | // Copyright 2018-2025 the Deno authors. MIT license.
use std::borrow::Cow;
use std::path::PathBuf;
use std::sync::Arc;
use deno_error::JsErrorBox;
use deno_npm::NpmSystemInfo;
use deno_npm::registry::NpmPackageInfo;
use deno_npm::registry::NpmRegistryPackageInfoLoadError;
use deno_npm_cache::NpmCache;
use deno_npm_cache::NpmCacheHttpClient;
use deno_resolver::lockfile::LockfileLock;
use deno_resolver::npm::managed::NpmResolutionCell;
use deno_resolver::workspace::WorkspaceNpmLinkPackagesRc;
use deno_semver::package::PackageNv;
use deno_semver::package::PackageReq;
mod bin_entries;
mod extra_info;
mod factory;
mod flag;
mod fs;
mod global;
pub mod graph;
pub mod initializer;
pub mod lifecycle_scripts;
mod local;
pub mod package_json;
pub mod process_state;
pub mod resolution;
mod rt;
pub use bin_entries::BinEntries;
pub use bin_entries::BinEntriesError;
use deno_terminal::colors;
use deno_unsync::sync::AtomicFlag;
use deno_unsync::sync::TaskQueue;
use parking_lot::Mutex;
use rustc_hash::FxHashSet;
pub use self::extra_info::CachedNpmPackageExtraInfoProvider;
pub use self::extra_info::ExpectedExtraInfo;
pub use self::extra_info::NpmPackageExtraInfoProvider;
use self::extra_info::NpmPackageExtraInfoProviderSys;
pub use self::factory::InstallReporter;
pub use self::factory::NpmInstallerFactory;
pub use self::factory::NpmInstallerFactoryOptions;
pub use self::factory::NpmInstallerFactorySys;
use self::global::GlobalNpmPackageInstaller;
use self::initializer::NpmResolutionInitializer;
use self::lifecycle_scripts::LifecycleScriptsExecutor;
use self::local::LocalNpmInstallSys;
use self::local::LocalNpmPackageInstaller;
pub use self::local::LocalSetupCache;
use self::package_json::NpmInstallDepsProvider;
use self::package_json::PackageJsonDepValueParseWithLocationError;
use self::resolution::AddPkgReqsResult;
use self::resolution::NpmResolutionInstaller;
use self::resolution::NpmResolutionInstallerSys;
/// Which part of the resolution an install operation should cache/set up.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum PackageCaching<'a> {
  /// Only the given package requirements (and their dependencies).
  Only(Cow<'a, [PackageReq]>),
  /// Everything in the resolution.
  All,
}
/// The set of npm packages that are allowed to run lifecycle scripts.
#[derive(Debug, Clone, Eq, PartialEq, Default)]
pub enum PackagesAllowedScripts {
  /// Every package may run its lifecycle scripts.
  All,
  /// Only the listed package requirements may run scripts.
  Some(Vec<PackageReq>),
  /// No package may run scripts (the default).
  #[default]
  None,
}
/// Info needed to run NPM lifecycle scripts
#[derive(Clone, Debug, Eq, PartialEq, Default)]
pub struct LifecycleScriptsConfig {
  // which packages are permitted to run scripts
  pub allowed: PackagesAllowedScripts,
  // packages explicitly denied from running scripts
  pub denied: Vec<PackageReq>,
  // working directory the scripts are launched from
  pub initial_cwd: PathBuf,
  // workspace root directory
  pub root_dir: PathBuf,
  /// Part of an explicit `deno install`
  pub explicit_install: bool,
}
/// Receives fine-grained install progress events and warnings.
/// Implementations must be thread-safe (`Send + Sync`).
pub trait InstallProgressReporter:
  std::fmt::Debug + Send + Sync + 'static
{
  /// Called when the installer is blocked (e.g. waiting on a file lock).
  fn blocking(&self, message: &str);
  /// Called when setup of the given package begins.
  fn initializing(&self, nv: &PackageNv);
  /// Called when setup of the given package completes.
  fn initialized(&self, nv: &PackageNv);
  /// Called with a warning about lifecycle scripts that were not run.
  fn scripts_not_run_warning(
    &self,
    warning: crate::lifecycle_scripts::LifecycleScriptsWarning,
  );
  /// Called with a formatted message about deprecated packages.
  fn deprecated_message(&self, message: String);
}
/// Progress-bar style reporting abstraction used by the installers.
/// `DynClone` allows installers to hold an owned clone of the reporter.
pub trait Reporter:
  std::fmt::Debug + Send + Sync + 'static + dyn_clone::DynClone
{
  /// Guard returned while an operation is in progress; dropping it ends
  /// the reported operation.
  type Guard;
  /// Guard that suppresses/clears output while held.
  type ClearGuard;
  /// Report that the process is blocked on `message`.
  fn on_blocking(&self, message: &str) -> Self::Guard;
  /// Report that initialization of `message` has started.
  fn on_initializing(&self, message: &str) -> Self::Guard;
  /// Obtain a guard that keeps the display clear while held.
  fn clear_guard(&self) -> Self::ClearGuard;
}
/// A [`Reporter`] that simply writes progress messages to the log.
#[derive(Debug, Clone)]
pub struct LogReporter;
impl Reporter for LogReporter {
  // no stateful guards needed when just logging
  type Guard = ();
  type ClearGuard = ();
  fn on_blocking(&self, message: &str) -> Self::Guard {
    log::info!("{} {}", deno_terminal::colors::cyan("Blocking"), message);
  }
  fn on_initializing(&self, message: &str) -> Self::Guard {
    log::info!("{} {}", deno_terminal::colors::green("Initialize"), message);
  }
  // nothing to clear for log output
  fn clear_guard(&self) -> Self::ClearGuard {}
}
/// Part of the resolution that interacts with the file system.
#[async_trait::async_trait(?Send)]
pub(crate) trait NpmPackageFsInstaller:
  std::fmt::Debug + Send + Sync
{
  /// Ensures the packages selected by `caching` are present on disk
  /// (global cache and/or local `node_modules`, depending on the impl).
  async fn cache_packages<'a>(
    &self,
    caching: PackageCaching<'a>,
  ) -> Result<(), JsErrorBox>;
}
// Umbrella system-trait required by `NpmInstaller`: combines the sys
// requirements of resolution install, local install, and extra-info lookup.
#[sys_traits::auto_impl]
pub trait NpmInstallerSys:
  NpmResolutionInstallerSys + LocalNpmInstallSys + NpmPackageExtraInfoProviderSys
{
}
/// Options for constructing an [`NpmInstaller`].
pub struct NpmInstallerOptions<TSys: NpmInstallerSys> {
  // lockfile to keep in sync with the resolution, if any
  pub maybe_lockfile: Option<Arc<LockfileLock<TSys>>>,
  // when set, install into this local `node_modules` dir; otherwise use
  // the global cache
  pub maybe_node_modules_path: Option<PathBuf>,
  // lifecycle scripts configuration
  pub lifecycle_scripts: Arc<LifecycleScriptsConfig>,
  // os/cpu used to filter system-specific packages
  pub system_info: NpmSystemInfo,
  // workspace `links` (npm link) packages
  pub workspace_link_packages: WorkspaceNpmLinkPackagesRc,
}
/// Top-level npm installer: resolves package requirements and then
/// delegates filesystem setup to a local or global fs installer.
#[derive(Debug)]
pub struct NpmInstaller<
  TNpmCacheHttpClient: NpmCacheHttpClient,
  TSys: NpmInstallerSys,
> {
  // local (`node_modules`) or global fs installer, chosen at construction
  fs_installer: Arc<dyn NpmPackageFsInstaller>,
  // package.json / workspace dependency source
  npm_install_deps_provider: Arc<NpmInstallDepsProvider>,
  // lazily initializes the npm resolution (e.g. from a lockfile)
  npm_resolution_initializer: Arc<NpmResolutionInitializer<TSys>>,
  // adds package requirements to the resolution
  npm_resolution_installer:
    Arc<NpmResolutionInstaller<TNpmCacheHttpClient, TSys>>,
  // lockfile to update alongside the resolution, if any
  maybe_lockfile: Option<Arc<LockfileLock<TSys>>>,
  // the shared resolution state
  npm_resolution: Arc<NpmResolutionCell>,
  // ensures the top-level install only runs once
  top_level_install_flag: AtomicFlag,
  // serializes concurrent install operations
  install_queue: TaskQueue,
  // package reqs that have already been cached to disk
  cached_reqs: Mutex<FxHashSet<PackageReq>>,
}
impl<TNpmCacheHttpClient: NpmCacheHttpClient, TSys: NpmInstallerSys>
NpmInstaller<TNpmCacheHttpClient, TSys>
{
#[allow(clippy::too_many_arguments)]
/// Creates an installer, choosing a local (`node_modules`) or global
/// filesystem installer based on `options.maybe_node_modules_path`.
pub fn new<TReporter: Reporter>(
  install_reporter: Option<Arc<dyn InstallReporter>>,
  lifecycle_scripts_executor: Arc<dyn LifecycleScriptsExecutor>,
  npm_cache: Arc<NpmCache<TSys>>,
  npm_install_deps_provider: Arc<NpmInstallDepsProvider>,
  npm_registry_info_provider: Arc<
    dyn deno_npm::registry::NpmRegistryApi + Send + Sync,
  >,
  npm_resolution: Arc<NpmResolutionCell>,
  npm_resolution_initializer: Arc<NpmResolutionInitializer<TSys>>,
  npm_resolution_installer: Arc<
    NpmResolutionInstaller<TNpmCacheHttpClient, TSys>,
  >,
  reporter: &TReporter,
  sys: TSys,
  tarball_cache: Arc<deno_npm_cache::TarballCache<TNpmCacheHttpClient, TSys>>,
  options: NpmInstallerOptions<TSys>,
) -> Self {
  // Pull the options apart up front so each branch can move what it needs.
  let NpmInstallerOptions {
    maybe_lockfile,
    maybe_node_modules_path,
    lifecycle_scripts,
    system_info,
    workspace_link_packages,
  } = options;
  // A `node_modules` path means a per-project install; otherwise packages
  // only go into the global cache.
  let fs_installer: Arc<dyn NpmPackageFsInstaller> =
    if let Some(node_modules_folder) = maybe_node_modules_path {
      let extra_info_provider = Arc::new(NpmPackageExtraInfoProvider::new(
        npm_registry_info_provider,
        Arc::new(sys.clone()),
        workspace_link_packages,
      ));
      Arc::new(LocalNpmPackageInstaller::new(
        lifecycle_scripts_executor,
        npm_cache.clone(),
        extra_info_provider,
        npm_install_deps_provider.clone(),
        dyn_clone::clone(reporter),
        npm_resolution.clone(),
        sys,
        tarball_cache,
        node_modules_folder,
        lifecycle_scripts,
        system_info,
        install_reporter,
      ))
    } else {
      Arc::new(GlobalNpmPackageInstaller::new(
        npm_cache,
        tarball_cache,
        sys,
        npm_resolution.clone(),
        lifecycle_scripts,
        system_info,
        install_reporter,
      ))
    };
  Self {
    fs_installer,
    npm_install_deps_provider,
    npm_resolution,
    npm_resolution_initializer,
    npm_resolution_installer,
    maybe_lockfile,
    top_level_install_flag: Default::default(),
    install_queue: Default::default(),
    cached_reqs: Default::default(),
  }
}
/// Adds package requirements to the resolver and ensures everything is setup.
/// This includes setting up the `node_modules` directory, if applicable.
pub async fn add_and_cache_package_reqs(
&self,
packages: &[PackageReq],
) -> Result<(), JsErrorBox> {
self.npm_resolution_initializer.ensure_initialized().await?;
self
.add_package_reqs_raw(
packages,
Some(PackageCaching::Only(packages.into())),
)
.await
.dependencies_result
}
pub async fn add_package_reqs_no_cache(
&self,
packages: &[PackageReq],
) -> Result<(), JsErrorBox> {
self.npm_resolution_initializer.ensure_initialized().await?;
self
.add_package_reqs_raw(packages, None)
.await
.dependencies_result
}
pub async fn add_package_reqs(
&self,
packages: &[PackageReq],
caching: PackageCaching<'_>,
) -> Result<(), JsErrorBox> {
self.npm_resolution_initializer.ensure_initialized().await?;
self
.add_package_reqs_raw(packages, Some(caching))
.await
.dependencies_result
}
pub async fn add_package_reqs_raw(
&self,
packages: &[PackageReq],
caching: Option<PackageCaching<'_>>,
) -> AddPkgReqsResult {
if packages.is_empty() && !self.npm_resolution.is_pending() {
return AddPkgReqsResult {
dependencies_result: Ok(()),
results: vec![],
};
}
#[cfg(debug_assertions)]
self.npm_resolution_initializer.debug_assert_initialized();
let mut result = self
.npm_resolution_installer
.add_package_reqs(packages)
.await;
if result.dependencies_result.is_ok()
&& let Some(lockfile) = self.maybe_lockfile.as_ref()
{
result.dependencies_result = lockfile.error_if_changed();
}
if result.dependencies_result.is_ok()
&& let Some(caching) = caching
{
result.dependencies_result =
self.maybe_cache_packages(packages, caching).await;
}
result
}
async fn maybe_cache_packages(
&self,
packages: &[PackageReq],
caching: PackageCaching<'_>,
) -> Result<(), JsErrorBox> {
// the async mutex is unfortunate, but needed to handle the edge case where two workers
// try to cache the same package at the same time. we need to hold the lock while we cache
// and since that crosses an await point, we need the async mutex.
//
// should have a negligible perf impact because acquiring the lock is still in the order of nanoseconds
// while caching typically takes micro or milli seconds.
let _permit = self.install_queue.acquire().await;
let uncached = {
let cached_reqs = self.cached_reqs.lock();
packages
.iter()
.filter(|req| !cached_reqs.contains(req))
.collect::<Vec<_>>()
};
if uncached.is_empty() {
return Ok(());
}
let result = self.fs_installer.cache_packages(caching).await;
if result.is_ok() {
let mut cached_reqs = self.cached_reqs.lock();
for req in uncached {
cached_reqs.insert(req.clone());
}
}
result
}
pub async fn cache_package_info(
&self,
package_name: &str,
) -> Result<Arc<NpmPackageInfo>, NpmRegistryPackageInfoLoadError> {
self
.npm_resolution_installer
.cache_package_info(package_name)
.await
}
pub async fn cache_packages(
&self,
caching: PackageCaching<'_>,
) -> Result<(), JsErrorBox> {
if self.npm_resolution.is_pending() {
self.add_package_reqs(&[], caching).await
} else {
self.npm_resolution_initializer.ensure_initialized().await?;
self.fs_installer.cache_packages(caching).await
}
}
pub fn ensure_no_pkg_json_dep_errors(
&self,
) -> Result<(), Box<PackageJsonDepValueParseWithLocationError>> {
for err in self.npm_install_deps_provider.pkg_json_dep_errors() {
match err.source.as_kind() {
deno_package_json::PackageJsonDepValueParseErrorKind::VersionReq(_)
| deno_package_json::PackageJsonDepValueParseErrorKind::JsrVersionReq(
_,
) => {
return Err(Box::new(err.clone()));
}
deno_package_json::PackageJsonDepValueParseErrorKind::Unsupported {
scheme,
} if scheme == "jsr" => {
return Err(Box::new(err.clone()));
}
deno_package_json::PackageJsonDepValueParseErrorKind::Unsupported {
..
} => {
// only warn for this one
log::warn!(
"{} {}\n at {}",
colors::yellow("Warning"),
err.source,
err.location,
)
}
}
}
Ok(())
}
/// Ensures that the top level `package.json` dependencies are installed.
///
/// Returns `true` if the top level packages are already installed. A
/// return value of `false` means that new packages were added to the npm resolution.
pub async fn ensure_top_level_package_json_install(
&self,
) -> Result<bool, JsErrorBox> {
if !self.top_level_install_flag.raise() {
return Ok(true); // already did this
}
self.npm_resolution_initializer.ensure_initialized().await?;
let pkg_json_remote_pkgs = self.npm_install_deps_provider.remote_pkgs();
if pkg_json_remote_pkgs.is_empty() {
return Ok(true);
}
// check if something needs resolving before bothering to load all
// the package information (which is slow)
if pkg_json_remote_pkgs.iter().all(|pkg| {
self
.npm_resolution
.resolve_pkg_id_from_pkg_req(&pkg.req)
.is_ok()
}) {
log::debug!(
"All package.json deps resolvable. Skipping top level install."
);
return Ok(true); // everything is already resolvable
}
let pkg_reqs = pkg_json_remote_pkgs
.iter()
.map(|pkg| pkg.req.clone())
.collect::<Vec<_>>();
self.add_package_reqs_no_cache(&pkg_reqs).await?;
Ok(false)
}
/// Run a resolution install if the npm snapshot is in a pending state
/// due to a config file change.
pub async fn install_resolution_if_pending(&self) -> Result<(), JsErrorBox> {
self.npm_resolution_initializer.ensure_initialized().await?;
self
.npm_resolution_installer
.install_if_pending()
.await
.map_err(JsErrorBox::from_err)?;
Ok(())
}
}
| rust | MIT | 7222e85d435b977de1ab810db067b86f29e6444f | 2026-01-04T15:31:58.521149Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.