repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
gleam-lang/gleam | https://github.com/gleam-lang/gleam/blob/f424547f02e621f1c5f28749786e05eda7feb098/test-project-compiler/src/lib.rs | test-project-compiler/src/lib.rs | #[cfg(test)]
mod generated_tests;
use camino::Utf8PathBuf;
use gleam_core::{
analyse::TargetSupport,
build::{Codegen, Compile, Mode, NullTelemetry, Options, ProjectCompiler, Telemetry},
config::PackageConfig,
io::{FileSystemReader, FileSystemWriter},
paths::ProjectPaths,
warning::VectorWarningEmitterIO,
};
use std::rc::Rc;
/// Compile the test project at `path` in the given build `mode` and return a
/// textual overview of the produced files and warnings, for snapshotting.
pub fn prepare(path: &str, mode: Mode) -> String {
    let project_root = Utf8PathBuf::from(path).canonicalize_utf8().unwrap();

    // Mirror the on-disk project into an in-memory filesystem so the compile
    // leaves no traces on disk. Remember what was there before compiling.
    let fs = test_helpers_rs::to_in_memory_filesystem(&project_root);
    let pre_existing_files = fs.files();

    let config_toml = std::fs::read_to_string(project_root.join("gleam.toml")).unwrap();
    let config: PackageConfig = toml::from_str(&config_toml).unwrap();

    let warning_emitter = VectorWarningEmitterIO::default();
    let telemetry: &'static dyn Telemetry = &NullTelemetry;

    let options = Options {
        mode,
        target: None,
        compile: Compile::All,
        codegen: Codegen::All,
        warnings_as_errors: false,
        root_target_support: TargetSupport::Enforced,
        no_print_progress: true,
    };

    ProjectCompiler::new(
        config,
        options,
        vec![],
        telemetry,
        Rc::new(warning_emitter.clone()),
        ProjectPaths::new(project_root),
        fs.clone(),
    )
    .compile()
    .unwrap();

    // Drop the inputs that existed before compilation so only compiler
    // output remains in the overview.
    for file in pre_existing_files {
        if fs.is_file(&file) {
            fs.delete_file(&file).unwrap();
        }
    }

    let compiled = fs.into_contents();
    let collected_warnings = warning_emitter.take();
    test_helpers_rs::TestCompileOutput {
        files: compiled,
        warnings: collected_warnings,
    }
    .as_overview_text()
}
| rust | Apache-2.0 | f424547f02e621f1c5f28749786e05eda7feb098 | 2026-01-04T15:40:22.554517Z | false |
gleam-lang/gleam | https://github.com/gleam-lang/gleam/blob/f424547f02e621f1c5f28749786e05eda7feb098/test-project-compiler/src/generated_tests.rs | test-project-compiler/src/generated_tests.rs | //! This file is generated by build.rs
//! Do not edit it directly, instead add new test cases to ./cases
use gleam_core::build::Mode;
#[rustfmt::skip]
#[test]
fn with_dep_dev() {
    // Compile the `with_dep` case in Dev mode and snapshot the overview.
    let output = crate::prepare("./cases/with_dep", Mode::Dev);
    insta::assert_snapshot!(
        "with_dep_dev",
        output,
        "./cases/with_dep",
    );
}
#[rustfmt::skip]
#[test]
fn with_dep_prod() {
    // Compile the `with_dep` case in Prod mode and snapshot the overview.
    let output = crate::prepare("./cases/with_dep", Mode::Prod);
    insta::assert_snapshot!(
        "with_dep_prod",
        output,
        "./cases/with_dep",
    );
}
#[rustfmt::skip]
#[test]
fn with_dep_lsp() {
    // Compile the `with_dep` case in Lsp mode and snapshot the overview.
    let output = crate::prepare("./cases/with_dep", Mode::Lsp);
    insta::assert_snapshot!(
        "with_dep_lsp",
        output,
        "./cases/with_dep",
    );
}
#[rustfmt::skip]
#[test]
fn with_dev_dep_dev() {
    // Compile the `with_dev_dep` case in Dev mode and snapshot the overview.
    let output = crate::prepare("./cases/with_dev_dep", Mode::Dev);
    insta::assert_snapshot!(
        "with_dev_dep_dev",
        output,
        "./cases/with_dev_dep",
    );
}
#[rustfmt::skip]
#[test]
fn with_dev_dep_prod() {
    // Compile the `with_dev_dep` case in Prod mode and snapshot the overview.
    let output = crate::prepare("./cases/with_dev_dep", Mode::Prod);
    insta::assert_snapshot!(
        "with_dev_dep_prod",
        output,
        "./cases/with_dev_dep",
    );
}
#[rustfmt::skip]
#[test]
fn with_dev_dep_lsp() {
    // Compile the `with_dev_dep` case in Lsp mode and snapshot the overview.
    let output = crate::prepare("./cases/with_dev_dep", Mode::Lsp);
    insta::assert_snapshot!(
        "with_dev_dep_lsp",
        output,
        "./cases/with_dev_dep",
    );
}
| rust | Apache-2.0 | f424547f02e621f1c5f28749786e05eda7feb098 | 2026-01-04T15:40:22.554517Z | false |
fn main() {
    // NOTE(review): the capnp schema compilation step is deliberately
    // commented out — presumably the generated output is vendored under
    // `generated/` rather than rebuilt on every compile; confirm before
    // re-enabling.
    // capnpc::CompilerCommand::new()
    //     .file("schema.capnp")
    //     .output_path("generated/")
    //     .run()
    //     .expect("compiling schema.capnp");
}
| rust | Apache-2.0 | f424547f02e621f1c5f28749786e05eda7feb098 | 2026-01-04T15:40:22.554517Z | false |
gleam-lang/gleam | https://github.com/gleam-lang/gleam/blob/f424547f02e621f1c5f28749786e05eda7feb098/compiler-core/src/reference.rs | compiler-core/src/reference.rs | use std::collections::{HashMap, HashSet};
use crate::ast::{Publicity, SrcSpan};
use bimap::{BiMap, Overwritten};
use ecow::EcoString;
use petgraph::{
Directed, Direction,
stable_graph::{NodeIndex, StableGraph},
};
/// The kind of syntax a reference to a value, type or module was made with.
/// `Unqualified` and `Alias` references additionally contribute edges to the
/// usage call-graph (see `register_value_reference`).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ReferenceKind {
    Qualified,
    Unqualified,
    Import,
    Definition,
    Alias,
}
/// A single occurrence of a name in a module's source code.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct Reference {
    /// Where in the source the reference occurs.
    pub location: SrcSpan,
    /// What kind of syntax the reference was made with.
    pub kind: ReferenceKind,
}

/// For each `(module, name)` pair, every reference made to it in this module.
pub type ReferenceMap = HashMap<(EcoString, EcoString), Vec<Reference>>;
/// What we know about a tracked entity: where it was defined and what kind
/// of thing it is, used when emitting unused-entity warnings.
#[derive(Debug, Clone)]
pub struct EntityInformation {
    /// The location of the entity's definition (or import).
    pub origin: SrcSpan,
    pub kind: EntityKind,
}
/// Information about an "Entity". This determines how we warn about an entity
/// being unused.
#[derive(Debug, Clone, Eq, PartialEq)]
pub enum EntityKind {
    Function,
    Constant,
    Constructor,
    Type,
    /// A module brought into scope by an `import` statement.
    ImportedModule { module_name: EcoString },
    /// An alias given to an imported module.
    ModuleAlias { module: EcoString },
    /// A constructor imported unqualified from `module`.
    ImportedConstructor { module: EcoString },
    /// A type imported unqualified from `module`.
    ImportedType { module: EcoString },
    /// A value imported unqualified from `module`.
    ImportedValue { module: EcoString },
}
/// Like `ast::Layer`, this type differentiates between different scopes. For example,
/// there can be a `wibble` value, a `wibble` module and a `wibble` type in the same
/// scope all at once!
///
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
enum EntityLayer {
    /// An entity which exists in the type layer: a custom type, type variable
    /// or type alias.
    Type,
    /// An entity which exists in the value layer: a constant, function or
    /// custom type variant constructor.
    Value,
    /// An entity which has been shadowed. This allows us to keep track of unused
    /// imports even if they have been shadowed by another value in the current
    /// module.
    /// This extra variant is needed because we used `Entity` as a key in a hashmap,
    /// and so a duplicate key would not be able to exist.
    /// We also would not want to get this shadowed entity when performing a lookup
    /// of a named entity; we only want it to register it as an entity in the
    /// `unused` function.
    /// See `create_node_and_maybe_shadow`, which moves overwritten entities
    /// into this layer.
    Shadowed,
    /// The name of an imported module. Modules are separate to values!
    Module,
}
/// A named item tracked for usage analysis, disambiguated by the layer
/// (type / value / module) it lives in, since the same name may exist in
/// several layers at once.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct Entity {
    pub name: EcoString,
    layer: EntityLayer,
}
#[derive(Debug, Default)]
pub struct ReferenceTracker {
    /// A call-graph which tracks which values are referenced by which other value,
    /// used for dead code detection.
    graph: StableGraph<(), (), Directed>,
    /// Bidirectional mapping between tracked entities and their graph nodes.
    entities: BiMap<Entity, NodeIndex>,
    /// The node new edges originate from: the definition currently being analysed.
    current_node: NodeIndex,
    /// Entities that are public (or internal); these act as the roots when
    /// computing which entities are used.
    public_entities: HashSet<Entity>,
    /// Definition location and kind recorded for each registered entity.
    entity_information: HashMap<Entity, EntityInformation>,
    /// The locations of the references to each value in this module, used for
    /// renaming and go-to reference.
    pub value_references: ReferenceMap,
    /// The locations of the references to each type in this module, used for
    /// renaming and go-to reference.
    pub type_references: ReferenceMap,
    /// This map is used to access the nodes of modules that were not
    /// aliased, given their name.
    /// We need this to keep track of references made to imports by unqualified
    /// values/types: when an unqualified item is used we want to add an edge
    /// pointing to the import it comes from, so that if the item is used the
    /// import won't be marked as unused:
    ///
    /// ```gleam
    /// import wibble/wobble.{used}
    ///
    /// pub fn main() {
    ///   used
    /// }
    /// ```
    ///
    /// And each imported entity carries around the _name of the module_ and not
    /// just the alias (here it would be `wibble/wobble` and not just `wobble`).
    ///
    module_name_to_node: HashMap<EcoString, NodeIndex>,
}
impl ReferenceTracker {
    pub fn new() -> Self {
        Self::default()
    }

    /// Look up the graph node for `name` in `layer`, creating a fresh node if
    /// one does not exist yet.
    fn get_or_create_node(&mut self, name: EcoString, layer: EntityLayer) -> NodeIndex {
        let entity = Entity { name, layer };
        match self.entities.get_by_left(&entity) {
            Some(index) => *index,
            None => {
                let index = self.graph.add_node(());
                _ = self.entities.insert(entity, index);
                index
            }
        }
    }

    /// Always create a fresh graph node for `name` in `layer`, shadowing any
    /// previously registered entity with the same name and layer.
    fn create_node(&mut self, name: EcoString, layer: EntityLayer) -> NodeIndex {
        let entity = Entity { name, layer };
        let index = self.graph.add_node(());
        self.create_node_and_maybe_shadow(entity, index);
        index
    }

    /// Insert `entity -> index` into the entity map. If this displaces an
    /// existing entity, re-register the displaced one under the `Shadowed`
    /// layer so its usage can still be reported.
    fn create_node_and_maybe_shadow(&mut self, entity: Entity, index: NodeIndex) {
        match self.entities.insert(entity, index) {
            Overwritten::Neither => {}
            Overwritten::Left(mut entity, index)
            | Overwritten::Right(mut entity, index)
            | Overwritten::Pair(mut entity, index)
            | Overwritten::Both((mut entity, index), _) => {
                // If an entity with the same name as this already exists,
                // we still need to keep track of its usage! Though it cannot
                // be referenced anymore, it still might have been used before this
                // point, or need to be marked as unused.
                // To do this, we keep track of a "Shadowed" entity in `entity_information`.
                if let Some(information) = self.entity_information.get(&entity) {
                    entity.layer = EntityLayer::Shadowed;
                    _ = self
                        .entity_information
                        .insert(entity.clone(), information.clone());
                    _ = self.entities.insert(entity, index);
                }
            }
        }
    }

    /// This function exists because of a specific edge-case where constants
    /// can shadow imported values. For example:
    /// ```gleam
    /// import math.{pi}
    ///
    /// pub const pi = pi
    /// ```
    /// Here, the new `pi` constant shadows the imported `pi` value, but it still
    /// references it, so it should not be marked as unused.
    /// In order for this to work, we must first set the `current_function` field
    /// so that the `pi` value is referenced by the public `pi` constant.
    /// However, we can't insert the `pi` constant into the name scope yet, since
    /// then it would count as referencing itself. We first need to set `current_function`,
    /// then once we have analysed the right-hand-side of the constant, we can
    /// register it in the scope using `register_constant`.
    ///
    pub fn begin_constant(&mut self) {
        self.current_node = self.graph.add_node(());
    }

    /// Register the constant whose right-hand-side was analysed after a
    /// `begin_constant` call, associating its name with the node created there.
    pub fn register_constant(&mut self, name: EcoString, location: SrcSpan, publicity: Publicity) {
        let entity = Entity {
            name,
            layer: EntityLayer::Value,
        };
        self.create_node_and_maybe_shadow(entity.clone(), self.current_node);
        match publicity {
            Publicity::Public | Publicity::Internal { .. } => {
                let _ = self.public_entities.insert(entity.clone());
            }
            Publicity::Private => {}
        }
        _ = self.entity_information.insert(
            entity,
            EntityInformation {
                kind: EntityKind::Constant,
                origin: location,
            },
        );
    }

    /// Register a value-layer entity (function, constructor, imported value…)
    /// and make it the current node for subsequent reference edges.
    pub fn register_value(
        &mut self,
        name: EcoString,
        kind: EntityKind,
        location: SrcSpan,
        publicity: Publicity,
    ) {
        self.current_node = self.create_node(name.clone(), EntityLayer::Value);
        self.register_module_reference_from_imported_entity(&kind);
        let entity = Entity {
            name,
            layer: EntityLayer::Value,
        };
        match publicity {
            Publicity::Public | Publicity::Internal { .. } => {
                let _ = self.public_entities.insert(entity.clone());
            }
            Publicity::Private => {}
        }
        _ = self.entity_information.insert(
            entity,
            EntityInformation {
                kind,
                origin: location,
            },
        );
    }

    /// Make the named value the origin for subsequently registered references.
    pub fn set_current_node(&mut self, name: EcoString) {
        self.current_node = self.get_or_create_node(name, EntityLayer::Value);
    }

    /// Register a type-layer entity and make it the current node for
    /// subsequent reference edges.
    pub fn register_type(
        &mut self,
        name: EcoString,
        kind: EntityKind,
        location: SrcSpan,
        publicity: Publicity,
    ) {
        self.current_node = self.create_node(name.clone(), EntityLayer::Type);
        self.register_module_reference_from_imported_entity(&kind);
        let entity = Entity {
            name,
            layer: EntityLayer::Type,
        };
        match publicity {
            Publicity::Public | Publicity::Internal { .. } => {
                let _ = self.public_entities.insert(entity.clone());
            }
            Publicity::Private => {}
        }
        _ = self.entity_information.insert(
            entity,
            EntityInformation {
                kind,
                origin: location,
            },
        );
    }

    pub fn register_aliased_module(
        &mut self,
        used_name: EcoString,
        module_name: EcoString,
        alias_location: SrcSpan,
        import_location: SrcSpan,
    ) {
        // We first record a node for the module being aliased. We use its entire
        // name to identify it in this case and keep track of the node it's
        // associated with.
        self.register_module(module_name.clone(), module_name.clone(), import_location);
        // Then we create a node for the alias, as the alias itself might be
        // unused!
        self.current_node = self.create_node(used_name.clone(), EntityLayer::Module);
        // Also we want to register the fact that if this alias is used then the
        // import is used: so we add a reference from the alias to the import
        // we've just added.
        self.register_module_reference(module_name.clone());
        // Finally we can add information for this alias:
        let entity = Entity {
            name: used_name,
            layer: EntityLayer::Module,
        };
        _ = self.entity_information.insert(
            entity,
            EntityInformation {
                kind: EntityKind::ModuleAlias {
                    module: module_name,
                },
                origin: alias_location,
            },
        );
    }

    /// Register an imported module under `used_name`, remembering the node
    /// for its full `module_name` so unqualified uses can reference it.
    pub fn register_module(
        &mut self,
        used_name: EcoString,
        module_name: EcoString,
        location: SrcSpan,
    ) {
        self.current_node = self.create_node(used_name.clone(), EntityLayer::Module);
        let _ = self
            .module_name_to_node
            .insert(module_name.clone(), self.current_node);
        let entity = Entity {
            name: used_name,
            layer: EntityLayer::Module,
        };
        _ = self.entity_information.insert(
            entity,
            EntityInformation {
                kind: EntityKind::ImportedModule { module_name },
                origin: location,
            },
        );
    }

    /// If the entity was imported unqualified from a module, add an edge to
    /// that module's import so using the entity counts as using the import.
    fn register_module_reference_from_imported_entity(&mut self, entity_kind: &EntityKind) {
        match entity_kind {
            EntityKind::Function
            | EntityKind::Constant
            | EntityKind::Constructor
            | EntityKind::Type
            | EntityKind::ImportedModule { .. }
            | EntityKind::ModuleAlias { .. } => (),
            EntityKind::ImportedConstructor { module }
            | EntityKind::ImportedType { module }
            | EntityKind::ImportedValue { module } => {
                self.register_module_reference(module.clone())
            }
        }
    }

    /// Record a reference to a value. `Alias`/`Unqualified` references also
    /// contribute a call-graph edge from the current node.
    pub fn register_value_reference(
        &mut self,
        module: EcoString,
        name: EcoString,
        referenced_name: &EcoString,
        location: SrcSpan,
        kind: ReferenceKind,
    ) {
        match kind {
            ReferenceKind::Qualified | ReferenceKind::Import | ReferenceKind::Definition => {}
            ReferenceKind::Alias | ReferenceKind::Unqualified => {
                let target = self.get_or_create_node(referenced_name.clone(), EntityLayer::Value);
                _ = self.graph.add_edge(self.current_node, target, ());
            }
        }
        self.value_references
            .entry((module, name))
            .or_default()
            .push(Reference { location, kind });
    }

    /// Record a reference to a type. `Alias`/`Unqualified` references also
    /// contribute a call-graph edge from the current node.
    pub fn register_type_reference(
        &mut self,
        module: EcoString,
        name: EcoString,
        referenced_name: &EcoString,
        location: SrcSpan,
        kind: ReferenceKind,
    ) {
        match kind {
            ReferenceKind::Qualified | ReferenceKind::Import | ReferenceKind::Definition => {}
            ReferenceKind::Alias | ReferenceKind::Unqualified => {
                self.register_type_reference_in_call_graph(referenced_name.clone())
            }
        }
        self.type_references
            .entry((module, name))
            .or_default()
            .push(Reference { location, kind });
    }

    /// Like `register_type_reference`, but doesn't modify `self.type_references`.
    /// This is used when we define a constructor for a custom type. The constructor
    /// doesn't actually "reference" its type, but if the constructor is used, the
    /// type should also be considered used. The best way to represent this relationship
    /// is to make a connection between them in the call graph.
    ///
    pub fn register_type_reference_in_call_graph(&mut self, name: EcoString) {
        let target = self.get_or_create_node(name, EntityLayer::Type);
        _ = self.graph.add_edge(self.current_node, target, ());
    }

    /// Add a call-graph edge from the current node to the named module,
    /// preferring the node registered for the module's full name.
    pub fn register_module_reference(&mut self, name: EcoString) {
        let target = match self.module_name_to_node.get(&name) {
            Some(target) => *target,
            None => self.get_or_create_node(name, EntityLayer::Module),
        };
        _ = self.graph.add_edge(self.current_node, target, ());
    }

    /// Compute the set of unused entities: start from everything, then remove
    /// whatever is reachable from a public entity (and imports referenced by
    /// private entities).
    pub fn unused(&self) -> HashMap<Entity, EntityInformation> {
        let mut unused_values = HashMap::with_capacity(self.entities.len());
        for (entity, information) in self.entity_information.iter() {
            _ = unused_values.insert(entity.clone(), information.clone());
        }
        // `BiMap::iter` yields each entity together with its node index, so a
        // single pass suffices: public entities (which are always registered
        // in `entities` via `create_node`) are handled here too, making a
        // separate pass over `public_entities` redundant.
        for (entity, index) in self.entities.iter() {
            if self.public_entities.contains(entity) {
                self.mark_entity_as_used(&mut unused_values, entity, *index);
            } else {
                // If the entity is not public, we still want to mark referenced
                // imports as used.
                self.mark_referenced_imports_as_used(&mut unused_values, entity, *index);
            }
        }
        unused_values
    }

    /// Remove `entity` from the unused set, then recursively do the same for
    /// everything it references in the call graph.
    fn mark_entity_as_used(
        &self,
        unused: &mut HashMap<Entity, EntityInformation>,
        entity: &Entity,
        index: NodeIndex,
    ) {
        // Only recurse the first time we see this entity; `remove` returning
        // `None` means it was already marked used.
        if unused.remove(entity).is_some() {
            for node in self.graph.neighbors_directed(index, Direction::Outgoing) {
                if let Some(entity) = self.entities.get_by_right(&node) {
                    self.mark_entity_as_used(unused, entity, node);
                }
            }
        }
    }

    /// Mark any modules referenced by `entity` as used, without marking
    /// `entity` itself.
    fn mark_referenced_imports_as_used(
        &self,
        unused: &mut HashMap<Entity, EntityInformation>,
        entity: &Entity,
        index: NodeIndex,
    ) {
        // If the entity is a module there's no way it can reference other
        // modules so we just ignore it.
        // This also means that module aliases do not count as using a module!
        if entity.layer == EntityLayer::Module {
            return;
        }
        for node in self.graph.neighbors_directed(index, Direction::Outgoing) {
            // We only want to mark referenced modules as used, so if the node
            // is not a module we just skip it.
            let Some(
                module @ Entity {
                    layer: EntityLayer::Module,
                    ..
                },
            ) = self.entities.get_by_right(&node)
            else {
                continue;
            };
            // If the value appears in the module import list, it doesn't count
            // as using it!
            let is_imported_type = self
                .type_references
                .contains_key(&(module.name.clone(), entity.name.clone()));
            let is_imported_value = self
                .value_references
                .contains_key(&(module.name.clone(), entity.name.clone()));
            let appears_in_module_import_list = is_imported_type || is_imported_value;
            if !(appears_in_module_import_list) {
                self.mark_entity_as_used(unused, module, node);
            }
        }
    }
}
| rust | Apache-2.0 | f424547f02e621f1c5f28749786e05eda7feb098 | 2026-01-04T15:40:22.554517Z | false |
gleam-lang/gleam | https://github.com/gleam-lang/gleam/blob/f424547f02e621f1c5f28749786e05eda7feb098/compiler-core/src/config.rs | compiler-core/src/config.rs | mod stale_package_remover;
use crate::error::{FileIoAction, FileKind};
use crate::io::FileSystemReader;
use crate::io::ordered_map;
use crate::manifest::Manifest;
use crate::requirement::Requirement;
use crate::version::COMPILER_VERSION;
use crate::{Error, Result};
use camino::{Utf8Path, Utf8PathBuf};
use ecow::EcoString;
use globset::{Glob, GlobSetBuilder};
use hexpm::version::{self, LowestVersion, Version};
use http::Uri;
use serde::ser::SerializeSeq;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::fmt::{self};
use std::marker::PhantomData;
#[cfg(test)]
use crate::manifest::ManifestPackage;
use crate::build::{Mode, Runtime, Target};
/// The version used when `gleam.toml` does not specify one.
fn default_version() -> Version {
    Version::parse("0.1.0").expect("default version")
}
/// The compilation target used when `gleam.toml` does not specify one.
fn erlang_target() -> Target {
    Target::Erlang
}
/// The JavaScript runtime used when `gleam.toml` does not specify one.
fn default_javascript_runtime() -> Runtime {
    Runtime::NodeJs
}
/// Map from package name to its version requirement.
pub type Dependencies = HashMap<EcoString, Requirement>;

/// A licence identifier, validated on deserialisation to be a real SPDX ID.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct SpdxLicense {
    pub licence: String,
}
impl ToString for SpdxLicense {
fn to_string(&self) -> String {
String::from(&self.licence)
}
}
impl<'de> Deserialize<'de> for SpdxLicense {
    /// Deserialise from a string, delegating to `SpdxLicenseVisitor` which
    /// rejects anything that is not a valid SPDX licence ID.
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        deserializer.deserialize_str(SpdxLicenseVisitor)
    }
}
/// Serde visitor that accepts only valid SPDX licence identifiers.
struct SpdxLicenseVisitor;

impl<'de> serde::de::Visitor<'de> for SpdxLicenseVisitor {
    type Value = SpdxLicense;

    fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
        formatter.write_str("a SPDX License ID")
    }

    fn visit_str<E>(self, value: &str) -> Result<Self::Value, E>
    where
        E: serde::de::Error,
    {
        // `spdx::license_id` returns `None` for unknown identifiers.
        match spdx::license_id(value) {
            None => Err(serde::de::Error::custom(format!(
                "{value} is not a valid SPDX License ID"
            ))),
            Some(_) => Ok(SpdxLicense {
                licence: value.to_string(),
            }),
        }
    }
}
impl Serialize for SpdxLicense {
    /// Serialise as the plain licence-ID string.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        serializer.serialize_str(&self.licence)
    }
}
impl AsRef<str> for SpdxLicense {
    /// Borrow the licence ID as a string slice.
    fn as_ref(&self) -> &str {
        self.licence.as_str()
    }
}
/// A range of Gleam compiler versions, as given by the `gleam` field of
/// `gleam.toml`, wrapping a hex version range.
#[derive(Debug, PartialEq, Clone)]
pub struct GleamVersion(version::Range);
// Conversions between the hex (`version::Range`) and pubgrub range
// representations of a Gleam version constraint.

impl From<version::Range> for GleamVersion {
    fn from(range: version::Range) -> Self {
        Self(range)
    }
}

impl From<GleamVersion> for version::Range {
    fn from(gleam_version: GleamVersion) -> Self {
        gleam_version.0
    }
}

impl From<GleamVersion> for pubgrub::Range<Version> {
    fn from(gleam_version: GleamVersion) -> Self {
        gleam_version.0.into()
    }
}
impl GleamVersion {
    /// Convert a pubgrub range back into a `GleamVersion`.
    pub fn from_pubgrub(range: pubgrub::Range<Version>) -> Self {
        let range: version::Range = range.into();
        range.into()
    }

    /// View the constraint as a pubgrub range.
    pub fn as_pubgrub(&self) -> &pubgrub::Range<Version> {
        self.0.to_pubgrub()
    }

    /// Parse a range specification string, returning
    /// `Error::InvalidVersionFormat` if it is not a valid hex version range.
    pub fn new(spec: String) -> Result<GleamVersion> {
        // The copy of `spec` is needed so the original can be included in the
        // error message if parsing fails.
        let hex =
            version::Range::new(spec.to_string()).map_err(|e| Error::InvalidVersionFormat {
                input: spec,
                error: e.to_string(),
            })?;
        Ok(hex.into())
    }

    /// The smallest version permitted by this range, if one exists.
    pub fn lowest_version(&self) -> Option<Version> {
        self.as_pubgrub().lowest_version()
    }

    /// Borrow the underlying hex version range.
    pub fn hex(&self) -> &version::Range {
        &self.0
    }
}
/// The contents of a project's `gleam.toml` configuration file.
#[derive(Deserialize, Serialize, Debug, PartialEq, Clone)]
pub struct PackageConfig {
    #[serde(deserialize_with = "package_name::deserialize")]
    pub name: EcoString,
    // Defaults to 0.1.0 when absent.
    #[serde(default = "default_version")]
    pub version: Version,
    // The `gleam = "..."` compiler version constraint, if any.
    #[serde(
        default,
        rename = "gleam",
        deserialize_with = "deserialise_gleam_version",
        serialize_with = "serialise_gleam_version"
    )]
    pub gleam_version: Option<GleamVersion>,
    // Both British and American spellings are accepted in the toml.
    #[serde(default, alias = "licenses")]
    pub licences: Vec<SpdxLicense>,
    #[serde(default)]
    pub description: EcoString,
    #[serde(default, alias = "docs")]
    pub documentation: Docs,
    #[serde(default, serialize_with = "ordered_map")]
    pub dependencies: Dependencies,
    #[serde(default, rename = "dev-dependencies", serialize_with = "ordered_map")]
    pub dev_dependencies: Dependencies,
    #[serde(default)]
    pub repository: Option<Repository>,
    #[serde(default)]
    pub links: Vec<Link>,
    #[serde(default)]
    pub erlang: ErlangConfig,
    #[serde(default)]
    pub javascript: JavaScriptConfig,
    // Compilation target; Erlang unless specified.
    #[serde(default = "erlang_target")]
    pub target: Target,
    // Glob patterns of modules hidden from documentation; see
    // `is_internal_module` for the default applied when this is `None`.
    #[serde(default)]
    pub internal_modules: Option<Vec<Glob>>,
}
/// Serialise an optional Gleam compiler version constraint as its range
/// string, or as nothing when unset.
///
/// (Also fixes the `gleam_gersion` parameter-name typo; this is a free
/// function referenced by name in serde attributes, so renaming the
/// parameter affects no callers.)
pub fn serialise_gleam_version<S>(
    gleam_version: &Option<GleamVersion>,
    serializer: S,
) -> Result<S::Ok, S::Error>
where
    S: serde::Serializer,
{
    match gleam_version {
        Some(version) => serializer.serialize_str(&version.hex().to_string()),
        None => serializer.serialize_none(),
    }
}
/// Deserialise an optional Gleam compiler version constraint from its range
/// string, rejecting strings that are not valid hex version ranges.
pub fn deserialise_gleam_version<'de, D>(deserialiser: D) -> Result<Option<GleamVersion>, D::Error>
where
    D: serde::Deserializer<'de>,
{
    match Deserialize::deserialize(deserialiser)? {
        Some(range_string) => {
            let hex = version::Range::new(range_string).map_err(serde::de::Error::custom)?;
            Ok(Some(hex.into()))
        }
        None => Ok(None),
    }
}
impl PackageConfig {
    /// The dependencies to resolve for the given build mode: dev and LSP
    /// builds include dev-dependencies, production builds do not.
    pub fn dependencies_for(&self, mode: Mode) -> Result<Dependencies> {
        match mode {
            Mode::Dev | Mode::Lsp => self.all_direct_dependencies(),
            Mode::Prod => Ok(self.dependencies.clone()),
        }
    }

    // Return all the dependencies listed in the configuration, that is, all the
    // direct dependencies, both in the `dependencies` and `dev-dependencies`.
    pub fn all_direct_dependencies(&self) -> Result<Dependencies> {
        let mut deps =
            HashMap::with_capacity(self.dependencies.len() + self.dev_dependencies.len());
        for (name, requirement) in self.dependencies.iter().chain(&self.dev_dependencies) {
            // A package listed in both sections is ambiguous, so it is an error.
            let already_inserted = deps.insert(name.clone(), requirement.clone()).is_some();
            if already_inserted {
                return Err(Error::DuplicateDependency(name.clone()));
            }
        }
        Ok(deps)
    }

    /// Read and parse the configuration file at `path` using the given
    /// filesystem implementation.
    pub fn read<FS: FileSystemReader, P: AsRef<Utf8Path>>(
        path: P,
        fs: &FS,
    ) -> Result<PackageConfig, Error> {
        let toml = fs.read(path.as_ref())?;
        deserialise_config(path, toml)
    }

    /// Get the locked packages for the current config and a given (optional)
    /// manifest of previously locked packages.
    ///
    /// If a package is removed or the specified required version range for it
    /// changes then it is not considered locked. This also goes for any child
    /// packages of the package which have no other parents.
    ///
    /// This function should be used each time resolution is performed so that
    /// outdated deps are removed from the manifest and not locked to the
    /// previously selected versions.
    ///
    pub fn locked(&self, manifest: Option<&Manifest>) -> Result<HashMap<EcoString, Version>> {
        match manifest {
            // With no manifest there is nothing to lock.
            None => Ok(HashMap::new()),
            Some(manifest) => {
                let requirements = self.all_direct_dependencies()?;
                let fresh_and_locked = stale_package_remover::StalePackageRemover::fresh_and_locked(
                    &requirements,
                    manifest,
                );
                Ok(fresh_and_locked)
            }
        }
    }

    /// Determines whether the given module should be hidden in the docs or not
    ///
    /// The developer can specify a list of glob patterns in the gleam.toml file
    /// to determine modules that should not be shown in the package's documentation
    pub fn is_internal_module(&self, module: &str) -> bool {
        let package = &self.name;
        match &self.internal_modules {
            Some(globs) => {
                let mut builder = GlobSetBuilder::new();
                for glob in globs {
                    _ = builder.add(glob.clone());
                }
                builder.build()
            }
            // If no patterns were specified in the config then we use a default value
            None => GlobSetBuilder::new()
                .add(Glob::new(&format!("{package}/internal")).expect("internal module glob"))
                .add(Glob::new(&format!("{package}/internal/*")).expect("internal module glob"))
                .build(),
        }
        .expect("internal module globs")
        .is_match(module)
    }

    // Checks to see if the gleam version specified in the config is compatible
    // with the current compiler version
    pub fn check_gleam_compatibility(&self) -> Result<(), Error> {
        if let Some(version) = &self.gleam_version {
            let range = version.as_pubgrub();
            let compiler_version =
                Version::parse(COMPILER_VERSION).expect("Parse compiler semantic version");
            // We ignore the pre-release and build metadata when checking compatibility
            let mut version_without_pre = compiler_version.clone();
            version_without_pre.pre = vec![];
            version_without_pre.build = None;
            if !range.contains(&version_without_pre) {
                return Err(Error::IncompatibleCompilerVersion {
                    package: self.name.to_string(),
                    required_version: range.to_string(),
                    gleam_version: COMPILER_VERSION.to_string(),
                });
            }
        }
        Ok(())
    }

    /// The git tag name for the given release version, honouring any
    /// `tag_prefix` configured on the repository (custom repositories have
    /// no prefix support).
    pub fn tag_for_version(&self, version: &Version) -> String {
        let prefix = match self.repository.as_ref() {
            Some(
                Repository::GitHub { tag_prefix, .. }
                | Repository::GitLab { tag_prefix, .. }
                | Repository::BitBucket { tag_prefix, .. }
                | Repository::Codeberg { tag_prefix, .. }
                | Repository::SourceHut { tag_prefix, .. }
                | Repository::Gitea { tag_prefix, .. }
                | Repository::Forgejo { tag_prefix, .. }
                | Repository::Tangled { tag_prefix, .. },
            ) => tag_prefix.as_ref(),
            Some(Repository::Custom { .. }) | None => None,
        };
        match prefix {
            Some(prefix) => format!("{prefix}v{version}"),
            None => format!("v{version}"),
        }
    }
}
fn deserialise_config<P: AsRef<Utf8Path>>(
path: P,
toml: String,
) -> std::result::Result<PackageConfig, Error> {
let config: PackageConfig = toml::from_str(&toml).map_err(|e| Error::FileIo {
action: FileIoAction::Parse,
kind: FileKind::File,
path: path.as_ref().to_path_buf(),
err: Some(e.to_string()),
})?;
Ok(config)
}
// https://github.com/gleam-lang/gleam/issues/4867
// Unknown keys on a dependency table must be rejected, not silently ignored.
#[test]
fn deny_extra_deps_properties() {
    let toml = r#"
name = "wibble"
version = "1.0.0"
[dependencies]
aide_generator = { git = "git@github.com:crowdhailer/aide.git", ref = "f559c5bc", extra = "idk what this is" }
"#;
    let error = deserialise_config("gleam.toml", toml.into())
        .expect_err("should fail to deserialise because of additional path");
    insta::assert_snapshot!(insta::internals::AutoName, error.pretty_string());
}
#[test]
fn locked_no_manifest() {
    // Without a manifest nothing can be locked, whatever the requirements.
    let mut config = PackageConfig::default();
    config.dependencies = [
        ("prod1".into(), Requirement::hex("~> 1.0").unwrap()),
        ("prod2".into(), Requirement::hex("~> 2.0").unwrap()),
    ]
    .into();
    config.dev_dependencies = [
        ("dev1".into(), Requirement::hex("~> 1.0").unwrap()),
        ("dev2".into(), Requirement::hex("~> 2.0").unwrap()),
    ]
    .into();
    assert_eq!(config.locked(None).unwrap(), [].into());
}
#[test]
fn locked_no_changes() {
    // When requirements are unchanged every manifest package stays locked.
    let mut config = PackageConfig::default();
    config.dependencies = [
        ("prod1".into(), Requirement::hex("~> 1.0").unwrap()),
        ("prod2".into(), Requirement::hex("~> 2.0").unwrap()),
    ]
    .into();
    config.dev_dependencies = [
        ("dev1".into(), Requirement::hex("~> 1.0").unwrap()),
        ("dev2".into(), Requirement::hex("~> 2.0").unwrap()),
    ]
    .into();
    let manifest = Manifest {
        requirements: config.all_direct_dependencies().unwrap(),
        packages: vec![
            manifest_package("prod1", "1.1.0", &[]),
            manifest_package("prod2", "1.2.0", &[]),
            manifest_package("dev1", "1.1.0", &[]),
            manifest_package("dev2", "1.2.0", &[]),
        ],
    };
    assert_eq!(
        config.locked(Some(&manifest)).unwrap(),
        [
            locked_version("prod1", "1.1.0"),
            locked_version("prod2", "1.2.0"),
            locked_version("dev1", "1.1.0"),
            locked_version("dev2", "1.2.0"),
        ]
        .into()
    );
}
#[test]
fn locked_some_removed() {
    // Packages dropped from the config must be unlocked.
    let mut config = PackageConfig::default();
    config.dependencies = [("prod1".into(), Requirement::hex("~> 1.0").unwrap())].into();
    config.dev_dependencies = [("dev2".into(), Requirement::hex("~> 2.0").unwrap())].into();
    let manifest = Manifest {
        requirements: config.all_direct_dependencies().unwrap(),
        packages: vec![
            manifest_package("prod1", "1.1.0", &[]),
            manifest_package("prod2", "1.2.0", &[]), // Not in config
            manifest_package("dev1", "1.1.0", &[]),  // Not in config
            manifest_package("dev2", "1.2.0", &[]),
        ],
    };
    assert_eq!(
        config.locked(Some(&manifest)).unwrap(),
        [
            // prod2 removed
            // dev1 removed
            locked_version("prod1", "1.1.0"),
            locked_version("dev2", "1.2.0"),
        ]
        .into()
    );
}
#[test]
fn locked_some_changed() {
    // Packages whose required version range changed must be unlocked.
    let mut config = PackageConfig::default();
    config.dependencies = [
        ("prod1".into(), Requirement::hex("~> 3.0").unwrap()), // Does not match manifest
        ("prod2".into(), Requirement::hex("~> 2.0").unwrap()),
    ]
    .into();
    config.dev_dependencies = [
        ("dev1".into(), Requirement::hex("~> 3.0").unwrap()), // Does not match manifest
        ("dev2".into(), Requirement::hex("~> 2.0").unwrap()),
    ]
    .into();
    let manifest = Manifest {
        requirements: [
            ("prod1".into(), Requirement::hex("~> 1.0").unwrap()),
            ("prod2".into(), Requirement::hex("~> 2.0").unwrap()),
            ("dev1".into(), Requirement::hex("~> 1.0").unwrap()),
            ("dev2".into(), Requirement::hex("~> 2.0").unwrap()),
        ]
        .into(),
        packages: vec![
            manifest_package("prod1", "1.1.0", &[]),
            manifest_package("prod2", "1.2.0", &[]),
            manifest_package("dev1", "1.1.0", &[]),
            manifest_package("dev2", "1.2.0", &[]),
        ],
    };
    assert_eq!(
        config.locked(Some(&manifest)).unwrap(),
        [
            // prod1 removed
            // dev1 removed
            locked_version("prod2", "1.2.0"),
            locked_version("dev2", "1.2.0"),
        ]
        .into()
    );
}
#[test]
fn locked_nested_are_removed_too() {
    // Unlocking a package must also unlock its transitive dependencies,
    // unless they are still reachable from another locked package (e.g.
    // "shared" below, which "2.2" also depends on).
    let mut config = PackageConfig::default();
    config.dependencies = [
        ("1".into(), Requirement::hex("~> 2.0").unwrap()), // Does not match manifest
        ("2".into(), Requirement::hex("~> 1.0").unwrap()),
    ]
    .into();
    config.dev_dependencies = [].into();
    let manifest = Manifest {
        requirements: [
            ("1".into(), Requirement::hex("~> 1.0").unwrap()),
            ("2".into(), Requirement::hex("~> 1.0").unwrap()),
        ]
        .into(),
        packages: vec![
            manifest_package("1", "1.1.0", &["1.1", "1.2"]),
            manifest_package("1.1", "1.1.0", &["1.1.1", "1.1.2"]),
            manifest_package("1.1.1", "1.1.0", &["shared"]),
            manifest_package("1.1.2", "1.1.0", &[]),
            manifest_package("1.2", "1.1.0", &["1.2.1", "1.2.2"]),
            manifest_package("1.2.1", "1.1.0", &[]),
            manifest_package("1.2.2", "1.1.0", &[]),
            manifest_package("2", "2.1.0", &["2.1", "2.2"]),
            manifest_package("2.1", "2.1.0", &["2.1.1", "2.1.2"]),
            manifest_package("2.1.1", "2.1.0", &[]),
            manifest_package("2.1.2", "2.1.0", &[]),
            manifest_package("2.2", "2.1.0", &["2.2.1", "2.2.2", "shared"]),
            manifest_package("2.2.1", "2.1.0", &[]),
            manifest_package("2.2.2", "2.1.0", &[]),
            manifest_package("shared", "2.1.0", &[]),
        ],
    };
    assert_eq!(
        config.locked(Some(&manifest)).unwrap(),
        [
            // 1* removed
            locked_version("2", "2.1.0"),
            locked_version("2.1", "2.1.0"),
            locked_version("2.1.1", "2.1.0"),
            locked_version("2.1.2", "2.1.0"),
            locked_version("2.2", "2.1.0"),
            locked_version("2.2.1", "2.1.0"),
            locked_version("2.2.2", "2.1.0"),
            locked_version("shared", "2.1.0"),
        ]
        .into()
    );
}
// https://github.com/gleam-lang/gleam/issues/1754
#[test]
fn locked_unlock_new() {
    // A requirement that is new or changed in the config ("3" here) must be
    // unlocked even though a package with that name is already present in
    // the manifest as a transitive dependency of other locked packages.
    let mut config = PackageConfig::default();
    config.dependencies = [
        ("1".into(), Requirement::hex("~> 1.0").unwrap()),
        ("2".into(), Requirement::hex("~> 1.0").unwrap()),
        ("3".into(), Requirement::hex("~> 3.0").unwrap()), // Does not match manifest
    ]
    .into();
    config.dev_dependencies = [].into();
    let manifest = Manifest {
        requirements: [
            ("1".into(), Requirement::hex("~> 1.0").unwrap()),
            ("2".into(), Requirement::hex("~> 1.0").unwrap()),
        ]
        .into(),
        packages: vec![
            // Both locked roots depend on "3", but "3" must still be
            // unlocked because the config's requirement on it changed.
            manifest_package("1", "1.1.0", &["3"]),
            manifest_package("2", "1.1.0", &["3"]),
            manifest_package("3", "1.1.0", &[]),
        ],
    };
    assert_eq!(
        config.locked(Some(&manifest)).unwrap(),
        [locked_version("1", "1.1.0"), locked_version("2", "1.1.0"),].into()
    )
}
#[test]
fn default_internal_modules() {
    // When no internal modules are specified then we default to
    // `["$package/internal", "$package/internal/*"]`
    let mut config = PackageConfig::default();
    config.name = "my_package".into();
    config.internal_modules = None;
    for module in [
        "my_package/internal",
        "my_package/internal/wibble",
        "my_package/internal/wibble/wobble",
    ] {
        assert!(config.is_internal_module(module));
    }
    for module in [
        "my_package/internallll",
        "my_package/other",
        "my_package/other/wibble",
        "other/internal",
    ] {
        assert!(!config.is_internal_module(module));
    }
}
#[test]
fn no_internal_modules() {
    // An explicitly empty list of internal modules overrides the default
    // `["$package/internal", "$package/internal/*"]`, so nothing is
    // considered internal.
    let mut config = PackageConfig::default();
    config.name = "my_package".into();
    config.internal_modules = Some(vec![]);
    assert!(!config.is_internal_module("my_package/internal"));
    assert!(!config.is_internal_module("my_package/internal/wibble"));
    assert!(!config.is_internal_module("my_package/internal/wibble/wobble"));
    assert!(!config.is_internal_module("my_package/internallll"));
    assert!(!config.is_internal_module("my_package/other"));
    assert!(!config.is_internal_module("my_package/other/wibble"));
    assert!(!config.is_internal_module("other/internal"));
}
#[test]
fn hidden_a_directory_from_docs() {
    // A `package/internal/*` glob hides the modules inside the directory,
    // but not the `package/internal` module itself.
    let mut config = PackageConfig::default();
    config.internal_modules = Some(vec![Glob::new("package/internal/*").expect("")]);
    assert!(!config.is_internal_module("package/internal"));
    assert!(config.is_internal_module("package/internal/module"));
}
#[test]
fn hidden_two_directories_from_docs() {
    // Each glob hides the modules inside its own directory; the directory
    // modules themselves stay public.
    let mut config = PackageConfig::default();
    config.internal_modules = Some(vec![
        Glob::new("package/internal1/*").expect(""),
        Glob::new("package/internal2/*").expect(""),
    ]);
    assert!(!config.is_internal_module("package/internal1"));
    assert!(config.is_internal_module("package/internal1/module"));
    assert!(!config.is_internal_module("package/internal2"));
    assert!(config.is_internal_module("package/internal2/module"));
}
#[test]
fn hidden_a_directory_and_a_file_from_docs() {
    // Globs can hide a whole directory as well as a single module. Hiding
    // `package/module` does not hide modules nested beneath it.
    let mut config = PackageConfig::default();
    config.internal_modules = Some(vec![
        Glob::new("package/internal1/*").expect(""),
        Glob::new("package/module").expect(""),
    ]);
    assert!(!config.is_internal_module("package/internal1"));
    assert!(config.is_internal_module("package/internal1/module"));
    assert!(config.is_internal_module("package/module"));
    assert!(!config.is_internal_module("package/module/inner"));
}
#[test]
fn hidden_a_file_in_all_directories_from_docs() {
    // A wildcard in the middle of a glob hides a module with the given name
    // inside every matching directory.
    let mut config = PackageConfig::default();
    config.internal_modules = Some(vec![Glob::new("package/*/module1").expect("")]);
    assert!(config.is_internal_module("package/internal1/module1"));
    assert!(config.is_internal_module("package/internal2/module1"));
    assert!(!config.is_internal_module("package/internal2/module2"));
    assert!(!config.is_internal_module("package/module"));
}
/// Test helper: builds a Hex-sourced `ManifestPackage` with the given name,
/// version and requirement names, no build tools and an empty checksum.
#[cfg(test)]
fn manifest_package(
    name: &'static str,
    version: &'static str,
    requirements: &'static [&'static str],
) -> ManifestPackage {
    use crate::manifest::Base16Checksum;
    let requirements = requirements.iter().map(|&dep| dep.into()).collect();
    ManifestPackage {
        name: name.into(),
        version: Version::parse(version).unwrap(),
        build_tools: vec![],
        otp_app: None,
        requirements,
        source: crate::manifest::ManifestPackageSource::Hex {
            outer_checksum: Base16Checksum(vec![]),
        },
    }
}
/// Test helper: builds a `(package name, version)` pair as stored in the
/// locked-versions map.
#[cfg(test)]
fn locked_version(name: &'static str, version: &'static str) -> (EcoString, Version) {
    let version = Version::parse(version).unwrap();
    (name.into(), version)
}
impl Default for PackageConfig {
    fn default() -> Self {
        Self {
            name: Default::default(),
            // Versions have no meaningful zero value, so the crate-wide
            // `default_version()` is used instead of `Default::default()`.
            version: default_version(),
            gleam_version: Default::default(),
            description: Default::default(),
            documentation: Default::default(),
            dependencies: Default::default(),
            erlang: Default::default(),
            javascript: Default::default(),
            repository: Default::default(),
            dev_dependencies: Default::default(),
            licences: Default::default(),
            links: Default::default(),
            internal_modules: Default::default(),
            // Erlang is the default compilation target when none is set.
            target: Target::Erlang,
        }
    }
}
/// Erlang-target specific package configuration.
#[derive(Deserialize, Serialize, Debug, PartialEq, Eq, Default, Clone)]
pub struct ErlangConfig {
    /// A module that can be set in the `.app` file as the entrypoint for a stateful application
    /// that defines a singleton supervision tree.
    /// Erlang syntax.
    #[serde(default)]
    pub application_start_module: Option<EcoString>,
    /// The argument for the start module start function. If not set then `[]` is used as the
    /// default argument.
    /// Erlang syntax.
    #[serde(default)]
    pub application_start_argument: Option<EcoString>,
    /// Extra OTP applications to list in the generated `.app` file —
    /// presumably beyond those derived from dependencies; confirm against
    /// the `.app` file generation code.
    #[serde(default)]
    pub extra_applications: Vec<EcoString>,
}
/// JavaScript-target specific package configuration.
#[derive(Deserialize, Serialize, Debug, PartialEq, Default, Clone)]
pub struct JavaScriptConfig {
    // Whether TypeScript declarations should be produced alongside the
    // generated JavaScript — TODO confirm against the codegen.
    #[serde(default)]
    pub typescript_declarations: bool,
    // The JavaScript runtime to target; falls back to
    // `default_javascript_runtime()` when unset.
    #[serde(default = "default_javascript_runtime")]
    pub runtime: Runtime,
    // Deno-specific runtime options (permission flags etc.).
    #[serde(default, rename = "deno")]
    pub deno: DenoConfig,
}
/// The value of a Deno permission flag: either blanket permission
/// (serialised as the boolean `true`) or an explicit allow-list of values.
#[derive(Deserialize, Debug, PartialEq, Eq, Clone)]
pub enum DenoFlag {
    /// Permission granted unconditionally.
    AllowAll,
    /// Permission granted only for the listed items.
    Allow(Vec<String>),
}
impl Default for DenoFlag {
fn default() -> Self {
Self::Allow(Vec::new())
}
}
impl Serialize for DenoFlag {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        // `AllowAll` serialises as the boolean `true`, while an explicit
        // allow-list serialises as a sequence of strings.
        match self {
            DenoFlag::AllowAll => serializer.serialize_bool(true),
            DenoFlag::Allow(items) => serializer.collect_seq(items),
        }
    }
}
/// Deserialises a `DenoFlag` from either a boolean (`true` meaning
/// "allow all") or a list of strings (an explicit allow-list).
fn bool_or_seq_string_to_deno_flag<'de, D>(deserializer: D) -> Result<DenoFlag, D::Error>
where
    D: serde::Deserializer<'de>,
{
    struct StringOrVec(PhantomData<Vec<String>>);
    impl<'de> serde::de::Visitor<'de> for StringOrVec {
        type Value = DenoFlag;
        fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
            formatter.write_str("bool or list of strings")
        }
        fn visit_bool<E>(self, value: bool) -> Result<Self::Value, E>
        where
            E: serde::de::Error,
        {
            if value {
                Ok(DenoFlag::AllowAll)
            } else {
                // `false` means no blanket permission, i.e. the default
                // empty allow-list.
                Ok(DenoFlag::default())
            }
        }
        fn visit_seq<S>(self, visitor: S) -> Result<Self::Value, S::Error>
        where
            S: serde::de::SeqAccess<'de>,
        {
            // NOTE(review): deserialisation errors inside the sequence are
            // swallowed here and turned into an empty allow-list — confirm
            // this best-effort behaviour is intended rather than
            // propagating the error to the user.
            let allow: Vec<String> =
                Deserialize::deserialize(serde::de::value::SeqAccessDeserializer::new(visitor))
                    .unwrap_or_default();
            Ok(DenoFlag::Allow(allow))
        }
    }
    deserializer.deserialize_any(StringOrVec(PhantomData))
}
/// Deno runtime options, mirroring Deno's permission flags.
#[derive(Deserialize, Serialize, Debug, PartialEq, Eq, Default, Clone)]
pub struct DenoConfig {
    // Flags of type `DenoFlag` accept either `true` (allow everything) or
    // an allow-list of values; plain `bool` flags are all-or-nothing.
    #[serde(default, deserialize_with = "bool_or_seq_string_to_deno_flag")]
    pub allow_env: DenoFlag,
    #[serde(default)]
    pub allow_sys: bool,
    #[serde(default)]
    pub allow_hrtime: bool,
    #[serde(default, deserialize_with = "bool_or_seq_string_to_deno_flag")]
    pub allow_net: DenoFlag,
    #[serde(default)]
    pub allow_ffi: bool,
    #[serde(default, deserialize_with = "bool_or_seq_string_to_deno_flag")]
    pub allow_read: DenoFlag,
    #[serde(default, deserialize_with = "bool_or_seq_string_to_deno_flag")]
    pub allow_run: DenoFlag,
    #[serde(default, deserialize_with = "bool_or_seq_string_to_deno_flag")]
    pub allow_write: DenoFlag,
    #[serde(default)]
    pub allow_all: bool,
    #[serde(default)]
    pub unstable: bool,
    // Validated as a URI with both scheme and host (see `uri_serde`).
    #[serde(
        default,
        serialize_with = "uri_serde::serialize_option",
        deserialize_with = "uri_serde::deserialize_option"
    )]
    pub location: Option<Uri>,
}
/// Where a package's source code is hosted. The serialised `type` field
/// selects the variant; each known host has its home page URL derived in
/// `Repository::url`.
#[derive(Deserialize, Serialize, Debug, PartialEq, Eq, Clone)]
#[serde(tag = "type")]
pub enum Repository {
    #[serde(rename = "github")]
    GitHub {
        user: String,
        repo: String,
        path: Option<String>,
        #[serde(rename = "tag-prefix")]
        tag_prefix: Option<String>,
    },
    #[serde(rename = "gitlab")]
    GitLab {
        user: String,
        repo: String,
        path: Option<String>,
        #[serde(rename = "tag-prefix")]
        tag_prefix: Option<String>,
    },
    #[serde(rename = "bitbucket")]
    BitBucket {
        user: String,
        repo: String,
        path: Option<String>,
        #[serde(rename = "tag-prefix")]
        tag_prefix: Option<String>,
    },
    #[serde(rename = "codeberg")]
    Codeberg {
        user: String,
        repo: String,
        path: Option<String>,
        #[serde(rename = "tag-prefix")]
        tag_prefix: Option<String>,
    },
    // Gitea and Forgejo are self-hostable, so they additionally carry the
    // instance's host URI (defaulting to https when no scheme is given).
    #[serde(rename = "gitea")]
    Gitea {
        user: String,
        repo: String,
        path: Option<String>,
        #[serde(rename = "tag-prefix")]
        tag_prefix: Option<String>,
        #[serde(
            serialize_with = "uri_serde::serialize",
            deserialize_with = "uri_serde_default_https::deserialize"
        )]
        host: Uri,
    },
    #[serde(rename = "forgejo")]
    Forgejo {
        user: String,
        repo: String,
        path: Option<String>,
        #[serde(rename = "tag-prefix")]
        tag_prefix: Option<String>,
        #[serde(
            serialize_with = "uri_serde::serialize",
            deserialize_with = "uri_serde_default_https::deserialize"
        )]
        host: Uri,
    },
    #[serde(rename = "sourcehut")]
    SourceHut {
        user: String,
        repo: String,
        path: Option<String>,
        #[serde(rename = "tag-prefix")]
        tag_prefix: Option<String>,
    },
    #[serde(rename = "tangled")]
    Tangled {
        user: String,
        repo: String,
        path: Option<String>,
        #[serde(rename = "tag-prefix")]
        tag_prefix: Option<String>,
    },
    // Escape hatch for hosts not listed above: the full URL is provided
    // verbatim by the user.
    #[serde(rename = "custom")]
    Custom {
        url: String,
        #[serde(rename = "tag-prefix")]
        tag_prefix: Option<String>,
    },
}
impl Repository {
    /// Returns the HTTPS URL of the repository's home page.
    pub fn url(&self) -> String {
        match self {
            Repository::GitHub { repo, user, .. } => {
                format!("https://github.com/{user}/{repo}")
            }
            Repository::GitLab { repo, user, .. } => {
                format!("https://gitlab.com/{user}/{repo}")
            }
            Repository::BitBucket { repo, user, .. } => {
                // Bitbucket's canonical host is `bitbucket.org`;
                // `bitbucket.com` only redirects there.
                format!("https://bitbucket.org/{user}/{repo}")
            }
            Repository::Codeberg { repo, user, .. } => {
                format!("https://codeberg.org/{user}/{repo}")
            }
            Repository::SourceHut { repo, user, .. } => {
                // SourceHut prefixes user names with a tilde.
                format!("https://git.sr.ht/~{user}/{repo}")
            }
            Repository::Tangled { repo, user, .. } => {
                format!("https://tangled.sh/{user}/{repo}")
            }
            Repository::Gitea {
                repo, user, host, ..
            }
            | Repository::Forgejo {
                repo, user, host, ..
            } => {
                // Self-hosted instances carry their own host; strip any
                // trailing slash so we don't produce `host//user`.
                let string_host = host.to_string();
                let cleaned_host = string_host.trim_end_matches('/');
                format!("{cleaned_host}/{user}/{repo}")
            }
            Repository::Custom { url, .. } => url.clone(),
        }
    }
    /// Returns the optional path within the repository, if the variant
    /// supports one (`Custom` does not).
    pub fn path(&self) -> Option<&String> {
        match self {
            Repository::GitHub { path, .. }
            | Repository::GitLab { path, .. }
            | Repository::BitBucket { path, .. }
            | Repository::Codeberg { path, .. }
            | Repository::SourceHut { path, .. }
            | Repository::Tangled { path, .. }
            | Repository::Gitea { path, .. }
            | Repository::Forgejo { path, .. } => path.as_ref(),
            Repository::Custom { .. } => None,
        }
    }
}
/// Extra documentation pages configured for the package.
#[derive(Deserialize, Serialize, Default, Debug, PartialEq, Eq, Clone)]
pub struct Docs {
    #[serde(default)]
    pub pages: Vec<DocsPage>,
}
/// A single extra documentation page.
#[derive(Deserialize, Serialize, Debug, PartialEq, Eq, Clone)]
pub struct DocsPage {
    // The page's title.
    pub title: String,
    // The page's path — presumably the output location in the rendered
    // docs; confirm against the docs generation code.
    pub path: String,
    // The source file the page is built from.
    pub source: Utf8PathBuf,
}
/// A titled hyperlink from the package configuration.
#[derive(Deserialize, Serialize, Debug, PartialEq, Eq, Clone)]
pub struct Link {
    pub title: String,
    // Validated on (de)serialisation: must carry both a scheme and a host
    // (see the `uri_serde` module below).
    #[serde(with = "uri_serde")]
    pub href: Uri,
}
// Note we don't use http-serde since we also want to validate the scheme and host is set.
mod uri_serde {
use http::uri::InvalidUri;
use serde::{Deserialize, Deserializer, de::Error as _};
pub fn deserialize<'de, D>(deserializer: D) -> Result<http::Uri, D::Error>
where
D: Deserializer<'de>,
{
let string = String::deserialize(deserializer)?;
let uri: http::Uri = string
.parse()
.map_err(|err: InvalidUri| D::Error::custom(err.to_string()))?;
if uri.scheme().is_none() || uri.host().is_none() {
return Err(D::Error::custom("uri without scheme"));
}
Ok(uri)
}
pub fn deserialize_option<'de, D>(deserializer: D) -> Result<Option<http::Uri>, D::Error>
where
D: Deserializer<'de>,
{
let string: Option<String> = Option::deserialize(deserializer)?;
match string {
Some(s) => {
| rust | Apache-2.0 | f424547f02e621f1c5f28749786e05eda7feb098 | 2026-01-04T15:40:22.554517Z | true |
gleam-lang/gleam | https://github.com/gleam-lang/gleam/blob/f424547f02e621f1c5f28749786e05eda7feb098/compiler-core/src/exhaustiveness.rs | compiler-core/src/exhaustiveness.rs | //! An implementation of the algorithm described in:
//!
//! - How to compile pattern matching, Jules Jacobs.
//! <https://julesjacobs.com/notes/patternmatching/patternmatching.pdf>
//!
//! - Efficient manipulation of binary data using pattern matching,
//! Per Gustafsson and Konstantinos Sagonas.
//! <https://user.it.uu.se/~kostis/Papers/JFP_06.pdf>
//!
//! - Compiling Pattern Matching to good Decision Trees, Luc Maranget.
//! <https://www.cs.tufts.edu/~nr/cs257/archive/luc-maranget/jun08.pdf>
//!
//! The first implementation of the decision tree was adapted from Yorick
//! Peterse's implementation at
//! <https://github.com/yorickpeterse/pattern-matching-in-rust>.
//! Thank you Yorick!
//!
//! > This module comment (and all the following doc comments) are a rough
//! > explanation. It's great to set some expectations on what to expect from
//! > the following code and why the data looks the way it does.
//! > If you want a more detailed explanation, the original paper is a lot more
//! > detailed!
//!
//! A case to be compiled looks a bit different from the case expressions we're
//! used to in Gleam: instead of having a variable to match on and a series of
//! branches, a `CaseToCompile` is made up of a series of branches that can each
//! contain multiple pattern checks. With a pseudo-Gleam syntax, this is what it
//! would look like:
//!
//! ```txt
//! case {
//! a is Some, b is 1, c is _ -> todo
//! a is wibble -> todo
//! }
//! ```
//!
//! > You may wonder, why are we writing branches like this? Usually a case
//! > expression matches on a single variable and each branch refers to it. For
//! > example in gleam you'd write:
//! >
//! > ```gleam
//! > case a {
//! > Some(_) -> todo
//! > None -> todo
//! > }
//! > ```
//! >
//! > In our representation that would turn into:
//! >
//! > ```txt
//! > case {
//! > a is Some(_) -> todo
//! > a is None -> todo
//! > }
//! > ```
//! >
//! > This change makes it way easier to compile the pattern matching into a
//! > decision tree, because now we can add multiple checks on different
//! > variables in each branch.
//!
//! Starting from this data structure, we'll be splitting all the branches into
//! a decision tree that can be used to perform exhaustiveness checking and code
//! generation.
//!
mod missing_patterns;
pub mod printer;
use crate::{
ast::{
self, AssignName, BitArraySize, Endianness, IntOperator, SrcSpan, TypedBitArraySize,
TypedClause, TypedPattern, TypedPatternBitArraySegment,
},
parse::LiteralFloatValue,
strings::{
convert_string_escape_chars, length_utf16, length_utf32, string_to_utf16_bytes,
string_to_utf32_bytes,
},
type_::{
Environment, Opaque, Type, TypeValueConstructor, TypeValueConstructorField, TypeVar,
TypeVariantConstructors, collapse_links, error::UnreachablePatternReason,
is_prelude_module, string,
},
};
use bitvec::{order::Msb0, slice::BitSlice, vec::BitVec, view::BitView};
use ecow::EcoString;
use id_arena::{Arena, Id};
use itertools::Itertools;
use num_bigint::{BigInt, Sign};
use num_traits::ToPrimitive;
use radix_trie::{Trie, TrieCommon};
use std::{
cell::RefCell,
cmp::Ordering,
collections::{HashMap, HashSet, VecDeque},
hash::Hash,
sync::Arc,
};
/// A single branch composing a `case` expression to be compiled into a decision
/// tree.
///
/// As shown in the module documentation, branches are a bit different from the
/// usual branches we see in Gleam's case expressions. Each branch can perform
/// multiple checks (each on a different variable, which appears in the check
/// itself!):
///
/// ```txt
/// a is Some, b is 1 if condition -> todo
/// ─┬─────── ─┬──── ─┬────────── ─┬──
///  │          │      │            ╰── body: an arbitrary expression
///  │          │      ╰── guard: an additional boolean condition
///  ╰──────────┴── checks: check that a variable matches with a pattern
/// ─┬────────────────────────────────────
///  ╰── branch: one of the branches making up a pattern matching expression
/// ```
///
/// As shown here a branch can also optionally include a guard with a boolean
/// condition and is followed by a body that is to be executed if all the checks
/// match (and the guard evaluates to true).
///
#[derive(Clone, Eq, PartialEq, Debug)]
struct Branch {
    /// Each branch is identified by a numeric index, so we can nicely
    /// report errors once we find something's wrong with a branch.
    ///
    clause_index: usize,
    /// Each alternative pattern in an alternative pattern matching (e.g.
    /// `one | two | three -> todo`) gets turned into its own branch in this
    /// internal representation. So we also keep track of the index of the
    /// alternative this comes from (0 being the first one and so on...)
    ///
    alternative_index: usize,
    /// The pattern checks that must all succeed for this branch to match.
    checks: Vec<PatternCheck>,
    /// `Some(clause_index)` when the source clause has a guard expression,
    /// `None` otherwise (see `Branch::new`).
    guard: Option<usize>,
    /// The code to run — and the variables to bind — when the branch matches.
    body: Body,
}
impl Branch {
    /// Builds a branch for the clause with the given index and alternative,
    /// holding the given pattern checks.
    fn new(
        clause_index: usize,
        alternative_index: usize,
        checks: Vec<PatternCheck>,
        has_guard: bool,
    ) -> Self {
        // A guarded branch records the clause index of its guard.
        let guard = has_guard.then_some(clause_index);
        Self {
            clause_index,
            alternative_index,
            checks,
            guard,
            body: Body::new(clause_index),
        }
    }
/// Removes and returns a `PatternCheck` on the given variable from this
/// branch.
///
fn pop_check_on_var(&mut self, var: &Variable) -> Option<PatternCheck> {
let index = self.checks.iter().position(|check| check.var == *var)?;
Some(self.checks.remove(index))
}
    /// Appends a new pattern check to this branch.
    fn add_check(&mut self, check: PatternCheck) {
        self.checks.push(check);
    }
    /// To simplify compiling the pattern we can get rid of all catch-all
    /// patterns that are guaranteed to match by turning those into assignments.
    ///
    /// What does this look like in practice? Let's go over an example.
    /// Let's say we have this case to compile:
    ///
    /// ```gleam
    /// case a {
    ///   Some(1) -> Some(2)
    ///   otherwise -> otherwise
    /// }
    /// ```
    ///
    /// In our internal representation this would become:
    ///
    /// ```txt
    /// case {
    ///   a is Some(1) -> Some(2)
    ///   a is otherwise -> otherwise
    ///   ─┬────────────
    ///    ╰── `a` will always match with this "catch all" variable pattern
    /// }
    /// ```
    ///
    /// Focusing on the last branch, we can remove that check that always matches
    /// by keeping track in its body of the correspondence. So it would end up
    /// looking like this:
    ///
    /// ```txt
    /// case {
    ///   a is Some(1) -> Some(2)
    ///   ∅ -> {
    ///   ┬
    ///   ╰── This represents the fact that there's no checks left for this branch!
    ///       So we can make another observation: if there's no checks left in a
    ///       branch we know it will always match and we can produce a leaf in the
    ///       decision tree (there's an exception when we have guards, but we'll
    ///       get to it later)!
    ///
    ///     let otherwise = a
    ///     ─┬───────────────
    ///      ╰── And now we can understand what those `bindings` at the start of
    ///          a body are: as we remove variable patterns, we will rewrite those
    ///          as assignments at the top of the body of the corresponding branch.
    ///
    ///     otherwise
    ///   }
    /// }
    /// ```
    ///
    fn move_unconditional_patterns(&mut self, compiler: &mut Compiler<'_>) {
        // `retain_mut` keeps a check when the closure returns `true` and
        // drops it when it returns `false`: unconditional patterns are the
        // ones dropped, after recording any bindings in the branch's body.
        self.checks.retain_mut(|check| {
            loop {
                match compiler.pattern(check.pattern) {
                    // Variable patterns always match, so we move those to the body
                    // and remove them from the branch's checks.
                    Pattern::Variable { name } => {
                        self.body.assign(name.clone(), check.var.clone());
                        return false;
                    }
                    // A discard pattern always matches, but since the value is not
                    // used we can just remove it without even adding an assignment
                    // to the body!
                    Pattern::Discard => return false,
                    // Assigns are kind of special: they get turned into assignments
                    // (shocking) but then we can't discard the pattern they wrap.
                    // So we replace the assignment pattern with the one it's wrapping
                    // and try again.
                    Pattern::Assign { name, pattern } => {
                        self.body.assign(name.clone(), check.var.clone());
                        check.pattern = *pattern;
                    }
                    // There's a special case of assignments when it comes to string
                    // prefix patterns. We can give a name to a literal prefix like this:
                    // `"0" as digit <> rest`.
                    // We also want to move this special case of an assignment to the
                    // branch body!
                    Pattern::StringPrefix {
                        prefix,
                        prefix_name,
                        rest: _,
                    } => {
                        if let Some(variable) = std::mem::take(prefix_name) {
                            self.body
                                .assign_literal_string(variable.clone(), prefix.clone());
                        }
                        // The prefix check itself is still conditional: keep it.
                        return true;
                    }
                    // There's a special case of assignments when it comes to bit
                    // array patterns. We can give a name to one slice of the array and
                    // bind it to a variable to be used by later steps of the pattern
                    // like this: `<<len, payload:size(len)>>` (here we're binding
                    // two variables! `len` and `payload`).
                    //
                    // This kind of slicing will always match if it's not guarded by
                    // any size test, so if we find a `ReadAction` that is the first
                    // test to perform in a bit array pattern we know it's always
                    // going to match and can be safely moved into the branch's body.
                    Pattern::BitArray { tests } => match tests.front_mut() {
                        Some(BitArrayTest::Match(MatchTest {
                            value: BitArrayMatchedValue::Variable(name),
                            read_action,
                        })) => {
                            let bit_array = check.var.clone();
                            self.body.assign_bit_array_slice(
                                name.clone(),
                                bit_array,
                                read_action.clone(),
                            );
                            let _ = tests.pop_front();
                        }
                        Some(test) => match test {
                            // If we have `_ as a` we treat that as a regular variable
                            // assignment.
                            BitArrayTest::Match(MatchTest {
                                value: BitArrayMatchedValue::Assign { name, value },
                                read_action,
                            }) if value.is_discard() => {
                                *test = BitArrayTest::Match(MatchTest {
                                    value: BitArrayMatchedValue::Variable(name.clone()),
                                    read_action: read_action.clone(),
                                });
                            }
                            // Just like regular assigns, those patterns are unrefutable
                            // and will become assignments in the branch's body.
                            BitArrayTest::Match(MatchTest {
                                value: BitArrayMatchedValue::Assign { name, value },
                                read_action,
                            }) => {
                                self.body
                                    .assign_segment_constant_value(name.clone(), value.as_ref());
                                // We will still need to check the aliased value!
                                *test = BitArrayTest::Match(MatchTest {
                                    value: value.as_ref().clone(),
                                    read_action: read_action.clone(),
                                });
                            }
                            // Discards are removed directly without even binding them
                            // in the branch's body.
                            _ if test.is_discard() => {
                                let _ = tests.pop_front();
                            }
                            // Otherwise there's no unconditional test to pop, we
                            // keep the pattern without changing it.
                            BitArrayTest::Size(_)
                            | BitArrayTest::Match(_)
                            | BitArrayTest::CatchAllIsBytes { .. }
                            | BitArrayTest::ReadSizeIsNotNegative { .. }
                            | BitArrayTest::SegmentIsFiniteFloat { .. } => return true,
                        },
                        // If a bit array pattern has no tests then it's always
                        // going to match, no matter what. We just remove it.
                        None => return false,
                    },
                    // All other patterns are not unconditional, so we just keep them.
                    Pattern::Int { .. }
                    | Pattern::Float { .. }
                    | Pattern::String { .. }
                    | Pattern::Tuple { .. }
                    | Pattern::Variant { .. }
                    | Pattern::NonEmptyList { .. }
                    | Pattern::EmptyList => return true,
                }
            }
        });
    }
}
/// The body of a branch. It always starts with a series of variable assignments
/// in the form: `let a = b`. As explained in `move_unconditional_patterns`' doc,
/// each body starts with a series of assignments we keep track of as we're
/// compiling each branch.
///
#[derive(Clone, Eq, PartialEq, Debug, serde::Serialize, serde::Deserialize)]
pub struct Body {
    /// Any variables to bind before running the code.
    ///
    /// The tuples are in the form `(name, value)`, so `(wibble, var)`
    /// corresponds to `let wibble = var`.
    ///
    /// Bindings are appended in the order they are discovered, which is the
    /// order they must be executed in.
    ///
    pub bindings: Vec<(EcoString, BoundValue)>,
    /// The index of the clause in the case expression that should be run.
    ///
    pub clause_index: usize,
}
/// A value that can appear on the right hand side of one of the assignments we
/// find at the top of a body.
///
#[derive(Clone, Eq, PartialEq, Debug, serde::Serialize, serde::Deserialize)]
pub enum BoundValue {
    /// `let a = variable`
    ///
    Variable(Variable),
    /// `let a = "a literal string"`
    ///
    LiteralString(EcoString),
    /// `let a = 123`
    ///
    LiteralInt(BigInt),
    /// `let a = 12.2`
    ///
    LiteralFloat(EcoString),
    /// `let a = sliceAsInt(bit_array, 0, 16, ...)`
    ///
    BitArraySlice {
        /// The bit array variable the slice is read from.
        bit_array: Variable,
        /// How the slice is to be read out of the array.
        read_action: ReadAction,
    },
}
impl Body {
    /// Creates an empty body that runs the clause with the given index.
    pub fn new(clause_index: usize) -> Self {
        Self {
            clause_index,
            bindings: Vec::new(),
        }
    }
    /// Adds a new assignment to the body, binding `let variable = value`
    ///
    pub fn assign(&mut self, variable: EcoString, value: Variable) {
        let bound = BoundValue::Variable(value);
        self.bindings.push((variable, bound));
    }
    /// Binds `variable` to a literal string value.
    fn assign_literal_string(&mut self, variable: EcoString, value: EcoString) {
        let bound = BoundValue::LiteralString(value);
        self.bindings.push((variable, bound));
    }
    /// Binds `segment_name` to a slice read out of `bit_array`.
    fn assign_bit_array_slice(
        &mut self,
        segment_name: EcoString,
        bit_array: Variable,
        value: ReadAction,
    ) {
        let slice = BoundValue::BitArraySlice {
            bit_array,
            read_action: value,
        };
        self.bindings.push((segment_name, slice));
    }
    /// Binds `name` to the constant value matched by an aliased bit array
    /// segment. Panics if the aliased value is not a constant.
    fn assign_segment_constant_value(&mut self, name: EcoString, value: &BitArrayMatchedValue) {
        let bound = match value {
            BitArrayMatchedValue::LiteralFloat(value) => BoundValue::LiteralFloat(value.clone()),
            BitArrayMatchedValue::LiteralInt { value, .. } => BoundValue::LiteralInt(value.clone()),
            BitArrayMatchedValue::LiteralString { value, .. } => {
                BoundValue::LiteralString(value.clone())
            }
            BitArrayMatchedValue::Variable(_)
            | BitArrayMatchedValue::Discard(_)
            | BitArrayMatchedValue::Assign { .. } => {
                panic!("aliased non constant value: {value:#?}")
            }
        };
        self.bindings.push((name, bound))
    }
}
/// A user defined pattern such as `Some((x, 10))`.
/// This is a bit simpler than the full fledged `TypedPattern` used for code analysis
/// and only focuses on the relevant bits needed to perform exhaustiveness checking
/// and code generation.
///
/// Using this simplified version of a pattern for the case compiler makes it a
/// whole lot simpler and more efficient (patterns will have to be cloned, so
/// we use an arena to allocate those and only store ids to make this operation
/// extra cheap).
///
#[derive(Clone, Eq, PartialEq, Debug)]
pub enum Pattern {
    /// `_`: always matches, binds nothing.
    Discard,
    Int {
        int_value: BigInt,
    },
    Float {
        float_value: LiteralFloatValue,
    },
    String {
        value: EcoString,
    },
    /// `"prefix" <> rest`, optionally naming the prefix:
    /// `"prefix" as name <> rest`.
    StringPrefix {
        prefix: EcoString,
        prefix_name: Option<EcoString>,
        rest: Id<Pattern>,
    },
    /// `pattern as name`: always matches if the wrapped pattern does,
    /// additionally binding the value to `name`.
    Assign {
        name: EcoString,
        pattern: Id<Pattern>,
    },
    /// A bare variable: always matches, binding the value to `name`.
    Variable {
        name: EcoString,
    },
    Tuple {
        elements: Vec<Id<Pattern>>,
    },
    /// A custom type variant; `index` identifies the variant within its type.
    Variant {
        index: usize,
        name: EcoString,
        module: Option<EcoString>,
        fields: Vec<Id<Pattern>>,
    },
    /// `[first, ..rest]`
    NonEmptyList {
        first: Id<Pattern>,
        rest: Id<Pattern>,
    },
    /// `[]`
    EmptyList,
    /// A bit array pattern, represented as the ordered sequence of tests to
    /// run against the matched array.
    BitArray {
        tests: VecDeque<BitArrayTest>,
    },
}
impl Pattern {
    /// Each pattern (with a couple exceptions) can be turned into a
    /// simpler `RuntimeCheck`: that is a check that can be performed at runtime
    /// to make sure a `PatternCheck` can succeed on a specific value.
    ///
    fn to_runtime_check_kind(&self) -> Option<RuntimeCheckKind> {
        let kind = match self {
            // These patterns are unconditional: they will always match and be moved
            // out of a branch's checks. So there's no corresponding runtime check
            // we can perform for them.
            Pattern::Discard | Pattern::Variable { .. } | Pattern::Assign { .. } => return None,
            Pattern::Int { int_value, .. } => RuntimeCheckKind::Int {
                int_value: int_value.clone(),
            },
            Pattern::Float { float_value, .. } => RuntimeCheckKind::Float {
                float_value: *float_value,
            },
            Pattern::String { value } => RuntimeCheckKind::String {
                value: value.clone(),
            },
            Pattern::StringPrefix { prefix, .. } => RuntimeCheckKind::StringPrefix {
                prefix: prefix.clone(),
            },
            Pattern::Tuple { elements } => RuntimeCheckKind::Tuple {
                size: elements.len(),
            },
            Pattern::Variant { index, .. } => RuntimeCheckKind::Variant { index: *index },
            Pattern::NonEmptyList { .. } => RuntimeCheckKind::NonEmptyList,
            Pattern::EmptyList => RuntimeCheckKind::EmptyList,
            // Bit arrays have no corresponding kind as they're dealt with in a
            // completely different way.
            Pattern::BitArray { .. } => return None,
        };
        Some(kind)
    }
    /// Returns true when this pattern matches on a variant different from
    /// the one the matched value is already inferred to be, meaning the
    /// branch can never succeed.
    fn is_matching_on_unreachable_variant(&self, branch_mode: &BranchMode) -> bool {
        match (self, branch_mode) {
            (
                Self::Variant { index, .. },
                BranchMode::NamedType {
                    inferred_variant: Some(variant),
                    ..
                },
            ) if index != variant => true,
            _ => false,
        }
    }
    /// Returns the impossible bit-array segment patterns contained in this
    /// pattern, if there are any; `None` for every other kind of pattern or
    /// when all the segments are possible.
    fn is_matching_on_impossible_segment(&self) -> Option<Vec<ImpossibleBitArraySegmentPattern>> {
        match self {
            Self::BitArray { tests } => {
                let impossible_segments = tests
                    .iter()
                    .filter_map(|test| match test {
                        // Only match tests can reference impossible segments.
                        BitArrayTest::Size(_)
                        | BitArrayTest::CatchAllIsBytes { .. }
                        | BitArrayTest::ReadSizeIsNotNegative { .. }
                        | BitArrayTest::SegmentIsFiniteFloat { .. } => None,
                        BitArrayTest::Match(MatchTest { value, read_action }) => {
                            value.is_impossible_segment(read_action)
                        }
                    })
                    .collect_vec();
                if impossible_segments.is_empty() {
                    None
                } else {
                    Some(impossible_segments)
                }
            }
            Self::Discard
            | Self::Int { .. }
            | Self::Float { .. }
            | Self::String { .. }
            | Self::StringPrefix { .. }
            | Self::Assign { .. }
            | Self::Variable { .. }
            | Self::Tuple { .. }
            | Self::Variant { .. }
            | Self::NonEmptyList { .. }
            | Self::EmptyList => None,
        }
    }
}
/// A single check making up a branch, checking that a variable matches with a
/// given pattern. For example, the following branch has 2 checks:
///
/// ```txt
/// a is Some, b is 1 -> todo
/// ┬    ─┬──
/// │     ╰── This is the pattern being checked
/// ╰── This is the variable being pattern matched on
/// ─┬─────── ─┬────
///  ╰─────────┴── Two `PatternCheck`s
/// ```
///
#[derive(Clone, Eq, PartialEq, Debug)]
struct PatternCheck {
    /// The variable being matched on.
    var: Variable,
    /// The pattern (allocated in the compiler's arena) it must match.
    pattern: Id<Pattern>,
}
/// This is one of the checks we can take at runtime to decide how to move
/// forward in the decision tree.
///
/// After performing a successful check on a value we will discover something
/// about its shape: it might be an int, an variant of a custom type, ...
/// Some values (like variants and lists) might hold onto additional data we
/// will have to pattern match on: in order to do that we need a name to refer
/// to those new variables we've discovered after performing a check. That's
/// what `args` is for.
///
/// Let's have a look at an example. Imagine we have a pattern like this one:
/// `a is Wibble(1, _, [])`; after performing a runtime check to make sure `a`
/// is indeed a `Wibble`, we'll need to perform additional checks on it's
/// arguments: that pattern will be replaced by three new ones `a0 is 1`,
/// `a1 is _` and `a2 is []`. Those new variables are the `args`.
///
#[derive(Clone, Debug, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
pub enum RuntimeCheck {
    Int {
        int_value: BigInt,
    },
    Float {
        float_value: LiteralFloatValue,
    },
    String {
        value: EcoString,
    },
    /// Checks the value starts with `prefix`; `rest` names the remainder.
    StringPrefix {
        prefix: EcoString,
        rest: Variable,
    },
    /// Checks the value is a tuple of `size` elements, naming each element.
    Tuple {
        size: usize,
        elements: Vec<Variable>,
    },
    /// A single test to run against a bit array value.
    BitArray {
        test: BitArrayTest,
    },
    /// Checks the value is the variant with the given `index`, naming its
    /// fields (and recording their labels, keyed by field position).
    Variant {
        match_: VariantMatch,
        index: usize,
        labels: HashMap<usize, EcoString>,
        fields: Vec<Variable>,
    },
    /// Checks the list is non empty, naming its first element and its tail.
    NonEmptyList {
        first: Variable,
        rest: Variable,
    },
    EmptyList,
}
impl RuntimeCheck {
    /// The `RuntimeCheckKind` corresponding to this check, if it has one
    /// (bit array checks don't, as they're handled separately).
    fn kind(&self) -> Option<RuntimeCheckKind> {
        let kind = match self {
            RuntimeCheck::Int { int_value, .. } => RuntimeCheckKind::Int {
                int_value: int_value.clone(),
            },
            RuntimeCheck::Float { float_value, .. } => RuntimeCheckKind::Float {
                float_value: *float_value,
            },
            RuntimeCheck::String { value } => RuntimeCheckKind::String {
                value: value.clone(),
            },
            RuntimeCheck::StringPrefix { prefix, rest: _ } => RuntimeCheckKind::StringPrefix {
                prefix: prefix.clone(),
            },
            RuntimeCheck::Tuple { size, elements: _ } => RuntimeCheckKind::Tuple { size: *size },
            RuntimeCheck::Variant { index, .. } => RuntimeCheckKind::Variant { index: *index },
            RuntimeCheck::EmptyList => RuntimeCheckKind::EmptyList,
            RuntimeCheck::NonEmptyList { first: _, rest: _ } => RuntimeCheckKind::NonEmptyList,
            RuntimeCheck::BitArray { .. } => return None,
        };
        Some(kind)
    }
    /// True only for a check on a variant that is never explicitly matched
    /// on in the source code.
    pub(crate) fn is_ignored(&self) -> bool {
        match self {
            RuntimeCheck::Variant {
                match_: VariantMatch::NeverExplicitlyMatchedOn { .. },
                ..
            } => true,
            RuntimeCheck::Int { .. }
            | RuntimeCheck::Float { .. }
            | RuntimeCheck::String { .. }
            | RuntimeCheck::StringPrefix { .. }
            | RuntimeCheck::Tuple { .. }
            | RuntimeCheck::BitArray { .. }
            | RuntimeCheck::NonEmptyList { .. }
            | RuntimeCheck::Variant { .. }
            | RuntimeCheck::EmptyList => false,
        }
    }
    /// Returns all the bit array segments referenced in this check.
    /// For each segment it returns its name and the read action used to access
    /// such segment.
    ///
    pub(crate) fn referenced_segment_patterns(&self) -> Vec<(&EcoString, &ReadAction)> {
        match self {
            RuntimeCheck::BitArray { test } => test.referenced_segment_patterns(),
            // Only bit array tests can reference segments.
            RuntimeCheck::Int { .. }
            | RuntimeCheck::Float { .. }
            | RuntimeCheck::String { .. }
            | RuntimeCheck::StringPrefix { .. }
            | RuntimeCheck::Tuple { .. }
            | RuntimeCheck::Variant { .. }
            | RuntimeCheck::NonEmptyList { .. }
            | RuntimeCheck::EmptyList => vec![],
        }
    }
}
/// The shape of a `RuntimeCheck` without the variables it discovers.
/// Produced by `RuntimeCheck::kind`; hashable so that checks can be compared
/// and grouped (presumably as map keys — confirm at use sites).
#[derive(Eq, PartialEq, Clone, Hash, Debug)]
pub enum RuntimeCheckKind {
    Int { int_value: BigInt },
    Float { float_value: LiteralFloatValue },
    String { value: EcoString },
    StringPrefix { prefix: EcoString },
    Tuple { size: usize },
    Variant { index: usize },
    EmptyList,
    NonEmptyList,
}
/// All possible variant checks are automatically generated beforehand once we
/// know we are matching on a value with a custom type.
/// Then if the compiled case is explicitly matching on one of those, we update
/// it to store additional information: for example how the variant is used
/// (if qualified or unqualified and if it is aliased).
///
/// This way when we get to code generation we can clump all variants that were
/// never explicitly matched on in a single `else` block without blowing up code
/// size!
///
#[derive(Clone, Debug, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
pub enum VariantMatch {
    /// Some branch matches on this variant by name; we record how it was
    /// referred to (possibly aliased name and optional module qualifier).
    ExplicitlyMatchedOn {
        name: EcoString,
        module: Option<EcoString>,
    },
    /// No branch of the case mentions this variant by name.
    NeverExplicitlyMatchedOn {
        name: EcoString,
    },
}
impl VariantMatch {
    /// The name the variant was referred to with.
    pub(crate) fn name(&self) -> EcoString {
        match self {
            VariantMatch::ExplicitlyMatchedOn { name, .. }
            | VariantMatch::NeverExplicitlyMatchedOn { name } => name.clone(),
        }
    }

    /// The module qualifier used when matching on the variant, if any.
    /// A variant that is never explicitly matched on has no qualifier.
    pub(crate) fn module(&self) -> Option<EcoString> {
        match self {
            VariantMatch::ExplicitlyMatchedOn { module, .. } => module.clone(),
            VariantMatch::NeverExplicitlyMatchedOn { .. } => None,
        }
    }
}
/// A variable that can be matched on in a branch.
///
#[derive(Eq, PartialEq, Clone, Debug, serde::Serialize, serde::Deserialize)]
pub struct Variable {
    /// Numeric identifier used to tell variables apart.
    pub id: usize,
    /// The type of the value this variable holds.
    pub type_: Arc<Type>,
}
impl Variable {
fn new(id: usize, type_: Arc<Type>) -> Self {
Self { id, type_ }
}
/// Builds a `PatternCheck` that checks this variable matches the given pattern.
/// So we can build pattern checks the same way we informally describe them:
/// ```txt
/// var is pattern
/// ```
/// With this builder method would become:
/// ```rs
/// var.is(pattern)
/// ```
///
fn is(&self, pattern: Id<Pattern>) -> PatternCheck {
PatternCheck {
var: self.clone(),
pattern,
}
}
}
#[derive(Debug)]
/// Different types need to be handled differently when compiling a case expression
/// into a decision tree. There's some types that have infinite matching patterns
/// (like ints, strings, ...) and thus will always need a fallback option.
///
/// Other types, like custom types, only have a well defined and finite number
/// of patterns that could match: when matching on a `Result` we know that we can
/// only have an `Ok(_)` and an `Error(_)`, anything else would end up being a
/// type error!
///
/// So this enum is used to pick the correct strategy to compile a case that's
/// performing a `PatternCheck` on a variable with a specific type.
///
enum BranchMode {
    /// This covers numbers, functions, variables, strings, and bitarrays.
    Infinite,
    /// A tuple with these element types.
    Tuple {
        elements: Vec<Arc<Type>>,
    },
    /// A list whose elements have type `inner_type`.
    List {
        inner_type: Arc<Type>,
    },
    /// A custom type with a known, finite set of constructors; if the
    /// variant has already been inferred, its index is recorded too.
    NamedType {
        constructors: Vec<TypeValueConstructor>,
        inferred_variant: Option<usize>,
    },
}
impl BranchMode {
    /// Returns a heuristic estimate of the branching factor.
    ///
    /// This value is used by the pivot-selection to prefer splits
    /// with fewer branches, which tends to produce smaller and shallower
    /// decision trees.
    fn branching_factor(&self) -> usize {
        match self {
            // A list is either empty or non-empty: two branches.
            BranchMode::List { .. } => 2,
            BranchMode::Tuple { elements } => elements.len(),
            BranchMode::NamedType { constructors, .. } => constructors.len(),
            // Infinite types can never be split exhaustively, so they get
            // the worst possible score.
            BranchMode::Infinite => usize::MAX,
        }
    }

    /// True for types with infinitely many possible patterns, which always
    /// require a fallback branch.
    fn is_infinite(&self) -> bool {
        matches!(self, BranchMode::Infinite)
    }
}
impl Variable {
    /// Determines how a case expression splitting on this variable should be
    /// compiled, based on the variable's type. See `BranchMode` for what each
    /// option means.
    fn branch_mode(&self, env: &Environment<'_>) -> BranchMode {
        match collapse_links(self.type_.clone()).as_ref() {
            // Functions and type variables have no finite set of patterns.
            Type::Fn { .. } | Type::Var { .. } => BranchMode::Infinite,
            // Prelude types with infinitely many matching patterns.
            Type::Named { module, name, .. }
                if is_prelude_module(module)
                    && (name == "Int"
                        || name == "Float"
                        || name == "BitArray"
                        || name == "String") =>
            {
                BranchMode::Infinite
            }
            Type::Named {
                module,
                name,
                arguments,
                ..
            } if is_prelude_module(module) && name == "List" => BranchMode::List {
                inner_type: arguments.first().expect("list has a type argument").clone(),
            },
            Type::Tuple { elements } => BranchMode::Tuple {
                elements: elements.clone(),
            },
            // Any other named type is a custom type: look up its constructors
            // and specialise their argument types to this variable's type
            // arguments.
            Type::Named {
                module,
                name,
                arguments,
                inferred_variant,
                ..
            } => {
                let constructors = ConstructorSpecialiser::specialise_constructors(
                    env.get_constructors_for_type(module, name)
                        .expect("Custom type variants must exist"),
                    arguments.as_slice(),
                    &env.current_module,
                    module,
                );
                let inferred_variant = inferred_variant.map(|i| i as usize);
                BranchMode::NamedType {
                    constructors,
                    inferred_variant,
                }
            }
        }
    }
}
| rust | Apache-2.0 | f424547f02e621f1c5f28749786e05eda7feb098 | 2026-01-04T15:40:22.554517Z | true |
gleam-lang/gleam | https://github.com/gleam-lang/gleam/blob/f424547f02e621f1c5f28749786e05eda7feb098/compiler-core/src/ast.rs | compiler-core/src/ast.rs | mod constant;
mod typed;
mod untyped;
#[cfg(test)]
mod tests;
pub mod visit;
pub use self::typed::{InvalidExpression, TypedExpr};
pub use self::untyped::{FunctionLiteralKind, UntypedExpr};
pub use self::constant::{Constant, TypedConstant, UntypedConstant};
use crate::analyse::Inferred;
use crate::ast::typed::pairwise_all;
use crate::bit_array;
use crate::build::{ExpressionPosition, Located, Target, module_erlang_name};
use crate::exhaustiveness::CompiledCase;
use crate::parse::{LiteralFloatValue, SpannedString};
use crate::type_::error::VariableOrigin;
use crate::type_::expression::{Implementations, Purity};
use crate::type_::printer::Names;
use crate::type_::{
self, Deprecation, HasType, ModuleValueConstructor, PatternConstructor, Type, TypedCallArg,
ValueConstructor, ValueConstructorVariant, nil,
};
use itertools::Itertools;
use num_traits::Zero;
use std::collections::HashSet;
use std::sync::Arc;
use ecow::EcoString;
use num_bigint::{BigInt, Sign};
use num_traits::{One, ToPrimitive};
#[cfg(test)]
use pretty_assertions::assert_eq;
use vec1::Vec1;
pub const PIPE_VARIABLE: &str = "_pipe";
pub const USE_ASSIGNMENT_VARIABLE: &str = "_use";
pub const RECORD_UPDATE_VARIABLE: &str = "_record";
pub const ASSERT_FAIL_VARIABLE: &str = "_assert_fail";
pub const ASSERT_SUBJECT_VARIABLE: &str = "_assert_subject";
pub const CAPTURE_VARIABLE: &str = "_capture";
pub const BLOCK_VARIABLE: &str = "_block";
/// Implemented by AST nodes that carry a source location.
pub trait HasLocation {
    /// The source span this node covers.
    fn location(&self) -> SrcSpan;
}
/// A module fresh from the parser: no type information yet, definitions
/// still carrying their optional target restriction.
pub type UntypedModule = Module<(), Vec<TargetedDefinition>>;
/// A fully analysed module with its interface and typed definitions.
pub type TypedModule = Module<type_::ModuleInterface, TypedDefinitions>;
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Module<Info, Definitions> {
    pub name: EcoString,
    /// Documentation attached to the module as a whole.
    pub documentation: Vec<EcoString>,
    pub type_info: Info,
    pub definitions: Definitions,
    /// Name information used when printing types (see `type_::printer::Names`).
    pub names: Names,
    /// The source byte locations of definition that are unused.
    /// This is used in code generation to know when definitions can be safely omitted.
    pub unused_definition_positions: HashSet<u32>,
}
impl<Info, Definitions> Module<Info, Definitions> {
pub fn erlang_name(&self) -> EcoString {
module_erlang_name(&self.name)
}
}
impl TypedModule {
    /// Finds the AST node at the given byte index anywhere in the module,
    /// searching imports, constants, custom types, type aliases and
    /// functions, in that order.
    pub fn find_node(&self, byte_index: u32) -> Option<Located<'_>> {
        let TypedDefinitions {
            imports,
            constants,
            custom_types,
            type_aliases,
            functions,
        } = &self.definitions;
        if let Some(located) = imports.iter().find_map(|import| import.find_node(byte_index)) {
            return Some(located);
        }
        if let Some(located) = constants
            .iter()
            .find_map(|constant| constant.find_node(byte_index))
        {
            return Some(located);
        }
        if let Some(located) = custom_types
            .iter()
            .find_map(|custom_type| custom_type.find_node(byte_index))
        {
            return Some(located);
        }
        if let Some(located) = type_aliases
            .iter()
            .find_map(|type_alias| type_alias.find_node(byte_index))
        {
            return Some(located);
        }
        functions
            .iter()
            .find_map(|function| function.find_node(byte_index))
    }

    /// Finds the statement containing the given byte index, if any.
    pub fn find_statement(&self, byte_index: u32) -> Option<&TypedStatement> {
        // Statements can only be found inside a module function, there's no
        // need to go over all the other module definitions.
        self.definitions
            .functions
            .iter()
            .find_map(|function| function.find_statement(byte_index))
    }

    /// The total number of definitions of all kinds in this module.
    pub fn definitions_len(&self) -> usize {
        let TypedDefinitions {
            imports,
            constants,
            custom_types,
            type_aliases,
            functions,
        } = &self.definitions;
        [
            imports.len(),
            constants.len(),
            custom_types.len(),
            type_aliases.len(),
            functions.len(),
        ]
        .into_iter()
        .sum()
    }
}
/// The definitions of a fully analysed module, grouped by kind.
#[derive(Debug)]
pub struct TypedDefinitions {
    pub imports: Vec<TypedImport>,
    pub constants: Vec<TypedModuleConstant>,
    pub custom_types: Vec<TypedCustomType>,
    pub type_aliases: Vec<TypedTypeAlias>,
    pub functions: Vec<TypedFunction>,
}
/// The `@target(erlang)` and `@target(javascript)` attributes can be used to
/// mark a definition as only being for a specific target.
///
/// ```gleam
/// const x: Int = 1
///
/// @target(erlang)
/// pub fn main(a) { ...}
/// ```
///
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct TargetedDefinition {
    pub definition: UntypedDefinition,
    /// The target this definition is restricted to, or `None` when it
    /// applies to every target.
    pub target: Option<Target>,
}
impl TargetedDefinition {
    /// Whether this definition should be included when compiling for the
    /// given target. A definition with no `@target(...)` restriction is
    /// included for every target.
    pub fn is_for(&self, target: Target) -> bool {
        // `is_none_or` states "unrestricted, or restricted to this target"
        // directly, without the `map(..).unwrap_or(true)` detour.
        self.target.is_none_or(|t| t == target)
    }
}
impl UntypedModule {
    /// All modules imported for the given target, paired with the location
    /// of each import statement.
    pub fn dependencies(&self, target: Target) -> Vec<(EcoString, SrcSpan)> {
        self.iter_definitions(target)
            .filter_map(|definition| match definition {
                Definition::Import(Import {
                    module, location, ..
                }) => Some((module.clone(), *location)),
                Definition::Function(_)
                | Definition::TypeAlias(_)
                | Definition::CustomType(_)
                | Definition::ModuleConstant(_) => None,
            })
            .collect()
    }

    /// Iterates over the definitions that apply to the given target.
    pub fn iter_definitions(&self, target: Target) -> impl Iterator<Item = &UntypedDefinition> {
        self.definitions.iter().filter_map(move |targeted| {
            if targeted.is_for(target) {
                Some(&targeted.definition)
            } else {
                None
            }
        })
    }

    /// Consumes the module, yielding the definitions for the given target.
    pub fn into_iter_definitions(self, target: Target) -> impl Iterator<Item = UntypedDefinition> {
        self.definitions.into_iter().filter_map(move |targeted| {
            if targeted.is_for(target) {
                Some(targeted.definition)
            } else {
                None
            }
        })
    }
}
// Only imports applying to the chosen target should be reported:
// `three` is javascript-only, so it must be absent when targeting Erlang.
// NOTE: the expected `SrcSpan`s encode exact byte offsets into the source
// string below — do not reformat the string literal.
#[test]
fn module_dependencies_test() {
    let parsed = crate::parse::parse_module(
        camino::Utf8PathBuf::from("test/path"),
        "import one
@target(erlang)
import two
@target(javascript)
import three
import four",
        &crate::warning::WarningEmitter::null(),
    )
    .expect("syntax error");
    let module = parsed.module;
    assert_eq!(
        vec![
            ("one".into(), SrcSpan::new(0, 10)),
            ("two".into(), SrcSpan::new(45, 55)),
            ("four".into(), SrcSpan::new(118, 129)),
        ],
        module.dependencies(Target::Erlang)
    );
}
/// A function argument with full type information.
pub type TypedArg = Arg<Arc<Type>>;
/// A function argument before type analysis.
pub type UntypedArg = Arg<()>;
/// A single argument in a function definition.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Arg<T> {
    pub names: ArgNames,
    pub location: SrcSpan,
    /// The written type annotation, if any.
    pub annotation: Option<TypeAst>,
    pub type_: T,
}
impl<A> Arg<A> {
    /// Replaces this argument's type information, keeping everything else.
    pub fn set_type<B>(self, t: B) -> Arg<B> {
        let Arg {
            names,
            location,
            annotation,
            type_: _,
        } = self;
        Arg {
            names,
            location,
            annotation,
            type_: t,
        }
    }

    /// The variable name this argument binds, if it binds one.
    pub fn get_variable_name(&self) -> Option<&EcoString> {
        self.names.get_variable_name()
    }

    /// True if this argument is the implicit hole inserted by the function
    /// capture syntax.
    pub fn is_capture_hole(&self) -> bool {
        match &self.names {
            ArgNames::Named { name, .. } => name == CAPTURE_VARIABLE,
            ArgNames::Discard { .. }
            | ArgNames::LabelledDiscard { .. }
            | ArgNames::NamedLabelled { .. } => false,
        }
    }
}
impl TypedArg {
    /// Finds the node at the given byte index within this argument: a node
    /// inside the annotation if there is one, otherwise the argument itself.
    pub fn find_node(&self, byte_index: u32) -> Option<Located<'_>> {
        if !self.location.contains(byte_index) {
            return None;
        }
        match &self.annotation {
            Some(annotation) => annotation
                .find_node(byte_index, self.type_.clone())
                .or(Some(Located::Arg(self))),
            None => Some(Located::Arg(self)),
        }
    }
}
/// The name(s) given to a function argument: an optional label combined with
/// either a variable name or a discard.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum ArgNames {
    /// `_name`
    Discard {
        name: EcoString,
        location: SrcSpan,
    },
    /// `label _name`
    LabelledDiscard {
        label: EcoString,
        label_location: SrcSpan,
        name: EcoString,
        name_location: SrcSpan,
    },
    /// `name`
    Named {
        name: EcoString,
        location: SrcSpan,
    },
    /// `label name`
    NamedLabelled {
        label: EcoString,
        label_location: SrcSpan,
        name: EcoString,
        name_location: SrcSpan,
    },
}
impl ArgNames {
    /// The argument's label, for labelled argument names.
    pub fn get_label(&self) -> Option<&EcoString> {
        match self {
            ArgNames::LabelledDiscard { label, .. } | ArgNames::NamedLabelled { label, .. } => {
                Some(label)
            }
            ArgNames::Discard { .. } | ArgNames::Named { .. } => None,
        }
    }

    /// The variable name this argument binds; discards bind nothing.
    pub fn get_variable_name(&self) -> Option<&EcoString> {
        match self {
            ArgNames::Named { name, .. } | ArgNames::NamedLabelled { name, .. } => Some(name),
            ArgNames::Discard { .. } | ArgNames::LabelledDiscard { .. } => None,
        }
    }
}
/// A custom type variant with full type information.
pub type TypedRecordConstructor = RecordConstructor<Arc<Type>>;
/// One variant (constructor) of a custom type definition.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct RecordConstructor<T> {
    pub location: SrcSpan,
    pub name_location: SrcSpan,
    pub name: EcoString,
    pub arguments: Vec<RecordConstructorArg<T>>,
    /// Documentation comment as (position, text) — the `u32` is presumably a
    /// byte offset; confirm against where docs are attached.
    pub documentation: Option<(u32, EcoString)>,
    pub deprecation: Deprecation,
}
impl<A> RecordConstructor<A> {
    /// Attaches a documentation comment to this constructor.
    pub fn put_doc(&mut self, new_doc: (u32, EcoString)) {
        self.documentation = Some(new_doc);
    }
}
/// A custom type constructor argument with full type information.
pub type TypedRecordConstructorArg = RecordConstructorArg<Arc<Type>>;
/// One argument of a custom type constructor, possibly labelled.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct RecordConstructorArg<T> {
    pub label: Option<SpannedString>,
    /// The written type annotation for this argument.
    pub ast: TypeAst,
    pub location: SrcSpan,
    pub type_: T,
    pub doc: Option<(u32, EcoString)>,
}
impl<T: PartialEq> RecordConstructorArg<T> {
    /// Attaches a documentation comment to this argument.
    pub fn put_doc(&mut self, new_doc: (u32, EcoString)) {
        self.doc = Some(new_doc);
    }
}
/// A named type annotation, e.g. `option.Option(Int)`.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct TypeAstConstructor {
    pub location: SrcSpan,
    pub name_location: SrcSpan,
    /// The module qualifier and its location, when the type is qualified.
    pub module: Option<(EcoString, SrcSpan)>,
    pub name: EcoString,
    pub arguments: Vec<TypeAst>,
    pub start_parentheses: Option<u32>,
}
/// A function type annotation, e.g. `fn(Int) -> Bool`.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct TypeAstFn {
    pub location: SrcSpan,
    pub arguments: Vec<TypeAst>,
    pub return_: Box<TypeAst>,
}
/// A type variable annotation, e.g. `a`.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct TypeAstVar {
    pub location: SrcSpan,
    pub name: EcoString,
}
/// A tuple type annotation, e.g. `#(Int, Bool)`.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct TypeAstTuple {
    pub location: SrcSpan,
    pub elements: Vec<TypeAst>,
}
/// A type hole annotation, e.g. `_name`.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct TypeAstHole {
    pub location: SrcSpan,
    pub name: EcoString,
}
/// A type annotation as written in the source code.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum TypeAst {
    Constructor(TypeAstConstructor),
    Fn(TypeAstFn),
    Var(TypeAstVar),
    Tuple(TypeAstTuple),
    Hole(TypeAstHole),
}
impl TypeAst {
    /// The source span of this annotation.
    pub fn location(&self) -> SrcSpan {
        match self {
            TypeAst::Fn(TypeAstFn { location, .. })
            | TypeAst::Var(TypeAstVar { location, .. })
            | TypeAst::Hole(TypeAstHole { location, .. })
            | TypeAst::Tuple(TypeAstTuple { location, .. })
            | TypeAst::Constructor(TypeAstConstructor { location, .. }) => *location,
        }
    }
    /// Whether two annotations spell out the same type, ignoring source
    /// locations. Module qualifiers are compared textually.
    pub fn is_logically_equal(&self, other: &TypeAst) -> bool {
        match self {
            TypeAst::Constructor(TypeAstConstructor {
                module,
                name,
                arguments,
                location: _,
                name_location: _,
                start_parentheses: _,
            }) => match other {
                TypeAst::Constructor(TypeAstConstructor {
                    module: o_module,
                    name: o_name,
                    arguments: o_arguments,
                    location: _,
                    name_location: _,
                    start_parentheses: _,
                }) => {
                    // Compare only the qualifier's name, not its location.
                    let module_name =
                        |m: &Option<(EcoString, _)>| m.as_ref().map(|(m, _)| m.clone());
                    module_name(module) == module_name(o_module)
                        && name == o_name
                        && arguments.len() == o_arguments.len()
                        && arguments
                            .iter()
                            .zip(o_arguments)
                            .all(|a| a.0.is_logically_equal(a.1))
                }
                TypeAst::Fn(_) | TypeAst::Var(_) | TypeAst::Tuple(_) | TypeAst::Hole(_) => false,
            },
            TypeAst::Fn(TypeAstFn {
                arguments,
                return_,
                location: _,
            }) => match other {
                TypeAst::Fn(TypeAstFn {
                    arguments: o_arguments,
                    return_: o_return_,
                    location: _,
                }) => {
                    arguments.len() == o_arguments.len()
                        && arguments
                            .iter()
                            .zip(o_arguments)
                            .all(|a| a.0.is_logically_equal(a.1))
                        && return_.is_logically_equal(o_return_)
                }
                TypeAst::Constructor(_)
                | TypeAst::Var(_)
                | TypeAst::Tuple(_)
                | TypeAst::Hole(_) => false,
            },
            TypeAst::Var(TypeAstVar { name, location: _ }) => match other {
                TypeAst::Var(TypeAstVar {
                    name: o_name,
                    location: _,
                }) => name == o_name,
                TypeAst::Constructor(_) | TypeAst::Fn(_) | TypeAst::Tuple(_) | TypeAst::Hole(_) => {
                    false
                }
            },
            TypeAst::Tuple(TypeAstTuple {
                elements,
                location: _,
            }) => match other {
                TypeAst::Tuple(TypeAstTuple {
                    elements: other_elements,
                    location: _,
                }) => {
                    elements.len() == other_elements.len()
                        && elements
                            .iter()
                            .zip(other_elements)
                            .all(|a| a.0.is_logically_equal(a.1))
                }
                TypeAst::Constructor(_) | TypeAst::Fn(_) | TypeAst::Var(_) | TypeAst::Hole(_) => {
                    false
                }
            },
            TypeAst::Hole(TypeAstHole { name, location: _ }) => match other {
                TypeAst::Hole(TypeAstHole {
                    name: o_name,
                    location: _,
                }) => name == o_name,
                TypeAst::Constructor(_) | TypeAst::Fn(_) | TypeAst::Var(_) | TypeAst::Tuple(_) => {
                    false
                }
            },
        }
    }
    /// Finds the innermost annotation node containing the given byte index,
    /// pairing it with the matching part of `type_`. Falls back to the
    /// annotation itself when no child contains the index.
    pub fn find_node(&self, byte_index: u32, type_: Arc<Type>) -> Option<Located<'_>> {
        if !self.location().contains(byte_index) {
            return None;
        }
        match self {
            TypeAst::Fn(TypeAstFn {
                arguments, return_, ..
            }) => type_
                .fn_types()
                .and_then(|(arg_types, ret_type)| {
                    if let Some(arg) = arguments
                        .iter()
                        .zip(arg_types)
                        .find_map(|(arg, arg_type)| arg.find_node(byte_index, arg_type.clone()))
                    {
                        return Some(arg);
                    }
                    if let Some(ret) = return_.find_node(byte_index, ret_type) {
                        return Some(ret);
                    }
                    None
                })
                .or(Some(Located::Annotation { ast: self, type_ })),
            TypeAst::Constructor(TypeAstConstructor {
                arguments, module, ..
            }) => type_
                .constructor_types()
                .and_then(|arg_types| {
                    if let Some(arg) = arguments
                        .iter()
                        .zip(arg_types)
                        .find_map(|(arg, arg_type)| arg.find_node(byte_index, arg_type.clone()))
                    {
                        return Some(arg);
                    }
                    None
                })
                // The index may fall on the module qualifier rather than on
                // the type name or one of its arguments.
                .or(module.as_ref().and_then(|(name, location)| {
                    if location.contains(byte_index) {
                        Some(Located::ModuleName {
                            location: *location,
                            name,
                            layer: Layer::Type,
                        })
                    } else {
                        None
                    }
                }))
                .or(Some(Located::Annotation { ast: self, type_ })),
            TypeAst::Tuple(TypeAstTuple { elements, .. }) => type_
                .tuple_types()
                .and_then(|elem_types| {
                    if let Some(e) = elements
                        .iter()
                        .zip(elem_types)
                        .find_map(|(e, e_type)| e.find_node(byte_index, e_type.clone()))
                    {
                        return Some(e);
                    }
                    None
                })
                .or(Some(Located::Annotation { ast: self, type_ })),
            TypeAst::Var(_) | TypeAst::Hole(_) => Some(Located::Annotation { ast: self, type_ }),
        }
    }
    /// Generates an annotation corresponding to the type.
    pub fn print(&self, buffer: &mut EcoString) {
        match &self {
            TypeAst::Var(var) => buffer.push_str(&var.name),
            TypeAst::Hole(hole) => buffer.push_str(&hole.name),
            TypeAst::Tuple(tuple) => {
                buffer.push_str("#(");
                for (i, element) in tuple.elements.iter().enumerate() {
                    element.print(buffer);
                    // Comma-separate every element but the last.
                    if i < tuple.elements.len() - 1 {
                        buffer.push_str(", ");
                    }
                }
                buffer.push(')')
            }
            TypeAst::Fn(func) => {
                buffer.push_str("fn(");
                for (i, argument) in func.arguments.iter().enumerate() {
                    argument.print(buffer);
                    if i < func.arguments.len() - 1 {
                        buffer.push_str(", ");
                    }
                }
                buffer.push(')');
                buffer.push_str(" -> ");
                func.return_.print(buffer);
            }
            TypeAst::Constructor(constructor) => {
                if let Some((module, _)) = &constructor.module {
                    buffer.push_str(module);
                    buffer.push('.');
                }
                buffer.push_str(&constructor.name);
                // Parentheses are only printed when there are arguments.
                if !constructor.arguments.is_empty() {
                    buffer.push('(');
                    for (i, argument) in constructor.arguments.iter().enumerate() {
                        argument.print(buffer);
                        if i < constructor.arguments.len() - 1 {
                            buffer.push_str(", ");
                        }
                    }
                    buffer.push(')');
                }
            }
        }
    }
}
// `print` should render a function annotation with parenthesised arguments
// and an arrow before the return type.
#[test]
fn type_ast_print_fn() {
    let mut buffer = EcoString::new();
    let ast = TypeAst::Fn(TypeAstFn {
        location: SrcSpan { start: 1, end: 1 },
        arguments: vec![
            TypeAst::Var(TypeAstVar {
                location: SrcSpan { start: 1, end: 1 },
                name: "String".into(),
            }),
            TypeAst::Var(TypeAstVar {
                location: SrcSpan { start: 1, end: 1 },
                name: "Bool".into(),
            }),
        ],
        return_: Box::new(TypeAst::Var(TypeAstVar {
            location: SrcSpan { start: 1, end: 1 },
            name: "Int".into(),
        })),
    });
    ast.print(&mut buffer);
    assert_eq!(&buffer, "fn(String, Bool) -> Int")
}
// `print` should render a qualified constructor as `module.Name(args)`.
#[test]
fn type_ast_print_constructor() {
    let mut buffer = EcoString::new();
    let ast = TypeAst::Constructor(TypeAstConstructor {
        name: "SomeType".into(),
        module: Some(("some_module".into(), SrcSpan { start: 1, end: 1 })),
        location: SrcSpan { start: 1, end: 1 },
        name_location: SrcSpan { start: 1, end: 1 },
        arguments: vec![
            TypeAst::Var(TypeAstVar {
                location: SrcSpan { start: 1, end: 1 },
                name: "String".into(),
            }),
            TypeAst::Var(TypeAstVar {
                location: SrcSpan { start: 1, end: 1 },
                name: "Bool".into(),
            }),
        ],
        start_parentheses: Some(1),
    });
    ast.print(&mut buffer);
    assert_eq!(&buffer, "some_module.SomeType(String, Bool)")
}
// `print` should recurse into tuple elements, rendering `#( ... )` syntax.
#[test]
fn type_ast_print_tuple() {
    let mut buffer = EcoString::new();
    let ast = TypeAst::Tuple(TypeAstTuple {
        location: SrcSpan { start: 1, end: 1 },
        elements: vec![
            TypeAst::Constructor(TypeAstConstructor {
                name: "SomeType".into(),
                module: Some(("some_module".into(), SrcSpan { start: 1, end: 1 })),
                location: SrcSpan { start: 1, end: 1 },
                name_location: SrcSpan { start: 1, end: 1 },
                arguments: vec![
                    TypeAst::Var(TypeAstVar {
                        location: SrcSpan { start: 1, end: 1 },
                        name: "String".into(),
                    }),
                    TypeAst::Var(TypeAstVar {
                        location: SrcSpan { start: 1, end: 1 },
                        name: "Bool".into(),
                    }),
                ],
                start_parentheses: Some(1),
            }),
            TypeAst::Fn(TypeAstFn {
                location: SrcSpan { start: 1, end: 1 },
                arguments: vec![
                    TypeAst::Var(TypeAstVar {
                        location: SrcSpan { start: 1, end: 1 },
                        name: "String".into(),
                    }),
                    TypeAst::Var(TypeAstVar {
                        location: SrcSpan { start: 1, end: 1 },
                        name: "Bool".into(),
                    }),
                ],
                return_: Box::new(TypeAst::Var(TypeAstVar {
                    location: SrcSpan { start: 1, end: 1 },
                    name: "Int".into(),
                })),
            }),
        ],
    });
    ast.print(&mut buffer);
    assert_eq!(
        &buffer,
        "#(some_module.SomeType(String, Bool), fn(String, Bool) -> Int)"
    )
}
/// Whether a definition may be used outside of its own module.
#[derive(Debug, Clone, Copy, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
pub enum Publicity {
    Public,
    Private,
    /// Importable but not fully public (presumably marked with an
    /// `@internal` attribute — see `attribute_location`).
    Internal { attribute_location: Option<SrcSpan> },
}
impl Publicity {
    /// True only for private definitions.
    pub fn is_private(&self) -> bool {
        matches!(self, Self::Private)
    }

    /// True only for internal definitions.
    pub fn is_internal(&self) -> bool {
        matches!(self, Self::Internal { .. })
    }

    /// True only for fully public definitions.
    pub fn is_public(&self) -> bool {
        matches!(self, Self::Public)
    }

    /// True for definitions other modules are allowed to import: everything
    /// that is not private.
    pub fn is_importable(&self) -> bool {
        !self.is_private()
    }
}
#[derive(Debug, Clone, PartialEq, Eq)]
/// A function definition
///
/// Note that an anonymous function will have `None` as the name field, while a
/// named function will have `Some`.
///
/// # Example(s)
///
/// ```gleam
/// // Public function
/// pub fn wobble() -> String { ... }
/// // Private function
/// fn wibble(x: Int) -> Int { ... }
/// // Anonymous function
/// fn(x: Int) { ... }
/// ```
pub struct Function<T, Expr> {
    pub location: SrcSpan,
    /// Byte position where the body starts, if there is a body.
    pub body_start: Option<u32>,
    /// Byte position of the end of the whole definition; see `full_location`.
    pub end_position: u32,
    /// `None` for anonymous functions, `Some` for named ones.
    pub name: Option<SpannedString>,
    pub arguments: Vec<Arg<T>>,
    pub body: Vec<Statement<T, Expr>>,
    pub publicity: Publicity,
    pub deprecation: Deprecation,
    /// The written return type annotation, if any.
    pub return_annotation: Option<TypeAst>,
    pub return_type: T,
    pub documentation: Option<(u32, EcoString)>,
    /// External Erlang implementation, if any — presumably the
    /// (module, function, attribute location) of an `@external` attribute.
    pub external_erlang: Option<(EcoString, EcoString, SrcSpan)>,
    /// External JavaScript implementation, if any; same shape as above.
    pub external_javascript: Option<(EcoString, EcoString, SrcSpan)>,
    /// See `type_::expression::Implementations`.
    pub implementations: Implementations,
    /// See `type_::expression::Purity`.
    pub purity: Purity,
}
pub type TypedFunction = Function<Arc<Type>, TypedExpr>;
pub type UntypedFunction = Function<(), UntypedExpr>;
impl<T, E> Function<T, E> {
pub fn full_location(&self) -> SrcSpan {
SrcSpan::new(self.location.start, self.end_position)
}
}
impl TypedFunction {
    /// Finds the node at the given byte index within this function: an
    /// expression in the body, an argument, a whole statement, the return
    /// annotation, the function head, or the function body.
    pub fn find_node(&self, byte_index: u32) -> Option<Located<'_>> {
        // Search for the corresponding node inside the function
        // only if the index falls within the function's full location.
        if !self.full_location().contains(byte_index) {
            return None;
        }
        if let Some(found) = self
            .body
            .iter()
            .find_map(|statement| statement.find_node(byte_index))
        {
            return Some(found);
        }
        if let Some(found_arg) = self
            .arguments
            .iter()
            .find_map(|arg| arg.find_node(byte_index))
        {
            return Some(found_arg);
        };
        // Fall back to the whole statement when no node inside it matched.
        if let Some(found_statement) = self
            .body
            .iter()
            .find(|statement| statement.location().contains(byte_index))
        {
            return Some(Located::Statement(found_statement));
        };
        // Check if location is within the return annotation.
        if let Some(located) = self
            .return_annotation
            .iter()
            .find_map(|annotation| annotation.find_node(byte_index, self.return_type.clone()))
        {
            return Some(located);
        };
        // Note that the fn `.location` covers the function head, not
        // the entire statement.
        if self.location.contains(byte_index) {
            Some(Located::ModuleFunction(self))
        } else if self.full_location().contains(byte_index) {
            Some(Located::FunctionBody(self))
        } else {
            None
        }
    }
    /// Finds the statement containing the given byte index, if any.
    pub fn find_statement(&self, byte_index: u32) -> Option<&TypedStatement> {
        if !self.full_location().contains(byte_index) {
            return None;
        }
        self.body
            .iter()
            .find_map(|statement| statement.find_statement(byte_index))
    }
    /// Returns this function if it is named `main`, otherwise `None`.
    pub fn main_function(&self) -> Option<&TypedFunction> {
        if let Some((_, name)) = &self.name
            && name == "main"
        {
            Some(self)
        } else {
            None
        }
    }
}
/// An import before analysis: the owning package is not yet known.
pub type UntypedImport = Import<()>;
/// An import after analysis, carrying the name of the package it comes from.
pub type TypedImport = Import<EcoString>;
#[derive(Debug, Clone, PartialEq, Eq)]
/// Import another Gleam module so the current module can use the types and
/// values it defines.
///
/// # Example(s)
///
/// ```gleam
/// import unix/cat
/// // Import with alias
/// import animal/cat as kitty
/// ```
pub struct Import<PackageName> {
    pub documentation: Option<EcoString>,
    pub location: SrcSpan,
    /// The full slash-separated path of the imported module.
    pub module: EcoString,
    /// The `as name` alias and its location, if one was written.
    pub as_name: Option<(AssignName, SrcSpan)>,
    pub unqualified_values: Vec<UnqualifiedImport>,
    pub unqualified_types: Vec<UnqualifiedImport>,
    pub package: PackageName,
}
impl<T> Import<T> {
    /// The name this import is referred to by within the module: the alias
    /// when one is given, otherwise the final segment of the module path.
    /// Imports aliased to a discard have no usable name.
    pub fn used_name(&self) -> Option<EcoString> {
        if let Some((assigned, _)) = &self.as_name {
            match assigned {
                AssignName::Variable(name) => Some(name.clone()),
                AssignName::Discard(_) => None,
            }
        } else {
            self.module.split('/').next_back().map(EcoString::from)
        }
    }

    /// The location of the `as name` alias, if one was written.
    pub(crate) fn alias_location(&self) -> Option<SrcSpan> {
        let (_, location) = self.as_name.as_ref()?;
        Some(*location)
    }
}
impl TypedImport {
    /// Finds the node at the given byte index within this import statement:
    /// one of the unqualified imports, or the import statement itself.
    pub fn find_node(&self, byte_index: u32) -> Option<Located<'_>> {
        if !self.location.contains(byte_index) {
            return None;
        }
        // Check unqualified values before unqualified types, matching the
        // original search order.
        let groups = [
            (&self.unqualified_values, false),
            (&self.unqualified_types, true),
        ];
        for (unqualified_imports, is_type) in groups {
            let found = unqualified_imports
                .iter()
                .find(|unqualified| unqualified.location.contains(byte_index));
            if let Some(unqualified) = found {
                return Some(Located::UnqualifiedImport(
                    crate::build::UnqualifiedImport {
                        name: &unqualified.name,
                        module: &self.module,
                        is_type,
                        location: &unqualified.location,
                    },
                ));
            }
        }
        Some(Located::ModuleImport(self))
    }
}
/// A module constant before type analysis.
pub type UntypedModuleConstant = ModuleConstant<(), ()>;
/// A module constant with full type information.
pub type TypedModuleConstant = ModuleConstant<Arc<Type>, EcoString>;
#[derive(Debug, Clone, PartialEq, Eq)]
/// A certain fixed value that can be used in multiple places
///
/// # Example(s)
///
/// ```gleam
/// pub const start_year = 2101
/// pub const end_year = 2111
/// ```
pub struct ModuleConstant<T, ConstantRecordTag> {
    pub documentation: Option<(u32, EcoString)>,
    /// The location of the constant, starting at the "(pub) const" keywords and
    /// ending after the ": Type" annotation, or (without an annotation) after its name.
    pub location: SrcSpan,
    pub publicity: Publicity,
    pub name: EcoString,
    pub name_location: SrcSpan,
    /// The written type annotation, if any.
    pub annotation: Option<TypeAst>,
    pub value: Box<Constant<T, ConstantRecordTag>>,
    pub type_: T,
    pub deprecation: Deprecation,
    /// See `type_::expression::Implementations`.
    pub implementations: Implementations,
}
impl TypedModuleConstant {
    /// Finds the node at the given byte index within this constant
    /// definition: the annotation, the value, or the constant itself.
    pub fn find_node(&self, byte_index: u32) -> Option<Located<'_>> {
        // Check if location is within the annotation.
        if let Some(annotation) = &self.annotation {
            if let Some(located) = annotation.find_node(byte_index, self.type_.clone()) {
                return Some(located);
            }
        }
        if let Some(located) = self.value.find_node(byte_index) {
            return Some(located);
        }
        self.location
            .contains(byte_index)
            .then(|| Located::ModuleConstant(self))
    }
}
/// A custom type definition before type analysis.
pub type UntypedCustomType = CustomType<()>;
/// A custom type definition with full type information.
pub type TypedCustomType = CustomType<Arc<Type>>;
#[derive(Debug, Clone, PartialEq, Eq)]
/// A newly defined type with one or more constructors.
/// Each variant of the custom type can contain different types, so the type is
/// the product of the types contained by each variant.
///
/// This might be called an algebraic data type (ADT) or tagged union in other
/// languages and type systems.
///
///
/// # Example(s)
///
/// ```gleam
/// pub type Cat {
/// Cat(name: String, cuteness: Int)
/// }
/// ```
pub struct CustomType<T> {
    pub location: SrcSpan,
    /// Byte position of the end of the definition; see `full_location`.
    pub end_position: u32,
    pub name: EcoString,
    pub name_location: SrcSpan,
    pub publicity: Publicity,
    pub constructors: Vec<RecordConstructor<T>>,
    pub documentation: Option<(u32, EcoString)>,
    pub deprecation: Deprecation,
    /// Whether the type's constructors are hidden outside its module.
    pub opaque: bool,
    /// The names of the type parameters.
    pub parameters: Vec<SpannedString>,
    /// Once type checked this field will contain the type information for the
    /// type parameters.
    pub typed_parameters: Vec<T>,
    /// External Erlang representation, if any — presumably from an
    /// `@external` attribute; confirm at the parsing site.
    pub external_erlang: Option<(EcoString, EcoString, SrcSpan)>,
    /// External JavaScript representation, if any; same shape as above.
    pub external_javascript: Option<(EcoString, EcoString, SrcSpan)>,
}
impl<T> CustomType<T> {
/// The `location` field of a `CustomType` is only the location of `pub type
/// TheName`. This method returns a `SrcSpan` that includes the entire type
/// definition.
pub fn full_location(&self) -> SrcSpan {
SrcSpan::new(self.location.start, self.end_position)
}
}
impl TypedCustomType {
pub fn find_node(&self, byte_index: u32) -> Option<Located<'_>> {
// Check if location is within the type of one of the arguments of a constructor.
if let Some(constructor) = self
.constructors
.iter()
.find(|constructor| constructor.location.contains(byte_index))
{
if let Some(annotation) = constructor
.arguments
.iter()
.find(|arg| arg.location.contains(byte_index))
.and_then(|arg| arg.ast.find_node(byte_index, arg.type_.clone()))
{
return Some(annotation);
}
| rust | Apache-2.0 | f424547f02e621f1c5f28749786e05eda7feb098 | 2026-01-04T15:40:22.554517Z | true |
gleam-lang/gleam | https://github.com/gleam-lang/gleam/blob/f424547f02e621f1c5f28749786e05eda7feb098/compiler-core/src/javascript.rs | compiler-core/src/javascript.rs | mod decision;
mod expression;
mod import;
#[cfg(test)]
mod tests;
mod typescript;
use std::collections::HashMap;
use num_bigint::BigInt;
use num_traits::ToPrimitive;
use crate::build::Target;
use crate::build::package_compiler::StdlibPackage;
use crate::codegen::TypeScriptDeclarations;
use crate::type_::{PRELUDE_MODULE_NAME, RecordAccessor};
use crate::{
ast::{Import, *},
docvec,
line_numbers::LineNumbers,
pretty::*,
};
use camino::Utf8Path;
use ecow::{EcoString, eco_format};
use expression::Context;
use itertools::Itertools;
use self::import::{Imports, Member};
/// Number of spaces used for one level of indentation in generated JavaScript.
const INDENT: isize = 2;
/// The JavaScript prelude runtime, embedded at compile time and written into
/// every build so generated modules can import its helpers.
pub const PRELUDE: &str = include_str!("../templates/prelude.mjs");
/// TypeScript declaration (`.d.mts`) text for the prelude above.
pub const PRELUDE_TS_DEF: &str = include_str!("../templates/prelude.d.mts");
/// Which flavour of output the JavaScript backend is emitting: runnable
/// JavaScript modules, or TypeScript declaration files.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum JavaScriptCodegenTarget {
    JavaScript,
    TypeScriptDeclarations,
}
/// Code generator that turns a single typed Gleam module into JavaScript
/// (an `.mjs` module).
#[derive(Debug)]
pub struct Generator<'a> {
    line_numbers: &'a LineNumbers,
    module: &'a TypedModule,
    // Records which prelude helpers were used during generation so that only
    // those get imported (see `compile`).
    tracker: UsageTracker,
    // Names defined at module scope; locals inside functions that clash with
    // these are renamed.
    module_scope: im::HashMap<EcoString, usize>,
    // Number of `/`-separated segments in the module name; used to build
    // relative import paths (see `import_path`).
    current_module_name_segments_count: usize,
    typescript: TypeScriptDeclarations,
    stdlib_package: StdlibPackage,
    /// Relative path to the module, surrounded in `"`s to make it a string, and with `\`s escaped
    /// to `\\`.
    src_path: EcoString,
}
impl<'a> Generator<'a> {
pub fn new(config: ModuleConfig<'a>) -> Self {
let ModuleConfig {
typescript,
stdlib_package,
module,
line_numbers,
src: _,
path: _,
project_root,
} = config;
let current_module_name_segments_count = module.name.split('/').count();
let src_path = &module.type_info.src_path;
let src_path = src_path
.strip_prefix(project_root)
.unwrap_or(src_path)
.as_str();
let src_path = eco_format!("\"{src_path}\"").replace("\\", "\\\\");
Self {
current_module_name_segments_count,
line_numbers,
module,
src_path,
tracker: UsageTracker::default(),
module_scope: Default::default(),
typescript,
stdlib_package,
}
}
fn type_reference(&self) -> Document<'a> {
if self.typescript == TypeScriptDeclarations::None {
return nil();
}
// Get the name of the module relative the directory (similar to basename)
let module = self
.module
.name
.as_str()
.split('/')
.next_back()
.expect("JavaScript generator could not identify imported module name.");
docvec!["/// <reference types=\"./", module, ".d.mts\" />", line()]
}
    /// Generate the complete JavaScript document for the module: imports,
    /// definitions, and any required prelude/runtime support code.
    pub fn compile(&mut self) -> Document<'a> {
        // Determine what JavaScript imports we need to generate
        let mut imports = self.collect_imports();
        // Determine what names are defined in the module scope so we know to
        // rename any variables that are defined within functions using the same
        // names.
        self.register_module_definitions_in_scope();
        // Generate JavaScript code for each statement.
        // (This also populates `self.tracker`, which is read below.)
        let statements = self.definitions();
        // Two lines between each statement
        let mut statements = Itertools::intersperse(statements.into_iter(), lines(2)).collect_vec();
        // Import any prelude functions that have been used
        if self.tracker.ok_used {
            self.register_prelude_usage(&mut imports, "Ok", None);
        };
        if self.tracker.error_used {
            self.register_prelude_usage(&mut imports, "Error", None);
        };
        if self.tracker.list_used {
            self.register_prelude_usage(&mut imports, "toList", None);
        };
        // Some helpers are also pulled in whenever `echo` is used — see the
        // `|| self.tracker.echo_used` conditions below.
        if self.tracker.list_empty_class_used || self.tracker.echo_used {
            self.register_prelude_usage(&mut imports, "Empty", Some("$Empty"));
        };
        if self.tracker.list_non_empty_class_used || self.tracker.echo_used {
            self.register_prelude_usage(&mut imports, "NonEmpty", Some("$NonEmpty"));
        };
        if self.tracker.prepend_used {
            self.register_prelude_usage(&mut imports, "prepend", Some("listPrepend"));
        };
        if self.tracker.custom_type_used || self.tracker.echo_used {
            self.register_prelude_usage(&mut imports, "CustomType", Some("$CustomType"));
        };
        if self.tracker.make_error_used {
            self.register_prelude_usage(&mut imports, "makeError", None);
        };
        if self.tracker.int_remainder_used {
            self.register_prelude_usage(&mut imports, "remainderInt", None);
        };
        if self.tracker.float_division_used {
            self.register_prelude_usage(&mut imports, "divideFloat", None);
        };
        if self.tracker.int_division_used {
            self.register_prelude_usage(&mut imports, "divideInt", None);
        };
        if self.tracker.object_equality_used {
            self.register_prelude_usage(&mut imports, "isEqual", None);
        };
        if self.tracker.bit_array_literal_used {
            self.register_prelude_usage(&mut imports, "toBitArray", None);
        }
        if self.tracker.bit_array_slice_used || self.tracker.echo_used {
            self.register_prelude_usage(&mut imports, "bitArraySlice", None);
        }
        if self.tracker.bit_array_slice_to_float_used {
            self.register_prelude_usage(&mut imports, "bitArraySliceToFloat", None);
        }
        if self.tracker.bit_array_slice_to_int_used || self.tracker.echo_used {
            self.register_prelude_usage(&mut imports, "bitArraySliceToInt", None);
        }
        if self.tracker.sized_integer_segment_used {
            self.register_prelude_usage(&mut imports, "sizedInt", None);
        }
        if self.tracker.string_bit_array_segment_used {
            self.register_prelude_usage(&mut imports, "stringBits", None);
        }
        if self.tracker.string_utf16_bit_array_segment_used {
            self.register_prelude_usage(&mut imports, "stringToUtf16", None);
        }
        if self.tracker.string_utf32_bit_array_segment_used {
            self.register_prelude_usage(&mut imports, "stringToUtf32", None);
        }
        if self.tracker.codepoint_bit_array_segment_used {
            self.register_prelude_usage(&mut imports, "codepointBits", None);
        }
        if self.tracker.codepoint_utf16_bit_array_segment_used {
            self.register_prelude_usage(&mut imports, "codepointToUtf16", None);
        }
        if self.tracker.codepoint_utf32_bit_array_segment_used {
            self.register_prelude_usage(&mut imports, "codepointToUtf32", None);
        }
        if self.tracker.float_bit_array_segment_used {
            self.register_prelude_usage(&mut imports, "sizedFloat", None);
        }
        let echo_definition = self.echo_definition(&mut imports);
        let type_reference = self.type_reference();
        let filepath_definition = self.filepath_definition();
        // Put it all together, omitting whichever of imports/statements are
        // empty. A module with neither still emits `export {}` so the output
        // remains a valid ES module.
        if imports.is_empty() && statements.is_empty() {
            docvec![
                type_reference,
                filepath_definition,
                "export {}",
                line(),
                echo_definition
            ]
        } else if imports.is_empty() {
            statements.push(line());
            docvec![
                type_reference,
                filepath_definition,
                statements,
                echo_definition
            ]
        } else if statements.is_empty() {
            docvec![
                type_reference,
                imports.into_doc(JavaScriptCodegenTarget::JavaScript),
                filepath_definition,
                echo_definition,
            ]
        } else {
            docvec![
                type_reference,
                imports.into_doc(JavaScriptCodegenTarget::JavaScript),
                line(),
                filepath_definition,
                statements,
                line(),
                echo_definition
            ]
        }
    }
fn echo_definition(&mut self, imports: &mut Imports<'a>) -> Document<'a> {
if !self.tracker.echo_used {
return nil();
}
if StdlibPackage::Present == self.stdlib_package {
let value = Some((
AssignName::Variable("stdlib$dict".into()),
SrcSpan::default(),
));
self.register_import(imports, "gleam_stdlib", "gleam/dict", &value, &[]);
}
self.register_prelude_usage(imports, "BitArray", Some("$BitArray"));
self.register_prelude_usage(imports, "List", Some("$List"));
self.register_prelude_usage(imports, "UtfCodepoint", Some("$UtfCodepoint"));
docvec![line(), std::include_str!("../templates/echo.mjs"), line()]
}
fn register_prelude_usage(
&self,
imports: &mut Imports<'a>,
name: &'static str,
alias: Option<&'static str>,
) {
let path = self.import_path(&self.module.type_info.package, PRELUDE_MODULE_NAME);
let member = Member {
name: name.to_doc(),
alias: alias.map(|a| a.to_doc()),
};
imports.register_module(path, [], [member]);
}
    /// Generate all JavaScript definitions for one custom type: a class per
    /// variant plus, for public non-opaque types, constructor/check/accessor
    /// API functions.
    ///
    /// Returns `None` for unused types (no code at all) and `Some(vec![])`
    /// for types with no constructors.
    fn custom_type_definition(
        &mut self,
        custom_type: &'a TypedCustomType,
    ) -> Option<Vec<Document<'a>>> {
        // Unused definitions are dropped from the generated output entirely.
        if self
            .module
            .unused_definition_positions
            .contains(&custom_type.location.start)
        {
            return None;
        }
        let TypedCustomType {
            name,
            publicity,
            constructors,
            opaque,
            ..
        } = custom_type;
        // If there's no constructors then there's nothing to do here.
        if constructors.is_empty() {
            return Some(vec![]);
        }
        // Variant classes extend the prelude's `$CustomType`, so record that
        // it must be imported.
        self.tracker.custom_type_used = true;
        // Opaque types get private constructors even when the type is public.
        let constructor_publicity = if *opaque || publicity.is_private() {
            Publicity::Private
        } else {
            Publicity::Public
        };
        let mut definitions = constructors
            .iter()
            .map(|constructor| self.variant_definition(constructor, name, constructor_publicity))
            .collect_vec();
        // Generate getters for fields shared between variants
        if let Some(accessors_map) = self.module.type_info.accessors.get(name)
            && !accessors_map.shared_accessors.is_empty()
            // Don't bother generating shared getters when there's only one variant,
            // since the specific accessors can always be uses instead.
            && constructors.len() != 1
            // Only generate accessors for the API if the constructors are public
            && constructor_publicity.is_public()
        {
            definitions.push(self.shared_custom_type_fields(name, &accessors_map.shared_accessors));
        }
        Some(definitions)
    }
fn variant_definition(
&self,
constructor: &'a TypedRecordConstructor,
type_name: &'a str,
publicity: Publicity,
) -> Document<'a> {
let class_definition = self.variant_class_definition(constructor, publicity);
// If the custom type is private or opaque, we don't need to generate API
// functions for it.
if publicity.is_private() {
return class_definition;
}
let constructor_definition = self.variant_constructor_definition(constructor, type_name);
let variant_check_definition = self.variant_check_definition(constructor, type_name);
let fields_definition = self.variant_fields_definition(constructor, type_name);
docvec![
class_definition,
line(),
constructor_definition,
line(),
variant_check_definition,
fields_definition,
]
}
fn variant_constructor_definition(
&self,
constructor: &'a TypedRecordConstructor,
type_name: &'a str,
) -> Document<'a> {
let mut arguments = Vec::new();
for (index, parameter) in constructor.arguments.iter().enumerate() {
if let Some((_, label)) = ¶meter.label {
arguments.push(maybe_escape_identifier(label).to_doc());
} else {
arguments.push(eco_format!("${index}").to_doc());
}
}
let construction = docvec![
break_("", " "),
"new ",
constructor.name.as_str(),
"(",
join(arguments.clone(), break_(",", ", ")).group(),
");"
]
.group();
docvec![
"export const ",
type_name,
"$",
constructor.name.as_str(),
" = (",
join(arguments, break_(",", ", ")),
") =>",
construction.nest(INDENT),
]
}
fn variant_check_definition(
&self,
constructor: &'a TypedRecordConstructor,
type_name: &'a str,
) -> Document<'a> {
let construction = docvec![
break_("", " "),
"value instanceof ",
constructor.name.as_str(),
";"
]
.group();
docvec![
"export const ",
type_name,
"$is",
constructor.name.as_str(),
" = (value) =>",
construction.nest(INDENT),
]
}
    /// Generate `export const Type$Variant$<index>` accessors for every field
    /// of a variant and, for labelled fields, `Type$Variant$<label>` as well.
    fn variant_fields_definition(
        &self,
        constructor: &'a TypedRecordConstructor,
        type_name: &'a str,
    ) -> Document<'a> {
        let mut functions = Vec::new();
        for (index, argument) in constructor.arguments.iter().enumerate() {
            // Always generate the accessor for the value at this index. Although
            // this is not necessary when a label is present, we want to make sure
            // that adding a label to a record isn't a breaking change. For this
            // reason, we need to generate an index getter even when a label is
            // present to ensure consistent behaviour between labelled and unlabelled
            // field access.
            let function_name = eco_format!(
                "{type_name}${record_name}${index}",
                record_name = constructor.name,
            );
            // Accessor body; assigned in both branches below and reused by the
            // index getter pushed after the `if`.
            let contents;
            // If the argument is labelled, also generate a getter for the labelled
            // argument.
            if let Some((_, label)) = &argument.label {
                // Shadows the outer `function_name` with the label-based name;
                // the index-based name is still used by the push below.
                let function_name = eco_format!(
                    "{type_name}${record_name}${label}",
                    record_name = constructor.name,
                );
                // Labelled fields are stored as named properties.
                contents =
                    docvec![break_("", " "), "value.", maybe_escape_property(label), ";"].group();
                functions.push(docvec![
                    line(),
                    "export const ",
                    function_name,
                    " = (value) =>",
                    contents.clone().nest(INDENT),
                ]);
            } else {
                // Unlabelled fields are stored by numeric index.
                contents = docvec![break_("", " "), "value[", index, "];"].group()
            }
            functions.push(docvec![
                line(),
                "export const ",
                function_name,
                " = (value) =>",
                contents.nest(INDENT),
            ]);
        }
        concat(functions)
    }
fn shared_custom_type_fields(
&self,
type_name: &'a str,
shared_accessors: &HashMap<EcoString, RecordAccessor>,
) -> Document<'a> {
let accessors = shared_accessors.keys().sorted().map(|field| {
let function_name = eco_format!("{type_name}${field}");
let contents =
docvec![break_("", " "), "value.", maybe_escape_property(field), ";"].group();
docvec![
"export const ",
function_name,
" = (value) =>",
contents.nest(INDENT),
]
});
concat(Itertools::intersperse(accessors, line()))
}
    /// Generate the JavaScript class for one variant of a custom type,
    /// extending the prelude's `$CustomType` base class.
    fn variant_class_definition(
        &self,
        constructor: &'a TypedRecordConstructor,
        publicity: Publicity,
    ) -> Document<'a> {
        // Constructor parameter name: the escaped label when present,
        // otherwise positional `$0`, `$1`, ….
        fn parameter((i, arg): (usize, &TypedRecordConstructorArg)) -> Document<'_> {
            arg.label
                .as_ref()
                .map(|(_, s)| maybe_escape_identifier(s))
                .unwrap_or_else(|| eco_format!("${i}"))
                .to_doc()
        }
        let doc = if let Some((_, documentation)) = &constructor.documentation {
            jsdoc_comment(documentation, publicity).append(line())
        } else {
            nil()
        };
        let head = if publicity.is_public() {
            "export class "
        } else {
            "class "
        };
        let head = docvec![head, &constructor.name, " extends $CustomType {"];
        // Field-less variants need no constructor of their own.
        if constructor.arguments.is_empty() {
            return head.append("}");
        };
        let parameters = join(
            constructor.arguments.iter().enumerate().map(parameter),
            break_(",", ", "),
        );
        // Assign each parameter to a named property (labelled) or an indexed
        // one (positional), mirroring the accessor functions.
        let constructor_body = join(
            constructor.arguments.iter().enumerate().map(|(i, arg)| {
                let var = parameter((i, arg));
                match &arg.label {
                    None => docvec!["this[", i, "] = ", var, ";"],
                    Some((_, name)) => {
                        docvec!["this.", maybe_escape_property(name), " = ", var, ";"]
                    }
                }
            }),
            line(),
        );
        let class_body = docvec![
            line(),
            "constructor(",
            parameters,
            ") {",
            docvec![line(), "super();", line(), constructor_body].nest(INDENT),
            line(),
            "}",
        ]
        .nest(INDENT);
        docvec![doc, head, class_body, line(), "}"]
    }
fn definitions(&mut self) -> Vec<Document<'a>> {
let mut definitions = vec![];
for custom_type in &self.module.definitions.custom_types {
if let Some(mut new_definitions) = self.custom_type_definition(custom_type) {
definitions.append(&mut new_definitions)
}
}
for constant in &self.module.definitions.constants {
if let Some(definition) = self.module_constant(constant) {
definitions.push(definition)
}
}
for function in &self.module.definitions.functions {
if let Some(definition) = self.module_function(function) {
definitions.push(definition)
}
}
definitions
}
fn collect_imports(&mut self) -> Imports<'a> {
let mut imports = Imports::new();
for Import {
module,
as_name,
unqualified_values,
package,
..
} in &self.module.definitions.imports
{
self.register_import(&mut imports, package, module, as_name, unqualified_values);
}
for function in &self.module.definitions.functions {
if let Some((_, name)) = &function.name
&& let Some((module, external_function, _)) = &function.external_javascript
{
self.register_external_function(
&mut imports,
function.publicity,
name,
module,
external_function,
)
}
}
imports
}
fn import_path(&self, package: &'a str, module: &'a str) -> EcoString {
// TODO: strip shared prefixed between current module and imported
// module to avoid descending and climbing back out again
if package == self.module.type_info.package || package.is_empty() {
// Same package
match self.current_module_name_segments_count {
1 => eco_format!("./{module}.mjs"),
_ => {
let prefix = "../".repeat(self.current_module_name_segments_count - 1);
eco_format!("{prefix}{module}.mjs")
}
}
} else {
// Different package
let prefix = "../".repeat(self.current_module_name_segments_count);
eco_format!("{prefix}{package}/{module}.mjs")
}
}
    /// Record a JavaScript import for a Gleam `import` statement.
    ///
    /// The module is bound to a `$`-prefixed alias (e.g. `$list`) unless it
    /// was discarded with `as _`; unqualified values become named members of
    /// the import.
    fn register_import(
        &mut self,
        imports: &mut Imports<'a>,
        package: &'a str,
        module: &'a str,
        as_name: &Option<(AssignName, SrcSpan)>,
        unqualified: &[UnqualifiedImport],
    ) {
        // Default module name: the final segment of the module path.
        let get_name = |module: &'a str| {
            module
                .split('/')
                .next_back()
                .expect("JavaScript generator could not identify imported module name.")
        };
        let (discarded, module_name) = match as_name {
            None => (false, get_name(module)),
            Some((AssignName::Discard(_), _)) => (true, get_name(module)),
            Some((AssignName::Variable(name), _)) => (false, name.as_str()),
        };
        let module_name = eco_format!("${module_name}");
        let path = self.import_path(package, module);
        // NOTE: this iterator is lazy — the `register_in_scope` side effect
        // runs per item only when `register_module` consumes it below.
        let unqualified_imports = unqualified.iter().map(|i| {
            let alias = i.as_name.as_ref().map(|n| {
                self.register_in_scope(n);
                maybe_escape_identifier(n).to_doc()
            });
            let name = maybe_escape_identifier(&i.name).to_doc();
            Member { name, alias }
        });
        // A discarded import (`as _`) still imports the module for its
        // unqualified values, but binds no module alias.
        let aliases = if discarded { vec![] } else { vec![module_name] };
        imports.register_module(path, aliases, unqualified_imports);
    }
fn register_external_function(
&mut self,
imports: &mut Imports<'a>,
publicity: Publicity,
name: &'a str,
module: &'a str,
fun: &'a str,
) {
let needs_escaping = !is_usable_js_identifier(name);
let member = Member {
name: fun.to_doc(),
alias: if name == fun && !needs_escaping {
None
} else if needs_escaping {
Some(escape_identifier(name).to_doc())
} else {
Some(name.to_doc())
},
};
if publicity.is_importable() {
imports.register_export(maybe_escape_identifier_string(name))
}
imports.register_module(EcoString::from(module), [], [member]);
}
    /// Generate a `const`/`export const` definition for a module constant,
    /// or `None` when the constant is unused.
    fn module_constant(&mut self, constant: &'a TypedModuleConstant) -> Option<Document<'a>> {
        let TypedModuleConstant {
            documentation,
            location,
            publicity,
            name,
            value,
            ..
        } = constant;
        // We don't generate any code for unused constants.
        if self
            .module
            .unused_definition_positions
            .contains(&location.start)
        {
            return None;
        }
        let head = if publicity.is_private() {
            "const "
        } else {
            "export const "
        };
        // Constants are compiled with an expression generator that has no
        // enclosing function name or arguments.
        let mut generator = expression::Generator::new(
            self.module.name.clone(),
            self.src_path.clone(),
            self.line_numbers,
            "".into(),
            vec![],
            &mut self.tracker,
            self.module_scope.clone(),
        );
        let document = generator.constant_expression(Context::Constant, value);
        let jsdoc = if let Some((_, documentation)) = documentation {
            jsdoc_comment(documentation, *publicity).append(line())
        } else {
            nil()
        };
        Some(docvec![
            jsdoc,
            head,
            maybe_escape_identifier(name),
            " = ",
            document,
            ";",
        ])
    }
fn register_in_scope(&mut self, name: &str) {
let _ = self.module_scope.insert(name.into(), 0);
}
    /// Generate a JavaScript function definition for a module function.
    ///
    /// Returns `None` when the function is unused, has an external JavaScript
    /// implementation (it is imported instead), or does not support the
    /// JavaScript target.
    fn module_function(&mut self, function: &'a TypedFunction) -> Option<Document<'a>> {
        // We don't generate any code for unused functions.
        if self
            .module
            .unused_definition_positions
            .contains(&function.location.start)
        {
            return None;
        }
        // If there's an external JavaScript implementation then it will be imported,
        // so we don't need to generate a function definition.
        if function.external_javascript.is_some() {
            return None;
        }
        // If the function does not support JavaScript then we don't need to generate
        // a function definition.
        if !function.implementations.supports(Target::JavaScript) {
            return None;
        }
        let (_, name) = function
            .name
            .as_ref()
            .expect("A module's function must be named");
        let argument_names = function
            .arguments
            .iter()
            .map(|arg| arg.names.get_variable_name())
            .collect();
        let mut generator = expression::Generator::new(
            self.module.name.clone(),
            self.src_path.clone(),
            self.line_numbers,
            name.clone(),
            argument_names,
            &mut self.tracker,
            self.module_scope.clone(),
        );
        let function_doc = match &function.documentation {
            None => nil(),
            Some((_, documentation)) => {
                jsdoc_comment(documentation, function.publicity).append(line())
            }
        };
        let head = if function.publicity.is_private() {
            "function "
        } else {
            "export function "
        };
        // NOTE: `generator.tail_recursion_used` is read below AFTER the body
        // is generated — presumably the flag is set during body generation,
        // so keep this ordering.
        let body = generator.function_body(function.body.as_slice(), function.arguments.as_slice());
        Some(docvec![
            function_doc,
            head,
            maybe_escape_identifier(name.as_str()),
            fun_arguments(function.arguments.as_slice(), generator.tail_recursion_used),
            " {",
            docvec![line(), body].nest(INDENT).group(),
            line(),
            "}",
        ])
    }
fn register_module_definitions_in_scope(&mut self) {
for constant in &self.module.definitions.constants {
self.register_in_scope(&constant.name)
}
for function in &self.module.definitions.functions {
if let Some((_, name)) = &function.name {
self.register_in_scope(name);
}
}
for import in &self.module.definitions.imports {
for unqualified_value in &import.unqualified_values {
self.register_in_scope(unqualified_value.used_name())
}
}
}
fn filepath_definition(&self) -> Document<'a> {
if !self.tracker.make_error_used {
return nil();
}
docvec!["const FILEPATH = ", self.src_path.clone(), ';', lines(2)]
}
}
/// Render Gleam documentation text as a JSDoc block comment.
///
/// `*/` sequences inside the text are escaped so they cannot terminate the
/// comment early. Non-public items are tagged `@ignore` so JSDoc tooling
/// hides them: https://jsdoc.app/tags-ignore
fn jsdoc_comment(documentation: &EcoString, publicity: Publicity) -> Document<'_> {
    let body_lines = documentation.trim_end().split('\n').map(|doc_line| {
        let escaped = doc_line.replace("*/", "*\\/");
        eco_format!(" *{escaped}").to_doc()
    });
    let body = join(body_lines.collect_vec(), line());
    let mut comment = docvec!["/**", line(), body, line()];
    if !publicity.is_public() {
        comment = comment.append(docvec![" * ", line(), " * @ignore", line()]);
    }
    comment.append(" */")
}
/// Everything needed to generate JavaScript output for one module.
#[derive(Debug)]
pub struct ModuleConfig<'a> {
    pub module: &'a TypedModule,
    pub line_numbers: &'a LineNumbers,
    // The module's source text (ignored by `Generator::new`).
    pub src: &'a EcoString,
    pub typescript: TypeScriptDeclarations,
    pub stdlib_package: StdlibPackage,
    // Path to the module's source file (ignored by `Generator::new`).
    pub path: &'a Utf8Path,
    // Used to relativise the module's source path in generated output.
    pub project_root: &'a Utf8Path,
}
/// Render a typed module as JavaScript source text, pretty-printed to an
/// 80-column width.
pub fn module(config: ModuleConfig<'_>) -> String {
    Generator::new(config).compile().to_pretty_string(80)
}
/// Render the TypeScript declaration (`.d.mts`) text for a typed module,
/// pretty-printed to an 80-column width.
pub fn ts_declaration(module: &TypedModule) -> String {
    typescript::TypeScriptGenerator::new(module)
        .compile()
        .to_pretty_string(80)
}
/// Render a function's parameter list. Discarded parameters are named `_`,
/// then `_1`, `_2`, …; named parameters are escaped, or prefixed with
/// `loop$` when the function compiled to a tail-recursion loop (presumably
/// so the generated loop can rebind them — see the expression generator).
fn fun_arguments(arguments: &'_ [TypedArg], tail_recursion_used: bool) -> Document<'_> {
    // Counter for unnamed parameters; mutated inside the lazy map closure,
    // which `wrap_arguments` consumes immediately.
    let mut discards = 0;
    wrap_arguments(
        arguments
            .iter()
            .map(|argument| match argument.get_variable_name() {
                None => {
                    // First discard is plain `_`, subsequent ones `_1`, `_2`, …
                    let doc = if discards == 0 {
                        "_".to_doc()
                    } else {
                        eco_format!("_{discards}").to_doc()
                    };
                    discards += 1;
                    doc
                }
                Some(name) if tail_recursion_used => eco_format!("loop${name}").to_doc(),
                Some(name) => maybe_escape_identifier(name).to_doc(),
            }),
    )
}
/// Wrap a sequence of argument documents in parentheses, comma-separated,
/// as a group that may break across lines with indentation.
fn wrap_arguments<'a, I>(arguments: I) -> Document<'a>
where
    I: IntoIterator<Item = Document<'a>>,
{
    let inner = break_("", "")
        .append(join(arguments, break_(",", ", ")))
        .nest(INDENT)
        .append(break_("", ""));
    inner.surround("(", ")").group()
}
/// Render `(key, value)` pairs as a JavaScript object literal. A `None`
/// value emits shorthand property syntax (just the key).
fn wrap_object<'a>(
    items: impl IntoIterator<Item = (Document<'a>, Option<Document<'a>>)>,
) -> Document<'a> {
    // Cleared by the map closure below; `join` consumes the iterator, so
    // `empty` is up to date by the time it is checked.
    let mut empty = true;
    let fields = items.into_iter().map(|(key, value)| {
        empty = false;
        match value {
            Some(value) => docvec![key, ": ", value],
            None => key.to_doc(),
        }
    });
    let fields = join(fields, break_(",", ", "));
    if empty {
        "{}".to_doc()
    } else {
        docvec![
            docvec!["{", break_("", " "), fields]
                .nest(INDENT)
                .append(break_("", " "))
                .group(),
            "}"
        ]
    }
}
/// Returns true when `word` can be used verbatim as a JavaScript identifier.
///
/// Rejects JS keywords and reserved words, plus `undefined` (to avoid any
/// unintentional overriding) and `then` (so a dynamically imported module
/// defining `then` is never mistaken for a thenable).
fn is_usable_js_identifier(word: &str) -> bool {
    match word {
        // Keywords and reserved words
        // https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Lexical_grammar
        "await" | "arguments" | "break" | "case" | "catch" | "class" | "const" | "continue"
        | "debugger" | "default" | "delete" | "do" | "else" | "enum" | "export" | "extends"
        | "eval" | "false" | "finally" | "for" | "function" | "if" | "implements" | "import"
        | "in" | "instanceof" | "interface" | "let" | "new" | "null" | "package" | "private"
        | "protected" | "public" | "return" | "static" | "super" | "switch" | "this" | "throw"
        | "true" | "try" | "typeof" | "var" | "void" | "while" | "with" | "yield"
        // `undefined` to avoid any unintentional overriding.
        | "undefined"
        // `then` to avoid a module that defines a `then` function being
        // used as a `thenable` in JavaScript when the module is imported
        // dynamically, which results in unexpected behaviour.
        | "then" => false,
        _ => true,
    }
}
/// Returns true when `label` is safe to use as a JavaScript object property.
fn is_usable_js_property(label: &str) -> bool {
    !matches!(
        label,
        // `then` to avoid a custom type that defines a `then` function being
        // used as a `thenable` in Javascript.
        "then"
        // `constructor` to avoid unintentional overriding of the constructor of
        // records, leading to potential runtime crashes while using `withFields`.
        | "constructor"
        // `prototype` and `__proto__` to avoid unintentionally overriding the
        // prototype chain.
        | "prototype"
        | "__proto__"
    )
}
/// Convert `word` to an owned identifier, appending `$` when it would clash
/// with a reserved JavaScript word.
fn maybe_escape_identifier_string(word: &str) -> EcoString {
    if !is_usable_js_identifier(word) {
        return escape_identifier(word);
    }
    EcoString::from(word)
}
/// Escape a reserved word by appending `$`, e.g. `class` becomes `class$`.
fn escape_identifier(word: &str) -> EcoString {
    eco_format!("{}$", word)
}
| rust | Apache-2.0 | f424547f02e621f1c5f28749786e05eda7feb098 | 2026-01-04T15:40:22.554517Z | true |
gleam-lang/gleam | https://github.com/gleam-lang/gleam/blob/f424547f02e621f1c5f28749786e05eda7feb098/compiler-core/src/type_.rs | compiler-core/src/type_.rs | pub(crate) mod environment;
pub mod error;
pub(crate) mod expression;
pub(crate) mod fields;
pub(crate) mod hydrator;
pub(crate) mod pattern;
pub(crate) mod pipe;
pub(crate) mod prelude;
pub mod pretty;
pub mod printer;
#[cfg(test)]
pub mod tests;
use camino::Utf8PathBuf;
use ecow::EcoString;
pub use environment::*;
pub use error::{Error, Problems, UnifyErrorSituation, Warning};
pub(crate) use expression::ExprTyper;
use expression::Purity;
pub use fields::FieldMap;
use hexpm::version::Version;
pub use prelude::*;
use printer::Names;
use serde::Serialize;
use crate::{
ast::{
ArgNames, BitArraySegment, CallArg, Constant, DefinitionLocation, Pattern, Publicity,
SrcSpan, TypedConstant, TypedExpr, TypedPattern, TypedPatternBitArraySegment,
UntypedMultiPattern, UntypedPattern, UntypedRecordUpdateArg,
},
bit_array,
build::{Origin, Target},
inline::InlinableFunction,
line_numbers::LineNumbers,
reference::ReferenceMap,
type_::expression::Implementations,
};
use error::*;
use hydrator::Hydrator;
use itertools::Itertools;
use std::{
cell::RefCell,
collections::{HashMap, HashSet},
ops::Deref,
sync::Arc,
};
/// Implemented by values (typically typed AST nodes) that carry a resolved
/// Gleam type.
pub trait HasType {
    /// The type of this value, reference counted for cheap sharing.
    fn type_(&self) -> Arc<Type>;
}
#[derive(Debug, Clone, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
pub enum Type {
    /// A nominal (named) type such as `Int`, `Float`, or a programmer defined
    /// custom type such as `Person`. The type can take other types as
    /// arguments (aka "generics" or "parametric polymorphism").
    ///
    /// If the type is defined in the Gleam prelude the `module` field will be
    /// the string "gleam", otherwise it will contain the name of the module
    /// that defines the type.
    ///
    Named {
        /// Whether the type may be referenced from other modules.
        publicity: Publicity,
        /// Name of the package that defines the type.
        package: EcoString,
        /// Name of the module that defines the type.
        module: EcoString,
        name: EcoString,
        /// The type arguments the type is instantiated with, if any.
        arguments: Vec<Arc<Type>>,
        /// Which variant of the types this value is, if it is known from variant inference.
        /// This allows us to permit certain operations when we know this,
        /// such as record updates for multi-constructor types, or field access
        /// for fields not shared between type variants. For example:
        ///
        /// ```gleam
        /// type Wibble {
        ///   Wibble(wibble: Int, other, Int)
        ///   Wobble(wobble: Int, something: Int)
        /// }
        ///
        /// fn add_one(some_wibble: Wibble) -> Wibble {
        ///   case some_wibble {
        ///     Wibble(..) as wibble -> Wibble(..wibble, other: wibble.other + 1)
        ///     Wobble(..) as wobble -> Wobble(..wobble, something: wobble.something + 1)
        ///   }
        /// }
        /// ```
        ///
        /// Here, the `wibble` variable has an inferred variant of `0`, since we know it's
        /// of the `Wibble` variant. This means we can safely update it using the `Wibble`
        /// constructor, and access the `other` field, which is only present in that variant.
        ///
        /// However, the parameter `some_wibble` has no known variant; it could be either of the variants,
        /// so we can't allow any of that until we pattern match on it.
        ///
        inferred_variant: Option<u16>,
    },
    /// The type of a function. It takes arguments and returns a value.
    ///
    Fn {
        arguments: Vec<Arc<Type>>,
        return_: Arc<Type>,
    },
    /// A type variable. See the contained `TypeVar` enum for more information.
    ///
    Var { type_: Arc<RefCell<TypeVar>> },
    /// A tuple is an ordered collection of 0 or more values, each of which
    /// can have a different type, so the `tuple` type is the sum of all the
    /// contained types.
    ///
    Tuple { elements: Vec<Arc<Type>> },
}
impl Type {
pub fn is_result_constructor(&self) -> bool {
match self {
Type::Fn { return_, .. } => return_.is_result(),
Type::Var { type_ } => type_.borrow().is_result_constructor(),
Type::Named { .. } | Type::Tuple { .. } => false,
}
}
pub fn is_result(&self) -> bool {
match self {
Self::Named { name, module, .. } => "Result" == name && is_prelude_module(module),
Self::Var { type_ } => type_.borrow().is_result(),
Self::Fn { .. } | Self::Tuple { .. } => false,
}
}
pub fn is_named(&self) -> bool {
match self {
Self::Named { .. } => true,
Self::Var { .. } | Self::Fn { .. } | Self::Tuple { .. } => false,
}
}
pub fn result_ok_type(&self) -> Option<Arc<Type>> {
match self {
Self::Named {
module,
name,
arguments,
..
} if "Result" == name && is_prelude_module(module) => arguments.first().cloned(),
Self::Var { type_ } => type_.borrow().result_ok_type(),
Self::Named { .. } | Self::Tuple { .. } | Type::Fn { .. } => None,
}
}
pub fn result_types(&self) -> Option<(Arc<Type>, Arc<Type>)> {
match self {
Self::Named {
module,
name,
arguments,
..
} if "Result" == name && is_prelude_module(module) => {
Some((arguments.first().cloned()?, arguments.get(1).cloned()?))
}
Self::Var { type_ } => type_.borrow().result_types(),
Self::Named { .. } | Self::Tuple { .. } | Type::Fn { .. } => None,
}
}
pub fn is_unbound(&self) -> bool {
match self {
Self::Var { type_ } => type_.borrow().is_unbound(),
Self::Named { .. } | Self::Fn { .. } | Self::Tuple { .. } => false,
}
}
pub fn is_variable(&self) -> bool {
match self {
Self::Var { type_ } => type_.borrow().is_variable(),
Self::Named { .. } | Self::Fn { .. } | Self::Tuple { .. } => false,
}
}
pub fn return_type(&self) -> Option<Arc<Self>> {
match self {
Self::Fn { return_, .. } => Some(return_.clone()),
Self::Var { type_ } => type_.borrow().return_type(),
Self::Named { .. } | Self::Tuple { .. } => None,
}
}
pub fn fn_types(&self) -> Option<(Vec<Arc<Self>>, Arc<Self>)> {
match self {
Self::Fn {
arguments, return_, ..
} => Some((arguments.clone(), return_.clone())),
Self::Var { type_ } => type_.borrow().fn_types(),
Self::Named { .. } | Self::Tuple { .. } => None,
}
}
/// Gets the types inside of a tuple. Returns `None` if the type is not a tuple.
pub fn tuple_types(&self) -> Option<Vec<Arc<Self>>> {
match self {
Self::Tuple { elements } => Some(elements.clone()),
Self::Var { type_, .. } => type_.borrow().tuple_types(),
Self::Named { .. } | Self::Fn { .. } => None,
}
}
/// Gets the argument types for a type constructor. Returns `None` if the type
/// does not lead to a type constructor.
pub fn constructor_types(&self) -> Option<Vec<Arc<Self>>> {
match self {
Self::Named { arguments, .. } => Some(arguments.clone()),
Self::Var { type_, .. } => type_.borrow().constructor_types(),
Self::Fn { .. } | Self::Tuple { .. } => None,
}
}
/// If the type is a Gleam's prelude's List this will return its wrapped
/// type.
pub fn list_type(&self) -> Option<Arc<Self>> {
match self {
Type::Named {
publicity: Publicity::Public,
name,
module,
package,
arguments,
inferred_variant: _,
} if package == PRELUDE_PACKAGE_NAME
&& module == PRELUDE_MODULE_NAME
&& name == LIST =>
{
match arguments.as_slice() {
[inner_type] => Some(inner_type.clone()),
[] | [_, _, ..] => None,
}
}
Type::Named { .. } | Type::Fn { .. } | Type::Var { .. } | Type::Tuple { .. } => None,
}
}
pub fn list(inner_type: Arc<Self>) -> Self {
Type::Named {
publicity: Publicity::Public,
package: PRELUDE_PACKAGE_NAME.into(),
module: PRELUDE_MODULE_NAME.into(),
name: LIST.into(),
arguments: vec![inner_type],
inferred_variant: None,
}
}
/// Returns `true` if this is a function type, following any type
/// variable links.
#[must_use]
fn is_fun(&self) -> bool {
    match self {
        Self::Var { type_ } => type_.borrow().is_fun(),
        Self::Fn { .. } => true,
        Self::Named { .. } | Self::Tuple { .. } => false,
    }
}
/// Returns `true` if this is the prelude's `Nil` type, following any
/// type variable links.
pub fn is_nil(&self) -> bool {
    match self {
        Self::Var { type_ } => type_.borrow().is_nil(),
        Self::Named { module, name, .. } => name == "Nil" && is_prelude_module(module),
        Self::Fn { .. } | Self::Tuple { .. } => false,
    }
}
/// Returns `true` if this is the prelude's `BitArray` type, following
/// any type variable links.
pub fn is_bit_array(&self) -> bool {
    match self {
        Self::Named { module, name, .. } if "BitArray" == name && is_prelude_module(module) => {
            true
        }
        // Bug fix: this previously recursed into `is_nil`, so a type
        // variable linked to `BitArray` was never recognised as one.
        Self::Var { type_ } => type_.borrow().is_bit_array(),
        Self::Named { .. } | Self::Fn { .. } | Self::Tuple { .. } => false,
    }
}
/// Returns `true` if this is the prelude's `UtfCodepoint` type,
/// following any type variable links.
pub fn is_utf_codepoint(&self) -> bool {
    match self {
        Self::Named { module, name, .. }
            if "UtfCodepoint" == name && is_prelude_module(module) =>
        {
            true
        }
        // Bug fix: this previously recursed into `is_nil`, so a type
        // variable linked to `UtfCodepoint` was never recognised as one.
        Self::Var { type_ } => type_.borrow().is_utf_codepoint(),
        Self::Named { .. } | Self::Fn { .. } | Self::Tuple { .. } => false,
    }
}
/// Returns `true` if this is the prelude's `Bool` type, following any
/// type variable links.
pub fn is_bool(&self) -> bool {
    match self {
        Self::Var { type_ } => type_.borrow().is_bool(),
        Self::Named { module, name, .. } => name == "Bool" && is_prelude_module(module),
        Self::Fn { .. } | Self::Tuple { .. } => false,
    }
}
/// Returns `true` if this is the prelude's `Int` type, following any
/// type variable links.
pub fn is_int(&self) -> bool {
    match self {
        Self::Var { type_ } => type_.borrow().is_int(),
        Self::Named { module, name, .. } => name == "Int" && is_prelude_module(module),
        Self::Fn { .. } | Self::Tuple { .. } => false,
    }
}
/// Returns `true` if this is the prelude's `Float` type, following any
/// type variable links.
pub fn is_float(&self) -> bool {
    match self {
        Self::Var { type_ } => type_.borrow().is_float(),
        Self::Named { module, name, .. } => name == "Float" && is_prelude_module(module),
        Self::Fn { .. } | Self::Tuple { .. } => false,
    }
}
/// Returns `true` if this is the prelude's `String` type, following any
/// type variable links.
pub fn is_string(&self) -> bool {
    match self {
        Self::Var { type_ } => type_.borrow().is_string(),
        Self::Named { module, name, .. } => name == "String" && is_prelude_module(module),
        Self::Fn { .. } | Self::Tuple { .. } => false,
    }
}
/// Returns `true` if this is the prelude's `List` type, following any
/// type variable links.
pub fn is_list(&self) -> bool {
    match self {
        Self::Var { type_ } => type_.borrow().is_list(),
        Self::Named { module, name, .. } => name == "List" && is_prelude_module(module),
        Self::Fn { .. } | Self::Tuple { .. } => false,
    }
}
/// Returns the defining module name and type name if this is a named
/// type, following any type variable links; otherwise `None`.
pub fn named_type_name(&self) -> Option<(EcoString, EcoString)> {
    match self {
        Self::Var { type_ } => type_.borrow().named_type_name(),
        Self::Named { module, name, .. } => Some((module.clone(), name.clone())),
        Self::Fn { .. } | Self::Tuple { .. } => None,
    }
}
/// Returns the module name, type name and type arguments if this is a
/// named type, following any type variable links; otherwise `None`.
pub fn named_type_information(&self) -> Option<(EcoString, EcoString, Vec<Arc<Self>>)> {
    match self {
        Self::Var { type_ } => type_.borrow().named_type_information(),
        Self::Named {
            module,
            name,
            arguments,
            ..
        } => {
            let information = (module.clone(), name.clone(), arguments.clone());
            Some(information)
        }
        Self::Fn { .. } | Self::Tuple { .. } => None,
    }
}
/// Records the inferred variant index on this type if it is a named
/// type (following links); other kinds of type are left untouched.
pub fn set_custom_type_variant(&mut self, index: u16) {
    match self {
        Type::Var { type_ } => type_.borrow_mut().set_custom_type_variant(index),
        Type::Named {
            inferred_variant, ..
        } => *inferred_variant = Some(index),
        Type::Fn { .. } | Type::Tuple { .. } => {}
    }
}
/// Recursively clears any inferred variant information from this type
/// and all the types nested inside it.
pub fn generalise_custom_type_variant(&mut self) {
    match self {
        Type::Var { type_ } => type_.borrow_mut().generalise_custom_type_variant(),
        Type::Named {
            inferred_variant, ..
        } => *inferred_variant = None,
        Type::Tuple { elements } => {
            for element in elements.iter_mut() {
                Arc::make_mut(element).generalise_custom_type_variant();
            }
        }
        Type::Fn { arguments, return_ } => {
            for argument in arguments.iter_mut() {
                Arc::make_mut(argument).generalise_custom_type_variant();
            }
            Arc::make_mut(return_).generalise_custom_type_variant();
        }
    }
}
/// Returns the inferred variant index if this is a named type that has
/// one, following any type variable links.
pub fn custom_type_inferred_variant(&self) -> Option<u16> {
    match self {
        Type::Var { type_ } => type_.borrow().custom_type_inferred_variant(),
        Type::Named {
            inferred_variant, ..
        } => *inferred_variant,
        Type::Fn { .. } | Type::Tuple { .. } => None,
    }
}
/// Get the args for the type if the type is a specific `Type::Named`.
/// Returns None if the type is not a `Type::Named` or is an incorrect `Type::Named`
///
/// If the type is an unbound type variable it is linked to the requested
/// named type, with a fresh unbound variable for each type argument.
///
/// This function is currently only used for finding the `List` type.
///
// TODO: specialise this to just List.
pub fn named_type_arguments(
    &self,
    publicity: Publicity,
    package: &str,
    module: &str,
    name: &str,
    arity: usize,
    environment: &mut Environment<'_>,
) -> Option<Vec<Arc<Self>>> {
    match self {
        // NOTE(review): only module, name and arity are compared here —
        // `publicity` and `package` are not checked. Confirm intended.
        Self::Named {
            module: m,
            name: n,
            arguments,
            ..
        } => {
            if module == m && name == n && arguments.len() == arity {
                Some(arguments.clone())
            } else {
                None
            }
        }
        Self::Var { type_ } => {
            // The immutable borrow taken for this inner match ends with
            // the match expression, before `borrow_mut` below runs.
            let arguments: Vec<_> = match type_.borrow().deref() {
                TypeVar::Link { type_ } => {
                    return type_.named_type_arguments(
                        publicity,
                        package,
                        module,
                        name,
                        arity,
                        environment,
                    );
                }
                // Unbound: invent a fresh unbound variable per argument.
                TypeVar::Unbound { .. } => {
                    (0..arity).map(|_| environment.new_unbound_var()).collect()
                }
                TypeVar::Generic { .. } => return None,
            };
            // We are an unbound type variable! So convert us to a type link
            // to the desired type.
            *type_.borrow_mut() = TypeVar::Link {
                type_: Arc::new(Self::Named {
                    name: name.into(),
                    package: package.into(),
                    module: module.into(),
                    arguments: arguments.clone(),
                    publicity,
                    inferred_variant: None,
                }),
            };
            Some(arguments)
        }
        Self::Fn { .. } | Self::Tuple { .. } => None,
    }
}
/// Searches this type and everything nested inside it for a private
/// named type, returning the first one found.
pub fn find_private_type(&self) -> Option<Self> {
    match self {
        Self::Named {
            publicity: Publicity::Private,
            ..
        } => Some(self.clone()),
        Self::Named { arguments, .. } => arguments
            .iter()
            .find_map(|argument| argument.find_private_type()),
        Self::Fn {
            return_, arguments, ..
        } => return_.find_private_type().or_else(|| {
            arguments
                .iter()
                .find_map(|argument| argument.find_private_type())
        }),
        Self::Tuple { elements, .. } => elements
            .iter()
            .find_map(|element| element.find_private_type()),
        Self::Var { type_, .. } => match type_.borrow().deref() {
            TypeVar::Link { type_, .. } => type_.find_private_type(),
            TypeVar::Unbound { .. } | TypeVar::Generic { .. } => None,
        },
    }
}
/// Searches this type and everything nested inside it for an internal
/// named type, returning the first one found.
pub fn find_internal_type(&self) -> Option<Self> {
    match self {
        Self::Named { publicity, .. } if publicity.is_internal() => Some(self.clone()),
        Self::Named { arguments, .. } => arguments
            .iter()
            .find_map(|argument| argument.find_internal_type()),
        Self::Fn {
            return_, arguments, ..
        } => return_.find_internal_type().or_else(|| {
            arguments
                .iter()
                .find_map(|argument| argument.find_internal_type())
        }),
        Self::Tuple { elements, .. } => elements
            .iter()
            .find_map(|element| element.find_internal_type()),
        Self::Var { type_, .. } => match type_.borrow().deref() {
            TypeVar::Link { type_, .. } => type_.find_internal_type(),
            TypeVar::Unbound { .. } | TypeVar::Generic { .. } => None,
        },
    }
}
/// Returns the number of arguments if this is a function type.
///
/// NOTE(review): unlike `fn_types` this does not follow `Var` links, so
/// a variable linked to a function type reports no arity — confirm intended.
pub fn fn_arity(&self) -> Option<usize> {
    if let Self::Fn { arguments, .. } = self {
        Some(arguments.len())
    } else {
        None
    }
}
#[must_use]
/// Returns `true` if the two types are the same. This differs from the
/// standard `Eq` implementation as it also follows all links to check if
/// two types are really the same.
///
pub fn same_as(&self, other: &Self) -> bool {
    match (self, other) {
        (Type::Named { .. }, Type::Fn { .. } | Type::Tuple { .. }) => false,
        // Comparing against a variable delegates to the `TypeVar`, which
        // follows links and treats unbound variables as equal to anything.
        (one @ Type::Named { .. }, Type::Var { type_ }) => {
            type_.as_ref().borrow().same_as_other_type(one)
        }
        // When comparing two types we don't care about the inferred variant:
        // `True` has the same type as `False`, even if the inferred variants
        // differ.
        (
            Type::Named {
                publicity,
                package,
                module,
                name,
                arguments,
                inferred_variant: _,
            },
            Type::Named {
                publicity: other_publicity,
                package: other_package,
                module: other_module,
                name: other_name,
                arguments: other_arguments,
                inferred_variant: _,
            },
        ) => {
            publicity == other_publicity
                && package == other_package
                && module == other_module
                && name == other_name
                && arguments.len() == other_arguments.len()
                && arguments
                    .iter()
                    .zip(other_arguments)
                    .all(|(one, other)| one.same_as(other))
        }
        (Type::Fn { .. }, Type::Named { .. } | Type::Tuple { .. }) => false,
        (one @ Type::Fn { .. }, Type::Var { type_ }) => {
            type_.as_ref().borrow().same_as_other_type(one)
        }
        // Functions are the same when arities match and all argument and
        // return types are pairwise the same.
        (
            Type::Fn { arguments, return_ },
            Type::Fn {
                arguments: other_arguments,
                return_: other_return,
            },
        ) => {
            arguments.len() == other_arguments.len()
                && arguments
                    .iter()
                    .zip(other_arguments)
                    .all(|(one, other)| one.same_as(other))
                && return_.same_as(other_return)
        }
        (Type::Var { type_ }, other) => type_.as_ref().borrow().same_as_other_type(other),
        (Type::Tuple { .. }, Type::Fn { .. } | Type::Named { .. }) => false,
        (one @ Type::Tuple { .. }, Type::Var { type_ }) => {
            type_.as_ref().borrow().same_as_other_type(one)
        }
        (
            Type::Tuple { elements },
            Type::Tuple {
                elements: other_elements,
            },
        ) => {
            elements.len() == other_elements.len()
                && elements
                    .iter()
                    .zip(other_elements)
                    .all(|(one, other)| one.same_as(other))
        }
    }
}
}
impl TypeVar {
    /// Compares this type variable with a `Type`, following links.
    /// Unbound variables compare equal to anything.
    #[must_use]
    fn same_as_other_type(&self, other: &Type) -> bool {
        match self {
            TypeVar::Unbound { .. } => true,
            TypeVar::Link { type_ } => type_.same_as(other),
            TypeVar::Generic { .. } => match other {
                Type::Var { type_ } => self.same_as(&type_.as_ref().borrow()),
                Type::Named { .. } | Type::Fn { .. } | Type::Tuple { .. } => false,
            },
        }
    }

    /// Compares two type variables, following links. Unbound variables
    /// compare equal to anything; generics are equal when ids match.
    #[must_use]
    fn same_as(&self, other: &Self) -> bool {
        match (self, other) {
            (TypeVar::Unbound { .. }, _) | (_, TypeVar::Unbound { .. }) => true,
            (TypeVar::Link { type_ }, TypeVar::Link { type_: other_type }) => {
                type_.same_as(other_type)
            }
            // A link on either side against a generic: unwrap the link and
            // compare the generic with the linked type.
            (generic @ TypeVar::Generic { .. }, TypeVar::Link { type_ })
            | (TypeVar::Link { type_ }, generic @ TypeVar::Generic { .. }) => {
                generic.same_as_other_type(type_)
            }
            (TypeVar::Generic { id }, TypeVar::Generic { id: other_id }) => id == other_id,
        }
    }
}
/// Follows chains of `Var` links until reaching a type that is not a
/// link, returning that type. Non-link types are returned unchanged.
pub fn collapse_links(t: Arc<Type>) -> Arc<Type> {
    // Take the linked type out first so the `RefCell` borrow is released
    // before we either recurse or return `t`.
    let linked = match t.deref() {
        Type::Var { type_ } => match type_.borrow().deref() {
            TypeVar::Link { type_ } => Some(type_.clone()),
            TypeVar::Unbound { .. } | TypeVar::Generic { .. } => None,
        },
        Type::Named { .. } | Type::Fn { .. } | Type::Tuple { .. } => None,
    };
    match linked {
        Some(inner) => collapse_links(inner),
        None => t,
    }
}
// The record accessors available for a custom type's fields.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct AccessorsMap {
    pub publicity: Publicity,
    pub type_: Arc<Type>,
    // Accessors valid for every variant; used as the fallback when no
    // variant has been inferred (see `accessors_for_variant`).
    pub shared_accessors: HashMap<EcoString, RecordAccessor>,
    // Per-variant labelled accessors, indexed by variant index.
    pub variant_specific_accessors: Vec<HashMap<EcoString, RecordAccessor>>,
    // Per-variant positional argument types, indexed by variant index.
    pub variant_positional_accessors: Vec<Vec<Arc<Type>>>,
}
impl AccessorsMap {
    /// Returns the accessors for the given inferred variant, falling back
    /// to the accessors shared by all variants when no variant is known
    /// (or the index is out of range).
    pub fn accessors_for_variant(
        &self,
        inferred_variant: Option<u16>,
    ) -> &HashMap<EcoString, RecordAccessor> {
        match inferred_variant {
            Some(index) => self
                .variant_specific_accessors
                .get(usize::from(index))
                .unwrap_or(&self.shared_accessors),
            None => &self.shared_accessors,
        }
    }

    /// The positional argument types of the given variant, if it exists.
    pub fn positional_accessors(&self, inferred_variant: u16) -> Option<&Vec<Arc<Type>>> {
        self.variant_positional_accessors
            .get(usize::from(inferred_variant))
    }
}
// An accessor for one labelled field of a record constructor.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct RecordAccessor {
    // TODO: smaller int. Doesn't need to be this big
    // Position of the field within the constructor's arguments.
    pub index: u64,
    // The field's label, i.e. the accessor's name.
    pub label: EcoString,
    pub type_: Arc<Type>,
    pub documentation: Option<EcoString>,
}
// The different kinds of value that a name can resolve to.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum ValueConstructorVariant {
    /// A locally defined variable or function parameter
    LocalVariable {
        location: SrcSpan,
        origin: VariableOrigin,
    },
    /// A module constant
    ModuleConstant {
        documentation: Option<EcoString>,
        location: SrcSpan,
        module: EcoString,
        name: EcoString,
        literal: Constant<Arc<Type>, EcoString>,
        implementations: Implementations,
    },
    /// A function belonging to the module
    ModuleFn {
        name: EcoString,
        field_map: Option<FieldMap>,
        module: EcoString,
        arity: usize,
        location: SrcSpan,
        documentation: Option<EcoString>,
        implementations: Implementations,
        // External implementations as (module, function) pairs, if any.
        external_erlang: Option<(EcoString, EcoString)>,
        external_javascript: Option<(EcoString, EcoString)>,
        purity: Purity,
    },
    /// A constructor for a custom type
    Record {
        name: EcoString,
        arity: u16,
        field_map: Option<FieldMap>,
        location: SrcSpan,
        module: EcoString,
        // Total number of variants of the custom type this belongs to.
        variants_count: u16,
        // Index of this variant within its custom type.
        variant_index: u16,
        documentation: Option<EcoString>,
    },
}
impl ValueConstructorVariant {
    /// Converts this variant into the `ModuleValueConstructor` shape used
    /// when the value is accessed as a member of a module.
    fn to_module_value_constructor(
        &self,
        type_: Arc<Type>,
        module_name: &EcoString,
        function_name: &EcoString,
    ) -> ModuleValueConstructor {
        match self {
            Self::Record {
                name,
                arity,
                field_map,
                location,
                documentation,
                variant_index,
                ..
            } => ModuleValueConstructor::Record {
                name: name.clone(),
                variant_index: *variant_index,
                field_map: field_map.clone(),
                arity: *arity,
                type_,
                location: *location,
                documentation: documentation.clone(),
            },
            // TODO: remove this clone with an rc clone
            Self::ModuleConstant {
                documentation,
                literal,
                location,
                ..
            } => ModuleValueConstructor::Constant {
                literal: literal.clone(),
                location: *location,
                documentation: documentation.clone(),
            },
            // NOTE(review): a local variable is surfaced as a `Fn` with the
            // caller-supplied names, no externals, and `Purity::Impure` —
            // presumably a conservative fallback; confirm against callers.
            Self::LocalVariable { location, .. } => ModuleValueConstructor::Fn {
                name: function_name.clone(),
                module: module_name.clone(),
                external_erlang: None,
                external_javascript: None,
                documentation: None,
                location: *location,
                field_map: None,
                purity: Purity::Impure,
            },
            Self::ModuleFn {
                name,
                module,
                location,
                documentation,
                field_map,
                external_erlang,
                external_javascript,
                purity,
                ..
            } => ModuleValueConstructor::Fn {
                name: name.clone(),
                module: module.clone(),
                documentation: documentation.clone(),
                external_erlang: external_erlang.clone(),
                external_javascript: external_javascript.clone(),
                location: *location,
                field_map: field_map.clone(),
                purity: *purity,
            },
        }
    }

    /// The source location where this value was defined.
    pub fn definition_location(&self) -> SrcSpan {
        match self {
            ValueConstructorVariant::LocalVariable { location, .. }
            | ValueConstructorVariant::ModuleConstant { location, .. }
            | ValueConstructorVariant::ModuleFn { location, .. }
            | ValueConstructorVariant::Record { location, .. } => *location,
        }
    }

    /// Returns `true` if the variant is [`LocalVariable`].
    pub fn is_local_variable(&self) -> bool {
        matches!(self, Self::LocalVariable { .. })
    }

    /// Returns `true` if the variant is a local variable generated by the compiler.
    #[must_use]
    pub fn is_generated_variable(&self) -> bool {
        match self {
            ValueConstructorVariant::LocalVariable { origin, .. } => {
                matches!(origin.syntax, VariableSyntax::Generated)
            }
            ValueConstructorVariant::ModuleConstant { .. }
            | ValueConstructorVariant::ModuleFn { .. }
            | ValueConstructorVariant::Record { .. } => false,
        }
    }

    /// Returns `true` if the value constructor variant is [`ModuleFn`].
    ///
    /// [`ModuleFn`]: ValueConstructorVariant::ModuleFn
    #[must_use]
    pub fn is_module_fn(&self) -> bool {
        matches!(self, Self::ModuleFn { .. })
    }

    /// Returns `true` if the variant is a record constructor.
    pub fn is_record(&self) -> bool {
        matches!(self, Self::Record { .. })
    }

    /// Which targets this value can be used on. Records and local
    /// variables are plain Gleam values, so they support every target.
    pub fn implementations(&self) -> Implementations {
        match self {
            ValueConstructorVariant::Record { .. }
            | ValueConstructorVariant::LocalVariable { .. } => Implementations {
                gleam: true,
                can_run_on_erlang: true,
                can_run_on_javascript: true,
                uses_javascript_externals: false,
                uses_erlang_externals: false,
            },
            ValueConstructorVariant::ModuleFn {
                implementations, ..
            }
            | ValueConstructorVariant::ModuleConstant {
                implementations, ..
            } => *implementations,
        }
    }

    /// The field map of a record constructor; only `Record` variants have one.
    fn record_field_map(&self) -> Option<&FieldMap> {
        match self {
            ValueConstructorVariant::LocalVariable { .. }
            | ValueConstructorVariant::ModuleConstant { .. }
            | ValueConstructorVariant::ModuleFn { .. } => None,
            ValueConstructorVariant::Record { field_map, .. } => field_map.as_ref(),
        }
    }
}
// A value accessed as a member of a module: a record constructor, a
// function, or a constant.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum ModuleValueConstructor {
    Record {
        name: EcoString,
        variant_index: u16,
        arity: u16,
        type_: Arc<Type>,
        field_map: Option<FieldMap>,
        location: SrcSpan,
        documentation: Option<EcoString>,
    },
    Fn {
        location: SrcSpan,
        /// The name of the module and the function
        /// This will be the module that this constructor belongs to
        /// and the name that was used for the function.
        module: EcoString,
        name: EcoString,
        /// If this is an `external` function, these will hold the name of the
        /// external module and function.
        ///
        /// This function has module "themodule" and name "wibble"
        /// pub fn wibble() { Nil }
        ///
        /// This function has module "themodule" and name "wibble"
        /// and erlang external "other" and "whoop".
        /// @external(erlang, "other", "whoop")
        /// pub fn wibble() -> Nil
        ///
        external_erlang: Option<(EcoString, EcoString)>,
        external_javascript: Option<(EcoString, EcoString)>,
        field_map: Option<FieldMap>,
        documentation: Option<EcoString>,
        purity: Purity,
    },
    Constant {
        literal: TypedConstant,
        location: SrcSpan,
        documentation: Option<EcoString>,
    },
}
impl ModuleValueConstructor {
/// The source location of the value this constructor refers to.
pub fn location(&self) -> SrcSpan {
    // Every variant carries a location, so this let is irrefutable.
    let (ModuleValueConstructor::Fn { location, .. }
    | ModuleValueConstructor::Record { location, .. }
    | ModuleValueConstructor::Constant { location, .. }) = self;
    *location
}
/// The documentation attached to the value, if any.
pub fn get_documentation(&self) -> Option<&str> {
    // Every variant carries documentation, so this let is irrefutable.
    let (ModuleValueConstructor::Record { documentation, .. }
    | ModuleValueConstructor::Fn { documentation, .. }
    | ModuleValueConstructor::Constant { documentation, .. }) = self;
    documentation.as_deref()
}
/// Returns the purity of this value constructor if it is called as a function.
/// Referencing a module value by itself is always pure, but calling is as a
/// function might not be.
pub fn called_function_purity(&self) -> Purity {
match self {
// If we call a module constant or local variable as a function, we
// no longer have enough information to determine its purity. For
// example:
//
// ```gleam
// const function1 = io.println
// const function2 = function.identity
//
// pub fn main() {
// function1("Hello")
// function2("Hello")
// }
// ```
//
// At this point, we don't have any information about the purity of
| rust | Apache-2.0 | f424547f02e621f1c5f28749786e05eda7feb098 | 2026-01-04T15:40:22.554517Z | true |
gleam-lang/gleam | https://github.com/gleam-lang/gleam/blob/f424547f02e621f1c5f28749786e05eda7feb098/compiler-core/src/lib.rs | compiler-core/src/lib.rs | #![warn(
clippy::all,
clippy::dbg_macro,
clippy::todo,
clippy::mem_forget,
// TODO: enable once the false positive bug is solved
// clippy::use_self,
clippy::filter_map_next,
clippy::needless_continue,
clippy::needless_borrow,
clippy::match_wildcard_for_single_variants,
clippy::imprecise_flops,
clippy::suboptimal_flops,
clippy::lossy_float_literal,
clippy::rest_pat_in_fully_bound_structs,
clippy::fn_params_excessive_bools,
clippy::inefficient_to_string,
clippy::linkedlist,
clippy::macro_use_imports,
clippy::option_option,
clippy::verbose_file_reads,
clippy::unnested_or_patterns,
rust_2018_idioms,
missing_debug_implementations,
missing_copy_implementations,
trivial_casts,
trivial_numeric_casts,
nonstandard_style,
unexpected_cfgs,
unused_import_braces,
unused_qualifications,
clippy::wildcard_enum_match_arm
)]
#![deny(
clippy::await_holding_lock,
clippy::disallowed_methods,
clippy::if_let_mutex,
clippy::indexing_slicing,
clippy::mem_forget,
clippy::ok_expect,
clippy::unimplemented,
clippy::unwrap_used,
unsafe_code,
unstable_features,
unused_results
)]
#![allow(
clippy::assign_op_pattern,
clippy::to_string_trait_impl,
clippy::match_single_binding,
clippy::match_like_matches_macro,
clippy::inconsistent_struct_constructor,
clippy::len_without_is_empty,
// TODO: fix
clippy::arc_with_non_send_sync,
)]
#[cfg(test)]
#[macro_use]
extern crate pretty_assertions;
pub mod analyse;
pub mod ast;
pub mod bit_array;
pub mod build;
pub mod codegen;
pub mod config;
pub mod dependency;
pub mod diagnostic;
pub mod docs;
pub mod encryption;
pub mod erlang;
pub mod error;
pub mod fix;
pub mod format;
pub mod hex;
pub mod io;
pub mod javascript;
pub mod line_numbers;
pub mod manifest;
pub mod metadata;
pub mod package_interface;
pub mod parse;
pub mod paths;
pub mod pretty;
pub mod requirement;
pub mod strings;
pub mod type_;
pub mod uid;
pub mod version;
pub mod warning;
pub(crate) mod ast_folder;
mod call_graph;
mod dep_tree;
pub(crate) mod derivation_tree;
pub mod exhaustiveness;
pub(crate) mod graph;
pub(crate) mod inline;
pub mod reference;
pub use error::{Error, Result};
pub use warning::Warning;
const GLEAM_CORE_PACKAGE_NAME: &str = "";
pub const STDLIB_PACKAGE_NAME: &str = "gleam_stdlib";
// Code generated from a Cap'n Proto schema (presumably the module
// metadata schema used by the `metadata` module — confirm). Lints are
// silenced because the file is machine-generated.
mod schema_capnp {
    #![allow(
        dead_code,
        unused_qualifications,
        clippy::all,
        clippy::unwrap_used,
        missing_debug_implementations,
        missing_copy_implementations
    )]
    include!("../generated/schema_capnp.rs");
}
| rust | Apache-2.0 | f424547f02e621f1c5f28749786e05eda7feb098 | 2026-01-04T15:40:22.554517Z | false |
gleam-lang/gleam | https://github.com/gleam-lang/gleam/blob/f424547f02e621f1c5f28749786e05eda7feb098/compiler-core/src/line_numbers.rs | compiler-core/src/line_numbers.rs | use crate::ast::SrcSpan;
use lsp_types::Position;
use std::collections::HashMap;
/// A struct which contains information about line numbers of a source file,
/// and can convert between byte offsets that are used in the compiler and
/// line-column pairs used in LSP.
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize, PartialEq, Eq)]
pub struct LineNumbers {
    /// The byte offsets of the start of each line of the source file.
    /// The first entry is always 0 (see `new`), so line `n` (1-indexed)
    /// starts at `line_starts[n - 1]`.
    pub line_starts: Vec<u32>,
    /// The total length of the source file
    pub length: u32,
    /// A mapping of byte offsets to character length information. This is used
    /// when converting between byte indices and line-column numbers, because
    /// LSP uses UTF-16, while Rust encodes strings as UTF-8.
    ///
    /// This only contains characters which are more than one byte in UTF-8,
    /// because one byte UTF-8 characters are one UTF-16 segment also, so no
    /// translation is needed.
    ///
    /// We could store the whole source file here instead, however that would
    /// be quite wasteful. Most Gleam programs use only ASCII characters, meaning
    /// UTF-8 offsets are the same as UTF-16 ones. With this representation, we
    /// only need to store a few characters.
    ///
    /// In most programs this will be empty because they will only be using
    /// ASCII characters.
    pub mapping: HashMap<usize, Character>,
}
/// Information about how a character is encoded in UTF-8 and UTF-16.
#[derive(Debug, Clone, Copy, serde::Serialize, serde::Deserialize, PartialEq, Eq)]
pub struct Character {
    /// The number of bytes needed to encode this in UTF-8.
    pub length_utf8: u8,
    /// The number of 16-bit segments needed to encode this in UTF-16.
    pub length_utf16: u8,
}
impl LineNumbers {
    /// Builds the line-number index for the given source text.
    pub fn new(src: &str) -> Self {
        let mut line_starts = vec![0];
        line_starts.extend(src.match_indices('\n').map(|(index, _)| index as u32 + 1));
        Self {
            line_starts,
            length: src.len() as u32,
            mapping: Self::mapping(src),
        }
    }

    /// Records every character wider than one byte in UTF-8 so byte
    /// offsets can be translated to and from UTF-16 columns.
    fn mapping(src: &str) -> HashMap<usize, Character> {
        src.char_indices()
            .filter(|(_, character)| character.len_utf8() != 1)
            .map(|(index, character)| {
                (
                    index,
                    Character {
                        length_utf8: character.len_utf8() as u8,
                        length_utf16: character.len_utf16() as u8,
                    },
                )
            })
            .collect()
    }

    /// Returns the 1-indexed line number of a given byte index
    pub fn line_number(&self, byte_index: u32) -> u32 {
        let line = match self.line_starts.binary_search(&byte_index) {
            Ok(line) => line,
            Err(next_line) => next_line - 1,
        };
        line as u32 + 1
    }

    /// Returns the 1-indexed line and column number of a given byte index,
    /// using a UTF-16 character offset.
    pub fn line_and_column_number(&self, byte_index: u32) -> LineColumn {
        let line = self.line_number(byte_index);
        let line_start = self
            .line_starts
            .get(line as usize - 1)
            .copied()
            .unwrap_or_default();
        // Walk from the line start, advancing the UTF-8 and UTF-16
        // offsets in lock step one character at a time.
        let mut u8_offset = line_start;
        let mut u16_offset = 0;
        while u8_offset < byte_index {
            match self.mapping.get(&(u8_offset as usize)) {
                Some(character) => {
                    u8_offset += character.length_utf8 as u32;
                    u16_offset += character.length_utf16 as u32;
                }
                // A single-byte UTF-8 character is one UTF-16 unit too.
                None => {
                    u8_offset += 1;
                    u16_offset += 1;
                }
            }
        }
        LineColumn {
            line,
            column: u16_offset + 1,
        }
    }

    /// Returns the byte index of the corresponding LSP line-column `Position`,
    /// translating from a UTF-16 character index to a UTF-8 byte index.
    pub fn byte_index(&self, position: Position) -> u32 {
        // Positions beyond the last line clamp to the end of the source.
        let Some(&line_start) = self.line_starts.get(position.line as usize) else {
            return self.length;
        };
        let mut u8_offset = line_start;
        let mut u16_offset = 0;
        while u16_offset < position.character {
            match self.mapping.get(&(u8_offset as usize)) {
                Some(character) => {
                    u8_offset += character.length_utf8 as u32;
                    u16_offset += character.length_utf16 as u32;
                }
                None => {
                    u8_offset += 1;
                    u16_offset += 1;
                }
            }
        }
        u8_offset
    }

    /// Checks if the given span spans an entire line (excluding the newline
    /// character itself).
    pub fn spans_entire_line(&self, span: &SrcSpan) -> bool {
        let begins_a_line = self.line_starts.contains(&span.start);
        begins_a_line && self.line_starts.contains(&(span.end + 1))
    }
}
#[test]
fn byte_index() {
let src = r#"import gleam/io
pub fn main() {
io.println("Hello, world!")
}
"#;
let line_numbers = LineNumbers::new(src);
assert_eq!(
line_numbers.byte_index(Position {
line: 0,
character: 0
}),
0
);
assert_eq!(
line_numbers.byte_index(Position {
line: 0,
character: 4
}),
4
);
assert_eq!(
line_numbers.byte_index(Position {
line: 100,
character: 0
}),
src.len() as u32
);
assert_eq!(
line_numbers.byte_index(Position {
line: 2,
character: 1
}),
18
);
}
// https://github.com/gleam-lang/gleam/issues/3628
#[test]
fn byte_index_with_multibyte_characters() {
let src = r#"fn wibble(_a, _b, _c) {
todo
}
pub fn main() {
wibble("क्षि", 10, <<"abc">>)
}
"#;
let line_numbers = LineNumbers::new(src);
assert_eq!(
line_numbers.byte_index(Position {
line: 1,
character: 6
}),
30
);
assert_eq!(
line_numbers.byte_index(Position {
line: 5,
character: 2
}),
52
);
assert_eq!(
line_numbers.byte_index(Position {
line: 5,
character: 17
}),
75
);
assert_eq!(
line_numbers.byte_index(Position {
line: 6,
character: 1
}),
91
);
}
// https://github.com/gleam-lang/gleam/issues/3628
#[test]
fn line_and_column_with_multibyte_characters() {
let src = r#"fn wibble(_a, _b, _c) {
todo
}
pub fn main() {
wibble("क्षि", 10, <<"abc">>)
}
"#;
let line_numbers = LineNumbers::new(src);
assert_eq!(
line_numbers.line_and_column_number(30),
LineColumn { line: 2, column: 7 }
);
assert_eq!(
line_numbers.line_and_column_number(52),
LineColumn { line: 6, column: 3 }
);
assert_eq!(
line_numbers.line_and_column_number(75),
LineColumn {
line: 6,
column: 18
}
);
assert_eq!(
line_numbers.line_and_column_number(91),
LineColumn { line: 7, column: 2 }
);
}
/// A 1-indexed line and column position.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct LineColumn {
    pub line: u32,
    pub column: u32,
}
| rust | Apache-2.0 | f424547f02e621f1c5f28749786e05eda7feb098 | 2026-01-04T15:40:22.554517Z | false |
gleam-lang/gleam | https://github.com/gleam-lang/gleam/blob/f424547f02e621f1c5f28749786e05eda7feb098/compiler-core/src/version.rs | compiler-core/src/version.rs | /// The current version of the gleam compiler. If this does not match what is
/// already in the build folder we will not reuse any cached artifacts and
/// instead build from scratch
pub const COMPILER_VERSION: &str = env!("CARGO_PKG_VERSION");
| rust | Apache-2.0 | f424547f02e621f1c5f28749786e05eda7feb098 | 2026-01-04T15:40:22.554517Z | false |
gleam-lang/gleam | https://github.com/gleam-lang/gleam/blob/f424547f02e621f1c5f28749786e05eda7feb098/compiler-core/src/parse.rs | compiler-core/src/parse.rs | // Gleam Parser
//
// Terminology:
// Expression Unit:
// Essentially a thing that goes between operators.
// Int, Bool, function call, "{" expression-sequence "}", case x {}, ..etc
//
// Expression:
// One or more Expression Units separated by an operator
//
// Binding:
// (let|let assert|use) name (:TypeAnnotation)? = Expression
//
// Expression Sequence:
// * One or more Expressions
// * A Binding followed by at least one more Expression Sequences
//
// Naming Conventions:
// parse_x
// Parse a specific part of the grammar, not erroring if it cannot.
// Generally returns `Result<Option<A>, ParseError>`, note the inner Option
//
// expect_x
// Parse a generic or specific part of the grammar, erroring if it cannot.
// Generally returns `Result<A, ParseError>`, note no inner Option
//
// maybe_x
// Parse a generic part of the grammar. Returning `None` if it cannot.
// Returns `Some(x)` and advances the token stream if it can.
//
// Operator Precedence Parsing:
// Needs to take place in expressions and in clause guards.
// It is accomplished using the Simple Precedence Parser algorithm.
// See: https://en.wikipedia.org/wiki/Simple_precedence_parser
//
// It relies on the operator grammar being in the general form:
// e ::= expr op expr | expr
// Which just means that exprs and operators always alternate, starting with an expr
//
// The gist of the algorithm is:
// Create 2 stacks, one to hold expressions, and one to hold un-reduced operators.
// While consuming the input stream, if an expression is encountered add it to the top
// of the expression stack. If an operator is encountered, compare its precedence to the
// top of the operator stack and perform the appropriate action, which is either using an
// operator to reduce 2 expressions on the top of the expression stack or put it on the top
// of the operator stack. When the end of the input is reached, attempt to reduce all of the
// expressions down to a single expression(or no expression) using the remaining operators
// on the operator stack. If there are any operators left, or more than 1 expression left
// this is a syntax error. But the implementation here shouldn't need to handle that case
// as the outer parser ensures the correct structure.
//
pub mod error;
pub mod extra;
pub mod lexer;
mod token;
use crate::Warning;
use crate::analyse::Inferred;
use crate::ast::{
Arg, ArgNames, Assert, AssignName, Assignment, AssignmentKind, BinOp, BitArrayOption,
BitArraySegment, BitArraySize, CAPTURE_VARIABLE, CallArg, Clause, ClauseGuard, Constant,
CustomType, Definition, Function, FunctionLiteralKind, HasLocation, Import, IntOperator,
Module, ModuleConstant, Pattern, Publicity, RecordBeingUpdated, RecordConstructor,
RecordConstructorArg, RecordUpdateArg, SrcSpan, Statement, TailPattern, TargetedDefinition,
TodoKind, TypeAlias, TypeAst, TypeAstConstructor, TypeAstFn, TypeAstHole, TypeAstTuple,
TypeAstVar, UnqualifiedImport, UntypedArg, UntypedClause, UntypedClauseGuard, UntypedConstant,
UntypedDefinition, UntypedExpr, UntypedModule, UntypedPattern, UntypedRecordUpdateArg,
UntypedStatement, UntypedUseAssignment, Use, UseAssignment,
};
use crate::build::Target;
use crate::error::wrap;
use crate::exhaustiveness::CompiledCase;
use crate::parse::extra::ModuleExtra;
use crate::type_::Deprecation;
use crate::type_::error::{VariableDeclaration, VariableOrigin, VariableSyntax};
use crate::type_::expression::{Implementations, Purity};
use crate::warning::{DeprecatedSyntaxWarning, WarningEmitter};
use camino::Utf8PathBuf;
use ecow::EcoString;
use error::{LexicalError, ParseError, ParseErrorType};
use lexer::{LexResult, Spanned};
use num_bigint::BigInt;
use serde::{Deserialize, Serialize};
use std::cmp::Ordering;
use std::collections::VecDeque;
use std::hash::{Hash, Hasher};
use std::str::FromStr;
pub use token::Token;
use vec1::{Vec1, vec1};
#[cfg(test)]
mod tests;
// A successfully parsed module: the untyped AST plus the extra
// information collected while parsing (see `ModuleExtra`).
#[derive(Debug)]
pub struct Parsed {
    pub module: UntypedModule,
    pub extra: ModuleExtra,
}
/// We use this to keep track of the `@internal` annotation for top level
/// definitions. Instead of using just a boolean we want to keep track of the
/// source position of the annotation in case it is present. This way we can
/// report a better error message highlighting the annotation in case it is
/// used on a private definition (it doesn't make sense to mark something
/// private as internal):
///
/// ```txt
/// @internal
/// ^^^^^^^^^ we first get to the annotation
/// fn wibble() {}
/// ^^ and only later discover it's applied on a private definition
/// so we have to keep track of the attribute's position to highlight it
/// in the resulting error message.
/// ```
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq)]
enum InternalAttribute {
#[default]
Missing,
Present(SrcSpan),
}
// The attributes collected so far for a single top-level definition.
#[derive(Debug, Default)]
struct Attributes {
    target: Option<Target>,
    deprecated: Deprecation,
    // External implementations, one per target. The tuple presumably
    // holds (module, function, attribute location) — confirm field order
    // at the parse sites.
    external_erlang: Option<(EcoString, EcoString, SrcSpan)>,
    external_javascript: Option<(EcoString, EcoString, SrcSpan)>,
    internal: InternalAttribute,
}
impl Attributes {
    /// True when any external implementation attribute has been given;
    /// such attributes are only applicable to functions.
    fn has_function_only(&self) -> bool {
        match (&self.external_erlang, &self.external_javascript) {
            (None, None) => false,
            (Some(_), _) | (_, Some(_)) => true,
        }
    }

    /// Whether an external implementation has been given for this target.
    fn has_external_for(&self, target: Target) -> bool {
        let external = match target {
            Target::Erlang => &self.external_erlang,
            Target::JavaScript => &self.external_javascript,
        };
        external.is_some()
    }

    /// Sets (or clears) the external implementation for this target.
    fn set_external_for(&mut self, target: Target, ext: Option<(EcoString, EcoString, SrcSpan)>) {
        let slot = match target {
            Target::Erlang => &mut self.external_erlang,
            Target::JavaScript => &mut self.external_javascript,
        };
        *slot = ext;
    }
}
//
// Public Interface
//
/// A string together with its location in the source code.
pub type SpannedString = (SrcSpan, EcoString);
/// Parse the full source text of a module at `path`.
///
/// Any deprecated-syntax and detached-doc-comment warnings collected while
/// parsing are forwarded to `warnings`; hard failures are returned as a
/// `ParseError`.
pub fn parse_module(
    path: Utf8PathBuf,
    src: &str,
    warnings: &WarningEmitter,
) -> Result<Parsed, ParseError> {
    let mut parser = Parser::new(lexer::make_tokenizer(src));
    let mut parsed = parser.parse_module()?;
    parsed.extra = parser.extra;

    // The emitted warnings each carry the path and full source so they can
    // render a highlighted snippet; EcoString clones are cheap.
    let shared_src = EcoString::from(src);
    for deprecation in parser.warnings {
        warnings.emit(Warning::DeprecatedSyntax {
            path: path.clone(),
            src: shared_src.clone(),
            warning: deprecation,
        });
    }
    for location in parser.detached_doc_comments {
        warnings.emit(Warning::DetachedDocComment {
            path: path.clone(),
            src: shared_src.clone(),
            location,
        });
    }

    Ok(parsed)
}
//
// Test Interface
//
/// Parse a sequence of statements, failing if any input remains afterwards.
#[cfg(test)]
pub fn parse_statement_sequence(src: &str) -> Result<Vec1<UntypedStatement>, ParseError> {
    let mut parser = Parser::new(lexer::make_tokenizer(src));
    let parse_result = parser.parse_statement_seq();
    if let Some((statements, _)) = parser.ensure_no_errors_or_remaining_input(parse_result)? {
        Ok(statements)
    } else {
        parse_error(ParseErrorType::ExpectedExpr, SrcSpan { start: 0, end: 0 })
    }
}
//
// Test Interface
//
/// Parse a single constant value, failing if any input remains afterwards.
#[cfg(test)]
pub fn parse_const_value(src: &str) -> Result<Constant<(), ()>, ParseError> {
    let mut parser = Parser::new(lexer::make_tokenizer(src));
    let parse_result = parser.parse_const_value();
    match parser.ensure_no_errors_or_remaining_input(parse_result)? {
        Some(constant) => Ok(constant),
        None => parse_error(ParseErrorType::ExpectedExpr, SrcSpan { start: 0, end: 0 }),
    }
}
//
// Parser
//
/// A recursive-descent parser over the stream of tokens produced by the lexer.
#[derive(Debug)]
pub struct Parser<T: Iterator<Item = LexResult>> {
    /// The token stream produced by the lexer.
    tokens: T,
    /// Lexer errors collected while advancing; surfaced after parsing so that
    /// parsing itself can continue past them (see `ensure_no_errors`).
    lex_errors: Vec<LexicalError>,
    /// Deprecated-syntax warnings collected during parsing.
    warnings: Vec<DeprecatedSyntaxWarning>,
    /// Two-token lookahead window: current token and the one after it.
    tok0: Option<Spanned>,
    tok1: Option<Spanned>,
    /// Non-AST information (comments etc.) gathered while parsing.
    extra: ModuleExtra,
    /// Pending doc comments not yet attached to a definition.
    doc_comments: VecDeque<(u32, EcoString)>,
    /// Spans of doc comments that could not be attached to any definition.
    detached_doc_comments: Vec<SrcSpan>,
}
impl<T> Parser<T>
where
T: Iterator<Item = LexResult>,
{
pub fn new(input: T) -> Self {
let mut parser = Parser {
tokens: input,
lex_errors: vec![],
warnings: vec![],
tok0: None,
tok1: None,
extra: ModuleExtra::new(),
doc_comments: VecDeque::new(),
detached_doc_comments: Vec::new(),
};
parser.advance();
parser.advance();
parser
}
fn parse_module(&mut self) -> Result<Parsed, ParseError> {
let definitions = Parser::series_of(self, &Parser::parse_definition, None);
let definitions = self.ensure_no_errors_or_remaining_input(definitions)?;
let module = Module {
name: "".into(),
documentation: vec![],
type_info: (),
definitions,
names: Default::default(),
unused_definition_positions: Default::default(),
};
Ok(Parsed {
module,
extra: Default::default(),
})
}
// The way the parser is currently implemented, it cannot exit immediately while advancing
// the token stream upon seeing a LexError. That is to avoid having to put `?` all over the
// place and instead we collect LexErrors in `self.lex_errors` and attempt to continue parsing.
// Once parsing has returned we want to surface an error in the order:
// 1) LexError, 2) ParseError, 3) More Tokens Left
fn ensure_no_errors_or_remaining_input<A>(
&mut self,
parse_result: Result<A, ParseError>,
) -> Result<A, ParseError> {
let parse_result = self.ensure_no_errors(parse_result)?;
if let Some((start, token, end)) = self.next_tok() {
// there are still more tokens
let expected = vec!["An import, const, type, or function.".into()];
return parse_error(
ParseErrorType::UnexpectedToken {
token,
expected,
hint: None,
},
SrcSpan { start, end },
);
}
// no errors
Ok(parse_result)
}
// The way the parser is currently implemented, it cannot exit immediately
// while advancing the token stream upon seeing a LexError. That is to avoid
// having to put `?` all over the place and instead we collect LexErrors in
// `self.lex_errors` and attempt to continue parsing.
// Once parsing has returned we want to surface an error in the order:
// 1) LexError, 2) ParseError
fn ensure_no_errors<A>(
&mut self,
parse_result: Result<A, ParseError>,
) -> Result<A, ParseError> {
if let Some(error) = self.lex_errors.first() {
// Lex errors first
let location = error.location;
let error = *error;
parse_error(ParseErrorType::LexError { error }, location)
} else {
// Return any existing parse error
parse_result
}
}
    /// Parse one top level definition (import, const, function, or custom
    /// type / type alias), together with any attributes that precede it.
    ///
    /// Returns `Ok(None)` when the next tokens do not start a definition.
    fn parse_definition(&mut self) -> Result<Option<TargetedDefinition>, ParseError> {
        let mut attributes = Attributes::default();
        // `location` is the span of the parsed attributes (if any); it is
        // used below to point attribute-related errors at the attributes.
        let location = self.parse_attributes(&mut attributes)?;
        let def = match (self.tok0.take(), self.tok1.as_ref()) {
            // Imports
            (Some((start, Token::Import, _)), _) => {
                self.advance();
                self.parse_import(start)
            }
            // Module Constants
            (Some((start, Token::Const, _)), _) => {
                self.advance();
                self.parse_module_const(start, false, &attributes)
            }
            (Some((start, Token::Pub, _)), Some((_, Token::Const, _))) => {
                self.advance();
                self.advance();
                self.parse_module_const(start, true, &attributes)
            }
            // Function
            (Some((start, Token::Fn, _)), _) => {
                self.advance();
                self.parse_function(start, false, false, &mut attributes)
            }
            (Some((start, Token::Pub, _)), Some((_, Token::Fn, _))) => {
                self.advance();
                self.advance();
                self.parse_function(start, true, false, &mut attributes)
            }
            // Custom Types, and Type Aliases
            (Some((start, Token::Type, _)), _) => {
                self.advance();
                self.parse_custom_type(start, false, false, &mut attributes)
            }
            (Some((start, Token::Pub, _)), Some((_, Token::Opaque, _))) => {
                self.advance();
                self.advance();
                let _ = self.expect_one(&Token::Type)?;
                self.parse_custom_type(start, true, true, &mut attributes)
            }
            (Some((start, Token::Pub, _)), Some((_, Token::Type, _))) => {
                self.advance();
                self.advance();
                self.parse_custom_type(start, true, false, &mut attributes)
            }
            (Some((start, Token::Opaque, _)), Some((_, Token::Type, _))) => {
                // A private opaque type makes no sense! We still want to parse it
                // and return an error later during the analysis phase.
                self.advance();
                self.advance();
                self.parse_custom_type(start, false, true, &mut attributes)
            }
            // Not the start of a definition: restore the taken token.
            (t0, _) => {
                self.tok0 = t0;
                Ok(None)
            }
        }?;
        // Validate the attribute/definition combination. Arm order matters:
        // later arms assume the earlier ones did not match.
        match (def, location) {
            // Functions and custom types accept every attribute kind.
            (Some(definition), _) if definition.is_function() || definition.is_custom_type() => {
                Ok(Some(TargetedDefinition {
                    definition,
                    target: attributes.target,
                }))
            }
            // No attributes were present: nothing to validate.
            (Some(definition), None) => Ok(Some(TargetedDefinition {
                definition,
                target: attributes.target,
            })),
            // A function-only attribute (an external implementation) was used
            // on something that is not a function.
            (_, Some(location)) if attributes.has_function_only() => {
                parse_error(ParseErrorType::ExpectedFunctionDefinition, location)
            }
            (Some(definition), _) => Ok(Some(TargetedDefinition {
                definition,
                target: attributes.target,
            })),
            // Attributes with no definition following them.
            (_, Some(location)) => parse_error(ParseErrorType::ExpectedDefinition, location),
            (None, None) => Ok(None),
        }
    }
    //
    // Parse Expressions
    //

    // examples:
    //   unit
    //   unit op unit
    //   unit op unit pipe unit(call)
    //   unit op unit pipe unit(call) pipe unit(call)
    /// Parse a complete expression outside of a `let` binding.
    /// Returns `Ok(None)` when no expression starts at the current token.
    fn parse_expression(&mut self) -> Result<Option<UntypedExpr>, ParseError> {
        self.parse_expression_inner(false)
    }
    /// Parse a full expression using a simple operator-precedence algorithm:
    /// expression units are pushed onto `estack` and binary operators onto
    /// `opstack`; `handle_op` reduces the stacks according to each operator's
    /// precedence.
    ///
    /// `is_let_binding` is forwarded to `post_process_expression_unit` so a
    /// list followed by `=` outside a `let` gets a dedicated error.
    fn parse_expression_inner(
        &mut self,
        is_let_binding: bool,
    ) -> Result<Option<UntypedExpr>, ParseError> {
        // uses the simple operator parser algorithm
        let mut opstack = vec![];
        let mut estack = vec![];
        // Span of the most recently consumed operator, used for the
        // "operator with no right operand" error below.
        let mut last_op_start = 0;
        let mut last_op_end = 0;
        // This is used to keep track if we've just ran into a `|>` operator in
        // order to properly parse an echo based on its position: if it is in a
        // pipeline then it isn't expected to be followed by an expression.
        // Otherwise, it's expected to be followed by an expression.
        let mut expression_unit_context = ExpressionUnitContext::Other;
        loop {
            match self.parse_expression_unit(expression_unit_context)? {
                Some(unit) => {
                    self.post_process_expression_unit(&unit, is_let_binding)?;
                    estack.push(unit)
                }
                // No unit and nothing parsed yet: there is no expression here.
                _ if estack.is_empty() => return Ok(None),
                // An operator was consumed but no expression follows it.
                _ => {
                    return parse_error(
                        ParseErrorType::OpNakedRight,
                        SrcSpan {
                            start: last_op_start,
                            end: last_op_end,
                        },
                    );
                }
            }
            // Stop (restoring the token) if what follows is not an operator.
            let Some((op_s, t, op_e)) = self.tok0.take() else {
                break;
            };
            let Some(p) = precedence(&t) else {
                self.tok0 = Some((op_s, t, op_e));
                break;
            };
            expression_unit_context = if t == Token::Pipe {
                ExpressionUnitContext::FollowingPipe
            } else {
                ExpressionUnitContext::Other
            };
            // Is Op
            self.advance();
            last_op_start = op_s;
            last_op_end = op_e;
            let _ = handle_op(
                Some(((op_s, t, op_e), p)),
                &mut opstack,
                &mut estack,
                &do_reduce_expression,
            );
        }
        // Reduce whatever remains on the stacks into a single expression.
        Ok(handle_op(
            None,
            &mut opstack,
            &mut estack,
            &do_reduce_expression,
        ))
    }
fn post_process_expression_unit(
&mut self,
unit: &UntypedExpr,
is_let_binding: bool,
) -> Result<(), ParseError> {
// Produce better error message for `[x] = [1]` outside
// of `let` statement.
if !is_let_binding
&& let UntypedExpr::List { .. } = unit
&& let Some((start, Token::Equal, end)) = self.tok0
{
return parse_error(ParseErrorType::NoLetBinding, SrcSpan { start, end });
}
Ok(())
}
// examples:
// 1
// "one"
// True
// fn() { "hi" }
// unit().unit().unit()
// A(a.., label: tuple(1))
// { expression_sequence }
fn parse_expression_unit(
&mut self,
context: ExpressionUnitContext,
) -> Result<Option<UntypedExpr>, ParseError> {
let mut expr = match self.tok0.take() {
Some((start, Token::String { value }, end)) => {
self.advance();
UntypedExpr::String {
location: SrcSpan { start, end },
value,
}
}
Some((start, Token::Int { value, int_value }, end)) => {
self.advance();
UntypedExpr::Int {
location: SrcSpan { start, end },
value,
int_value,
}
}
Some((start, Token::Float { value, float_value }, end)) => {
self.advance();
UntypedExpr::Float {
location: SrcSpan { start, end },
value,
float_value,
}
}
// var lower_name and UpName
Some((start, Token::Name { name } | Token::UpName { name }, end)) => {
self.advance();
UntypedExpr::Var {
location: SrcSpan { start, end },
name,
}
}
Some((start, Token::Todo, end)) => {
self.advance();
let message = self.maybe_parse_as_message()?;
let end = message.as_ref().map_or(end, |m| m.location().end);
UntypedExpr::Todo {
location: SrcSpan { start, end },
kind: TodoKind::Keyword,
message,
}
}
Some((start, Token::Panic, end)) => {
self.advance();
let message = self.maybe_parse_as_message()?;
let end = message.as_ref().map_or(end, |m| m.location().end);
UntypedExpr::Panic {
location: SrcSpan { start, end },
message,
}
}
Some((start, Token::Echo, echo_end)) => {
self.advance();
if context == ExpressionUnitContext::FollowingPipe {
// If an echo is used as a step in a pipeline (`|> echo`)
// then it cannot be followed by an expression.
let message = self.maybe_parse_as_message()?;
let end = message.as_ref().map_or(echo_end, |m| m.location().end);
UntypedExpr::Echo {
location: SrcSpan { start, end },
keyword_end: echo_end,
expression: None,
message,
}
} else {
// Otherwise it must be followed by an expression.
// However, you might have noticed we're not erroring if the
// expression is not there. Instead we move this error to
// the analysis phase so that a wrong usage of echo won't
// stop analysis from happening everywhere and be fault
// tolerant like everything else.
let expression = self.parse_expression()?;
let end = expression.as_ref().map_or(echo_end, |e| e.location().end);
let message = self.maybe_parse_as_message()?;
let end = message.as_ref().map_or(end, |m| m.location().end);
UntypedExpr::Echo {
location: SrcSpan { start, end },
keyword_end: echo_end,
expression: expression.map(Box::new),
message,
}
}
}
Some((start, Token::Hash, _)) => {
self.advance();
let _ = self
.expect_one(&Token::LeftParen)
.map_err(|e| self.add_comment_style_hint(e))?;
let elements =
Parser::series_of(self, &Parser::parse_expression, Some(&Token::Comma))?;
let (_, end) =
self.expect_one_following_series(&Token::RightParen, "an expression")?;
UntypedExpr::Tuple {
location: SrcSpan { start, end },
elements,
}
}
// list
Some((start, Token::LeftSquare, _)) => {
self.advance();
let (elements, elements_end_with_comma) = self.series_of_has_trailing_separator(
&Parser::parse_expression,
Some(&Token::Comma),
)?;
// Parse an optional tail
let mut tail = None;
let mut elements_after_tail = None;
let mut dot_dot_location = None;
if let Some((start, end)) = self.maybe_one(&Token::DotDot) {
dot_dot_location = Some((start, end));
tail = self.parse_expression()?.map(Box::new);
if self.maybe_one(&Token::Comma).is_some() {
// See if there's a list of items after the tail,
// like `[..wibble, wobble, wabble]`
let elements =
self.series_of(&Parser::parse_expression, Some(&Token::Comma));
match elements {
Err(_) => {}
Ok(elements) => {
elements_after_tail = Some(elements);
}
};
};
if tail.is_some() {
if !elements_end_with_comma {
self.warnings
.push(DeprecatedSyntaxWarning::DeprecatedListPrepend {
location: SrcSpan { start, end },
});
}
// Give a better error when there is two consecutive spreads
// like `[..wibble, ..wabble, woo]`. However, if there's other
// elements after the tail of the list
if let Some((second_start, second_end)) = self.maybe_one(&Token::DotDot) {
let _second_tail = self.parse_expression();
if elements_after_tail.is_none()
|| elements_after_tail
.as_ref()
.is_some_and(|vec| vec.is_empty())
{
return parse_error(
ParseErrorType::ListSpreadWithAnotherSpread {
first_spread_location: SrcSpan { start, end },
},
SrcSpan {
start: second_start,
end: second_end,
},
);
}
}
}
}
let (_, end) = self.expect_one(&Token::RightSquare)?;
// Return errors for malformed lists
match dot_dot_location {
Some((start, end)) if tail.is_none() => {
return parse_error(
ParseErrorType::ListSpreadWithoutTail,
SrcSpan { start, end },
);
}
_ => {}
}
if tail.is_some()
&& elements.is_empty()
&& elements_after_tail.as_ref().is_none_or(|e| e.is_empty())
{
return parse_error(
ParseErrorType::ListSpreadWithoutElements,
SrcSpan { start, end },
);
}
match elements_after_tail {
Some(elements) if !elements.is_empty() => {
let (start, end) = match (dot_dot_location, tail) {
(Some((start, _)), Some(tail)) => (start, tail.location().end),
(_, _) => (start, end),
};
return parse_error(
ParseErrorType::ListSpreadFollowedByElements,
SrcSpan { start, end },
);
}
_ => {}
}
UntypedExpr::List {
location: SrcSpan { start, end },
elements,
tail,
}
}
// BitArray
Some((start, Token::LtLt, _)) => {
self.advance();
let segments = Parser::series_of(
self,
&|s| {
Parser::parse_bit_array_segment(
s,
&(|this| this.parse_expression_unit(ExpressionUnitContext::Other)),
&Parser::expect_expression,
&bit_array_expr_int,
)
},
Some(&Token::Comma),
)?;
let (_, end) =
self.expect_one_following_series(&Token::GtGt, "a bit array segment")?;
UntypedExpr::BitArray {
location: SrcSpan { start, end },
segments,
}
}
Some((start, Token::Fn, _)) => {
self.advance();
let mut attributes = Attributes::default();
match self.parse_function(start, false, true, &mut attributes)? {
Some(Definition::Function(Function {
location,
arguments,
body,
return_annotation,
end_position,
..
})) => {
let Ok(body) = Vec1::try_from_vec(body) else {
return parse_error(ParseErrorType::ExpectedFunctionBody, location);
};
UntypedExpr::Fn {
location: SrcSpan::new(location.start, end_position),
end_of_head_byte_index: location.end,
kind: FunctionLiteralKind::Anonymous { head: location },
arguments,
body,
return_annotation,
}
}
_ => {
// this isn't just none, it could also be Some(UntypedExpr::..)
return self.next_tok_unexpected(vec!["An opening parenthesis.".into()]);
}
}
}
// expression block "{" "}"
Some((start, Token::LeftBrace, _)) => {
self.advance();
self.parse_block(start)?
}
// case
Some((start, Token::Case, case_e)) => {
self.advance();
let subjects =
Parser::series_of(self, &Parser::parse_expression, Some(&Token::Comma))?;
if self.maybe_one(&Token::LeftBrace).is_some() {
let clauses = Parser::series_of(self, &Parser::parse_case_clause, None)?;
let (_, end) =
self.expect_one_following_series(&Token::RightBrace, "a case clause")?;
if subjects.is_empty() {
return parse_error(
ParseErrorType::ExpectedExpr,
SrcSpan { start, end: case_e },
);
} else {
UntypedExpr::Case {
location: SrcSpan { start, end },
subjects,
clauses: Some(clauses),
}
}
} else {
UntypedExpr::Case {
location: SrcSpan::new(
start,
subjects
.last()
.map(|subject| subject.location().end)
.unwrap_or(case_e),
),
subjects,
clauses: None,
}
}
}
// Helpful error if trying to write an if expression instead of a
// case.
Some((start, Token::If, end)) => {
return parse_error(ParseErrorType::IfExpression, SrcSpan { start, end });
}
// Helpful error on possibly trying to group with "(".
Some((start, Token::LeftParen, _)) => {
return parse_error(ParseErrorType::ExprLparStart, SrcSpan { start, end: start });
}
// Boolean negation
Some((start, Token::Bang, _end)) => {
self.advance();
match self.parse_expression_unit(ExpressionUnitContext::Other)? {
Some(value) => UntypedExpr::NegateBool {
location: SrcSpan {
start,
end: value.location().end,
},
value: Box::from(value),
},
None => {
return parse_error(
ParseErrorType::ExpectedExpr,
SrcSpan { start, end: start },
);
}
}
}
// Int negation
Some((start, Token::Minus, _end)) => {
self.advance();
match self.parse_expression_unit(ExpressionUnitContext::Other)? {
Some(value) => UntypedExpr::NegateInt {
location: SrcSpan {
start,
end: value.location().end,
},
value: Box::from(value),
},
None => {
return parse_error(
ParseErrorType::ExpectedExpr,
SrcSpan { start, end: start },
);
}
}
}
t0 => {
self.tok0 = t0;
| rust | Apache-2.0 | f424547f02e621f1c5f28749786e05eda7feb098 | 2026-01-04T15:40:22.554517Z | true |
gleam-lang/gleam | https://github.com/gleam-lang/gleam/blob/f424547f02e621f1c5f28749786e05eda7feb098/compiler-core/src/dependency.rs | compiler-core/src/dependency.rs | use std::{cell::RefCell, cmp::Reverse, collections::HashMap, rc::Rc};
use crate::{Error, Result, manifest};
use ecow::EcoString;
use hexpm::{
Dependency, Release,
version::{Range, Version},
};
use pubgrub::{Dependencies, Map};
use thiserror::Error;
pub type PackageVersions = HashMap<String, Version>;
type PubgrubRange = pubgrub::Range<Version>;
/// Resolve concrete versions for the project's dependencies.
///
/// The project itself is modelled as a virtual root package at version
/// `0.0.0` whose requirements are the given `dependencies` plus hard
/// requirements for every already-`locked` package. `provided_packages` are
/// local/git packages that are not fetched from the registry.
///
/// Returns the chosen version for each package (excluding the virtual root),
/// or an error if resolution fails or a requirement conflicts with a locked
/// version.
pub fn resolve_versions<Requirements>(
    package_fetcher: &impl PackageFetcher,
    provided_packages: HashMap<EcoString, hexpm::Package>,
    root_name: EcoString,
    dependencies: Requirements,
    locked: &HashMap<EcoString, Version>,
) -> Result<PackageVersions>
where
    Requirements: Iterator<Item = (EcoString, Range)>,
{
    tracing::info!("resolving_versions");
    let root_version = Version::new(0, 0, 0);
    let requirements = root_dependencies(dependencies, locked)?;
    // Map of all the required packages that were given an exact version
    // requirement; these get special handling in `choose_version`.
    let exact_deps = &requirements
        .iter()
        .filter_map(|(name, dep)| {
            parse_exact_version(dep.requirement.as_str()).map(|version| (name.clone(), version))
        })
        .collect();
    let root = hexpm::Package {
        name: root_name.as_str().into(),
        repository: "local".into(),
        releases: vec![Release {
            version: root_version.clone(),
            outer_checksum: vec![],
            retirement_status: None,
            requirements,
            meta: (),
        }],
    };
    // Run pubgrub's resolution, then drop the virtual root from the result.
    let packages = pubgrub::resolve(
        &DependencyProvider::new(package_fetcher, provided_packages, root, locked, exact_deps),
        root_name.as_str().into(),
        root_version,
    )
    .map_err(|error| Error::dependency_resolution_failed(error, root_name.clone()))?
    .into_iter()
    .filter(|(name, _)| name.as_str() != root_name.as_str())
    .collect();
    Ok(packages)
}
/**
* Used to compare 2 versions of a package.
*/
pub type PackageVersionDiffs = HashMap<String, (Version, Version)>;
/// Compare each resolved package against the newest stable release the
/// registry offers and report those that could be upgraded.
///
/// When `check_major_versions` is true only a new *major* version counts as
/// an update; otherwise any newer (non pre-release) version does. Packages
/// whose metadata cannot be fetched are silently skipped: this is a
/// best-effort advisory check.
fn resolve_versions_diffs(
    package_fetcher: &impl PackageFetcher,
    versions: PackageVersions,
    check_major_versions: bool,
) -> PackageVersionDiffs {
    versions
        .iter()
        .filter_map(|(package, version)| {
            let hex_package = package_fetcher.get_dependencies(package).ok()?;
            // Newest non pre-release version available in the registry.
            let latest = hex_package
                .releases
                .iter()
                .map(|release| &release.version)
                .filter(|version| !version.is_pre())
                .max()?;
            // If we're checking for major version updates, only include the
            // package if a new major version is available. Otherwise, include
            // the package if there is any new version available.
            let update_available = if check_major_versions {
                latest.major > version.major
            } else {
                latest > version
            };
            if !update_available {
                return None;
            }
            Some((package.to_string(), (version.clone(), latest.clone())))
        })
        .collect()
}
/// Check for major version updates for direct dependencies that are being blocked by some version
/// constraints.
pub fn check_for_major_version_updates(
manifest: &manifest::Manifest,
package_fetcher: &impl PackageFetcher,
) -> PackageVersionDiffs {
let versions: PackageVersions = manifest
.packages
.iter()
.filter(|manifest_package| {
manifest
.requirements
.iter()
.any(|(required_pkg, _)| manifest_package.name == *required_pkg)
})
.map(|manifest_pkg| (manifest_pkg.name.to_string(), manifest_pkg.version.clone()))
.collect();
resolve_versions_diffs(package_fetcher, versions, true)
}
/// Check for version updates for direct and transitive dependencies that are
/// being blocked by some version constraints.
pub fn check_for_version_updates(
    manifest: &manifest::Manifest,
    package_fetcher: &impl PackageFetcher,
) -> PackageVersionDiffs {
    // Only Hex-sourced packages can be checked against the registry.
    let versions = manifest
        .packages
        .iter()
        .filter_map(|manifest_pkg| match &manifest_pkg.source {
            manifest::ManifestPackageSource::Hex { .. } => {
                Some((manifest_pkg.name.to_string(), manifest_pkg.version.clone()))
            }
            _ => None,
        })
        .collect();
    resolve_versions_diffs(package_fetcher, versions, false)
}
/// If the string would parse to an exact version then return the version.
///
/// A requirement counts as exact when it is written as `== 1.2.3` or as a
/// bare version such as `1.2.3`.
fn parse_exact_version(ver: &str) -> Option<Version> {
    let version = ver.trim();
    if let Some(rest) = version.strip_prefix("==") {
        // Only the leading `==` marker is removed. (Using `replace` here
        // would strip `==` occurring *anywhere* in the string and could
        // accept garbage such as `1.==2.3` as the exact version `1.2.3`.)
        Version::parse(rest.trim()).ok()
    } else if version
        .as_bytes()
        .first()
        .is_some_and(|byte| byte.is_ascii_digit())
    {
        // A bare version number with no comparison operator.
        Version::parse(version).ok()
    } else {
        None
    }
}
/// Build the virtual root package's requirement map.
///
/// Every locked package becomes a hard requirement pinned to its locked
/// version. Each base requirement is then either added as-is (when the
/// package is not locked) or checked for compatibility with the locked
/// version, producing `Error::IncompatibleLockedVersion` on conflict.
fn root_dependencies<Requirements>(
    base_requirements: Requirements,
    locked: &HashMap<EcoString, Version>,
) -> Result<HashMap<String, Dependency>, Error>
where
    Requirements: Iterator<Item = (EcoString, Range)>,
{
    // Record all of the already locked versions as hard requirements
    let mut requirements: HashMap<_, _> = locked
        .iter()
        .map(|(name, version)| {
            (
                name.to_string(),
                Dependency {
                    app: None,
                    optional: false,
                    repository: None,
                    requirement: version.clone().into(),
                },
            )
        })
        .collect();
    for (name, range) in base_requirements {
        match locked.get(&name) {
            // If the package was not already locked then we can use the
            // specified version requirement without modification.
            None => {
                let _ = requirements.insert(
                    name.into(),
                    Dependency {
                        app: None,
                        optional: false,
                        repository: None,
                        requirement: range,
                    },
                );
            }
            // If the version was locked we verify that the requirement is
            // compatible with the locked version.
            Some(locked_version) => {
                let compatible = range.to_pubgrub().contains(locked_version);
                if !compatible {
                    return Err(Error::IncompatibleLockedVersion {
                        error: format!(
                            "{name} is specified with the requirement `{range}`, \
                but it is locked to {locked_version}, which is incompatible.",
                        ),
                    });
                }
            }
        };
    }
    Ok(requirements)
}
/// Abstraction over fetching package metadata (releases and their
/// requirements) from a package repository such as Hex.
pub trait PackageFetcher {
    /// Look up a package by name, returning its releases and requirements.
    fn get_dependencies(&self, package: &str) -> Result<Rc<hexpm::Package>, PackageFetchError>;
}
/// Errors that can occur while fetching package metadata from a registry.
#[derive(Debug, Error)]
pub enum PackageFetchError {
    /// The requested package does not exist in the repository.
    #[error("The package {0} was not found in the package repository")]
    NotFoundError(String),
    /// Any other error reported by the Hex API.
    #[error("{0}")]
    ApiError(hexpm::ApiError),
    /// A transport/IO style failure, stored as its display text.
    #[error("{0}")]
    FetchError(String),
}
impl PackageFetchError {
    /// Wrap an arbitrary error into the generic `FetchError` variant using
    /// its `Display` output.
    pub fn fetch_error<T: std::error::Error>(err: T) -> Self {
        Self::FetchError(err.to_string())
    }
    /// Convert a Hex API error, special-casing `NotFound` so the missing
    /// package's name is preserved in the resulting error.
    pub fn from_api_error(api_error: hexpm::ApiError, package: &str) -> Self {
        match &api_error {
            hexpm::ApiError::NotFound => Self::NotFoundError(package.to_string()),
            // Deliberately exhaustive: adding a new `ApiError` variant should
            // force a decision here rather than fall through silently.
            hexpm::ApiError::Json(_)
            | hexpm::ApiError::Io(_)
            | hexpm::ApiError::InvalidProtobuf(_)
            | hexpm::ApiError::UnexpectedResponse(_, _)
            | hexpm::ApiError::RateLimited
            | hexpm::ApiError::InvalidCredentials
            | hexpm::ApiError::InvalidPackageNameFormat(_)
            | hexpm::ApiError::IncorrectPayloadSignature
            | hexpm::ApiError::InvalidVersionFormat(_)
            | hexpm::ApiError::InvalidVersionRequirementFormat(_)
            | hexpm::ApiError::IncorrectChecksum
            | hexpm::ApiError::InvalidApiKey
            | hexpm::ApiError::Forbidden
            | hexpm::ApiError::NotReplacing
            | hexpm::ApiError::LateModification => Self::ApiError(api_error),
        }
    }
}
/// The `pubgrub` dependency provider used for resolution.
///
/// The solver only hands us `&self`, so mutable state (the package metadata
/// cache and pending optional-dependency constraints) lives in `RefCell`s.
#[derive(Debug)]
pub struct DependencyProvider<'a, T: PackageFetcher> {
    /// Cache of package metadata, filled lazily from `remote`.
    packages: RefCell<HashMap<EcoString, hexpm::Package>>,
    /// The registry to fetch unknown packages from.
    remote: &'a T,
    /// Already-locked versions; retired releases are only usable when locked.
    locked: &'a HashMap<EcoString, Version>,
    // Map of packages where an exact version was requested
    // We need this because by default pubgrub checks exact version by checking if a version is between the exact
    // and the version 1 bump ahead. That default breaks on prerelease builds since a bump includes the whole patch
    exact_only: &'a HashMap<String, Version>,
    /// Accumulated constraints for optional dependencies that have been seen
    /// but are not (yet) required by any selected package.
    optional_dependencies: RefCell<HashMap<EcoString, pubgrub::Range<Version>>>,
}
impl<'a, T> DependencyProvider<'a, T>
where
    T: PackageFetcher,
{
    /// Create a provider over the given packages, registering the virtual
    /// root package so pubgrub can resolve from it.
    fn new(
        remote: &'a T,
        mut packages: HashMap<EcoString, hexpm::Package>,
        root: hexpm::Package,
        locked: &'a HashMap<EcoString, Version>,
        exact_only: &'a HashMap<String, Version>,
    ) -> Self {
        let _ = packages.insert(root.name.as_str().into(), root);
        Self {
            packages: RefCell::new(packages),
            locked,
            remote,
            exact_only,
            optional_dependencies: RefCell::new(Default::default()),
        }
    }

    /// Download information about the package from the registry into the local
    /// store. Does nothing if the packages are already known.
    ///
    /// Package versions are sorted from newest to oldest, with all pre-releases
    /// at the end to ensure that a non-prerelease version will be picked first
    /// if there is one.
    //
    fn ensure_package_fetched(
        // We would like to use `&mut self` but the pubgrub library enforces
        // `&self` with interior mutability.
        &self,
        name: &str,
    ) -> Result<(), PackageFetchError> {
        let mut packages = self.packages.borrow_mut();
        if !packages.contains_key(name) {
            let package = self.remote.get_dependencies(name)?;
            // mut (therefore clone) is required here in order to sort the releases
            let mut package = (*package).clone();
            // Sort the packages from newest to oldest, pres after all others
            package.releases.sort_by(|a, b| a.version.cmp(&b.version));
            package.releases.reverse();
            let (pre, mut norm): (_, Vec<_>) = package
                .releases
                .into_iter()
                .partition(|r| r.version.is_pre());
            norm.extend(pre);
            package.releases = norm;
            let _ = packages.insert(name.into(), package);
        }
        Ok(())
    }
}
type PackageName = String;
pub type ResolutionError<'a, T> = pubgrub::PubGrubError<DependencyProvider<'a, T>>;
impl<T> pubgrub::DependencyProvider for DependencyProvider<'_, T>
where
    T: PackageFetcher,
{
    // Look up the dependencies of `package`@`version`, fetching the package's
    // metadata from the registry first if it has not been seen yet.
    fn get_dependencies(
        &self,
        package: &Self::P,
        version: &Self::V,
    ) -> Result<Dependencies<Self::P, Self::VS, Self::M>, Self::Err> {
        self.ensure_package_fetched(package)?;
        let packages = self.packages.borrow();
        // Find the release matching the exact requested version.
        let release = match packages
            .get(package.as_str())
            .into_iter()
            .flat_map(|p| p.releases.iter())
            .find(|r| &r.version == version)
        {
            Some(release) => release,
            None => {
                return Ok(Dependencies::Unavailable(format!(
                    "{package}@{version} is not available"
                )));
            }
        };
        // Only use retired versions if they have been locked
        if release.is_retired() && self.locked.get(package.as_str()) != Some(version) {
            return Ok(Dependencies::Unavailable(format!(
                "{package}@{version} is retired"
            )));
        }
        let mut deps: Map<PackageName, PubgrubRange> = Default::default();
        for (name, d) in &release.requirements {
            let mut range = d.requirement.to_pubgrub().clone();
            let mut opt_deps = self.optional_dependencies.borrow_mut();
            // if it's optional and it was not provided yet, store and skip
            if d.optional && !packages.contains_key(name.as_str()) {
                let _ = opt_deps
                    .entry(name.into())
                    .and_modify(|stored_range| {
                        // Merge the constraints from every package that lists
                        // this optional dependency.
                        *stored_range = range.intersection(stored_range);
                    })
                    .or_insert(range);
                continue;
            }
            // if a now required dep was optional before, add back the constraints
            if let Some(other_range) = opt_deps.remove(name.as_str()) {
                range = range.intersection(&other_range);
            }
            let _ = deps.insert(name.clone(), range);
        }
        Ok(Dependencies::Available(deps))
    }

    // Decide packages with the fewest matching candidate versions first: the
    // priority is the reversed count of the releases contained in `range`.
    fn prioritize(
        &self,
        package: &Self::P,
        range: &Self::VS,
        _package_conflicts_counts: &pubgrub::PackageResolutionStatistics,
    ) -> Self::Priority {
        Reverse(
            self.packages
                .borrow()
                .get(package.as_str())
                .cloned()
                .into_iter()
                .flat_map(|p| {
                    p.releases
                        .into_iter()
                        .filter(|r| range.contains(&r.version))
                })
                .count(),
        )
    }

    // Pick a version for `package` within `range`, preferring the newest
    // stable release and only considering pre-releases as a last resort.
    fn choose_version(
        &self,
        package: &Self::P,
        range: &Self::VS,
    ) -> std::result::Result<Option<Self::V>, Self::Err> {
        self.ensure_package_fetched(package)?;
        let exact_package = self.exact_only.get(package);
        let potential_versions = self
            .packages
            .borrow()
            .get(package.as_str())
            .cloned()
            .into_iter()
            .flat_map(move |p| {
                p.releases
                    .into_iter()
                    // if an exact version of a package is specified then we only want to allow that version as available
                    .filter_map(move |release| match exact_package {
                        Some(ver) => (ver == &release.version).then_some(release.version),
                        _ => Some(release.version),
                    })
            })
            .filter(|v| range.contains(v));
        match potential_versions.clone().filter(|v| !v.is_pre()).max() {
            // Don't resolve to a pre-release package unless we *have* to
            Some(v) => Ok(Some(v)),
            None => Ok(potential_versions.max()),
        }
    }

    type P = PackageName;
    type V = Version;
    type VS = PubgrubRange;
    type Priority = Reverse<usize>;
    type M = String;
    type Err = PackageFetchError;
}
#[cfg(test)]
mod tests {
use hexpm::RetirementStatus;
use crate::{
derivation_tree::DerivationTreePrinter,
manifest::{Base16Checksum, ManifestPackage, ManifestPackageSource},
requirement,
};
use super::*;
    /// A fake registry backed by an in-memory map, standing in for the real
    /// Hex API in these tests.
    struct Remote {
        deps: HashMap<String, Rc<hexpm::Package>>,
    }
    impl PackageFetcher for Remote {
        fn get_dependencies(&self, package: &str) -> Result<Rc<hexpm::Package>, PackageFetchError> {
            self.deps
                .get(package)
                .map(Rc::clone)
                .ok_or(PackageFetchError::NotFoundError(package.to_string()))
        }
    }
    /// Build the in-memory registry fixture shared by the tests below.
    fn make_remote() -> Remote {
        remote(vec![
            (
                "gleam_stdlib",
                vec![
                    release("0.1.0", vec![]),
                    release("0.2.0", vec![]),
                    release("0.2.2", vec![]),
                    release("0.3.0", vec![]),
                ],
            ),
            (
                "gleam_otp",
                vec![
                    release("0.1.0", vec![("gleam_stdlib", ">= 0.1.0")]),
                    release("0.2.0", vec![("gleam_stdlib", ">= 0.1.0")]),
                    release("0.3.0-rc1", vec![("gleam_stdlib", ">= 0.1.0")]),
                    release("0.3.0-rc2", vec![("gleam_stdlib", ">= 0.1.0")]),
                ],
            ),
            (
                "package_with_retired",
                vec![
                    release("0.1.0", vec![]),
                    retired_release(
                        "0.2.0",
                        vec![],
                        hexpm::RetirementReason::Security,
                        "it's bad",
                    ),
                ],
            ),
            (
                // The `gleam_stdlib` requirement here is an *optional* one.
                "package_with_optional",
                vec![release_with_optional(
                    "0.1.0",
                    vec![],
                    vec![("gleam_stdlib", ">= 0.1.0 and < 0.3.0")],
                )],
            ),
            (
                "direct_pkg_with_major_version",
                vec![
                    release("0.1.0", vec![("gleam_stdlib", ">= 0.1.0 and < 0.3.0")]),
                    release("1.0.0", vec![("gleam_stdlib", ">= 0.1.0 and < 0.3.0")]),
                    release("1.1.0", vec![("gleam_stdlib", ">= 0.1.0 and < 0.3.0")]),
                ],
            ),
            (
                "depends_on_old_version_of_direct_pkg",
                vec![release(
                    "0.1.0",
                    vec![("direct_pkg_with_major_version", ">= 0.1.0 and < 0.3.0")],
                )],
            ),
            (
                "this_pkg_depends_on_indirect_pkg",
                vec![release(
                    "0.1.0",
                    vec![("indirect_pkg_with_major_version", ">= 0.1.0 and < 1.0.0")],
                )],
            ),
            (
                "indirect_pkg_with_major_version",
                vec![
                    release("0.1.0", vec![("gleam_stdlib", ">= 0.1.0 and < 0.3.0")]),
                    release("1.0.0", vec![("gleam_stdlib", ">= 0.1.0 and < 0.3.0")]),
                    release("1.1.0", vec![("gleam_stdlib", ">= 0.1.0 and < 0.3.0")]),
                ],
            ),
        ])
    }
#[test]
fn resolution_with_locked() {
let locked_stdlib = ("gleam_stdlib".into(), Version::parse("0.1.0").unwrap());
let result = resolve_versions(
&make_remote(),
HashMap::new(),
"app".into(),
vec![("gleam_stdlib".into(), Range::new("~> 0.1".into()).unwrap())].into_iter(),
&vec![locked_stdlib].into_iter().collect(),
)
.unwrap();
assert_eq!(
result,
vec![("gleam_stdlib".into(), Version::parse("0.1.0").unwrap())]
.into_iter()
.collect()
);
}
#[test]
fn resolution_without_deps() {
let result = resolve_versions(
&make_remote(),
HashMap::new(),
"app".into(),
vec![].into_iter(),
&vec![].into_iter().collect(),
)
.unwrap();
assert_eq!(result, vec![].into_iter().collect())
}
#[test]
fn resolution_1_dep() {
    // A single requirement resolves to the newest matching release.
    let requirements = vec![("gleam_stdlib".into(), Range::new("~> 0.1".into()).unwrap())];
    let resolved = resolve_versions(
        &make_remote(),
        HashMap::new(),
        "app".into(),
        requirements.into_iter(),
        &vec![].into_iter().collect(),
    )
    .unwrap();
    let expected = vec![("gleam_stdlib".into(), Version::try_from("0.3.0").unwrap())]
        .into_iter()
        .collect();
    assert_eq!(resolved, expected);
}
#[test]
fn resolution_with_nested_deps() {
    // Transitive dependencies (gleam_otp -> gleam_stdlib) are resolved too.
    let requirements = vec![("gleam_otp".into(), Range::new("~> 0.1".into()).unwrap())];
    let resolved = resolve_versions(
        &make_remote(),
        HashMap::new(),
        "app".into(),
        requirements.into_iter(),
        &vec![].into_iter().collect(),
    )
    .unwrap();
    let expected = vec![
        ("gleam_otp".into(), Version::try_from("0.2.0").unwrap()),
        ("gleam_stdlib".into(), Version::try_from("0.3.0").unwrap()),
    ]
    .into_iter()
    .collect();
    assert_eq!(resolved, expected);
}
#[test]
fn resolution_with_optional_deps() {
    // An optional dependency is not pulled into the solution unless
    // something actually requires it.
    let requirements = vec![(
        "package_with_optional".into(),
        Range::new("~> 0.1".into()).unwrap(),
    )];
    let resolved = resolve_versions(
        &make_remote(),
        HashMap::new(),
        "app".into(),
        requirements.into_iter(),
        &vec![].into_iter().collect(),
    )
    .unwrap();
    let expected = vec![(
        "package_with_optional".into(),
        Version::try_from("0.1.0").unwrap(),
    )]
    .into_iter()
    .collect();
    assert_eq!(resolved, expected);
}
#[test]
fn resolution_with_optional_deps_explicitly_provided() {
    // When the optional dependency's target is also requested directly, the
    // optional constraint (`>= 0.1.0 and < 0.3.0`) caps the selected version.
    let requirements = vec![
        (
            "package_with_optional".into(),
            Range::new("~> 0.1".into()).unwrap(),
        ),
        ("gleam_stdlib".into(), Range::new("~> 0.1".into()).unwrap()),
    ];
    let resolved = resolve_versions(
        &make_remote(),
        HashMap::new(),
        "app".into(),
        requirements.into_iter(),
        &vec![].into_iter().collect(),
    )
    .unwrap();
    let expected = vec![
        ("gleam_stdlib".into(), Version::try_from("0.2.2").unwrap()),
        (
            "package_with_optional".into(),
            Version::try_from("0.1.0").unwrap(),
        ),
    ]
    .into_iter()
    .collect();
    assert_eq!(resolved, expected);
}
#[test]
fn resolution_with_optional_deps_incompatible() {
    // Requesting gleam_stdlib ~> 0.3 conflicts with the optional
    // constraint `>= 0.1.0 and < 0.3.0`, so resolution must fail.
    let requirements = vec![
        (
            "package_with_optional".into(),
            Range::new("~> 0.1".into()).unwrap(),
        ),
        ("gleam_stdlib".into(), Range::new("~> 0.3".into()).unwrap()),
    ];
    let result = resolve_versions(
        &make_remote(),
        HashMap::new(),
        "app".into(),
        requirements.into_iter(),
        &vec![].into_iter().collect(),
    );
    assert!(result.is_err());
}
#[test]
fn resolution_with_optional_deps_required_by_nested_deps() {
    // gleam_otp requires gleam_stdlib, which activates the optional
    // constraint of package_with_optional and caps stdlib below 0.3.0.
    let requirements = vec![
        (
            "package_with_optional".into(),
            Range::new("~> 0.1".into()).unwrap(),
        ),
        ("gleam_otp".into(), Range::new("~> 0.1".into()).unwrap()),
    ];
    let resolved = resolve_versions(
        &make_remote(),
        HashMap::new(),
        "app".into(),
        requirements.into_iter(),
        &vec![].into_iter().collect(),
    )
    .unwrap();
    let expected = vec![
        ("gleam_stdlib".into(), Version::try_from("0.2.2").unwrap()),
        ("gleam_otp".into(), Version::try_from("0.2.0").unwrap()),
        (
            "package_with_optional".into(),
            Version::try_from("0.1.0").unwrap(),
        ),
    ]
    .into_iter()
    .collect();
    assert_eq!(resolved, expected);
}
#[test]
// NOTE(review): this test body is empty — presumably a placeholder for
// asserting that an optional dependency's constraints are still applied.
// Confirm the intent and implement it, or remove the stub.
fn resolution_with_optional_deps_keep_constraints() {}
#[test]
fn resolution_locked_to_older_version() {
    // NOTE(review): despite the name, nothing is locked here — the narrower
    // `~> 0.1.0` requirement is what selects the older gleam_otp release.
    let requirements = vec![("gleam_otp".into(), Range::new("~> 0.1.0".into()).unwrap())];
    let resolved = resolve_versions(
        &make_remote(),
        HashMap::new(),
        "app".into(),
        requirements.into_iter(),
        &vec![].into_iter().collect(),
    )
    .unwrap();
    let expected = vec![
        ("gleam_otp".into(), Version::try_from("0.1.0").unwrap()),
        ("gleam_stdlib".into(), Version::try_from("0.3.0").unwrap()),
    ]
    .into_iter()
    .collect();
    assert_eq!(resolved, expected);
}
#[test]
fn resolution_retired_versions_not_used_by_default() {
    // 0.2.0 of package_with_retired is retired, so resolution picks 0.1.0
    // even though 0.2.0 also satisfies `> 0.0.0`.
    let requirements = vec![(
        "package_with_retired".into(),
        Range::new("> 0.0.0".into()).unwrap(),
    )];
    let resolved = resolve_versions(
        &make_remote(),
        HashMap::new(),
        "app".into(),
        requirements.into_iter(),
        &vec![].into_iter().collect(),
    )
    .unwrap();
    let expected = vec![(
        "package_with_retired".into(),
        Version::try_from("0.1.0").unwrap(),
    )]
    .into_iter()
    .collect();
    assert_eq!(resolved, expected);
}
#[test]
fn resolution_retired_versions_can_be_used_if_locked() {
    // A locked version is honoured even when that release has been retired.
    let requirements = vec![(
        "package_with_retired".into(),
        Range::new("> 0.0.0".into()).unwrap(),
    )];
    let locked = vec![("package_with_retired".into(), Version::new(0, 2, 0))]
        .into_iter()
        .collect();
    let resolved = resolve_versions(
        &make_remote(),
        HashMap::new(),
        "app".into(),
        requirements.into_iter(),
        &locked,
    )
    .unwrap();
    let expected = vec![("package_with_retired".into(), Version::new(0, 2, 0))]
        .into_iter()
        .collect();
    assert_eq!(resolved, expected);
}
#[test]
fn resolution_prerelease_can_be_selected() {
    // A pre-release requirement selects the newest matching release
    // candidate (0.3.0-rc2).
    let requirements = vec![(
        "gleam_otp".into(),
        Range::new("~> 0.3.0-rc1".into()).unwrap(),
    )];
    let resolved = resolve_versions(
        &make_remote(),
        HashMap::new(),
        "app".into(),
        requirements.into_iter(),
        &vec![].into_iter().collect(),
    )
    .unwrap();
    let expected = vec![
        ("gleam_stdlib".into(), Version::try_from("0.3.0").unwrap()),
        ("gleam_otp".into(), Version::try_from("0.3.0-rc2").unwrap()),
    ]
    .into_iter()
    .collect();
    assert_eq!(resolved, expected);
}
#[test]
fn resolution_exact_prerelease_can_be_selected() {
    // An exact pre-release requirement pins that candidate precisely.
    let requirements = vec![("gleam_otp".into(), Range::new("0.3.0-rc1".into()).unwrap())];
    let resolved = resolve_versions(
        &make_remote(),
        HashMap::new(),
        "app".into(),
        requirements.into_iter(),
        &vec![].into_iter().collect(),
    )
    .unwrap();
    let expected = vec![
        ("gleam_stdlib".into(), Version::try_from("0.3.0").unwrap()),
        ("gleam_otp".into(), Version::try_from("0.3.0-rc1").unwrap()),
    ]
    .into_iter()
    .collect();
    assert_eq!(resolved, expected);
}
#[test]
fn resolution_not_found_dep() {
    // Requesting a package the repository does not know about must fail
    // with a dependency-resolution error naming the missing package.
    let err = resolve_versions(
        &make_remote(),
        HashMap::new(),
        "app".into(),
        vec![("unknown".into(), Range::new("~> 0.1".into()).unwrap())].into_iter(),
        &vec![].into_iter().collect(),
    )
    .unwrap_err();
    match err {
        Error::DependencyResolutionError(error) => {
            let expected = "An error occurred while choosing the version of unknown: The package unknown was not found in the package repository";
            assert_eq!(error, expected);
        }
        other => panic!("wrong error: {other}"),
    }
}
#[test]
fn resolution_no_matching_version() {
    // No release of gleam_stdlib satisfies `~> 99.0`, so resolution fails.
    let result = resolve_versions(
        &make_remote(),
        HashMap::new(),
        "app".into(),
        vec![("gleam_stdlib".into(), Range::new("~> 99.0".into()).unwrap())].into_iter(),
        &vec![].into_iter().collect(),
    );
    assert!(result.is_err());
}
#[test]
fn resolution_locked_version_doesnt_satisfy_requirements() {
    // Locking gleam_stdlib to 0.2.0 contradicts the `~> 0.1.0` requirement,
    // which must be reported as an incompatible locked version.
    let requirements = vec![(
        "gleam_stdlib".into(),
        Range::new("~> 0.1.0".into()).unwrap(),
    )];
    let locked = vec![("gleam_stdlib".into(), Version::new(0, 2, 0))]
        .into_iter()
        .collect();
    let err = resolve_versions(
        &make_remote(),
        HashMap::new(),
        "app".into(),
        requirements.into_iter(),
        &locked,
    )
    .unwrap_err();
    match err {
        Error::IncompatibleLockedVersion { error } => {
            let expected = "gleam_stdlib is specified with the requirement `~> 0.1.0`, but it is locked to 0.2.0, which is incompatible.";
            assert_eq!(error, expected);
        }
        other => panic!("wrong error: {other}"),
    }
}
#[test]
fn resolution_with_exact_dep() {
    // An exact requirement pins the dependency to precisely that version.
    let requirements = vec![("gleam_stdlib".into(), Range::new("0.1.0".into()).unwrap())];
    let resolved = resolve_versions(
        &make_remote(),
        HashMap::new(),
        "app".into(),
        requirements.into_iter(),
        &vec![].into_iter().collect(),
    )
    .unwrap();
    let expected = vec![("gleam_stdlib".into(), Version::try_from("0.1.0").unwrap())]
        .into_iter()
        .collect();
    assert_eq!(resolved, expected);
}
#[test]
fn parse_exact_version_test() {
    // Bare and `==`-prefixed versions parse as "exact"; range operators
    // such as `~>` and `>=` do not.
    let version = |s| Some(Version::parse(s).unwrap());
    assert_eq!(parse_exact_version("1.0.0"), version("1.0.0"));
    assert_eq!(parse_exact_version("==1.0.0"), version("1.0.0"));
    assert_eq!(parse_exact_version("== 1.0.0"), version("1.0.0"));
    assert_eq!(parse_exact_version("~> 1.0.0"), None);
    assert_eq!(parse_exact_version(">= 1.0.0"), None);
}
#[test]
fn resolve_major_version_upgrades() {
let manifest = manifest::Manifest {
requirements: vec![
(
EcoString::from("package_depends_on_indirect_pkg"),
requirement::Requirement::Hex {
version: Range::new("> 0.1.0 and <= 1.0.0".into()).unwrap(),
},
),
(
EcoString::from("direct_pkg_with_major_version"),
requirement::Requirement::Hex {
version: Range::new("> 0.1.0 and <= 2.0.0".into()).unwrap(),
},
),
(
EcoString::from("depends_on_old_version_of_direct_pkg"),
requirement::Requirement::Hex {
version: Range::new("> 0.1.0 and <= 1.0.0".into()).unwrap(),
},
),
]
.into_iter()
.collect(),
packages: vec![
ManifestPackage {
name: "direct_pkg_with_major_version".into(),
version: Version::parse("0.1.0").unwrap(),
build_tools: ["gleam".into()].into(),
otp_app: None,
requirements: vec![],
source: ManifestPackageSource::Hex {
outer_checksum: Base16Checksum(vec![1, 2, 3]),
},
},
ManifestPackage {
name: "depends_on_old_version_of_direct_pkg".into(),
version: Version::parse("0.1.0").unwrap(),
build_tools: ["gleam".into()].into(),
otp_app: None,
requirements: vec!["direct_pkg_with_major_version".into()],
source: ManifestPackageSource::Hex {
outer_checksum: Base16Checksum(vec![1, 2, 3]),
},
},
ManifestPackage {
name: "pkg_depends_on_indirect_pkg".into(),
version: Version::parse("0.1.0").unwrap(),
build_tools: ["gleam".into()].into(),
otp_app: None,
requirements: vec!["indirect_pkg_with_major_version".into()],
source: ManifestPackageSource::Hex {
| rust | Apache-2.0 | f424547f02e621f1c5f28749786e05eda7feb098 | 2026-01-04T15:40:22.554517Z | true |
gleam-lang/gleam | https://github.com/gleam-lang/gleam/blob/f424547f02e621f1c5f28749786e05eda7feb098/compiler-core/src/call_graph.rs | compiler-core/src/call_graph.rs | //! Graphs that represent the relationships between entities in a Gleam module,
//! such as module functions or constants.
#[cfg(test)]
mod into_dependency_order_tests;
use crate::{
Result,
ast::{
AssignName, AssignmentKind, BitArrayOption, BitArraySize, ClauseGuard, Constant, Pattern,
SrcSpan, Statement, UntypedClauseGuard, UntypedExpr, UntypedFunction,
UntypedModuleConstant, UntypedPattern, UntypedStatement,
},
type_::Error,
};
use itertools::Itertools;
use petgraph::stable_graph::NodeIndex;
use petgraph::{Directed, stable_graph::StableGraph};
#[derive(Debug, Default)]
struct CallGraphBuilder<'a> {
    // Maps a module-level name to its graph node and definition location.
    // `None` means the name is currently shadowed by a local binding.
    names: im::HashMap<&'a str, Option<(NodeIndex, SrcSpan)>>,
    // One node per module function/constant; edges mean "references".
    graph: StableGraph<(), (), Directed>,
    // The node that edges are added from while walking a definition's body.
    current_function: NodeIndex,
}
// A module-level definition that participates in the call graph.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum CallGraphNode {
    Function(UntypedFunction),
    ModuleConstant(UntypedModuleConstant),
}
impl<'a> CallGraphBuilder<'a> {
/// Consume the builder, returning the completed reference graph.
fn into_graph(self) -> StableGraph<(), (), Directed> {
    self.graph
}
/// Add each function to the graph, storing the index of the node under the
/// name of the function.
fn register_module_function_existence(
    &mut self,
    function: &'a UntypedFunction,
) -> Result<(), Error> {
    let (_, name) = function
        .name
        .as_ref()
        .expect("A module's function must be named");
    let location = function.location;
    let index = self.graph.add_node(());
    // The new name is inserted before the duplicate check, so the latest
    // definition ends up in `names` even when an error is returned.
    let previous = self.names.insert(name, Some((index, location)));
    if let Some(Some((_, previous_location))) = previous {
        return Err(Error::DuplicateName {
            location_a: location,
            location_b: previous_location,
            name: name.clone(),
        });
    }
    Ok(())
}
/// Add each constant to the graph, storing the index of the node under the
/// name of the constant.
fn register_module_const_existence(
    &mut self,
    constant: &'a UntypedModuleConstant,
) -> Result<(), Error> {
    let name = &constant.name;
    let location = constant.location;
    let index = self.graph.add_node(());
    // A `Some(Some(..))` previous entry means another module-level
    // definition already used this name: report the duplicate.
    let previous = self.names.insert(name, Some((index, location)));
    if let Some(Some((_, previous_location))) = previous {
        return Err(Error::DuplicateName {
            location_a: location,
            location_b: previous_location,
            name: name.clone(),
        });
    }
    Ok(())
}
/// Record the values referenced by a module constant's definition as edges
/// from the constant's own node.
fn register_references_constant(&mut self, constant: &'a UntypedModuleConstant) {
    self.current_function = self
        .names
        .get(constant.name.as_str())
        .expect("Constant must already have been registered as existing")
        .expect("Constant must not be shadowed at module level")
        .0;
    self.constant(&constant.value);
}
/// Record the values referenced by a function's body as edges from the
/// function's node. Scope changes made while walking are discarded at the
/// end by restoring the saved name table.
fn register_references(&mut self, function: &'a UntypedFunction) {
    let names = self.names.clone();
    self.current_function = self
        .names
        .get(
            function
                .name
                .as_ref()
                .map(|(_, name)| name.as_str())
                .expect("A module's function must be named"),
        )
        .expect("Function must already have been registered as existing")
        .expect("Function must not be shadowed at module level")
        .0;
    // Function parameters shadow module-level values of the same name.
    for name in function
        .arguments
        .iter()
        .flat_map(|a| a.get_variable_name())
    {
        self.define(name);
    }
    self.statements(&function.body);
    self.names = names;
}
/// Add an edge from the definition currently being walked to the tracked
/// module value named `name`, if there is one.
fn referenced(&mut self, name: &str) {
    // If we don't know what the target is then it's either a programmer
    // error to be detected later, or it's not a module function and as such
    // is not a value we are tracking.
    let Some(target) = self.names.get(name) else {
        return;
    };
    // If the target is known but registered as None then it's local value
    // that shadows a module function.
    let Some((target, _)) = target else { return };
    _ = self.graph.add_edge(self.current_function, *target, ());
}
/// Walk a sequence of statements in a fresh scope: names defined inside
/// must not leak into the surrounding scope, so the name table is saved
/// beforehand and restored afterwards.
fn statements(&mut self, statements: &'a [UntypedStatement]) {
    let outer_scope = self.names.clone();
    statements
        .iter()
        .for_each(|statement| self.statement(statement));
    self.names = outer_scope;
}
/// Walk a single statement, recording references and local definitions.
fn statement(&mut self, statement: &'a UntypedStatement) {
    match statement {
        Statement::Expression(expression) => {
            self.expression(expression);
        }
        Statement::Assignment(assignment) => {
            // The value is walked before the pattern's bindings are defined.
            self.expression(&assignment.value);
            self.pattern(&assignment.pattern);
            // Only `assert` assignments may carry a message expression.
            match &assignment.kind {
                AssignmentKind::Assert {
                    message: Some(message),
                    ..
                } => self.expression(message),
                AssignmentKind::Let
                | AssignmentKind::Generated
                | AssignmentKind::Assert { message: None, .. } => {}
            }
        }
        Statement::Use(use_) => {
            self.expression(&use_.call);
            for assignment in &use_.assignments {
                self.pattern(&assignment.pattern);
            }
        }
        Statement::Assert(assert) => {
            self.expression(&assert.value);
            if let Some(message) = &assert.message {
                self.expression(message)
            }
        }
    };
}
/// Walk an expression, recording references to module-level values and
/// tracking the local bindings that shadow them.
fn expression(&mut self, expression: &'a UntypedExpr) {
    match expression {
        // Literals reference nothing.
        UntypedExpr::Int { .. } | UntypedExpr::Float { .. } | UntypedExpr::String { .. } => (),
        UntypedExpr::Todo { message, .. } => {
            if let Some(msg_expr) = message {
                self.expression(msg_expr)
            }
        }
        UntypedExpr::Panic { message, .. } => {
            if let Some(msg_expr) = message {
                self.expression(msg_expr)
            }
        }
        UntypedExpr::Echo {
            expression,
            location: _,
            keyword_end: _,
            message,
        } => {
            if let Some(expression) = expression {
                self.expression(expression);
            }
            if let Some(message) = message {
                self.expression(message);
            }
        }
        // Aha! A variable is being referenced.
        UntypedExpr::Var { name, .. } => {
            self.referenced(name);
        }
        UntypedExpr::Call { fun, arguments, .. } => {
            self.expression(fun);
            for argument in arguments {
                self.expression(&argument.value);
            }
        }
        UntypedExpr::PipeLine { expressions } => {
            for expression in expressions {
                self.expression(expression);
            }
        }
        UntypedExpr::Tuple { elements, .. } => {
            for expression in elements {
                self.expression(expression);
            }
        }
        UntypedExpr::Block { statements, .. } => {
            // Blocks introduce a scope. `statements` already saves and
            // restores the name table itself, so this save is defensive.
            let names = self.names.clone();
            self.statements(statements);
            self.names = names;
        }
        UntypedExpr::BinOp { left, right, .. } => {
            self.expression(left);
            self.expression(right);
        }
        UntypedExpr::List { elements, tail, .. } => {
            for element in elements {
                self.expression(element);
            }
            if let Some(tail) = tail {
                self.expression(tail);
            }
        }
        // All single-subexpression wrappers are handled together.
        UntypedExpr::NegateInt {
            value: expression, ..
        }
        | UntypedExpr::NegateBool {
            value: expression, ..
        }
        | UntypedExpr::TupleIndex {
            tuple: expression, ..
        }
        | UntypedExpr::FieldAccess {
            container: expression,
            ..
        } => {
            self.expression(expression);
        }
        UntypedExpr::BitArray { segments, .. } => {
            for segment in segments {
                self.expression(&segment.value);
                // Only `Size` options carry an expression to walk.
                for option in &segment.options {
                    if let BitArrayOption::Size { value, .. } = option {
                        self.expression(value);
                    }
                }
            }
        }
        UntypedExpr::RecordUpdate {
            record, arguments, ..
        } => {
            self.expression(&record.base);
            for argument in arguments {
                self.expression(&argument.value);
            }
        }
        UntypedExpr::Fn {
            arguments, body, ..
        } => {
            // Anonymous function parameters shadow outer names only inside
            // the function's own body.
            let names = self.names.clone();
            for argument in arguments {
                if let Some(name) = argument.names.get_variable_name() {
                    self.define(name)
                }
            }
            self.statements(body);
            self.names = names;
        }
        UntypedExpr::Case {
            subjects, clauses, ..
        } => {
            for subject in subjects {
                self.expression(subject);
            }
            // Each clause gets its own scope for pattern bindings.
            for clause in clauses.as_deref().unwrap_or_default() {
                let names = self.names.clone();
                for pattern in &clause.pattern {
                    self.pattern(pattern);
                }
                if let Some(guard) = &clause.guard {
                    self.guard(guard);
                }
                self.expression(&clause.then);
                self.names = names;
            }
        }
    }
}
/// Walk a pattern, recording the names it binds and any values referenced
/// by bit-array size sub-patterns.
fn pattern(&mut self, pattern: &'a UntypedPattern) {
    match pattern {
        // These patterns bind no names and reference no values.
        Pattern::Discard { .. }
        | Pattern::Int { .. }
        | Pattern::Float { .. }
        | Pattern::String { .. }
        | Pattern::StringPrefix {
            right_side_assignment: AssignName::Discard(_),
            ..
        }
        | Pattern::Invalid { .. } => (),
        Pattern::StringPrefix {
            right_side_assignment: AssignName::Variable(name),
            ..
        }
        | Pattern::Variable { name, .. } => {
            self.define(name);
        }
        Pattern::Tuple {
            elements: patterns, ..
        } => {
            for pattern in patterns {
                self.pattern(pattern);
            }
        }
        Pattern::List { elements, tail, .. } => {
            for element in elements {
                self.pattern(element);
            }
            if let Some(tail) = tail {
                self.pattern(&tail.pattern);
            }
        }
        Pattern::BitArraySize(size) => {
            self.bit_array_size(size);
        }
        // `x as name` binds the name and also walks the inner pattern.
        Pattern::Assign { name, pattern, .. } => {
            self.define(name);
            self.pattern(pattern);
        }
        Pattern::Constructor { arguments, .. } => {
            for argument in arguments {
                self.pattern(&argument.value);
            }
        }
        Pattern::BitArray { segments, .. } => {
            for segment in segments {
                // Size options may reference previously bound variables.
                for option in &segment.options {
                    self.bit_array_option(option, |s, p| s.pattern(p));
                }
                self.pattern(&segment.value);
            }
        }
    }
}
/// Walk a bit-array size expression, recording any variables it references.
fn bit_array_size(&mut self, size: &'a BitArraySize<()>) {
    match size {
        BitArraySize::Int { .. } => {}
        BitArraySize::Variable { name, .. } => self.referenced(name),
        BitArraySize::BinaryOperator { left, right, .. } => {
            self.bit_array_size(left);
            self.bit_array_size(right);
        }
        BitArraySize::Block { inner, .. } => self.bit_array_size(inner),
    }
}
/// Record a local binding, shadowing any module-level value of that name.
fn define(&mut self, name: &'a str) {
    _ = self.names.insert(name, None);
}
/// Apply `process` to the payload of a `Size` option. Every other
/// bit-array option carries no sub-value to walk.
fn bit_array_option<T>(
    &mut self,
    option: &'a BitArrayOption<T>,
    process: impl Fn(&mut Self, &'a T),
) {
    match option {
        BitArrayOption::Big { .. }
        | BitArrayOption::Bytes { .. }
        | BitArrayOption::Bits { .. }
        | BitArrayOption::Float { .. }
        | BitArrayOption::Int { .. }
        | BitArrayOption::Little { .. }
        | BitArrayOption::Native { .. }
        | BitArrayOption::Signed { .. }
        | BitArrayOption::Unit { .. }
        | BitArrayOption::Unsigned { .. }
        | BitArrayOption::Utf16 { .. }
        | BitArrayOption::Utf16Codepoint { .. }
        | BitArrayOption::Utf32 { .. }
        | BitArrayOption::Utf32Codepoint { .. }
        | BitArrayOption::Utf8 { .. }
        | BitArrayOption::Utf8Codepoint { .. } => (),
        BitArrayOption::Size { value: pattern, .. } => {
            process(self, pattern);
        }
    }
}
/// Walk a case-clause guard, recording referenced variables and constants.
fn guard(&mut self, guard: &'a UntypedClauseGuard) {
    match guard {
        // All binary operators recurse into both operands.
        ClauseGuard::Equals { left, right, .. }
        | ClauseGuard::NotEquals { left, right, .. }
        | ClauseGuard::GtInt { left, right, .. }
        | ClauseGuard::GtEqInt { left, right, .. }
        | ClauseGuard::LtInt { left, right, .. }
        | ClauseGuard::LtEqInt { left, right, .. }
        | ClauseGuard::GtFloat { left, right, .. }
        | ClauseGuard::GtEqFloat { left, right, .. }
        | ClauseGuard::LtFloat { left, right, .. }
        | ClauseGuard::LtEqFloat { left, right, .. }
        | ClauseGuard::AddInt { left, right, .. }
        | ClauseGuard::AddFloat { left, right, .. }
        | ClauseGuard::SubInt { left, right, .. }
        | ClauseGuard::SubFloat { left, right, .. }
        | ClauseGuard::MultInt { left, right, .. }
        | ClauseGuard::MultFloat { left, right, .. }
        | ClauseGuard::DivInt { left, right, .. }
        | ClauseGuard::DivFloat { left, right, .. }
        | ClauseGuard::RemainderInt { left, right, .. }
        | ClauseGuard::Or { left, right, .. }
        | ClauseGuard::And { left, right, .. } => {
            self.guard(left);
            self.guard(right);
        }
        ClauseGuard::Block { value, .. } => self.guard(value),
        ClauseGuard::Not { expression, .. } => self.guard(expression),
        ClauseGuard::Var { name, .. } => self.referenced(name),
        ClauseGuard::TupleIndex { tuple, .. } => self.guard(tuple),
        ClauseGuard::FieldAccess { container, .. } => self.guard(container),
        ClauseGuard::ModuleSelect { module_name, .. } => self.referenced(module_name),
        ClauseGuard::Constant(constant) => self.constant(constant),
    }
}
/// Walk a constant expression, recording references to other module-level
/// values. Qualified references (`module: Some(_)`) point outside this
/// module and are not tracked.
fn constant(&mut self, constant: &'a Constant<(), ()>) {
    match constant {
        Constant::Int { .. }
        | Constant::Float { .. }
        | Constant::String { .. }
        | Constant::Invalid { .. }
        | Constant::Var {
            module: Some(_), ..
        } => (),
        Constant::List { elements, .. } | Constant::Tuple { elements, .. } => {
            for element in elements {
                self.constant(element);
            }
        }
        Constant::Record { arguments, .. } => {
            for argument in arguments {
                self.constant(&argument.value);
            }
        }
        Constant::RecordUpdate {
            record, arguments, ..
        } => {
            self.constant(&record.base);
            for argument in arguments {
                self.constant(&argument.value);
            }
        }
        Constant::Var {
            module: None, name, ..
        } => self.referenced(name),
        Constant::BitArray { segments, .. } => {
            for segment in segments {
                for option in &segment.options {
                    self.bit_array_option(option, |s, c| s.constant(c));
                }
                self.constant(&segment.value);
            }
        }
        Constant::StringConcatenation { left, right, .. } => {
            self.constant(left);
            self.constant(right);
        }
    }
}
}
/// Determine the order in which functions and constants should be compiled
/// and if any mutually recursive functions need to be compiled together.
///
pub fn into_dependency_order(
    functions: Vec<UntypedFunction>,
    constants: Vec<UntypedModuleConstant>,
) -> Result<Vec<Vec<CallGraphNode>>, Error> {
    let mut builder = CallGraphBuilder::default();
    // First register every definition so that references between them can
    // be resolved regardless of definition order, erroring on duplicates.
    for function in &functions {
        builder.register_module_function_existence(function)?;
    }
    for constant in &constants {
        builder.register_module_const_existence(constant)?;
    }
    // Then build the reference edges between the registered definitions.
    for function in &functions {
        builder.register_references(function);
    }
    for constant in &constants {
        builder.register_references_constant(constant);
    }
    // Consume the builder to get the graph and topologically order it.
    let graph = builder.into_graph();
    let levels = crate::graph::into_dependency_order(graph);
    // Map node indices back to the definitions they represent. Each one is
    // wrapped in `Some` so it can be moved out exactly once with `take`.
    let mut definitions: Vec<Option<CallGraphNode>> = functions
        .into_iter()
        .map(CallGraphNode::Function)
        .chain(constants.into_iter().map(CallGraphNode::ModuleConstant))
        .map(Some)
        .collect();
    let ordered = levels
        .into_iter()
        .map(|level| {
            level
                .into_iter()
                .map(|index| {
                    definitions
                        .get_mut(index.index())
                        .expect("Index out of bounds")
                        .take()
                        .expect("Function already taken")
                })
                .collect()
        })
        .collect();
    Ok(ordered)
}
| rust | Apache-2.0 | f424547f02e621f1c5f28749786e05eda7feb098 | 2026-01-04T15:40:22.554517Z | false |
gleam-lang/gleam | https://github.com/gleam-lang/gleam/blob/f424547f02e621f1c5f28749786e05eda7feb098/compiler-core/src/fix.rs | compiler-core/src/fix.rs | use crate::{
Error, Result,
format::{Formatter, Intermediate},
warning::WarningEmitter,
};
use camino::Utf8Path;
use ecow::EcoString;
/// Parse a Gleam module, apply automated fixes (currently none), and return
/// the pretty-printed result.
pub fn parse_fix_and_format(src: &EcoString, path: &Utf8Path) -> Result<String> {
    // Parse, converting any parse failure into a crate-level error that
    // carries the path and source for diagnostics.
    let parsed = crate::parse::parse_module(path.to_owned(), src, &WarningEmitter::null())
        .map_err(|error| Error::Parse {
            path: path.to_path_buf(),
            src: src.clone(),
            error: Box::new(error),
        })?;
    let module = parsed.module;
    let intermediate = Intermediate::from_extra(&parsed.extra, src);
    // Fix
    // let module = some_fixer_module::Fixer::fix(module);
    // Format the (possibly fixed) module at the standard 80-column width.
    let mut output = String::new();
    Formatter::with_comments(&intermediate)
        .module(&module)
        .pretty_print(80, &mut output)?;
    Ok(output)
}
| rust | Apache-2.0 | f424547f02e621f1c5f28749786e05eda7feb098 | 2026-01-04T15:40:22.554517Z | false |
gleam-lang/gleam | https://github.com/gleam-lang/gleam/blob/f424547f02e621f1c5f28749786e05eda7feb098/compiler-core/src/io.rs | compiler-core/src/io.rs | pub mod memory;
use crate::error::{Error, FileIoAction, FileKind, Result};
use async_trait::async_trait;
use debug_ignore::DebugIgnore;
use flate2::read::GzDecoder;
use std::{
collections::{HashMap, HashSet, VecDeque},
fmt::Debug,
io,
iter::Extend,
time::SystemTime,
vec::IntoIter,
};
use tar::{Archive, Entry};
use camino::{Utf8Path, Utf8PathBuf};
/// Takes in a source path and a target path and determines a relative path
/// from source -> target.
/// If given a relative target path, no calculation occurs.
/// # Panics
/// The provided source path should be absolute, otherwise will panic.
pub fn make_relative(source_path: &Utf8Path, target_path: &Utf8Path) -> Utf8PathBuf {
    assert!(source_path.is_absolute());
    // Input target will always be canonicalised whereas source will not.
    // On Windows a canonicalised path carries a `\\?\` verbatim prefix that
    // breaks path diffing, so it is stripped from the target first.
    // Based on https://github.com/rust-lang/rust/issues/42869#issuecomment-1712317081
    #[cfg(target_family = "windows")]
    let binding = target_path.to_string();
    #[cfg(target_family = "windows")]
    let target_path = Utf8Path::new(binding.trim_start_matches(r"\\?\"));
    if target_path.is_absolute() {
        pathdiff::diff_utf8_paths(target_path, source_path)
            .expect("Should not fail on two absolute paths")
    } else {
        target_path.into()
    }
}
pub trait Reader: io::Read {
    /// A wrapper around `std::io::Read` that has Gleam's error handling.
    fn read_bytes(&mut self, buffer: &mut [u8]) -> Result<usize> {
        self.read(buffer).map_err(|e| self.convert_err(e))
    }
    /// Convert a low-level error into the crate's `Error` type.
    fn convert_err<E: std::error::Error>(&self, error: E) -> Error;
}
pub trait Utf8Writer: std::fmt::Write {
    /// A wrapper around `fmt::Write` that has Gleam's error handling.
    fn str_write(&mut self, str: &str) -> Result<()> {
        self.write_str(str).map_err(|e| self.convert_err(e))
    }
    /// Convert a low-level error into the crate's `Error` type.
    fn convert_err<E: std::error::Error>(&self, err: E) -> Error;
}
impl Utf8Writer for String {
    fn convert_err<E: std::error::Error>(&self, error: E) -> Error {
        // Writing to a String is in-memory, hence the placeholder path.
        Error::FileIo {
            action: FileIoAction::WriteTo,
            kind: FileKind::File,
            path: Utf8PathBuf::from("<in memory>"),
            err: Some(error.to_string()),
        }
    }
}
pub trait Writer: io::Write + Utf8Writer {
    /// A wrapper around `io::Write` that has Gleam's error handling.
    fn write(&mut self, bytes: &[u8]) -> Result<(), Error> {
        io::Write::write(self, bytes)
            .map(|_| ())
            .map_err(|e| self.convert_err(e))
    }
}
/// File contents, either raw bytes or UTF-8 text.
#[derive(Debug, PartialEq, Eq, Clone)]
pub enum Content {
    Binary(Vec<u8>),
    Text(String),
}
impl Content {
    /// View the content as raw bytes, regardless of variant.
    pub fn as_bytes(&self) -> &[u8] {
        match self {
            Content::Binary(data) => data,
            Content::Text(data) => data.as_bytes(),
        }
    }
    /// The content as text, or `None` if it is binary.
    pub fn text(&self) -> Option<&str> {
        match self {
            Content::Binary(_) => None,
            Content::Text(s) => Some(s),
        }
    }
}
impl From<Vec<u8>> for Content {
    fn from(bytes: Vec<u8>) -> Self {
        Content::Binary(bytes)
    }
}
impl From<&[u8]> for Content {
    fn from(bytes: &[u8]) -> Self {
        Content::Binary(bytes.to_vec())
    }
}
impl From<String> for Content {
    fn from(text: String) -> Self {
        Content::Text(text)
    }
}
impl From<&str> for Content {
    fn from(text: &str) -> Self {
        Content::Text(text.to_string())
    }
}
/// A file to be written, pairing its contents with its destination path.
#[derive(Debug, PartialEq, Eq, Clone)]
pub struct OutputFile {
    pub content: Content,
    pub path: Utf8PathBuf,
}
/// The result of listing a directory: each entry may individually have
/// failed to be read.
#[derive(Debug)]
pub struct ReadDir {
    entries: Vec<io::Result<DirEntry>>,
}
impl FromIterator<io::Result<DirEntry>> for ReadDir {
    fn from_iter<I: IntoIterator<Item = io::Result<DirEntry>>>(iter: I) -> Self {
        ReadDir {
            entries: iter.into_iter().collect(),
        }
    }
}
impl ReadDir {
    /// Append another listing's entries onto this one, consuming both.
    pub fn extend(mut self, other: ReadDir) -> Self {
        self.entries.extend(other);
        ReadDir {
            entries: self.entries,
        }
    }
}
impl IntoIterator for ReadDir {
    type Item = io::Result<DirEntry>;
    type IntoIter = IntoIter<Self::Item>;
    fn into_iter(self) -> Self::IntoIter {
        self.entries.into_iter()
    }
}
/// A single entry found when listing a directory.
#[derive(Debug, Clone)]
pub struct DirEntry {
    pub pathbuf: Utf8PathBuf,
}
impl DirEntry {
    pub fn from_path<P: AsRef<Utf8Path>>(path: P) -> DirEntry {
        DirEntry {
            pathbuf: path.as_ref().to_path_buf(),
        }
    }
    pub fn from_pathbuf(pathbuf: Utf8PathBuf) -> DirEntry {
        DirEntry { pathbuf }
    }
    pub fn as_path(&self) -> &Utf8Path {
        self.pathbuf.as_path()
    }
    pub fn into_path(self) -> Utf8PathBuf {
        self.pathbuf
    }
}
/// Structure holding state to walk across a directory's descendant files at
/// any level. Note that each descendant directory is only visited once
/// regardless of symlinks, avoiding infinite symlink loops.
#[derive(Debug, Clone)]
pub struct DirWalker {
    // Paths still to be visited, processed front-to-back.
    walk_queue: VecDeque<Utf8PathBuf>,
    // Canonicalised directories already expanded; breaks symlink cycles.
    dirs_walked: im::HashSet<Utf8PathBuf>,
}
impl DirWalker {
    /// Create a directory walker starting at the given path.
    pub fn new(dir: Utf8PathBuf) -> Self {
        Self {
            walk_queue: VecDeque::from([dir]),
            dirs_walked: im::HashSet::new(),
        }
    }
    /// Convert this walker to an iterator over file paths.
    ///
    /// This iterator calls [`Self::next_file`]. Errors are returned if certain
    /// directories cannot be read.
    pub fn into_file_iter(
        mut self,
        io: &impl FileSystemReader,
    ) -> impl Iterator<Item = Result<Utf8PathBuf>> + '_ {
        std::iter::from_fn(move || self.next_file(io).transpose())
    }
    /// Advance the directory walker to the next file. The returned path will
    /// be relative to the starting directory's path, even with symlinks
    /// (it is not canonicalised).
    pub fn next_file(&mut self, io: &impl FileSystemReader) -> Result<Option<Utf8PathBuf>> {
        while let Some(next_path) = self.walk_queue.pop_front() {
            let real_path = io.canonicalise(&next_path)?;
            if io.is_file(&real_path) {
                // Return the path relative to the starting directory, not the
                // canonicalised path (which we only use to check for already
                // visited directories).
                return Ok(Some(next_path));
            }
            // If it's not a directory then it contains no other files, so there's nothing to do.
            if !io.is_directory(&real_path) {
                continue;
            }
            // If we have already processed this directory then we don't need to do it again.
            // This could be due to symlinks.
            let already_seen = self.dirs_walked.insert(real_path.clone()).is_some();
            if already_seen {
                continue;
            }
            // Queue every entry of the directory for a later iteration.
            for entry in io.read_dir(&next_path)? {
                let Ok(entry) = entry else {
                    return Err(Error::FileIo {
                        kind: FileKind::Directory,
                        action: FileIoAction::Read,
                        path: next_path,
                        err: None,
                    });
                };
                self.walk_queue.push_back(entry.into_path())
            }
        }
        Ok(None)
    }
}
/// A trait used to read files.
/// Typically we use an implementation that reads from the file system,
/// but in tests and in other places other implementations may be used.
pub trait FileSystemReader {
    fn read_dir(&self, path: &Utf8Path) -> Result<ReadDir>;
    fn read(&self, path: &Utf8Path) -> Result<String, Error>;
    fn read_bytes(&self, path: &Utf8Path) -> Result<Vec<u8>, Error>;
    fn reader(&self, path: &Utf8Path) -> Result<WrappedReader, Error>;
    fn is_file(&self, path: &Utf8Path) -> bool;
    fn is_directory(&self, path: &Utf8Path) -> bool;
    fn modification_time(&self, path: &Utf8Path) -> Result<SystemTime, Error>;
    fn canonicalise(&self, path: &Utf8Path) -> Result<Utf8PathBuf, Error>;
}
/// Iterates over files with the given extension in a certain directory.
/// Symlinks are followed.
pub fn files_with_extension<'a>(
    io: &'a impl FileSystemReader,
    dir: &'a Utf8Path,
    extension: &'a str,
) -> impl Iterator<Item = Utf8PathBuf> + 'a {
    // Walk errors are silently dropped rather than aborting the iteration.
    DirWalker::new(dir.to_path_buf())
        .into_file_iter(io)
        .filter_map(Result::ok)
        .filter(|path| path.extension() == Some(extension))
}
/// A trait used to run other programs.
pub trait CommandExecutor {
    /// Run the command to completion, returning its exit code.
    fn exec(&self, command: Command) -> Result<i32, Error>;
}
/// A command one can run with a `CommandExecutor`
#[derive(Debug, Eq, PartialEq)]
pub struct Command {
    pub program: String,
    pub args: Vec<String>,
    // Environment variables to set, as (name, value) pairs.
    pub env: Vec<(String, String)>,
    // Optional working directory for the command.
    pub cwd: Option<Utf8PathBuf>,
    pub stdio: Stdio,
}
/// Handling for a spawned command's standard IO streams.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum Stdio {
    Inherit,
    Null,
}
impl Stdio {
    /// Convert to the equivalent `std::process::Stdio` configuration.
    pub fn get_process_stdio(&self) -> std::process::Stdio {
        match self {
            Stdio::Inherit => std::process::Stdio::inherit(),
            Stdio::Null => std::process::Stdio::null(),
        }
    }
}
/// A trait used to compile Erlang and Elixir modules to BEAM bytecode.
pub trait BeamCompiler {
    fn compile_beam(
        &self,
        out: &Utf8Path,
        lib: &Utf8Path,
        modules: &HashSet<Utf8PathBuf>,
        stdio: Stdio,
    ) -> Result<Vec<String>, Error>;
}
/// A trait used to write files.
/// Typically we use an implementation that writes to the file system,
/// but in tests and in other places other implementations may be used.
pub trait FileSystemWriter {
    fn mkdir(&self, path: &Utf8Path) -> Result<(), Error>;
    fn write(&self, path: &Utf8Path, content: &str) -> Result<(), Error>;
    fn write_bytes(&self, path: &Utf8Path, content: &[u8]) -> Result<(), Error>;
    fn delete_directory(&self, path: &Utf8Path) -> Result<(), Error>;
    fn copy(&self, from: &Utf8Path, to: &Utf8Path) -> Result<(), Error>;
    fn copy_dir(&self, from: &Utf8Path, to: &Utf8Path) -> Result<(), Error>;
    fn hardlink(&self, from: &Utf8Path, to: &Utf8Path) -> Result<(), Error>;
    fn symlink_dir(&self, from: &Utf8Path, to: &Utf8Path) -> Result<(), Error>;
    fn delete_file(&self, path: &Utf8Path) -> Result<(), Error>;
    fn exists(&self, path: &Utf8Path) -> bool;
}
#[derive(Debug)]
/// A wrapper around a Read implementing object that has Gleam's error handling.
pub struct WrappedReader {
    /// The path the reader was opened from, used when building errors.
    path: Utf8PathBuf,
    /// The wrapped reader. `DebugIgnore` excludes it from `Debug` output.
    inner: DebugIgnore<Box<dyn io::Read>>,
}
impl WrappedReader {
pub fn new(path: &Utf8Path, inner: Box<dyn io::Read>) -> Self {
Self {
path: path.to_path_buf(),
inner: DebugIgnore(inner),
}
}
fn read(&mut self, buffer: &mut [u8]) -> io::Result<usize> {
self.inner.read(buffer)
}
}
impl io::Read for WrappedReader {
    // Delegates to the inherent `read` method, which reads from the wrapped
    // reader.
    fn read(&mut self, buffer: &mut [u8]) -> io::Result<usize> {
        self.read(buffer)
    }
}
impl Reader for WrappedReader {
    /// Wrap an arbitrary error into Gleam's file read error, attaching the
    /// path this reader was opened from.
    fn convert_err<E: std::error::Error>(&self, err: E) -> Error {
        Error::FileIo {
            kind: FileKind::File,
            action: FileIoAction::Read,
            path: self.path.clone(),
            err: Some(err.to_string()),
        }
    }
}
/// A trait for sending HTTP requests.
#[async_trait]
pub trait HttpClient {
    /// Send the request and return the response. Bodies are raw bytes.
    async fn send(&self, request: http::Request<Vec<u8>>)
    -> Result<http::Response<Vec<u8>>, Error>;
}
/// A trait for reading and unpacking tar archives with Gleam's error handling.
pub trait TarUnpacker {
    // FIXME: The reader types are restrictive here. We should be more generic
    // than this.
    /// Raw entry iteration returning `io::Result`. Implementors provide this;
    /// callers should prefer `entries`.
    fn io_result_entries<'a>(
        &self,
        archive: &'a mut Archive<WrappedReader>,
    ) -> io::Result<tar::Entries<'a, WrappedReader>>;
    /// Iterate the archive's entries, converting IO errors into
    /// `Error::ExpandTar`.
    fn entries<'a>(
        &self,
        archive: &'a mut Archive<WrappedReader>,
    ) -> Result<tar::Entries<'a, WrappedReader>> {
        tracing::debug!("iterating through tar archive");
        self.io_result_entries(archive)
            .map_err(|e| Error::ExpandTar {
                error: e.to_string(),
            })
    }
    /// Raw unpacking of a gzipped inner archive into `path`, returning
    /// `io::Result`. Implementors provide this; callers should prefer
    /// `unpack`.
    fn io_result_unpack(
        &self,
        path: &Utf8Path,
        archive: Archive<GzDecoder<Entry<'_, WrappedReader>>>,
    ) -> io::Result<()>;
    /// Unpack the archive into the directory at `path`, converting IO errors
    /// into Gleam's file IO error.
    fn unpack(
        &self,
        path: &Utf8Path,
        archive: Archive<GzDecoder<Entry<'_, WrappedReader>>>,
    ) -> Result<()> {
        tracing::debug!(path = ?path, "unpacking tar archive");
        self.io_result_unpack(path, archive)
            .map_err(|e| Error::FileIo {
                action: FileIoAction::WriteTo,
                kind: FileKind::Directory,
                path: path.to_path_buf(),
                err: Some(e.to_string()),
            })
    }
}
/// Returns true if the given file extension belongs to a native (non-Gleam)
/// source file: Erlang (`erl`, `hrl`), Elixir (`ex`), or
/// JavaScript/TypeScript (`js`, `mjs`, `cjs`, `ts`).
#[inline]
pub fn is_native_file_extension(extension: &str) -> bool {
    ["erl", "hrl", "ex", "js", "mjs", "cjs", "ts"].contains(&extension)
}
/// Serde helper: serialize a `HashMap` with its entries in key order so that
/// the output is deterministic.
pub fn ordered_map<S, K, V>(value: &HashMap<K, V>, serializer: S) -> Result<S::Ok, S::Error>
where
    S: serde::Serializer,
    K: serde::Serialize + Ord,
    V: serde::Serialize,
{
    use serde::Serialize;
    // A BTreeMap keeps its keys sorted, giving a stable iteration order.
    value
        .iter()
        .collect::<std::collections::BTreeMap<_, _>>()
        .serialize(serializer)
}
| rust | Apache-2.0 | f424547f02e621f1c5f28749786e05eda7feb098 | 2026-01-04T15:40:22.554517Z | false |
gleam-lang/gleam | https://github.com/gleam-lang/gleam/blob/f424547f02e621f1c5f28749786e05eda7feb098/compiler-core/src/uid.rs | compiler-core/src/uid.rs | use std::sync::{
Arc,
atomic::{AtomicU64, Ordering},
};
/// A generator of unique ids. Only one should be used per compilation run to
/// ensure ids do not get reused.
///
/// Clones share the same underlying counter, so a clone never hands out an id
/// that the original (or any other clone) has already produced.
#[derive(Debug, Clone, Default)]
pub struct UniqueIdGenerator {
    id: Arc<AtomicU64>,
}

impl UniqueIdGenerator {
    /// Create a new generator whose ids start at zero.
    pub fn new() -> Self {
        UniqueIdGenerator {
            id: Arc::new(AtomicU64::new(0)),
        }
    }

    /// Hand out the next id. Ids are sequential, starting at zero.
    pub fn next(&self) -> u64 {
        // Relaxed ordering is enough here: only the atomicity of the
        // increment matters, no other memory accesses are being ordered.
        self.id.fetch_add(1, Ordering::Relaxed)
    }
}
// Ids are handed out sequentially, and cloned generators share the original
// counter so no id is ever produced twice.
// (Fixed test name typo: was `id_geneation`.)
#[test]
fn id_generation() {
    let ids = UniqueIdGenerator::new();
    let ids2 = ids.clone();
    assert_eq!(ids.next(), 0);
    assert_eq!(ids.next(), 1);
    assert_eq!(ids.next(), 2);
    // Cloned ones use the same counter
    assert_eq!(ids2.next(), 3);
    assert_eq!(ids2.next(), 4);
    assert_eq!(ids2.next(), 5);
    // The original is updated
    assert_eq!(ids.next(), 6);
    assert_eq!(ids.next(), 7);
}
| rust | Apache-2.0 | f424547f02e621f1c5f28749786e05eda7feb098 | 2026-01-04T15:40:22.554517Z | false |
gleam-lang/gleam | https://github.com/gleam-lang/gleam/blob/f424547f02e621f1c5f28749786e05eda7feb098/compiler-core/src/manifest.rs | compiler-core/src/manifest.rs | use std::collections::HashMap;
use std::fmt;
use crate::Result;
use crate::io::{make_relative, ordered_map};
use crate::requirement::Requirement;
use camino::{Utf8Path, Utf8PathBuf};
use ecow::EcoString;
use hexpm::version::Version;
use itertools::Itertools;
/// The contents of a `manifest.toml` file: the project's requirements and the
/// fully resolved set of packages that satisfies them.
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize, PartialEq, Eq)]
pub struct Manifest {
    /// The top-level requirements, keyed by package name.
    /// Serialized in key order so output is deterministic.
    #[serde(serialize_with = "ordered_map")]
    pub requirements: HashMap<EcoString, Requirement>,
    /// Every package in the resolved dependency set.
    /// Serialized sorted so output is deterministic.
    #[serde(serialize_with = "sorted_vec")]
    pub packages: Vec<ManifestPackage>,
}
impl Manifest {
    // Rather than using the toml library to do serialization we implement it
    // manually so that we can control the formatting.
    // We want to keep entries on a single line each so that they are more
    // resistant to merge conflicts and are easier to fix when it does happen.
    /// Render this manifest as the text of a `manifest.toml` file.
    ///
    /// `root_path` is the project root; local package paths are written
    /// relative to it, with forward slashes, so the file is portable.
    pub fn to_toml(&self, root_path: &Utf8Path) -> String {
        let mut buffer = String::new();
        let Self {
            requirements,
            packages,
        } = self;
        buffer.push_str(
            "# This file was generated by Gleam
# You typically do not need to edit this file
",
        );
        // Packages
        buffer.push_str("packages = [\n");
        // Sorted by name so output is deterministic.
        for ManifestPackage {
            name,
            source,
            version,
            otp_app,
            build_tools,
            requirements,
        } in packages.iter().sorted_by(|a, b| a.name.cmp(&b.name))
        {
            buffer.push_str(r#"  {"#);
            buffer.push_str(r#" name = ""#);
            buffer.push_str(name);
            buffer.push_str(r#"", version = ""#);
            buffer.push_str(&version.to_string());
            buffer.push_str(r#"", build_tools = ["#);
            for (i, tool) in build_tools.iter().enumerate() {
                if i != 0 {
                    buffer.push_str(", ");
                }
                buffer.push('"');
                buffer.push_str(tool);
                buffer.push('"');
            }
            buffer.push_str("], requirements = [");
            // The package's requirements are also written sorted.
            for (i, package) in requirements.iter().sorted_by(|a, b| a.cmp(b)).enumerate() {
                if i != 0 {
                    buffer.push_str(", ");
                }
                buffer.push('"');
                buffer.push_str(package);
                buffer.push('"');
            }
            buffer.push(']');
            // `otp_app` is only written when one is recorded.
            if let Some(app) = otp_app {
                buffer.push_str(", otp_app = \"");
                buffer.push_str(app);
                buffer.push('"');
            }
            match source {
                ManifestPackageSource::Hex { outer_checksum } => {
                    buffer.push_str(r#", source = "hex", outer_checksum = ""#);
                    buffer.push_str(&outer_checksum.to_string());
                    buffer.push('"');
                }
                ManifestPackageSource::Git { repo, commit } => {
                    buffer.push_str(r#", source = "git", repo = ""#);
                    buffer.push_str(repo);
                    buffer.push_str(r#"", commit = ""#);
                    buffer.push_str(commit);
                    buffer.push('"');
                }
                ManifestPackageSource::Local { path } => {
                    buffer.push_str(r#", source = "local", path = ""#);
                    // Relative to the project root, with forward slashes even
                    // on Windows, so the manifest is machine-independent.
                    buffer.push_str(&make_relative(root_path, path).as_str().replace('\\', "/"));
                    buffer.push('"');
                }
            };
            buffer.push_str(" },\n");
        }
        buffer.push_str("]\n\n");
        // Requirements
        buffer.push_str("[requirements]\n");
        // Sorted by name so output is deterministic.
        for (name, requirement) in requirements.iter().sorted_by(|a, b| a.0.cmp(b.0)) {
            buffer.push_str(name);
            buffer.push_str(" = ");
            buffer.push_str(&requirement.to_toml(root_path));
            buffer.push('\n');
        }
        buffer
    }
}
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)]
pub struct Base16Checksum(pub Vec<u8>);
impl ToString for Base16Checksum {
fn to_string(&self) -> String {
base16::encode_upper(&self.0)
}
}
impl serde::Serialize for Base16Checksum {
    /// Serialized as an upper-case base 16 (hex) string.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        serializer.serialize_str(&base16::encode_upper(&self.0))
    }
}
impl<'de> serde::Deserialize<'de> for Base16Checksum {
    /// Deserialized from a base 16 (hex) string via `Base16ChecksumVisitor`.
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        deserializer.deserialize_str(Base16ChecksumVisitor)
    }
}
/// Serde visitor that decodes a hex string into a `Base16Checksum`.
struct Base16ChecksumVisitor;
impl<'de> serde::de::Visitor<'de> for Base16ChecksumVisitor {
    type Value = Base16Checksum;
    fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
        formatter.write_str("a base 16 checksum")
    }
    fn visit_str<E>(self, value: &str) -> Result<Self::Value, E>
    where
        E: serde::de::Error,
    {
        // Invalid hex becomes a serde custom error rather than a panic.
        base16::decode(value)
            .map(Base16Checksum)
            .map_err(serde::de::Error::custom)
    }
}
/// A single resolved package recorded in the manifest.
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, serde::Serialize, serde::Deserialize)]
pub struct ManifestPackage {
    pub name: EcoString,
    /// The exact resolved version.
    pub version: Version,
    /// The build tools the package can be built with, e.g. "gleam", "rebar3".
    pub build_tools: Vec<EcoString>,
    /// The OTP application name, if it differs from the package name.
    #[serde(default)]
    pub otp_app: Option<EcoString>,
    /// Names of the packages this one depends on.
    /// Serialized sorted so output is deterministic.
    #[serde(serialize_with = "sorted_vec")]
    pub requirements: Vec<EcoString>,
    /// Where the package came from. Flattened so its fields appear inline in
    /// the package's TOML table.
    #[serde(flatten)]
    pub source: ManifestPackageSource,
}
impl ManifestPackage {
    /// Builder-style helper that replaces the package's build tools list.
    pub fn with_build_tools(mut self, build_tools: &'static [&'static str]) -> Self {
        self.build_tools = build_tools.iter().map(|tool| (*tool).into()).collect();
        self
    }

    /// The OTP application name for this package. Falls back to the package
    /// name when no explicit `otp_app` is recorded.
    pub fn application_name(&self) -> &EcoString {
        self.otp_app.as_ref().unwrap_or(&self.name)
    }

    /// Whether this package comes from the Hex registry.
    #[inline]
    pub fn is_hex(&self) -> bool {
        match self.source {
            ManifestPackageSource::Hex { .. } => true,
            _ => false,
        }
    }

    /// Whether this package comes from the local filesystem.
    #[inline]
    pub fn is_local(&self) -> bool {
        match self.source {
            ManifestPackageSource::Local { .. } => true,
            _ => false,
        }
    }

    /// Whether this package comes from a git repository.
    #[inline]
    pub fn is_git(&self) -> bool {
        match self.source {
            ManifestPackageSource::Git { .. } => true,
            _ => false,
        }
    }
}
#[cfg(test)]
// Test-only default: a v1.0.0 Hex package with an empty checksum.
impl Default for ManifestPackage {
    fn default() -> Self {
        Self {
            name: Default::default(),
            build_tools: Default::default(),
            otp_app: Default::default(),
            requirements: Default::default(),
            version: Version::new(1, 0, 0),
            source: ManifestPackageSource::Hex {
                outer_checksum: Base16Checksum(vec![]),
            },
        }
    }
}
/// Where a package in the manifest was obtained from.
/// Serialized with a `source` tag flattened into the package table.
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, serde::Serialize, serde::Deserialize)]
#[serde(tag = "source")]
pub enum ManifestPackageSource {
    /// Published on the Hex package registry.
    #[serde(rename = "hex")]
    Hex { outer_checksum: Base16Checksum },
    /// Fetched from a git repository at a specific commit.
    #[serde(rename = "git")]
    Git { repo: EcoString, commit: EcoString },
    /// A package on the local filesystem.
    #[serde(rename = "local")]
    Local { path: Utf8PathBuf }, // should be the canonical path
}
impl ManifestPackageSource {
    /// The broad category of this source, without any of its payload.
    pub fn kind(&self) -> ManifestPackageSourceKind {
        match self {
            Self::Hex { .. } => ManifestPackageSourceKind::Hex,
            Self::Git { .. } => ManifestPackageSourceKind::Git,
            Self::Local { .. } => ManifestPackageSourceKind::Local,
        }
    }
}
/// The kind of a `ManifestPackageSource`, with none of its data.
/// Useful for comparing where two packages came from.
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub enum ManifestPackageSourceKind {
    Hex,
    Git,
    Local,
}
/// Serde helper: serialize a slice with its elements in ascending order so
/// the output is deterministic regardless of the original ordering.
fn sorted_vec<S, T>(value: &[T], serializer: S) -> Result<S::Ok, S::Error>
where
    S: serde::Serializer,
    T: serde::Serialize + Ord,
{
    use serde::Serialize;
    // Sort references rather than cloning the elements themselves.
    let mut items = value.iter().collect::<Vec<&T>>();
    items.sort();
    items.serialize(serializer)
}
#[cfg(test)]
mod tests {
    use super::*;
    // Platform-specific fixture paths. The Windows set also has a
    // UNC-prefixed variant (`\\?\C:...`) used to check that the UNC prefix
    // does not leak into the generated TOML.
    #[cfg(windows)]
    const HOME: &'static str = "C:\\home\\louis\\packages\\some_folder";
    #[cfg(windows)]
    const PACKAGE: &'static str = "C:\\home\\louis\\packages\\path\\to\\package";
    #[cfg(windows)]
    const PACKAGE_WITH_UNC: &'static str = "\\\\?\\C:\\home\\louis\\packages\\path\\to\\package";
    #[cfg(not(windows))]
    const HOME: &str = "/home/louis/packages/some_folder";
    #[cfg(not(windows))]
    const PACKAGE: &str = "/home/louis/packages/path/to/package";
    // Golden test for `Manifest::to_toml`: entries must come out sorted by
    // name, one per line, with local paths made relative to the root.
    #[test]
    fn manifest_toml_format() {
        let manifest = Manifest {
            requirements: [
                ("zzz".into(), Requirement::hex("> 0.0.0").unwrap()),
                ("aaa".into(), Requirement::hex("> 0.0.0").unwrap()),
                (
                    "awsome_local2".into(),
                    Requirement::git("https://github.com/gleam-lang/gleam.git", "bd9fe02f"),
                ),
                (
                    "awsome_local1".into(),
                    Requirement::path("../path/to/package"),
                ),
                ("gleam_stdlib".into(), Requirement::hex("~> 0.17").unwrap()),
                ("gleeunit".into(), Requirement::hex("~> 0.1").unwrap()),
            ]
            .into(),
            packages: vec![
                ManifestPackage {
                    name: "gleam_stdlib".into(),
                    version: Version::new(0, 17, 1),
                    build_tools: ["gleam".into()].into(),
                    otp_app: None,
                    requirements: vec![],
                    source: ManifestPackageSource::Hex {
                        outer_checksum: Base16Checksum(vec![1, 22]),
                    },
                },
                ManifestPackage {
                    name: "aaa".into(),
                    version: Version::new(0, 4, 0),
                    build_tools: ["rebar3".into(), "make".into()].into(),
                    otp_app: Some("aaa_app".into()),
                    requirements: vec!["zzz".into(), "gleam_stdlib".into()],
                    source: ManifestPackageSource::Hex {
                        outer_checksum: Base16Checksum(vec![3, 22]),
                    },
                },
                ManifestPackage {
                    name: "zzz".into(),
                    version: Version::new(0, 4, 0),
                    build_tools: ["mix".into()].into(),
                    otp_app: None,
                    requirements: vec![],
                    source: ManifestPackageSource::Hex {
                        outer_checksum: Base16Checksum(vec![3, 22]),
                    },
                },
                ManifestPackage {
                    name: "awsome_local2".into(),
                    version: Version::new(1, 2, 3),
                    build_tools: ["gleam".into()].into(),
                    otp_app: None,
                    requirements: vec![],
                    source: ManifestPackageSource::Git {
                        repo: "https://github.com/gleam-lang/gleam.git".into(),
                        commit: "bd9fe02f72250e6a136967917bcb1bdccaffa3c8".into(),
                    },
                },
                ManifestPackage {
                    name: "awsome_local1".into(),
                    version: Version::new(1, 2, 3),
                    build_tools: ["gleam".into()].into(),
                    otp_app: None,
                    requirements: vec![],
                    source: ManifestPackageSource::Local {
                        path: PACKAGE.into(),
                    },
                },
                ManifestPackage {
                    name: "gleeunit".into(),
                    version: Version::new(0, 4, 0),
                    build_tools: ["gleam".into()].into(),
                    otp_app: None,
                    requirements: vec!["gleam_stdlib".into()],
                    source: ManifestPackageSource::Hex {
                        outer_checksum: Base16Checksum(vec![3, 46]),
                    },
                },
            ],
        };
        let buffer = manifest.to_toml(HOME.into());
        assert_eq!(
            buffer,
            r#"# This file was generated by Gleam
# You typically do not need to edit this file
packages = [
  { name = "aaa", version = "0.4.0", build_tools = ["rebar3", "make"], requirements = ["gleam_stdlib", "zzz"], otp_app = "aaa_app", source = "hex", outer_checksum = "0316" },
  { name = "awsome_local1", version = "1.2.3", build_tools = ["gleam"], requirements = [], source = "local", path = "../path/to/package" },
  { name = "awsome_local2", version = "1.2.3", build_tools = ["gleam"], requirements = [], source = "git", repo = "https://github.com/gleam-lang/gleam.git", commit = "bd9fe02f72250e6a136967917bcb1bdccaffa3c8" },
  { name = "gleam_stdlib", version = "0.17.1", build_tools = ["gleam"], requirements = [], source = "hex", outer_checksum = "0116" },
  { name = "gleeunit", version = "0.4.0", build_tools = ["gleam"], requirements = ["gleam_stdlib"], source = "hex", outer_checksum = "032E" },
  { name = "zzz", version = "0.4.0", build_tools = ["mix"], requirements = [], source = "hex", outer_checksum = "0316" },
]
[requirements]
aaa = { version = "> 0.0.0" }
awsome_local1 = { path = "../path/to/package" }
awsome_local2 = { git = "https://github.com/gleam-lang/gleam.git", ref = "bd9fe02f" }
gleam_stdlib = { version = "~> 0.17" }
gleeunit = { version = "~> 0.1" }
zzz = { version = "> 0.0.0" }
"#
        );
    }
    // Same as above but the local package path carries a Windows UNC prefix;
    // the rendered path must still come out as a plain relative path.
    #[cfg(windows)]
    #[test]
    fn manifest_toml_format_with_unc() {
        let manifest = Manifest {
            requirements: [
                ("zzz".into(), Requirement::hex("> 0.0.0").unwrap()),
                ("aaa".into(), Requirement::hex("> 0.0.0").unwrap()),
                (
                    "awsome_local2".into(),
                    Requirement::git("https://github.com/gleam-lang/gleam.git", "main"),
                ),
                (
                    "awsome_local1".into(),
                    Requirement::path("../path/to/package"),
                ),
                ("gleam_stdlib".into(), Requirement::hex("~> 0.17").unwrap()),
                ("gleeunit".into(), Requirement::hex("~> 0.1").unwrap()),
            ]
            .into(),
            packages: vec![
                ManifestPackage {
                    name: "gleam_stdlib".into(),
                    version: Version::new(0, 17, 1),
                    build_tools: ["gleam".into()].into(),
                    otp_app: None,
                    requirements: vec![],
                    source: ManifestPackageSource::Hex {
                        outer_checksum: Base16Checksum(vec![1, 22]),
                    },
                },
                ManifestPackage {
                    name: "aaa".into(),
                    version: Version::new(0, 4, 0),
                    build_tools: ["rebar3".into(), "make".into()].into(),
                    otp_app: Some("aaa_app".into()),
                    requirements: vec!["zzz".into(), "gleam_stdlib".into()],
                    source: ManifestPackageSource::Hex {
                        outer_checksum: Base16Checksum(vec![3, 22]),
                    },
                },
                ManifestPackage {
                    name: "zzz".into(),
                    version: Version::new(0, 4, 0),
                    build_tools: ["mix".into()].into(),
                    otp_app: None,
                    requirements: vec![],
                    source: ManifestPackageSource::Hex {
                        outer_checksum: Base16Checksum(vec![3, 22]),
                    },
                },
                ManifestPackage {
                    name: "awsome_local2".into(),
                    version: Version::new(1, 2, 3),
                    build_tools: ["gleam".into()].into(),
                    otp_app: None,
                    requirements: vec![],
                    source: ManifestPackageSource::Git {
                        repo: "https://github.com/gleam-lang/gleam.git".into(),
                        commit: "bd9fe02f72250e6a136967917bcb1bdccaffa3c8".into(),
                    },
                },
                ManifestPackage {
                    name: "awsome_local1".into(),
                    version: Version::new(1, 2, 3),
                    build_tools: ["gleam".into()].into(),
                    otp_app: None,
                    requirements: vec![],
                    source: ManifestPackageSource::Local {
                        path: PACKAGE_WITH_UNC.into(),
                    },
                },
                ManifestPackage {
                    name: "gleeunit".into(),
                    version: Version::new(0, 4, 0),
                    build_tools: ["gleam".into()].into(),
                    otp_app: None,
                    requirements: vec!["gleam_stdlib".into()],
                    source: ManifestPackageSource::Hex {
                        outer_checksum: Base16Checksum(vec![3, 46]),
                    },
                },
            ],
        };
        let buffer = manifest.to_toml(HOME.into());
        assert_eq!(
            buffer,
            r#"# This file was generated by Gleam
# You typically do not need to edit this file
packages = [
  { name = "aaa", version = "0.4.0", build_tools = ["rebar3", "make"], requirements = ["gleam_stdlib", "zzz"], otp_app = "aaa_app", source = "hex", outer_checksum = "0316" },
  { name = "awsome_local1", version = "1.2.3", build_tools = ["gleam"], requirements = [], source = "local", path = "../path/to/package" },
  { name = "awsome_local2", version = "1.2.3", build_tools = ["gleam"], requirements = [], source = "git", repo = "https://github.com/gleam-lang/gleam.git", commit = "bd9fe02f72250e6a136967917bcb1bdccaffa3c8" },
  { name = "gleam_stdlib", version = "0.17.1", build_tools = ["gleam"], requirements = [], source = "hex", outer_checksum = "0116" },
  { name = "gleeunit", version = "0.4.0", build_tools = ["gleam"], requirements = ["gleam_stdlib"], source = "hex", outer_checksum = "032E" },
  { name = "zzz", version = "0.4.0", build_tools = ["mix"], requirements = [], source = "hex", outer_checksum = "0316" },
]
[requirements]
aaa = { version = "> 0.0.0" }
awsome_local1 = { path = "../path/to/package" }
awsome_local2 = { git = "https://github.com/gleam-lang/gleam.git", ref = "main" }
gleam_stdlib = { version = "~> 0.17" }
gleeunit = { version = "~> 0.1" }
zzz = { version = "> 0.0.0" }
"#
        );
    }
}
/// The result of dependency resolution: the new manifest plus a description
/// of how it differs from what came before.
#[derive(Debug)]
pub struct Resolved {
    pub manifest: Manifest,
    /// How the set of resolved packages changed.
    pub package_changes: PackageChanges,
    /// Whether the top-level requirements themselves changed.
    pub requirements_changed: bool,
}
/// The difference between the package sets of two manifests.
#[derive(Debug)]
pub struct PackageChanges {
    /// Packages present only in the new manifest, with their versions.
    pub added: Vec<(EcoString, Version)>,
    /// Packages whose resolved version changed.
    pub changed: Vec<Changed>,
    /// When updating git dependencies, it is possible to update to a newer commit
    /// without updating the version of the package (which is specified in
    /// `gleam.toml`). In this case, we still want to record the change, but it
    /// must be stored differently.
    pub changed_git: Vec<ChangedGit>,
    /// Packages present only in the old manifest.
    pub removed: Vec<EcoString>,
}
/// A package whose resolved version changed between two manifests.
#[derive(Debug, PartialEq)]
pub struct Changed {
    pub name: EcoString,
    /// The previously resolved version.
    pub old: Version,
    /// The newly resolved version.
    pub new: Version,
}
/// A git package whose commit changed while its version stayed the same.
#[derive(Debug, PartialEq)]
pub struct ChangedGit {
    pub name: EcoString,
    /// The previously pinned commit.
    pub old_hash: EcoString,
    /// The newly pinned commit.
    pub new_hash: EcoString,
}
impl Resolved {
pub fn any_changes(&self) -> bool {
self.requirements_changed || self.package_changes.any_changes()
}
pub fn all_added(manifest: Manifest) -> Resolved {
let added = manifest
.packages
.iter()
.map(|package| (package.name.clone(), package.version.clone()))
.collect();
Self {
manifest,
requirements_changed: true,
package_changes: PackageChanges {
added,
changed: vec![],
changed_git: vec![],
removed: vec![],
},
}
}
pub fn no_change(manifest: Manifest) -> Self {
Self {
manifest,
requirements_changed: false,
package_changes: PackageChanges {
added: vec![],
changed: vec![],
changed_git: vec![],
removed: vec![],
},
}
}
}
impl PackageChanges {
    /// True if any package was added, changed, or removed.
    pub fn any_changes(&self) -> bool {
        !self.added.is_empty()
            || !self.changed.is_empty()
            || !self.changed_git.is_empty()
            || !self.removed.is_empty()
    }
    /// Compare the old and new versions of the manifest and determine the package changes
    pub fn between_manifests(old: &Manifest, new: &Manifest) -> Self {
        let mut added = vec![];
        let mut changed = vec![];
        let mut changed_git = vec![];
        let mut removed = vec![];
        // Index the old packages by name. Entries are removed as they are
        // matched, so whatever remains at the end was removed.
        let mut old: HashMap<_, _> = old
            .packages
            .iter()
            .map(|package| (&package.name, package))
            .collect();
        for new in &new.packages {
            match old.remove(&new.name) {
                // If the kind of source changed, the packages bear essentially no connection
                Some(old) if new.source.kind() != old.source.kind() => {
                    removed.push(old.name.clone());
                    added.push((new.name.clone(), new.version.clone()));
                }
                // Same version: the only change worth recording is a git
                // package pinned to a different commit.
                Some(old) if old.version == new.version => match (&old.source, &new.source) {
                    (
                        ManifestPackageSource::Git {
                            commit: old_hash, ..
                        },
                        ManifestPackageSource::Git {
                            commit: new_hash, ..
                        },
                    ) if old_hash != new_hash => changed_git.push(ChangedGit {
                        name: new.name.clone(),
                        old_hash: old_hash.clone(),
                        new_hash: new_hash.clone(),
                    }),
                    // Anything else with an unchanged version is unchanged.
                    (
                        ManifestPackageSource::Hex { .. }
                        | ManifestPackageSource::Local { .. }
                        | ManifestPackageSource::Git { .. },
                        _,
                    ) => {}
                },
                // Same package, same source kind, different version.
                Some(old) => {
                    changed.push(Changed {
                        name: new.name.clone(),
                        old: old.version.clone(),
                        new: new.version.clone(),
                    });
                }
                // Not present in the old manifest: newly added.
                None => {
                    added.push((new.name.clone(), new.version.clone()));
                }
            }
        }
        // Everything left unmatched in the old manifest was removed.
        removed.extend(old.into_keys().cloned());
        Self {
            added,
            changed,
            changed_git,
            removed,
        }
    }
}
#[cfg(test)]
mod manifest_update_tests {
    use std::collections::HashMap;
    use ecow::EcoString;
    use hexpm::version::Version;
    use crate::manifest::{Base16Checksum, ManifestPackage, ManifestPackageSource, PackageChanges};
    use crate::manifest::{Changed, Manifest};
    // `between_manifests` must bucket packages into added, changed, and
    // removed, leaving unchanged packages out entirely.
    #[test]
    fn resolved_with_updated() {
        let package = |name: &str, version| ManifestPackage {
            name: EcoString::from(name),
            version,
            build_tools: vec![],
            otp_app: None,
            requirements: vec![],
            source: ManifestPackageSource::Hex {
                outer_checksum: Base16Checksum(vec![]),
            },
        };
        let old = Manifest {
            requirements: HashMap::new(),
            packages: vec![
                package("unchanged1", Version::new(3, 0, 0)),
                package("unchanged2", Version::new(0, 1, 0)),
                package("changed1", Version::new(3, 0, 0)),
                package("changed2", Version::new(0, 1, 0)),
                package("removed1", Version::new(10, 0, 0)),
                package("removed2", Version::new(20, 1, 0)),
            ],
        };
        let new = Manifest {
            requirements: HashMap::new(),
            packages: vec![
                package("new1", Version::new(1, 0, 0)),
                package("new2", Version::new(2, 1, 0)),
                package("unchanged1", Version::new(3, 0, 0)),
                package("unchanged2", Version::new(0, 1, 0)),
                package("changed1", Version::new(5, 0, 0)),
                package("changed2", Version::new(3, 0, 0)),
            ],
        };
        let mut changes = PackageChanges::between_manifests(&old, &new);
        // Sort before asserting: the removed list comes from a HashMap, so
        // `between_manifests` gives no ordering guarantee.
        changes.added.sort();
        changes.changed.sort_by(|a, b| a.name.cmp(&b.name));
        changes.removed.sort();
        assert_eq!(
            changes.added,
            vec![
                ("new1".into(), Version::new(1, 0, 0)),
                ("new2".into(), Version::new(2, 1, 0)),
            ]
        );
        assert_eq!(
            changes.changed,
            vec![
                Changed {
                    name: "changed1".into(),
                    old: Version::new(3, 0, 0),
                    new: Version::new(5, 0, 0)
                },
                Changed {
                    name: "changed2".into(),
                    old: Version::new(0, 1, 0),
                    new: Version::new(3, 0, 0)
                },
            ]
        );
        assert_eq!(
            changes.removed,
            vec![EcoString::from("removed1"), EcoString::from("removed2")]
        );
    }
    // A package whose source kind changes (local -> hex) must be reported as
    // removed and re-added, never as changed, even though name and version
    // are the same.
    #[test]
    fn resolved_with_source_type_change() {
        let name = EcoString::from("wibble");
        let version = Version::new(1, 0, 0);
        let package = |source| ManifestPackage {
            name: name.clone(),
            version: version.clone(),
            build_tools: vec![],
            otp_app: None,
            requirements: vec![],
            source,
        };
        let old = Manifest {
            requirements: HashMap::new(),
            packages: vec![package(ManifestPackageSource::Local {
                path: "wibble".into(),
            })],
        };
        let new = Manifest {
            requirements: HashMap::new(),
            packages: vec![package(ManifestPackageSource::Hex {
                outer_checksum: Base16Checksum(vec![]),
            })],
        };
        let changes = PackageChanges::between_manifests(&old, &new);
        assert!(changes.changed.is_empty());
        assert_eq!(changes.removed, vec![name.clone()]);
        assert_eq!(changes.added, vec![(name.clone(), version.clone())]);
    }
}
| rust | Apache-2.0 | f424547f02e621f1c5f28749786e05eda7feb098 | 2026-01-04T15:40:22.554517Z | false |
gleam-lang/gleam | https://github.com/gleam-lang/gleam/blob/f424547f02e621f1c5f28749786e05eda7feb098/compiler-core/src/warning.rs | compiler-core/src/warning.rs | use crate::{
ast::{BitArraySegmentTruncation, SrcSpan, TodoKind},
build::Target,
diagnostic::{self, Diagnostic, ExtraLabel, Location},
error::wrap,
exhaustiveness::ImpossibleBitArraySegmentPattern,
type_::{
self,
error::{
AssertImpossiblePattern, FeatureKind, LiteralCollectionKind, PanicPosition,
TodoOrPanic, UnreachablePatternReason,
},
expression::ComparisonOutcome,
pretty::Printer,
},
};
use camino::Utf8PathBuf;
use debug_ignore::DebugIgnore;
use ecow::EcoString;
use itertools::Itertools;
use std::{
io::Write,
sync::{Arc, atomic::Ordering},
};
use std::{rc::Rc, sync::atomic::AtomicUsize};
use termcolor::Buffer;
/// A sink that warnings can be emitted to.
pub trait WarningEmitterIO {
    fn emit_warning(&self, warning: Warning);
}
/// A warning sink that discards all warnings.
#[derive(Debug, Clone, Copy)]
pub struct NullWarningEmitterIO;
impl WarningEmitterIO for NullWarningEmitterIO {
    fn emit_warning(&self, _warning: Warning) {}
}
/// A warning sink that collects warnings into a shared vector so they can be
/// inspected later.
#[derive(Debug, Clone, Default)]
pub struct VectorWarningEmitterIO {
    /// The collected warnings. Shared and locked so clones see the same list.
    pub warnings: Arc<std::sync::RwLock<Vec<Warning>>>,
}
impl VectorWarningEmitterIO {
    pub fn new() -> Self {
        Default::default()
    }

    /// Remove and return every warning collected so far, leaving the
    /// collection empty.
    pub fn take(&self) -> Vec<Warning> {
        std::mem::take(&mut *self.write_lock())
    }

    /// Discard all collected warnings.
    pub fn reset(&self) {
        self.write_lock().clear();
    }

    /// Remove and return the most recently collected warning, if any.
    pub fn pop(&self) -> Option<Warning> {
        self.write_lock().pop()
    }

    fn write_lock(&self) -> std::sync::RwLockWriteGuard<'_, Vec<Warning>> {
        self.warnings.write().expect("Vector lock poisoned")
    }
}
impl WarningEmitterIO for VectorWarningEmitterIO {
    /// Append the warning to the shared vector.
    fn emit_warning(&self, warning: Warning) {
        let mut warnings = self.write_lock();
        warnings.push(warning);
    }
}
/// Counts warnings and forwards them to a `WarningEmitterIO` implementation.
#[derive(Debug, Clone)]
pub struct WarningEmitter {
    /// The number of warnings emitted.
    /// In the context of the project compiler this is the count for the root
    /// package only, the count is reset back to zero after the dependencies are
    /// compiled.
    count: Arc<AtomicUsize>,
    /// Where warnings are sent. `DebugIgnore` excludes it from `Debug` output.
    emitter: DebugIgnore<Rc<dyn WarningEmitterIO>>,
}
impl WarningEmitter {
pub fn new(emitter: Rc<dyn WarningEmitterIO>) -> Self {
Self {
count: Arc::new(AtomicUsize::new(0)),
emitter: DebugIgnore(emitter),
}
}
pub fn null() -> Self {
Self::new(Rc::new(NullWarningEmitterIO))
}
pub fn reset_count(&self) {
self.count.store(0, Ordering::Relaxed);
}
pub fn count(&self) -> usize {
self.count.load(Ordering::Relaxed)
}
pub fn emit(&self, warning: Warning) {
_ = self.count.fetch_add(1, Ordering::Relaxed);
self.emitter.emit_warning(warning);
}
pub fn vector() -> (Self, Rc<VectorWarningEmitterIO>) {
let io = Rc::new(VectorWarningEmitterIO::default());
let emitter = Self::new(io.clone());
(emitter, Rc::clone(&io))
}
}
/// A `WarningEmitter` specialised for a single module: it attaches the
/// module's path and source text to every type warning it emits.
#[derive(Debug, Clone)]
pub struct TypeWarningEmitter {
    module_path: Utf8PathBuf,
    module_src: EcoString,
    emitter: WarningEmitter,
}
impl TypeWarningEmitter {
    pub fn new(module_path: Utf8PathBuf, module_src: EcoString, emitter: WarningEmitter) -> Self {
        Self {
            module_path,
            module_src,
            emitter,
        }
    }
    /// An emitter that discards all warnings, with empty path and source.
    pub fn null() -> Self {
        Self {
            module_path: Utf8PathBuf::new(),
            module_src: EcoString::from(""),
            emitter: WarningEmitter::new(Rc::new(NullWarningEmitterIO)),
        }
    }
    /// Emit a type warning wrapped with this module's path and source so a
    /// diagnostic can later be rendered against the right file.
    pub fn emit(&self, warning: type_::Warning) {
        self.emitter.emit(Warning::Type {
            path: self.module_path.clone(),
            src: self.module_src.clone(),
            warning,
        });
    }
}
/// A warning the build tool can report to the user.
#[derive(Debug, Clone, Eq, PartialEq)]
pub enum Warning {
    /// A warning produced during type checking, carrying the path and source
    /// text of the module it was found in.
    Type {
        path: Utf8PathBuf,
        src: EcoString,
        warning: type_::Warning,
    },
    /// A source file whose name is not a valid module name.
    InvalidSource {
        path: Utf8PathBuf,
    },
    /// Use of syntax that still parses but is deprecated.
    DeprecatedSyntax {
        path: Utf8PathBuf,
        src: EcoString,
        warning: DeprecatedSyntaxWarning,
    },
    /// Use of an environment variable that has been deprecated.
    DeprecatedEnvironmentVariable {
        variable: DeprecatedEnvironmentVariable,
    },
    /// A module that is empty.
    EmptyModule {
        path: Utf8PathBuf,
        name: EcoString,
    },
    /// A doc comment that is not attached to any definition.
    DetachedDocComment {
        path: Utf8PathBuf,
        src: EcoString,
        location: SrcSpan,
    },
}
/// Environment variables that are recognised but deprecated.
#[derive(Debug, Clone, Eq, PartialEq, Copy)]
pub enum DeprecatedEnvironmentVariable {
    /// `HEXPM_USER`
    HexpmUser,
    /// `HEXPM_PASS`
    HexpmPass,
}
impl DeprecatedEnvironmentVariable {
    /// The name of the deprecated environment variable.
    fn name(&self) -> &'static str {
        match self {
            DeprecatedEnvironmentVariable::HexpmUser => "HEXPM_USER",
            DeprecatedEnvironmentVariable::HexpmPass => "HEXPM_PASS",
        }
    }
    // NOTE(review): `{API_ENV_NAME}` below sits inside plain string literals,
    // so it is never interpolated here — there is no `format!`. Confirm
    // whether a caller substitutes the placeholder, or whether these messages
    // were meant to name the replacement variable directly.
    /// Extra guidance shown alongside the deprecation warning.
    fn message(&self) -> &'static str {
        match self {
            DeprecatedEnvironmentVariable::HexpmUser => {
                "Use the `{API_ENV_NAME}` environment variable instead."
            }
            DeprecatedEnvironmentVariable::HexpmPass => {
                "Use the `{API_ENV_NAME}` environment variable instead."
            }
        }
    }
}
/// Deprecated syntax forms the parser still accepts but warns about.
#[derive(Debug, Clone, Eq, PartialEq, Copy)]
pub enum DeprecatedSyntaxWarning {
    /// If someone uses the deprecated syntax to append to a list:
    /// `["a"..rest]`, notice how there's no comma!
    DeprecatedListPrepend {
        location: SrcSpan,
    },
    /// If someone uses the deprecated syntax to pattern match on a list:
    /// ```gleam
    /// case list {
    ///   [first..rest] -> todo
    ///   //     ^^ notice there's no comma!
    ///   _ ->
    /// }
    /// ```
    ///
    DeprecatedListPattern {
        location: SrcSpan,
    },
    /// If someone uses the deprecated syntax to match on all lists instead of
    /// a common `_`:
    /// ```gleam
    /// case list {
    ///   [..] -> todo
    ///   //^^^^ this matches on all lists so a `_` should be used instead!
    ///   _ ->
    /// }
    /// ```
    ///
    DeprecatedListCatchAllPattern {
        location: SrcSpan,
    },
    /// If a record pattern has a spread that is not preceded by a comma:
    /// ```gleam
    /// case wibble {
    ///   Wibble(arg1: name ..) -> todo
    ///   //               ^^ this should be preceded by a comma!
    /// }
    /// ```
    ///
    DeprecatedRecordSpreadPattern {
        location: SrcSpan,
    },
    /// If a guard has an empty clause :
    /// ```gleam
    /// case wibble {
    ///   big if -> True
    ///   //  ^^ This can be removed.
    /// }
    /// ```
    DeprecatedEmptyClauseGuard {
        location: SrcSpan,
    },
    /// If a deprecated shorthand is used for a target name rather than the
    /// full name (`erlang` or `javascript`).
    DeprecatedTargetShorthand {
        target: Target,
        location: SrcSpan,
    },
}
impl Warning {
pub fn to_diagnostic(&self) -> Diagnostic {
match self {
Warning::InvalidSource { path } => Diagnostic {
title: "Invalid module name".into(),
text: "\
Module names must begin with a lowercase letter and contain
only lowercase alphanumeric characters or underscores."
.into(),
level: diagnostic::Level::Warning,
location: None,
hint: Some(format!(
"Rename `{path}` to be valid, or remove this file from the project source."
)),
},
Warning::DeprecatedSyntax {
path,
src,
warning: DeprecatedSyntaxWarning::DeprecatedListPrepend { location },
} => Diagnostic {
title: "Deprecated prepend syntax".into(),
text: wrap(
"This syntax for prepending to a list is deprecated.
When prepending an item to a list it should be preceded by a comma, \
like this: `[item, ..list]`.",
),
hint: None,
level: diagnostic::Level::Warning,
location: Some(Location {
label: diagnostic::Label {
text: Some("This spread should be preceded by a comma".into()),
span: *location,
},
path: path.clone(),
src: src.clone(),
extra_labels: vec![],
}),
},
Warning::DeprecatedSyntax {
path,
src,
warning: DeprecatedSyntaxWarning::DeprecatedListPattern { location },
} => Diagnostic {
title: "Deprecated list pattern matching syntax".into(),
text: wrap(
"This syntax for pattern matching on a list is deprecated.
When matching on the rest of a list it should always be preceded by a comma, \
like this: `[item, ..list]`.",
),
hint: None,
level: diagnostic::Level::Warning,
location: Some(Location {
label: diagnostic::Label {
text: Some("This spread should be preceded by a comma".into()),
span: *location,
},
path: path.clone(),
src: src.clone(),
extra_labels: vec![],
}),
},
Warning::DeprecatedSyntax {
path,
src,
warning: DeprecatedSyntaxWarning::DeprecatedRecordSpreadPattern { location },
} => Diagnostic {
title: "Deprecated record pattern matching syntax".into(),
text: wrap("This syntax for pattern matching on a record is deprecated."),
hint: None,
level: diagnostic::Level::Warning,
location: Some(Location {
label: diagnostic::Label {
text: Some("This should be preceded by a comma".into()),
span: *location,
},
path: path.clone(),
src: src.clone(),
extra_labels: vec![],
}),
},
Warning::DeprecatedSyntax {
path,
src,
warning: DeprecatedSyntaxWarning::DeprecatedListCatchAllPattern { location },
} => Diagnostic {
title: "Deprecated list pattern matching syntax".into(),
text: wrap(
"This syntax for pattern matching on lists is deprecated.
To match on all possible lists, use the `_` catch-all pattern instead.",
),
hint: None,
level: diagnostic::Level::Warning,
location: Some(Location {
label: diagnostic::Label {
text: Some("This can be replaced with `_`".into()),
span: *location,
},
path: path.clone(),
src: src.clone(),
extra_labels: vec![],
}),
},
Warning::DeprecatedSyntax {
path,
src,
warning: DeprecatedSyntaxWarning::DeprecatedEmptyClauseGuard { location },
} => Diagnostic {
title: "Deprecated empty guard syntax".into(),
text: wrap(
"This syntax for an empty guard is deprecated. \
To have a clause without a guard, remove this.",
),
hint: None,
level: diagnostic::Level::Warning,
location: Some(Location {
label: diagnostic::Label {
text: Some("This can be removed.".into()),
span: *location,
},
path: path.clone(),
src: src.clone(),
extra_labels: vec![],
}),
},
Warning::DeprecatedSyntax {
path,
src,
warning: DeprecatedSyntaxWarning::DeprecatedTargetShorthand { location, target },
} => {
let full_name = match target {
Target::Erlang => "erlang",
Target::JavaScript => "javascript",
};
Diagnostic {
title: "Deprecated target shorthand syntax".into(),
text: wrap(&format!(
"This shorthand target name is deprecated. Use the full name: `{full_name}` instead."
)),
hint: None,
level: diagnostic::Level::Warning,
location: Some(Location {
label: diagnostic::Label {
text: Some(format!("This should be replaced with `{full_name}`")),
span: *location,
},
path: path.clone(),
src: src.clone(),
extra_labels: vec![],
}),
}
}
Warning::DetachedDocComment {
path,
src,
location,
} => Diagnostic {
title: "Detached doc comment".into(),
text: wrap(
"This doc comment is followed by a regular \
comment so it is not attached to any definition.",
),
level: diagnostic::Level::Warning,
location: Some(Location {
path: path.to_path_buf(),
src: src.clone(),
label: diagnostic::Label {
text: Some("This is not attached to a definition".into()),
span: *location,
},
extra_labels: Vec::new(),
}),
hint: Some("Move the comment above the doc comment".into()),
},
Warning::Type { path, warning, src } => match warning {
type_::Warning::Todo {
kind,
location,
type_,
} => {
let mut text = String::new();
text.push_str(
"\
This code will crash if it is run. Be sure to finish it before
running your program.",
);
let title = match kind {
TodoKind::Keyword => "Todo found",
TodoKind::EmptyBlock => {
text.push_str(
"
A block must always contain at least one expression.",
);
"Incomplete block"
}
TodoKind::EmptyFunction { .. } => "Unimplemented function",
TodoKind::IncompleteUse => {
text.push_str(
"
A use expression must always be followed by at least one expression.",
);
"Incomplete use expression"
}
}
.into();
if !type_.is_variable() {
text.push_str(&format!(
"\n\nHint: I think its type is `{}`.\n",
Printer::new().pretty_print(type_, 0)
));
}
Diagnostic {
title,
text,
level: diagnostic::Level::Warning,
location: Some(Location {
path: path.to_path_buf(),
src: src.clone(),
label: diagnostic::Label {
text: Some("This code is incomplete".into()),
span: *location,
},
extra_labels: Vec::new(),
}),
hint: None,
}
}
type_::Warning::ImplicitlyDiscardedResult { location } => Diagnostic {
title: "Unused result value".into(),
text: "".into(),
hint: Some(
"If you are sure you don't need it you can assign it to `_`.".into(),
),
level: diagnostic::Level::Warning,
location: Some(Location {
path: path.to_path_buf(),
src: src.clone(),
label: diagnostic::Label {
text: Some("The Result value created here is unused".into()),
span: *location,
},
extra_labels: Vec::new(),
}),
},
type_::Warning::UnusedLiteral { location } => Diagnostic {
title: "Unused literal".into(),
text: "".into(),
hint: Some("You can safely remove it.".into()),
level: diagnostic::Level::Warning,
location: Some(Location {
path: path.to_path_buf(),
src: src.clone(),
label: diagnostic::Label {
text: Some("This value is never used".into()),
span: *location,
},
extra_labels: Vec::new(),
}),
},
type_::Warning::NoFieldsRecordUpdate { location } => Diagnostic {
title: "Fieldless record update".into(),
text: "".into(),
hint: Some(
"Add some fields to change or replace it with the record itself.".into(),
),
level: diagnostic::Level::Warning,
location: Some(Location {
path: path.to_path_buf(),
src: src.clone(),
label: diagnostic::Label {
text: Some("This record update doesn't change any fields".into()),
span: *location,
},
extra_labels: Vec::new(),
}),
},
type_::Warning::AllFieldsRecordUpdate { location } => Diagnostic {
title: "Redundant record update".into(),
text: "".into(),
hint: Some("It is better style to use the record creation syntax.".into()),
level: diagnostic::Level::Warning,
location: Some(Location {
src: src.clone(),
path: path.to_path_buf(),
label: diagnostic::Label {
text: Some("This record update specifies all fields".into()),
span: *location,
},
extra_labels: Vec::new(),
}),
},
type_::Warning::UnusedType {
location, imported, ..
} => {
let title = if *imported {
"Unused imported type".into()
} else {
"Unused private type".into()
};
let label = if *imported {
"This imported type is never used".into()
} else {
"This private type is never used".into()
};
Diagnostic {
title,
text: "".into(),
hint: Some("You can safely remove it.".into()),
level: diagnostic::Level::Warning,
location: Some(Location {
src: src.clone(),
path: path.to_path_buf(),
label: diagnostic::Label {
text: Some(label),
span: *location,
},
extra_labels: Vec::new(),
}),
}
}
type_::Warning::UnusedConstructor {
location, imported, ..
} => {
let title = if *imported {
"Unused imported item".into()
} else {
"Unused private constructor".into()
};
let label = if *imported {
"This imported constructor is never used".into()
} else {
"This private constructor is never used".into()
};
Diagnostic {
title,
text: "".into(),
hint: Some("You can safely remove it.".into()),
level: diagnostic::Level::Warning,
location: Some(Location {
src: src.clone(),
path: path.to_path_buf(),
label: diagnostic::Label {
text: Some(label),
span: *location,
},
extra_labels: Vec::new(),
}),
}
}
type_::Warning::UnusedImportedModule { location, .. } => Diagnostic {
title: "Unused imported module".into(),
text: "".into(),
hint: Some("You can safely remove it.".into()),
level: diagnostic::Level::Warning,
location: Some(Location {
src: src.clone(),
path: path.to_path_buf(),
label: diagnostic::Label {
text: Some("This imported module is never used".into()),
span: *location,
},
extra_labels: Vec::new(),
}),
},
type_::Warning::UnusedImportedModuleAlias {
location,
module_name,
..
} => {
let text = format!(
"\
Hint: You can safely remove it.
import {module_name} as _
"
);
Diagnostic {
title: "Unused imported module alias".into(),
text,
hint: None,
level: diagnostic::Level::Warning,
location: Some(Location {
src: src.clone(),
path: path.to_path_buf(),
label: diagnostic::Label {
text: Some("This alias is never used".into()),
span: *location,
},
extra_labels: Vec::new(),
}),
}
}
type_::Warning::UnusedImportedValue { location, .. } => Diagnostic {
title: "Unused imported value".into(),
text: "".into(),
hint: Some("You can safely remove it.".into()),
level: diagnostic::Level::Warning,
location: Some(Location {
src: src.clone(),
path: path.to_path_buf(),
label: diagnostic::Label {
text: Some("This imported value is never used".into()),
span: *location,
},
extra_labels: Vec::new(),
}),
},
type_::Warning::UnusedPrivateModuleConstant { location, .. } => Diagnostic {
title: "Unused private constant".into(),
text: "".into(),
hint: Some("You can safely remove it.".into()),
level: diagnostic::Level::Warning,
location: Some(Location {
src: src.clone(),
path: path.to_path_buf(),
label: diagnostic::Label {
text: Some("This private constant is never used".into()),
span: *location,
},
extra_labels: Vec::new(),
}),
},
type_::Warning::UnusedPrivateFunction { location, .. } => Diagnostic {
title: "Unused private function".into(),
text: "".into(),
hint: Some("You can safely remove it.".into()),
level: diagnostic::Level::Warning,
location: Some(Location {
src: src.clone(),
path: path.to_path_buf(),
label: diagnostic::Label {
text: Some("This private function is never used".into()),
span: *location,
},
extra_labels: Vec::new(),
}),
},
type_::Warning::UnusedVariable { location, origin } => Diagnostic {
title: if origin.is_function_parameter() {
"Unused function argument".into()
} else {
"Unused variable".into()
},
text: "".into(),
hint: origin.how_to_ignore(),
level: diagnostic::Level::Warning,
location: Some(Location {
src: src.clone(),
path: path.to_path_buf(),
label: diagnostic::Label {
text: if origin.is_function_parameter() {
Some("This argument is never used".into())
} else {
Some("This variable is never used".into())
},
span: *location,
},
extra_labels: Vec::new(),
}),
},
type_::Warning::UnusedRecursiveArgument { location } => Diagnostic {
title: "Unused function argument".into(),
text: wrap(
"This argument is passed to the function when recursing, \
but it's never used for anything.",
),
hint: None,
level: diagnostic::Level::Warning,
location: Some(Location {
src: src.clone(),
path: path.to_path_buf(),
label: diagnostic::Label {
text: Some("This argument is never used".into()),
span: *location,
},
extra_labels: vec![],
}),
},
type_::Warning::UnnecessaryDoubleIntNegation { location } => Diagnostic {
title: "Unnecessary double negation (--) on integer".into(),
text: "".into(),
hint: None,
level: diagnostic::Level::Warning,
location: Some(Location {
src: src.clone(),
path: path.to_path_buf(),
label: diagnostic::Label {
text: Some("You can safely remove this.".into()),
span: *location,
},
extra_labels: Vec::new(),
}),
},
type_::Warning::UnnecessaryDoubleBoolNegation { location } => Diagnostic {
title: "Unnecessary double negation (!!) on bool".into(),
text: "".into(),
hint: None,
level: diagnostic::Level::Warning,
location: Some(Location {
src: src.clone(),
path: path.to_path_buf(),
label: diagnostic::Label {
text: Some("You can safely remove this.".into()),
span: *location,
},
extra_labels: Vec::new(),
}),
},
type_::Warning::InefficientEmptyListCheck { location, kind } => {
use type_::error::EmptyListCheckKind;
let text = "The `list.length` function has to iterate across the whole
list to calculate the length, which is wasteful if you only
need to know if the list is empty or not.
"
.into();
let hint = Some(match kind {
EmptyListCheckKind::Empty => "You can use `the_list == []` instead.".into(),
EmptyListCheckKind::NonEmpty => {
"You can use `the_list != []` instead.".into()
}
});
Diagnostic {
title: "Inefficient use of `list.length`".into(),
text,
hint,
level: diagnostic::Level::Warning,
location: Some(Location {
src: src.clone(),
path: path.to_path_buf(),
label: diagnostic::Label {
text: None,
span: *location,
},
extra_labels: Vec::new(),
}),
}
}
type_::Warning::TransitiveDependencyImported {
location,
module,
package,
} => {
let text = wrap(&format!(
"The module `{module}` is being imported, but \
`{package}`, the package it belongs to, is not a direct dependency of your \
package.
In a future version of Gleam this may become a compile error.
Run this command to add it to your dependencies:
gleam add {package}
"
));
Diagnostic {
title: "Transitive dependency imported".into(),
text,
hint: None,
level: diagnostic::Level::Warning,
location: Some(Location {
src: src.clone(),
path: path.to_path_buf(),
label: diagnostic::Label {
text: None,
span: *location,
},
extra_labels: Vec::new(),
}),
}
}
type_::Warning::DeprecatedItem {
location,
message,
layer,
} => {
let text = wrap(&format!("It was deprecated with this message: {message}"));
let (title, diagnostic_label_text) = if layer.is_value() {
(
"Deprecated value used".into(),
Some("This value has been deprecated".into()),
)
} else {
(
"Deprecated type used".into(),
Some("This type has been deprecated".into()),
)
};
| rust | Apache-2.0 | f424547f02e621f1c5f28749786e05eda7feb098 | 2026-01-04T15:40:22.554517Z | true |
gleam-lang/gleam | https://github.com/gleam-lang/gleam/blob/f424547f02e621f1c5f28749786e05eda7feb098/compiler-core/src/analyse.rs | compiler-core/src/analyse.rs | mod imports;
pub mod name;
#[cfg(test)]
mod tests;
use crate::{
GLEAM_CORE_PACKAGE_NAME,
ast::{
self, Arg, BitArrayOption, CustomType, DefinitionLocation, Function, GroupedDefinitions,
Import, ModuleConstant, Publicity, RecordConstructor, RecordConstructorArg, SrcSpan,
Statement, TypeAlias, TypeAst, TypeAstConstructor, TypeAstFn, TypeAstHole, TypeAstTuple,
TypeAstVar, TypedCustomType, TypedDefinitions, TypedExpr, TypedFunction, TypedImport,
TypedModule, TypedModuleConstant, TypedTypeAlias, UntypedArg, UntypedCustomType,
UntypedFunction, UntypedImport, UntypedModule, UntypedModuleConstant, UntypedStatement,
UntypedTypeAlias,
},
build::{Origin, Outcome, Target},
call_graph::{CallGraphNode, into_dependency_order},
config::PackageConfig,
dep_tree,
inline::{self, InlinableFunction},
line_numbers::LineNumbers,
parse::SpannedString,
reference::{EntityKind, ReferenceKind},
type_::{
self, AccessorsMap, Deprecation, FieldMap, ModuleInterface, Opaque, PatternConstructor,
RecordAccessor, References, Type, TypeAliasConstructor, TypeConstructor,
TypeValueConstructor, TypeValueConstructorField, TypeVariantConstructors, ValueConstructor,
ValueConstructorVariant, Warning,
environment::*,
error::{Error, FeatureKind, MissingAnnotation, Named, Problems, convert_unify_error},
expression::{ExprTyper, FunctionDefinition, Implementations, Purity},
fields::FieldMapBuilder,
hydrator::Hydrator,
prelude::*,
},
uid::UniqueIdGenerator,
warning::TypeWarningEmitter,
};
use camino::Utf8PathBuf;
use ecow::{EcoString, eco_format};
use hexpm::version::Version;
use itertools::Itertools;
use name::{check_argument_names, check_name_case};
use std::{
collections::{HashMap, HashSet},
ops::Deref,
sync::{Arc, OnceLock},
};
use vec1::Vec1;
use self::imports::Importer;
/// Something the analyser attempted to work out: either the value was
/// successfully determined, or it could not be.
#[derive(Debug, Clone, PartialEq, Eq, Default)]
pub enum Inferred<T> {
    Known(T),
    #[default]
    Unknown,
}
impl<T> Inferred<T> {
    /// Take the known value, panicking with `message` if it is `Unknown`.
    pub fn expect(self, message: &str) -> T {
        match self {
            Inferred::Known(value) => value,
            Inferred::Unknown => panic!("{message}"),
        }
    }
    /// Borrow the known value, panicking with `message` if it is `Unknown`.
    pub fn expect_ref(&self, message: &str) -> &T {
        match self {
            Inferred::Known(value) => value,
            Inferred::Unknown => panic!("{message}"),
        }
    }
}
impl Inferred<PatternConstructor> {
    /// The location where the known constructor was defined, if any.
    pub fn definition_location(&self) -> Option<DefinitionLocation> {
        if let Inferred::Known(constructor) = self {
            constructor.definition_location()
        } else {
            None
        }
    }
    /// Documentation attached to the known constructor, if any.
    pub fn get_documentation(&self) -> Option<&str> {
        if let Inferred::Known(constructor) = self {
            constructor.get_documentation()
        } else {
            None
        }
    }
    /// The labelled-argument field map of the known constructor, if it has one.
    pub fn field_map(&self) -> Option<&FieldMap> {
        if let Inferred::Known(constructor) = self {
            constructor.field_map.as_ref()
        } else {
            None
        }
    }
}
/// How the compiler should treat target support.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum TargetSupport {
    /// Target support is enforced, meaning if a function is found to not have an
    /// implementation for the current target then an error is emitted and
    /// compilation halts.
    ///
    /// This is used when compiling the root package, with the exception of when using
    /// `gleam run --module $module` to run a module from a dependency package, in which case we do
    /// not want to error as the root package code isn't going to be run.
    Enforced,
    /// Target support is not enforced, meaning if a function is found to not have an
    /// implementation for the current target the compiler continues onwards and does
    /// not generate any code for this function.
    ///
    /// This is used when compiling dependencies.
    NotEnforced,
}
impl TargetSupport {
    /// Returns `true` if the target support is [`Enforced`].
    ///
    /// [`Enforced`]: TargetSupport::Enforced
    #[must_use]
    pub fn is_enforced(&self) -> bool {
        matches!(self, Self::Enforced)
    }
}
impl<T> From<Error> for Outcome<T, Vec1<Error>> {
fn from(error: Error) -> Self {
Outcome::TotalFailure(Vec1::new(error))
}
}
/// This struct is used to take the data required for analysis. It is used to
/// construct the private ModuleAnalyzer which has this data plus any
/// internal state.
///
#[derive(Debug)]
pub struct ModuleAnalyzerConstructor<'a, A> {
/// The target (Erlang or JavaScript) the module is compiled for.
pub target: Target,
/// Generator for unique ids used during type inference.
pub ids: &'a UniqueIdGenerator,
/// The origin of the module being analysed.
pub origin: Origin,
/// Interfaces of all modules that this module may import.
pub importable_modules: &'a im::HashMap<EcoString, ModuleInterface>,
/// Sink that type warnings are emitted to during analysis.
pub warnings: &'a TypeWarningEmitter,
/// The direct dependencies of the package being compiled.
pub direct_dependencies: &'a HashMap<EcoString, A>,
/// Names of the package's dev-only dependencies.
pub dev_dependencies: &'a HashSet<EcoString>,
/// Whether a missing target implementation is a hard error or is skipped.
pub target_support: TargetSupport,
/// Configuration of the package this module belongs to.
pub package_config: &'a PackageConfig,
}
impl<A> ModuleAnalyzerConstructor<'_, A> {
    /// Crawl the AST, annotating each node with the inferred type or
    /// returning an error.
    ///
    pub fn infer_module(
        self,
        module: UntypedModule,
        line_numbers: LineNumbers,
        src_path: Utf8PathBuf,
    ) -> Outcome<TypedModule, Vec1<Error>> {
        // Build the private analyser: the constructor's inputs carried over
        // unchanged, plus freshly-initialised per-run mutable state.
        let analyzer = ModuleAnalyzer {
            target: self.target,
            origin: self.origin,
            ids: self.ids,
            importable_modules: self.importable_modules,
            warnings: self.warnings,
            direct_dependencies: self.direct_dependencies,
            dev_dependencies: self.dev_dependencies,
            target_support: self.target_support,
            package_config: self.package_config,
            line_numbers,
            src_path,
            // Fresh state for this analysis run.
            module_name: module.name.clone(),
            problems: Problems::new(),
            value_names: HashMap::with_capacity(module.definitions.len()),
            hydrators: HashMap::with_capacity(module.definitions.len()),
            inline_functions: HashMap::new(),
            minimum_required_version: Version::new(0, 1, 0),
        };
        analyzer.infer_module(module)
    }
}
/// The stateful worker that analyses a single module. Built by
/// `ModuleAnalyzerConstructor::infer_module`, which carries the same inputs
/// plus this per-run mutable state.
struct ModuleAnalyzer<'a, A> {
target: Target,
ids: &'a UniqueIdGenerator,
origin: Origin,
importable_modules: &'a im::HashMap<EcoString, ModuleInterface>,
warnings: &'a TypeWarningEmitter,
direct_dependencies: &'a HashMap<EcoString, A>,
dev_dependencies: &'a HashSet<EcoString>,
target_support: TargetSupport,
package_config: &'a PackageConfig,
line_numbers: LineNumbers,
src_path: Utf8PathBuf,
/// Errors and warnings accumulated while analysing the module.
problems: Problems,
// NOTE(review): appears to record the source span of each registered value
// name; it is populated outside this part of the file — confirm before
// relying on its exact contents.
value_names: HashMap<EcoString, SrcSpan>,
/// One hydrator per function; each is taken out (`remove`) when that
/// function's body is inferred.
hydrators: HashMap<EcoString, Hydrator>,
module_name: EcoString,
/// Functions found eligible for cross-module inlining, collected as each
/// function is inferred.
inline_functions: HashMap<EcoString, InlinableFunction>,
/// The minimum Gleam version required to compile the analysed module.
minimum_required_version: Version,
}
impl<'a, A> ModuleAnalyzer<'a, A> {
/// Analyse the whole module: register imports, custom types, type aliases
/// and function signatures first (so definitions can reference one another
/// regardless of order), then infer each definition in dependency order,
/// and finally assemble the typed module together with its public
/// interface. Problems are accumulated rather than aborting, so a
/// partially-typed module can still be returned alongside its errors.
pub fn infer_module(mut self, mut module: UntypedModule) -> Outcome<TypedModule, Vec1<Error>> {
if let Err(error) = validate_module_name(&self.module_name) {
return self.all_errors(error);
}
let documentation = std::mem::take(&mut module.documentation);
// Build the typing environment for this module.
let env = EnvironmentArguments {
ids: self.ids.clone(),
current_package: self.package_config.name.clone(),
gleam_version: self
.package_config
.gleam_version
.clone()
.map(|version| version.into()),
current_module: self.module_name.clone(),
target: self.target,
importable_modules: self.importable_modules,
target_support: self.target_support,
current_origin: self.origin,
dev_dependencies: self.dev_dependencies,
}
.build();
let definitions = GroupedDefinitions::new(module.into_iter_definitions(self.target));
// Register any modules, types, and values being imported
// We process imports first so that anything imported can be referenced
// anywhere in the module.
let mut env = Importer::run(self.origin, env, &definitions.imports, &mut self.problems);
// Register types so they can be used in constructors and functions
// earlier in the module.
for type_ in &definitions.custom_types {
if let Err(error) = self.register_types_from_custom_type(type_, &mut env) {
return self.all_errors(error);
}
}
let sorted_aliases = match sorted_type_aliases(&definitions.type_aliases) {
Ok(sorted_aliases) => sorted_aliases,
Err(error) => return self.all_errors(error),
};
for type_alias in sorted_aliases {
self.register_type_alias(type_alias, &mut env);
}
for function in &definitions.functions {
self.register_value_from_function(function, &mut env);
}
// Infer the types of each statement in the module
let typed_imports = definitions
.imports
.into_iter()
.filter_map(|import| self.analyse_import(import, &env))
.collect_vec();
let typed_custom_types = definitions
.custom_types
.into_iter()
.filter_map(|custom_type| self.analyse_custom_type(custom_type, &mut env))
.collect_vec();
let typed_type_aliases = definitions
.type_aliases
.into_iter()
.map(|type_alias| analyse_type_alias(type_alias, &mut env))
.collect_vec();
// Sort functions and constants into dependency order for inference.
// Definitions that do not depend on other definitions are inferred
// first, then ones that depend on those, etc.
let mut typed_functions = Vec::with_capacity(definitions.functions.len());
let mut typed_constants = Vec::with_capacity(definitions.constants.len());
let definition_groups =
match into_dependency_order(definitions.functions, definitions.constants) {
Ok(definition_groups) => definition_groups,
Err(error) => return self.all_errors(error),
};
let mut working_constants = vec![];
let mut working_functions = vec![];
for group in definition_groups {
// A group may have multiple functions and constants that depend on
// each other by mutual reference.
for definition in group {
match definition {
CallGraphNode::Function(function) => {
working_functions.push(self.infer_function(function, &mut env))
}
CallGraphNode::ModuleConstant(constant) => {
working_constants.push(self.infer_module_constant(constant, &mut env))
}
};
}
// Now that the entire group has been inferred, generalise their types.
for inferred_constant in working_constants.drain(..) {
typed_constants.push(generalise_module_constant(
inferred_constant,
&mut env,
&self.module_name,
))
}
for inferred_function in working_functions.drain(..) {
typed_functions.push(generalise_function(
inferred_function,
&mut env,
&self.module_name,
));
}
}
let typed_definitions = TypedDefinitions {
imports: typed_imports,
constants: typed_constants,
custom_types: typed_custom_types,
type_aliases: typed_type_aliases,
functions: typed_functions,
};
// Generate warnings for unused items
let unused_definition_positions = env.handle_unused(&mut self.problems);
// Remove imported types and values to create the public interface
// Private types and values are retained so they can be used in the language
// server, but are filtered out when type checking to prevent using private
// items.
env.module_types
.retain(|_, info| info.module == self.module_name);
// Ensure no exported values have private types in their type signature
for value in env.module_values.values() {
self.check_for_type_leaks(value)
}
// This is a partial move: fields not named in the pattern (such as
// `imported_modules` and `references`, used below) remain accessible
// on `env`.
let Environment {
module_types: types,
module_types_constructors: types_constructors,
module_values: values,
accessors,
names: type_names,
module_type_aliases: type_aliases,
echo_found,
..
} = env;
let is_internal = self
.package_config
.is_internal_module(self.module_name.as_str());
// We sort warnings and errors to ensure they are emitted in a
// deterministic order, making them easier to test and debug, and to
// make the output predictable.
self.problems.sort();
let warnings = self.problems.take_warnings();
for warning in &warnings {
// TODO: remove this clone
self.warnings.emit(warning.clone());
}
let module = ast::Module {
documentation: documentation.clone(),
name: self.module_name.clone(),
definitions: typed_definitions,
names: type_names,
unused_definition_positions,
type_info: ModuleInterface {
name: self.module_name,
types,
types_value_constructors: types_constructors,
values,
accessors,
origin: self.origin,
package: self.package_config.name.clone(),
is_internal,
line_numbers: self.line_numbers,
src_path: self.src_path,
warnings,
minimum_required_version: self.minimum_required_version,
type_aliases,
documentation,
contains_echo: echo_found,
references: References {
imported_modules: env
.imported_modules
.values()
.map(|(_location, module)| module.name.clone())
.collect(),
value_references: env.references.value_references,
type_references: env.references.type_references,
},
inline_functions: self.inline_functions,
},
};
// `try_from_vec` fails only when the error list is empty, i.e. analysis
// succeeded; otherwise return the module we managed to build together
// with the collected errors.
match Vec1::try_from_vec(self.problems.take_errors()) {
Err(_) => Outcome::Ok(module),
Ok(errors) => Outcome::PartialFailure(module, errors),
}
}
fn all_errors<T>(&mut self, error: Error) -> Outcome<T, Vec1<Error>> {
Outcome::TotalFailure(Vec1::from_vec_push(self.problems.take_errors(), error))
}
/// Analyse a single module constant: infer the type of its value
/// expression, register it in the environment both as a usable value and
/// as part of the module's interface, record the definition for reference
/// tracking, and return the typed constant. Problems are pushed onto
/// `self.problems`.
fn infer_module_constant(
&mut self,
c: UntypedModuleConstant,
environment: &mut Environment<'_>,
) -> TypedModuleConstant {
let ModuleConstant {
documentation: doc,
location,
name,
name_location,
annotation,
publicity,
value,
deprecation,
..
} = c;
self.check_name_case(name_location, &name, Named::Constant);
// If the constant's name matches an unqualified import, emit a warning:
self.check_shadow_import(&name, c.location, environment);
environment.references.begin_constant();
// Constants are typed as if they were a function body with no externals.
let definition = FunctionDefinition {
has_body: true,
has_erlang_external: false,
has_javascript_external: false,
};
let mut expr_typer = ExprTyper::new(environment, definition, &mut self.problems);
let typed_expr = expr_typer.infer_const(&annotation, *value);
let type_ = typed_expr.type_();
let implementations = expr_typer.implementations;
// Bump the module's minimum required Gleam version if this constant
// needs a newer one.
let minimum_required_version = expr_typer.minimum_required_version;
if minimum_required_version > self.minimum_required_version {
self.minimum_required_version = minimum_required_version;
}
// Using the `@internal` attribute requires a minimum language version.
match publicity {
Publicity::Private
| Publicity::Public
| Publicity::Internal {
attribute_location: None,
} => (),
Publicity::Internal {
attribute_location: Some(location),
} => self.track_feature_usage(FeatureKind::InternalAnnotation, location),
}
let variant = ValueConstructor {
publicity,
deprecation: deprecation.clone(),
variant: ValueConstructorVariant::ModuleConstant {
documentation: doc.as_ref().map(|(_, doc)| doc.clone()),
location,
literal: typed_expr.clone(),
module: self.module_name.clone(),
name: name.clone(),
implementations,
},
type_: type_.clone(),
};
// NOTE(review): the local binding is registered as not-deprecated even
// when the constant itself is deprecated, while the module value below
// keeps the real deprecation — presumably intentional; confirm.
environment.insert_variable(
name.clone(),
variant.variant.clone(),
type_.clone(),
publicity,
Deprecation::NotDeprecated,
);
environment.insert_module_value(name.clone(), variant);
environment
.references
.register_constant(name.clone(), location, publicity);
environment.references.register_value_reference(
environment.current_module.clone(),
name.clone(),
&name,
name_location,
ReferenceKind::Definition,
);
ModuleConstant {
documentation: doc,
location,
name,
name_location,
annotation,
publicity,
value: Box::new(typed_expr),
type_,
deprecation,
implementations,
}
}
// TODO: Extract this into a class of its own! Or perhaps it just wants some
// helper methods extracted. There's a whole bunch of state in this one
// function, and it does a handful of things.
/// Infer the type of a single function definition, using the signature
/// preregistered by `register_value_from_function` as the starting point.
/// Registers the resulting value in the environment, records the
/// definition for the call graph, and collects the function for inlining
/// when eligible. If inference of the body fails the error is recorded and
/// a placeholder invalid body is substituted so analysis of the rest of
/// the module can continue.
fn infer_function(
&mut self,
f: UntypedFunction,
environment: &mut Environment<'_>,
) -> TypedFunction {
let Function {
documentation: doc,
location,
name,
publicity,
arguments,
body,
body_start,
return_annotation,
end_position: end_location,
deprecation,
external_erlang,
external_javascript,
return_type: (),
implementations: _,
purity: _,
} = f;
let (name_location, name) = name.expect("Function in a definition must be named");
let target = environment.target;
let body_location = body
.last()
.map(|statement| statement.location())
.unwrap_or(location);
let preregistered_fn = environment
.get_variable(&name)
.expect("Could not find preregistered type for function");
let field_map = preregistered_fn.field_map().cloned();
let preregistered_type = preregistered_fn.type_.clone();
let (prereg_arguments_types, prereg_return_type) = preregistered_type
.fn_types()
.expect("Preregistered type for fn was not a fn");
// Ensure that folks are not writing inline JavaScript expressions as
// the implementation for JS externals.
self.assert_valid_javascript_external(&name, external_javascript.as_ref(), location);
// Find the external implementation for the current target, if one has been given.
let external =
target_function_implementation(target, &external_erlang, &external_javascript);
// The function must have at least one implementation somewhere.
let has_implementation = self.ensure_function_has_an_implementation(
&body,
&external_erlang,
&external_javascript,
location,
);
if external.is_some() {
// There was an external implementation, so type annotations are
// mandatory as the Gleam implementation may be absent, and because we
// think you should always specify types for external functions for
// clarity + to avoid accidental mistakes.
self.ensure_annotations_present(&arguments, return_annotation.as_ref(), location);
}
let has_body = !body.is_empty();
let definition = FunctionDefinition {
has_body,
has_erlang_external: external_erlang.is_some(),
has_javascript_external: external_javascript.is_some(),
};
// We have already registered the function in the `register_value_from_function`
// method, but here we must set this as the current function again, so that anything
// we reference in the body of it can be tracked properly in the call graph.
environment.references.set_current_node(name.clone());
let mut typed_arguments = Vec::with_capacity(arguments.len());
// Infer the type using the preregistered args + return types as a starting point
let result = environment.in_new_scope(&mut self.problems, |environment, problems| {
for (argument, type_) in arguments.into_iter().zip(&prereg_arguments_types) {
let argument = argument.set_type(type_.clone());
// We track which arguments are discarded so we can provide nice
// error messages when a discarded argument is later referenced.
match &argument.names {
ast::ArgNames::Named { .. } | ast::ArgNames::NamedLabelled { .. } => (),
ast::ArgNames::Discard { name, location }
| ast::ArgNames::LabelledDiscard {
name,
name_location: location,
..
} => {
let _ = environment.discarded_names.insert(name.clone(), *location);
}
}
typed_arguments.push(argument);
}
let mut expr_typer = ExprTyper::new(environment, definition, problems);
// The hydrator was created during registration; take ownership of it
// now that we are inferring this function's body.
expr_typer.hydrator = self
.hydrators
.remove(&name)
.expect("Could not find hydrator for fn");
let (arguments, body) = expr_typer.infer_fn_with_known_types(
Some(name.clone()),
typed_arguments.clone(),
body,
Some(prereg_return_type.clone()),
)?;
let arguments_types = arguments.iter().map(|a| a.type_.clone()).collect();
let return_type = body
.last()
.map_or(prereg_return_type.clone(), |last| last.type_());
let type_ = fn_(arguments_types, return_type);
Ok((
type_,
body,
expr_typer.implementations,
expr_typer.minimum_required_version,
expr_typer.purity,
))
});
// If we could not successfully infer the type etc information of the
// function then register the error and continue analysis using the best
// information that we have, so we can still learn about the rest of the
// module.
let (type_, body, implementations, required_version, purity) = match result {
Ok((type_, body, implementations, required_version, purity)) => {
(type_, body, implementations, required_version, purity)
}
Err(error) => {
self.problems.error(error);
let type_ = preregistered_type.clone();
// Substitute a zero-width invalid expression at the end of the
// body so downstream passes still have something to work with.
let body = vec![Statement::Expression(TypedExpr::Invalid {
type_: prereg_return_type.clone(),
location: SrcSpan {
start: body_location.end,
end: body_location.end,
},
extra_information: None,
})];
let implementations = Implementations::supporting_all();
(
type_,
body,
implementations,
Version::new(1, 0, 0),
Purity::Impure,
)
}
};
if required_version > self.minimum_required_version {
self.minimum_required_version = required_version;
}
// Using the `@internal` attribute requires a minimum language version.
match publicity {
Publicity::Private
| Publicity::Public
| Publicity::Internal {
attribute_location: None,
} => (),
Publicity::Internal {
attribute_location: Some(location),
} => self.track_feature_usage(FeatureKind::InternalAnnotation, location),
}
if let Some((module, _, location)) = &external_javascript
&& module.contains('@')
{
self.track_feature_usage(FeatureKind::AtInJavascriptModules, *location)
}
// Assert that the inferred type matches the type of any recursive call
if let Err(error) = unify(preregistered_type.clone(), type_) {
self.problems.error(convert_unify_error(error, location));
}
// Ensure that the current target has an implementation for the function.
// This is done at the expression level while inferring the function body, but we do it again
// here as externally implemented functions may not have a Gleam body.
//
// We don't emit this error if there is no implementation, as this would
// have already emitted an error above.
if has_implementation
&& publicity.is_importable()
&& environment.target_support.is_enforced()
&& !implementations.supports(target)
// We don't emit this error if there is a body
// since this would be caught at the statement level
&& !has_body
{
self.problems.error(Error::UnsupportedPublicFunctionTarget {
name: name.clone(),
target,
location,
});
}
let variant = ValueConstructorVariant::ModuleFn {
documentation: doc.as_ref().map(|(_, doc)| doc.clone()),
name: name.clone(),
external_erlang: external_erlang
.as_ref()
.map(|(m, f, _)| (m.clone(), f.clone())),
external_javascript: external_javascript
.as_ref()
.map(|(m, f, _)| (m.clone(), f.clone())),
field_map,
module: environment.current_module.clone(),
arity: typed_arguments.len(),
location,
implementations,
purity,
};
environment.insert_variable(
name.clone(),
variant,
preregistered_type.clone(),
publicity,
deprecation.clone(),
);
environment.references.register_value_reference(
environment.current_module.clone(),
name.clone(),
&name,
name_location,
ReferenceKind::Definition,
);
let function = Function {
documentation: doc,
location,
name: Some((name_location, name.clone())),
publicity,
deprecation,
arguments: typed_arguments,
body_start,
end_position: end_location,
return_annotation,
return_type: preregistered_type
.return_type()
.expect("Could not find return type for fn"),
body,
external_erlang,
external_javascript,
implementations,
purity,
};
// Record the function for cross-module inlining when it qualifies.
if let Some(inline_function) = inline::function_to_inlinable(
&environment.current_package,
&environment.current_module,
&function,
) {
_ = self.inline_functions.insert(name, inline_function);
}
function
}
/// Validates the module and function names of a `@external(javascript, ...)`
/// attribute, reporting a problem for each name that does not match the
/// accepted pattern. Does nothing when no JavaScript external is present.
fn assert_valid_javascript_external(
    &mut self,
    function_name: &EcoString,
    external_javascript: Option<&(EcoString, EcoString, SrcSpan)>,
    location: SrcSpan,
) {
    use regex::Regex;

    // Each regex is compiled at most once and reused for every later call.
    static MODULE: OnceLock<Regex> = OnceLock::new();
    static FUNCTION: OnceLock<Regex> = OnceLock::new();

    // Nothing to validate when the function has no JavaScript external.
    let Some((module, function, _location)) = external_javascript else {
        return;
    };

    let module_pattern =
        MODULE.get_or_init(|| Regex::new("^[@a-zA-Z0-9\\./:_-]+$").expect("regex"));
    if !module_pattern.is_match(module) {
        self.problems.error(Error::InvalidExternalJavascriptModule {
            location,
            module: module.clone(),
            name: function_name.clone(),
        });
    }

    let function_pattern =
        FUNCTION.get_or_init(|| Regex::new("^[a-zA-Z_][a-zA-Z0-9_]*$").expect("regex"));
    if !function_pattern.is_match(function) {
        self.problems
            .error(Error::InvalidExternalJavascriptFunction {
                location,
                function: function.clone(),
                name: function_name.clone(),
            });
    }
}
/// Reports an error for every parameter of an external function that lacks a
/// type annotation, and one more if the return type is not annotated.
/// External functions have no Gleam body to infer types from, so annotations
/// are mandatory.
fn ensure_annotations_present(
    &mut self,
    arguments: &[UntypedArg],
    return_annotation: Option<&TypeAst>,
    location: SrcSpan,
) {
    // One error per unannotated parameter, at the parameter's own location.
    for argument in arguments.iter().filter(|argument| argument.annotation.is_none()) {
        self.problems.error(Error::ExternalMissingAnnotation {
            location: argument.location,
            kind: MissingAnnotation::Parameter,
        });
    }

    // The return type must also be annotated.
    if return_annotation.is_none() {
        self.problems.error(Error::ExternalMissingAnnotation {
            location,
            kind: MissingAnnotation::Return,
        });
    }
}
/// Checks that the function has at least one implementation: a Gleam body, an
/// Erlang external, or a JavaScript external. Reports an error and returns
/// `false` when it has none.
fn ensure_function_has_an_implementation(
    &mut self,
    body: &[UntypedStatement],
    external_erlang: &Option<(EcoString, EcoString, SrcSpan)>,
    external_javascript: &Option<(EcoString, EcoString, SrcSpan)>,
    location: SrcSpan,
) -> bool {
    let has_implementation =
        !body.is_empty() || external_erlang.is_some() || external_javascript.is_some();
    if !has_implementation {
        self.problems.error(Error::NoImplementation { location });
    }
    has_implementation
}
fn analyse_import(
&mut self,
i: UntypedImport,
environment: &Environment<'_>,
) -> Option<TypedImport> {
let Import {
documentation,
location,
module,
as_name,
unqualified_values,
unqualified_types,
..
} = i;
// Find imported module
let Some(module_info) = environment.importable_modules.get(&module) else {
// Here the module being imported doesn't exist. We don't emit an
// error here as the `Importer` that was run earlier will have
// already emitted an error for this.
return None;
};
// Modules should belong to a package that is a direct dependency of the
// current package to be imported.
// Upgrade this to an error in future.
if module_info.package != GLEAM_CORE_PACKAGE_NAME
&& module_info.package != self.package_config.name
&& !self.direct_dependencies.contains_key(&module_info.package)
{
self.warnings.emit(Warning::TransitiveDependencyImported {
location,
module: module_info.name.clone(),
package: module_info.package.clone(),
})
}
Some(Import {
documentation,
location,
module,
as_name,
unqualified_values,
unqualified_types,
| rust | Apache-2.0 | f424547f02e621f1c5f28749786e05eda7feb098 | 2026-01-04T15:40:22.554517Z | true |
gleam-lang/gleam | https://github.com/gleam-lang/gleam/blob/f424547f02e621f1c5f28749786e05eda7feb098/compiler-core/src/package_interface.rs | compiler-core/src/package_interface.rs | use std::{collections::HashMap, ops::Deref};
use ecow::EcoString;
use serde::Serialize;
#[cfg(test)]
mod tests;
use crate::{
io::ordered_map,
type_::{
self, Deprecation, Opaque, Type, TypeConstructor, TypeVar, TypeVariantConstructors,
ValueConstructorVariant, expression::Implementations,
},
};
use crate::build::Package;
/// The public interface of a package that gets serialised as a json object.
#[derive(Serialize, Debug)]
#[serde(rename_all = "kebab-case")]
pub struct PackageInterface {
    /// The package's name, as read from its `gleam.toml`.
    name: EcoString,
    /// The package's version, rendered as a string (e.g. `"1.0.2"`).
    version: EcoString,
    /// The Gleam version constraint that the package specifies in its `gleam.toml`.
    gleam_version_constraint: Option<EcoString>,
    /// A map from module name to its interface. Serialised with sorted keys
    /// so the output is deterministic.
    #[serde(serialize_with = "ordered_map")]
    modules: HashMap<EcoString, ModuleInterface>,
}
/// The public interface of a single module. Only public definitions are
/// included (see `ModuleInterface::from_interface`).
#[derive(Serialize, Debug)]
#[serde(rename_all = "kebab-case")]
pub struct ModuleInterface {
    /// A vector with the lines composing the module's documentation (that is
    /// every line preceded by a `////`).
    documentation: Vec<EcoString>,
    /// A map from type alias name to its interface.
    #[serde(serialize_with = "ordered_map")]
    type_aliases: HashMap<EcoString, TypeAliasInterface>,
    /// A map from type name to its interface. Aliases are not included here;
    /// they live in `type_aliases`.
    #[serde(serialize_with = "ordered_map")]
    types: HashMap<EcoString, TypeDefinitionInterface>,
    /// A map from constant name to its interface.
    #[serde(serialize_with = "ordered_map")]
    constants: HashMap<EcoString, ConstantInterface>,
    /// A map from function name to its interface.
    #[serde(serialize_with = "ordered_map")]
    functions: HashMap<EcoString, FunctionInterface>,
}
/// The interface of a custom type definition.
#[derive(Serialize, Debug)]
#[serde(rename_all = "kebab-case")]
pub struct TypeDefinitionInterface {
    /// The definition's documentation comment (that is every line preceded by
    /// `///`).
    documentation: Option<EcoString>,
    /// If the definition has a deprecation annotation `@deprecated("...")`
    /// this field will hold the reason of the deprecation.
    deprecation: Option<DeprecationInterface>,
    /// The number of type variables in the type definition.
    /// ```gleam
    /// /// This type has 2 type variables.
    /// type Result(a, b) {
    ///   Ok(a)
    ///   Error(b)
    /// }
    /// ```
    parameters: usize,
    /// A list of the type constructors. If the type is marked as opaque it
    /// won't have any visible constructors.
    constructors: Vec<TypeConstructorInterface>,
}
/// The interface of a single variant constructor of a custom type.
#[derive(Serialize, Debug)]
#[serde(rename_all = "kebab-case")]
pub struct TypeConstructorInterface {
    /// The constructor's documentation comment (that is every line preceded by
    /// `///`).
    documentation: Option<EcoString>,
    /// The name of the type constructor.
    /// ```gleam
    /// pub type Box(a) {
    ///   MyBox(value: a)
    /// //^^^^^ This is the constructor's name
    /// }
    /// ```
    name: EcoString,
    /// A list of the parameters needed by the constructor. Type variable ids
    /// appearing here share the numbering of the enclosing type's parameters.
    /// ```gleam
    /// pub type Box(a) {
    ///   MyBox(value: a)
    /// //      ^^^^^^^^ This is the constructor's parameter.
    /// }
    /// ```
    parameters: Vec<ParameterInterface>,
}
/// The interface of a type alias definition.
#[derive(Serialize, Debug)]
#[serde(rename_all = "kebab-case")]
pub struct TypeAliasInterface {
    /// The alias' documentation comment (that is every line preceded by
    /// `///`).
    documentation: Option<EcoString>,
    /// If the alias has a deprecation annotation `@deprecated("...")`
    /// this field will hold the reason of the deprecation.
    deprecation: Option<DeprecationInterface>,
    /// The number of type variables in the type alias definition.
    /// ```gleam
    /// /// This type alias has 2 type variables.
    /// type Results(a, b) = List(Result(a, b))
    /// ```
    parameters: usize,
    /// The aliased type.
    /// ```gleam
    /// type Ints = List(Int)
    /// //          ^^^^^^^^^ This is the aliased type in a type alias.
    /// ```
    alias: TypeInterface,
}
/// The interface of a module constant.
#[derive(Serialize, Debug)]
#[serde(rename_all = "kebab-case")]
pub struct ConstantInterface {
    /// The constant's documentation comment (that is every line preceded by
    /// `///`).
    documentation: Option<EcoString>,
    /// If the constant has a deprecation annotation `@deprecated("...")`
    /// this field will hold the reason of the deprecation.
    deprecation: Option<DeprecationInterface>,
    /// How the constant is implemented: pure Gleam and/or via externals.
    /// See [`ImplementationsInterface`].
    implementations: ImplementationsInterface,
    /// The constant's type.
    #[serde(rename = "type")]
    type_: TypeInterface,
}
/// A module's function. This differs from a simple `Fn` type as its arguments
/// can be labelled.
#[derive(Serialize, Debug)]
#[serde(rename_all = "kebab-case")]
pub struct FunctionInterface {
    /// The function's documentation comment (that is every line preceded by
    /// `///`).
    documentation: Option<EcoString>,
    /// If the function has a deprecation annotation `@deprecated("...")`
    /// this field will hold the reason of the deprecation.
    deprecation: Option<DeprecationInterface>,
    /// How the function is implemented: pure Gleam and/or via externals.
    /// See [`ImplementationsInterface`].
    implementations: ImplementationsInterface,
    /// The function's (possibly labelled) parameters, in declaration order.
    parameters: Vec<ParameterInterface>,
    /// The function's return type.
    #[serde(rename = "return")]
    return_: TypeInterface,
}
/// Information about how a value is implemented.
#[derive(Debug, Serialize, Copy, Clone)]
#[serde(rename_all = "kebab-case")]
pub struct ImplementationsInterface {
    /// Set to `true` if the const/function has a pure Gleam implementation
    /// (that is, it never uses external code).
    /// Being pure Gleam means that the function will support all Gleam
    /// targets, even future ones that are not present to this day.
    ///
    /// Consider the following function:
    ///
    /// ```gleam
    /// @external(erlang, "wibble", "wobble")
    /// pub fn a_random_number() -> Int {
    ///   4
    ///   // This is a default implementation.
    /// }
    /// ```
    ///
    /// The implementations for this function will look like this:
    ///
    /// ```json
    /// {
    ///   gleam: true,
    ///   can_run_on_erlang: true,
    ///   can_run_on_javascript: true,
    ///   uses_erlang_externals: true,
    ///   uses_javascript_externals: false,
    /// }
    /// ```
    ///
    /// - `gleam: true` means that the function has a pure Gleam implementation
    ///   and thus it can be used on all Gleam targets with no problems.
    /// - `can_run_on_erlang: true` the function can be called on the Erlang
    ///   target.
    /// - `can_run_on_javascript: true` the function can be called on the JavaScript
    ///   target.
    /// - `uses_erlang_externals: true` means that the function will use Erlang
    ///   external code when compiled to the Erlang target.
    /// - `uses_javascript_externals: false` means that the function won't use
    ///   JavaScript external code when compiled to JavaScript. The function can
    ///   still be used on the JavaScript target since it has a pure Gleam
    ///   implementation.
    gleam: bool,
    /// Set to `true` if the const/function is defined using Erlang external
    /// code. That means that the function will use Erlang code through FFI when
    /// compiled for the Erlang target.
    uses_erlang_externals: bool,
    /// Set to `true` if the const/function is defined using JavaScript external
    /// code. That means that the function will use JavaScript code through FFI
    /// when compiled for the JavaScript target.
    ///
    /// Let's have a look at an example:
    ///
    /// ```gleam
    /// @external(javascript, "wibble", "wobble")
    /// pub fn javascript_only() -> Int
    /// ```
    ///
    /// It's implementations field will look like this:
    ///
    /// ```json
    /// {
    ///   gleam: false,
    ///   can_run_on_erlang: false,
    ///   can_run_on_javascript: true,
    ///   uses_erlang_externals: false,
    ///   uses_javascript_externals: true,
    /// }
    /// ```
    ///
    /// - `gleam: false` means that the function doesn't have a pure Gleam
    ///   implementations. This means that the function is only defined using
    ///   externals and can only be used on some targets.
    /// - `can_run_on_erlang: false` the function cannot be called on the Erlang
    ///   target.
    /// - `can_run_on_javascript: true` the function can be called on the JavaScript
    ///   target.
    /// - `uses_erlang_externals: false` the function is not using external
    ///   Erlang code.
    /// - `uses_javascript_externals: true` the function is using JavaScript
    ///   external code.
    uses_javascript_externals: bool,
    /// Whether the function can be called on the Erlang target, either due to a
    /// pure Gleam implementation or an implementation that uses some Erlang
    /// externals.
    can_run_on_erlang: bool,
    /// Whether the function can be called on the JavaScript target, either due
    /// to a pure Gleam implementation or an implementation that uses some
    /// JavaScript externals.
    can_run_on_javascript: bool,
}
impl ImplementationsInterface {
    /// Mirrors a type-checker `Implementations` value into its serialisable
    /// counterpart.
    fn from_implementations(implementations: &Implementations) -> ImplementationsInterface {
        // It might look a bit silly to just recreate an identical structure with
        // a different name. However, this way we won't inadvertently cause breaking
        // changes if we were to change the names used by the `Implementations` struct
        // that is used by the target tracking algorithm.
        // By doing this we can change the target tracking and package interface
        // separately!
        //
        // Destructuring (rather than reading fields one by one) means any change
        // to the `Implementations` struct is a compile error here, so a new field
        // cannot be forgotten.
        let &Implementations {
            gleam,
            uses_erlang_externals,
            uses_javascript_externals,
            can_run_on_erlang,
            can_run_on_javascript,
        } = implementations;

        ImplementationsInterface {
            gleam,
            uses_erlang_externals,
            uses_javascript_externals,
            can_run_on_erlang,
            can_run_on_javascript,
        }
    }
}
/// The serialisable form of a deprecation annotation.
#[derive(Serialize, Debug)]
#[serde(rename_all = "kebab-case")]
pub struct DeprecationInterface {
    /// The reason for the deprecation.
    message: EcoString,
}
impl DeprecationInterface {
    /// Returns `None` for a value that is not deprecated, otherwise wraps the
    /// deprecation message into its serialisable form.
    fn from_deprecation(deprecation: &Deprecation) -> Option<DeprecationInterface> {
        let Deprecation::Deprecated { message } = deprecation else {
            return None;
        };
        Some(DeprecationInterface {
            message: message.clone(),
        })
    }
}
/// A type as it appears in the serialised interface. The JSON representation
/// carries a `kind` tag distinguishing the variants.
#[derive(Serialize, Debug)]
#[serde(tag = "kind")]
#[serde(rename_all = "kebab-case")]
pub enum TypeInterface {
    /// A tuple type like `#(Int, Float)`.
    Tuple {
        /// The types composing the tuple.
        elements: Vec<TypeInterface>,
    },
    /// A function type like `fn(Int, String) -> String`.
    Fn {
        parameters: Vec<TypeInterface>,
        #[serde(rename = "return")]
        return_: Box<TypeInterface>,
    },
    /// A type variable. Ids are renumbered starting from 0 per item
    /// (see `IdMap`).
    /// ```gleam
    /// pub fn wibble(value: a) -> a {}
    /// //                   ^ This is a type variable.
    /// ```
    Variable { id: u64 },
    /// A custom named type.
    /// ```gleam
    /// let value: Bool = True
    /// //         ^^^^ This is a named type.
    /// ```
    ///
    /// All prelude types - like Bool, String, etc. - are named types as well.
    /// In that case their package is an empty string `""` and their module
    /// name is the string `"gleam"`.
    ///
    Named {
        name: EcoString,
        /// The package the type is defined in.
        package: EcoString,
        /// The module the type is defined in.
        module: EcoString,
        /// The type parameters that might be needed to define a named type.
        /// ```gleam
        /// let result: Result(Int, e) = Ok(1)
        /// //                 ^^^^^^ The `Result` named type has 2 parameters.
        /// //                        In this case it's the Int type and a type
        /// //                        variable.
        /// ```
        parameters: Vec<TypeInterface>,
    },
}
/// A (possibly labelled) parameter of a function or variant constructor.
#[derive(Serialize, Debug)]
#[serde(rename_all = "kebab-case")]
pub struct ParameterInterface {
    /// If the parameter is labelled this will hold the label's name.
    /// ```gleam
    /// pub fn repeat(times n: Int) -> List(Int)
    /// //            ^^^^^ This is the parameter's label.
    /// ```
    label: Option<EcoString>,
    /// The parameter's type.
    /// ```gleam
    /// pub fn repeat(times n: Int) -> List(Int)
    /// //                     ^^^ This is the parameter's type.
    /// ```
    #[serde(rename = "type")]
    type_: TypeInterface,
}
impl PackageInterface {
    /// Builds the serialisable interface for `package`, gathering module
    /// information from the freshly compiled modules and, for modules that
    /// were loaded from cache, from `cached_modules`.
    pub fn from_package(
        package: &Package,
        cached_modules: &im::HashMap<EcoString, type_::ModuleInterface>,
    ) -> PackageInterface {
        let config = &package.config;

        // Modules that were not recompiled are looked up in the cache by name.
        let cached = package
            .cached_module_names
            .iter()
            .filter_map(|name| cached_modules.get(name));

        let compiled = package.modules.iter().map(|module| &module.ast.type_info);

        // Internal modules are not part of the package's public interface.
        let modules = compiled
            .chain(cached)
            .filter(|module| !config.is_internal_module(module.name.as_str()))
            .map(|module| (module.name.clone(), ModuleInterface::from_interface(module)))
            .collect();

        PackageInterface {
            name: config.name.clone(),
            version: config.version.to_string().into(),
            gleam_version_constraint: config
                .gleam_version
                .clone()
                .map(|version| EcoString::from(version.hex().to_string())),
            modules,
        }
    }
}
impl ModuleInterface {
    /// Builds a module's public interface from its type information, gathering
    /// the public types, type aliases, constants and functions.
    fn from_interface(interface: &type_::ModuleInterface) -> ModuleInterface {
        let mut types = HashMap::new();
        let mut type_aliases = HashMap::new();
        let mut constants = HashMap::new();
        let mut functions = HashMap::new();

        for (name, constructor) in interface.types.iter().filter(|(name, c)| {
            // Aliases are stored separately
            c.publicity.is_public() && !interface.type_aliases.contains_key(*name)
        }) {
            let mut id_map = IdMap::new();

            let TypeConstructor {
                deprecation,
                documentation,
                ..
            } = constructor;

            // Number the type's own parameters first so the variants below
            // reuse the same incremental ids for shared type variables.
            for typed_parameter in &constructor.parameters {
                id_map.add_type_variable_id(typed_parameter.as_ref());
            }

            let _ = types.insert(
                name.clone(),
                TypeDefinitionInterface {
                    documentation: documentation.clone(),
                    deprecation: DeprecationInterface::from_deprecation(deprecation),
                    // `constructor` is the entry for `name`, so there is no
                    // need to look it up in `interface.types` again (the
                    // previous lookup also cloned the whole parameter vector
                    // just to take its length).
                    parameters: constructor.parameters.len(),
                    // Opaque types expose no constructors.
                    constructors: match interface.types_value_constructors.get(name) {
                        Some(TypeVariantConstructors {
                            variants,
                            opaque: Opaque::NotOpaque,
                            ..
                        }) => variants
                            .iter()
                            .map(|constructor| TypeConstructorInterface {
                                documentation: constructor.documentation.clone(),
                                name: constructor.name.clone(),
                                parameters: constructor
                                    .parameters
                                    .iter()
                                    .map(|arg| ParameterInterface {
                                        label: arg.label.clone(),
                                        // We share the same id_map between each step so that the
                                        // incremental ids assigned are consistent with each other
                                        type_: from_type_helper(arg.type_.as_ref(), &mut id_map),
                                    })
                                    .collect(),
                            })
                            .collect(),
                        Some(_) | None => Vec::new(),
                    },
                },
            );
        }

        for (name, alias) in interface
            .type_aliases
            .iter()
            .filter(|(_, v)| v.publicity.is_public())
        {
            let _ = type_aliases.insert(
                name.clone(),
                TypeAliasInterface {
                    documentation: alias.documentation.clone(),
                    deprecation: DeprecationInterface::from_deprecation(&alias.deprecation),
                    parameters: alias.arity,
                    alias: TypeInterface::from_type(&alias.type_),
                },
            );
        }

        for (name, value) in interface
            .values
            .iter()
            .filter(|(_, v)| v.publicity.is_public())
        {
            match (value.type_.as_ref(), value.variant.clone()) {
                // Functions: labels are recovered from the field map since the
                // `Fn` type itself only stores parameter types.
                (
                    Type::Fn {
                        arguments,
                        return_: return_type,
                    },
                    ValueConstructorVariant::ModuleFn {
                        documentation,
                        implementations,
                        field_map,
                        ..
                    },
                ) => {
                    let mut id_map = IdMap::new();
                    let reverse_field_map = field_map
                        .as_ref()
                        .map(|field_map| field_map.indices_to_labels())
                        .unwrap_or_default();
                    let _ = functions.insert(
                        name.clone(),
                        FunctionInterface {
                            implementations: ImplementationsInterface::from_implementations(
                                &implementations,
                            ),
                            deprecation: DeprecationInterface::from_deprecation(&value.deprecation),
                            documentation,
                            parameters: arguments
                                .iter()
                                .enumerate()
                                .map(|(index, type_)| ParameterInterface {
                                    label: reverse_field_map
                                        .get(&(index as u32))
                                        .map(|label| (*label).clone()),
                                    type_: from_type_helper(type_, &mut id_map),
                                })
                                .collect(),
                            return_: from_type_helper(return_type, &mut id_map),
                        },
                    );
                }

                // Module constants.
                (
                    type_,
                    ValueConstructorVariant::ModuleConstant {
                        documentation,
                        implementations,
                        ..
                    },
                ) => {
                    let _ = constants.insert(
                        name.clone(),
                        ConstantInterface {
                            implementations: ImplementationsInterface::from_implementations(
                                &implementations,
                            ),
                            type_: TypeInterface::from_type(type_),
                            deprecation: DeprecationInterface::from_deprecation(&value.deprecation),
                            documentation,
                        },
                    );
                }

                // Record constructors and local variables are not part of the
                // module interface.
                _ => {}
            }
        }

        ModuleInterface {
            documentation: interface.documentation.clone(),
            types,
            type_aliases,
            constants,
            functions,
        }
    }
}
impl TypeInterface {
    /// Turns a type into its serialisable interface, using a fresh `IdMap` so
    /// this type's variable ids are numbered starting from 0.
    fn from_type(type_: &Type) -> TypeInterface {
        from_type_helper(type_, &mut IdMap::new())
    }
}
/// Turns a type into its interface, an `IdMap` is needed to make sure that all
/// the type variables' ids that appear in the type are mapped to an incremental
/// number and consistent with each other (that is, two types variables that
/// have the same id will also have the same incremental number in the end).
fn from_type_helper(type_: &Type, id_map: &mut IdMap) -> TypeInterface {
    match type_ {
        Type::Fn { arguments, return_ } => TypeInterface::Fn {
            parameters: arguments
                .iter()
                .map(|argument| from_type_helper(argument.as_ref(), id_map))
                .collect(),
            return_: Box::new(from_type_helper(return_, id_map)),
        },

        Type::Tuple { elements } => TypeInterface::Tuple {
            elements: elements
                .iter()
                .map(|element| from_type_helper(element.as_ref(), id_map))
                .collect(),
        },

        // Type variables are behind a RefCell; borrowing can only fail if the
        // cell is already mutably borrowed, which should not happen once
        // inference is complete.
        Type::Var { type_ } => match type_
            .as_ref()
            .try_borrow()
            .expect("borrow type after inference")
            .deref()
        {
            // Follow links through to the underlying type.
            TypeVar::Link { type_ } => from_type_helper(type_, id_map),
            // Since package serialisation happens after inference there
            // should be no unbound type variables.
            // TODO: This branch should be `unreachable!()` but because of
            //       https://github.com/gleam-lang/gleam/issues/2533
            //       we sometimes end up with those in top level
            //       definitions.
            //       However, `Unbound` and `Generic` ids are generated
            //       using the same generator so we have no problem treating
            //       unbound variables as generic ones since ids will never
            //       overlap.
            //       Once #2533 is closed this branch can be turned back to
            //       be unreachable!().
            TypeVar::Unbound { id } | TypeVar::Generic { id } => TypeInterface::Variable {
                id: id_map.map_id(*id),
            },
        },

        Type::Named {
            name,
            module,
            arguments,
            package,
            ..
        } => TypeInterface::Named {
            name: name.clone(),
            package: package.clone(),
            module: module.clone(),
            parameters: arguments
                .iter()
                .map(|argument| from_type_helper(argument.as_ref(), id_map))
                .collect(),
        },
    }
}
/// This is a map that is used to map type variable ids to progressive numbers
/// starting from 0.
/// After type inference the ids associated with type variables can be quite
/// high and are not the best to produce a human/machine readable output.
///
/// Imagine a function like this one: `pub fn wibble(item: a, rest: b) -> c`
/// What we want here is for type variables to have increasing ids starting from
/// 0: `a` with id `0`, `b` with id `1` and `c` with id `2`.
///
/// This map allows us to keep track of the ids we've run into and map those to
/// their incremental counterpart starting from 0.
struct IdMap {
    /// The next incremental number to hand out.
    next_id: u64,
    /// Maps an original type variable id to its incremental replacement.
    ids: HashMap<u64, u64>,
}
impl IdMap {
    /// Create a new map that will assign id numbers starting from 0.
    fn new() -> IdMap {
        IdMap {
            next_id: 0,
            ids: HashMap::new(),
        }
    }

    /// Map an id to its mapped counterpart starting from 0. If an id has never
    /// been seen before it will be assigned a new incremental number.
    fn map_id(&mut self, id: u64) -> u64 {
        // Use the entry API so the map is only searched once per call (the
        // previous get-then-insert hashed the key twice on the miss path).
        // Destructuring splits the borrow so `next_id` can be mutated inside
        // the closure while `ids` is mutably borrowed by `entry`.
        let IdMap { next_id, ids } = self;
        *ids.entry(id).or_insert_with(|| {
            let mapped_id = *next_id;
            *next_id += 1;
            mapped_id
        })
    }

    /// If the type is a type variable, and has not been seen before, it will
    /// be assigned a new incremental number.
    fn add_type_variable_id(&mut self, type_: &Type) {
        match type_ {
            // These types have no id to add to the map.
            Type::Named { .. } | Type::Fn { .. } | Type::Tuple { .. } => (),

            // If the type is actually a type variable whose id needs to be mapped.
            Type::Var { type_ } => match type_
                .as_ref()
                .try_borrow()
                .expect("borrow type after inference")
                .deref()
            {
                TypeVar::Link { .. } => (),
                TypeVar::Unbound { id } | TypeVar::Generic { id } => {
                    let _ = self.map_id(*id);
                }
            },
        }
    }
}
| rust | Apache-2.0 | f424547f02e621f1c5f28749786e05eda7feb098 | 2026-01-04T15:40:22.554517Z | false |
gleam-lang/gleam | https://github.com/gleam-lang/gleam/blob/f424547f02e621f1c5f28749786e05eda7feb098/compiler-core/src/encryption.rs | compiler-core/src/encryption.rs | use thiserror::Error;
/// Encrypts `message` with the given passphrase using age's scrypt recipient
/// and returns the ASCII-armored ciphertext.
pub fn encrypt_with_passphrase(
    message: &[u8],
    passphrase: &str,
) -> Result<String, age::EncryptError> {
    let passphrase = age::secrecy::SecretString::from(passphrase);
    // The passphrase is not used again, so it can be moved into the recipient
    // rather than cloned.
    let recipient = age::scrypt::Recipient::new(passphrase);
    age::encrypt_and_armor(&recipient, message)
}
// the function `decrypt_with_passphrase` has two possible failure cases:
// - when decryption fails
// - when the data was decrypted successfully but the result is not UTF-8 valid
#[derive(Error, Debug)]
pub enum DecryptError {
    /// Decryption itself failed (e.g. wrong passphrase or corrupt input).
    #[error("unable to decrypt message: {0}")]
    Decrypt(#[from] age::DecryptError),
    /// The message decrypted, but the plaintext was not valid UTF-8.
    // NOTE(review): despite the `Io` name this wraps `FromUtf8Error`, not an
    // I/O error — renaming would break callers matching on the variant.
    #[error("decrypted message is not UTF-8 valid: {0}")]
    Io(#[from] std::string::FromUtf8Error),
}
/// Decrypts an age-encrypted message with the given passphrase, returning the
/// plaintext as a UTF-8 string.
pub fn decrypt_with_passphrase(
    encrypted_message: &[u8],
    passphrase: &str,
) -> Result<String, DecryptError> {
    let passphrase = age::secrecy::SecretString::from(passphrase);
    let identity = age::scrypt::Identity::new(passphrase);
    let plaintext_bytes = age::decrypt(&identity, encrypted_message)?;
    Ok(String::from_utf8(plaintext_bytes)?)
}
| rust | Apache-2.0 | f424547f02e621f1c5f28749786e05eda7feb098 | 2026-01-04T15:40:22.554517Z | false |
gleam-lang/gleam | https://github.com/gleam-lang/gleam/blob/f424547f02e621f1c5f28749786e05eda7feb098/compiler-core/src/inline.rs | compiler-core/src/inline.rs | //! This module implements the function inlining optimisation. This allows
//! function calls to be inlined at the callsite, and replaced with the contents
//! of the function which is being called.
//!
//! Function inlining is useful for two main reasons:
//! - It removes the overhead of calling other functions and jumping around
//! execution too much
//! - It removes the barrier of the function call between the code around the
//! call, and the code inside the called function.
//!
//! For example, the following Gleam code makes heavy use of `use` sugar and higher
//! order functions:
//!
//! ```gleam
//! pub fn try_sum(list: List(Result(String, Nil)), sum: Int) -> Result(Int, Nil) {
//! use <- bool.guard(when: sum >= 1000, return: Ok(sum))
//! case list {
//! [] -> Ok(sum)
//! [first, ..rest] -> {
//! use number <- result.try(int.parse(first))
//! try_sum(rest, sum + number)
//! }
//! }
//! }
//! ```
//!
//! This can make the code easier to read, but it normally would have a performance
//! cost. There are two called functions, and two implicit anonymous functions.
//! This function is also not tail recursive, as it uses higher order functions
//! inside its body.
//!
//! However, with function inlining, the above code can be optimised to:
//!
//! ```gleam
//! pub fn try_sum(list: List(Result(String, Nil)), sum: Int) -> Result(Int, Nil) {
//! case sum >= 1000 {
//! True -> Ok(sum)
//! False -> case list {
//! [] -> Ok(sum)
//! [first, ..rest] -> {
//! case int.parse(first) {
//! Ok(number) -> try_sum(rest, sum + number)
//! Error(error) -> Error(error)
//! }
//! }
//! }
//! }
//! }
//! ```
//!
//! Which now has no extra function calls, and is tail recursive!
//!
//! The process of function inlining is quite simple really. It is implemented
//! using an AST folder, which traverses each node of the AST, and potentially
//! alters it as it goes.
//!
//! Every time we encounter a function call, we decide whether or not we can
//! inline it. For now, the criteria for inlining is very simple, although a
//! more complex heuristic-based approach will likely be implemented in the
//! future. For now though, a function can be inlined if:
//! It is a standard library function within the hardcoded list - which can be
//! found in the `inline_function` function - or, it is an anonymous function.
//!
//! Inlining anonymous functions allows us to:
//! - Remove calls to parameters of higher-order functions once those higher-
//! order functions have been inlined. For example, the following example using
//! `result.map`:
//! ```gleam
//! result.map(Ok(10), fn(x) { x + 1 })
//! ```
//!
//! Without inlining of anonymous function would be turned into:
//! ```gleam
//! case Ok(10) {
//! Ok(value) -> Ok(fn(x) { x + 1 }(value))
//! Error(error) -> Error(error)
//! }
//! ```
//!
//! However if we inline anonymous functions also, we remove every call, and
//! so it becomes:
//!
//! ```gleam
//! case Ok(10) {
//! Ok(value) -> Ok(value + 1)
//! Error(error) -> Error(error)
//! }
//! ```
//!
//! - Remove calls to anonymous functions in pipelines. Sometimes, an anonymous
//! function is used in a pipeline, which can sometimes be the result of an
//! expanded function capture. For example:
//!
//! ```gleam
//! "10" |> int.parse |> result.unwrap(0) |> fn(x) { x * x } |> something_else
//! ```
//!
//! This can now be desugared to:
//! ```gleam
//! let _pipe1 = "10"
//! let _pipe2 = int.parse(_pipe1)
//! let _pipe3 = result.unwrap(_pipe2, 0)
//! let _pipe4 = _pipe3 * _pipe3
//! something_else(_pipe4)
//! ```
//!
//! See documentation of individual functions to explain better how the process
//! works.
//!
#![allow(dead_code)]
use std::{
collections::{HashMap, HashSet},
sync::Arc,
};
use ecow::{EcoString, eco_format};
use itertools::Itertools;
use vec1::Vec1;
use crate::{
STDLIB_PACKAGE_NAME,
analyse::Inferred,
ast::{
self, ArgNames, Assert, AssignName, Assignment, AssignmentKind, BitArrayOption,
BitArraySegment, BitArraySize, CallArg, Clause, FunctionLiteralKind, Pattern,
PipelineAssignmentKind, Publicity, SrcSpan, Statement, TailPattern, TypedArg, TypedAssert,
TypedAssignment, TypedBitArraySize, TypedClause, TypedDefinitions, TypedExpr,
TypedExprBitArraySegment, TypedFunction, TypedModule, TypedPattern,
TypedPipelineAssignment, TypedStatement, TypedUse, visit::Visit,
},
exhaustiveness::{Body, CompiledCase, Decision},
type_::{
self, Deprecation, ModuleInterface, ModuleValueConstructor, PRELUDE_MODULE_NAME,
PatternConstructor, Type, TypedCallArg, ValueConstructor, ValueConstructorVariant,
collapse_links,
error::VariableOrigin,
expression::{Implementations, Purity},
},
};
/// Perform function inlining across an entire module, applying it to each
/// individual function.
pub fn module(
    mut module: TypedModule,
    modules: &im::HashMap<EcoString, ModuleInterface>,
) -> TypedModule {
    let mut inliner = Inliner::new(modules);
    // Only function definitions are rewritten; every other kind of definition
    // is carried over unchanged by the struct-update syntax below.
    module.definitions = TypedDefinitions {
        functions: module
            .definitions
            .functions
            .into_iter()
            .map(|function| inliner.function(function))
            .collect(),
        ..module.definitions
    };
    module
}
/// The AST folder that performs the inlining transformation for one module.
struct Inliner<'a> {
    /// Importable modules, containing information about functions which can be
    /// inlined
    modules: &'a im::HashMap<EcoString, ModuleInterface>,

    /// Any variables which can be inlined. This is used when inlining the body
    /// of function calls. Let's look at an example inlinable function:
    /// ```gleam
    /// pub fn add(a, b) {
    ///   a + b
    /// }
    /// ```
    /// If it is called - `add(1, 2)` - it can be inlined to the following:
    /// ```gleam
    /// {
    ///   let a = 1
    ///   let b = 2
    ///   a + b
    /// }
    /// ```
    ///
    /// However, this can be inlined further. Since `a` and `b` are only used
    /// once each in the body, the whole expression can be reduced to `1 + 2`.
    ///
    /// In the above example, this variable would contain `{a: 1, b: 2}`,
    /// indicating the names of the variables to be inlined, as well as the
    /// values to replace them with.
    inline_variables: HashMap<EcoString, TypedExpr>,

    /// The number we append to variable names in order to ensure uniqueness.
    variable_number: usize,

    /// Set of in-scope variables, used to determine when a conflict between
    /// variable names occurs during inlining.
    in_scope: HashSet<EcoString>,

    /// If two variables conflict in names during inlining, we need to rename
    /// one to avoid the conflict. Any variables renamed this way are stored
    /// here, keyed by original name with the replacement name as the value.
    renamed_variables: im::HashMap<EcoString, EcoString>,

    /// The current position, whether we are inside the body of an inlined
    /// function or not.
    position: Position,
}
/// Whether the code currently being traversed is part of a regular function
/// body or part of a function body that has been spliced in by inlining.
#[derive(Debug, Clone, Copy, PartialEq)]
enum Position {
    RegularFunction,
    InlinedFunction,
}
impl Inliner<'_> {
/// Creates a fresh inliner for one module, with empty scopes and no
/// pending renames.
fn new(modules: &im::HashMap<EcoString, ModuleInterface>) -> Inliner<'_> {
    Inliner {
        modules,
        position: Position::RegularFunction,
        variable_number: 0,
        inline_variables: HashMap::new(),
        in_scope: HashSet::new(),
        renamed_variables: im::HashMap::new(),
    }
}
/// Defines a variable in the current scope, renaming it if necessary.
/// Currently, this duplicates work performed in the code generators, where
/// variables are renamed in a similar way. But since inlining can change
/// scope boundaries, it needs to be performed here too. Ideally, we would
/// move all the deduplicating logic from the code generators to here where
/// we perform inlining, but that is a fairly large item of work.
fn define_variable(&mut self, name: EcoString) -> EcoString {
    let already_in_scope = !self.in_scope.insert(name.clone());

    // A name clash only needs fixing while we are splicing an inlined
    // function body into the caller's scope.
    if !already_in_scope || self.position != Position::InlinedFunction {
        return name;
    }

    // Prefixing the variable name with `_inline_` ensures it does
    // not conflict with other defined variables.
    let new_name = eco_format!("_inline_{name}_{}", self.variable_number);
    self.variable_number += 1;
    _ = self.renamed_variables.insert(name, new_name.clone());
    new_name
}
/// Get the name we are using for a variable, in case it is renamed.
fn variable_name(&self, name: EcoString) -> EcoString {
self.renamed_variables.get(&name).cloned().unwrap_or(name)
}
    /// Run the inlining pass over a top-level function definition.
    ///
    /// The function's named parameters are first recorded as in-scope
    /// variables, so that inlining performed inside the body can detect name
    /// conflicts, then each statement of the body is rewritten in order.
    fn function(&mut self, mut function: TypedFunction) -> TypedFunction {
        for argument in function.arguments.iter() {
            match &argument.names {
                // Discarded parameters introduce no usable name, so there is
                // nothing to register.
                ArgNames::Discard { .. } | ArgNames::LabelledDiscard { .. } => {}
                ArgNames::Named { name, .. } | ArgNames::NamedLabelled { name, .. } => {
                    _ = self.in_scope.insert(name.clone());
                }
            }
        }
        function.body = function
            .body
            .into_iter()
            .map(|statement| self.statement(statement))
            .collect_vec();
        function
    }
fn statement(&mut self, statement: TypedStatement) -> TypedStatement {
match statement {
Statement::Expression(expression_ast) => {
Statement::Expression(self.expression(expression_ast))
}
Statement::Assignment(assignment_ast) => {
Statement::Assignment(Box::new(self.assignment(*assignment_ast)))
}
Statement::Use(use_ast) => Statement::Use(self.use_(use_ast)),
Statement::Assert(assert_ast) => Statement::Assert(self.assert(assert_ast)),
}
}
fn assert(&mut self, assert: TypedAssert) -> TypedAssert {
let Assert {
location,
value,
message,
} = assert;
Assert {
location,
value: self.expression(value),
message: message.map(|expression| self.expression(expression)),
}
}
fn use_(&mut self, mut use_: TypedUse) -> TypedUse {
use_.call = self.boxed_expression(use_.call);
use_
}
fn assignment(&mut self, assignment: TypedAssignment) -> TypedAssignment {
let Assignment {
location,
value,
pattern,
kind,
annotation,
compiled_case,
} = assignment;
Assignment {
location,
value: self.expression(value),
pattern: self.register_pattern_variables(pattern),
kind: self.assignment_kind(kind),
annotation,
compiled_case,
}
}
    /// Register variables defined in a pattern so we correctly keep track of
    /// the scope, and rename any which conflict with existing variables.
    ///
    /// This walks the pattern recursively: every name-binding position goes
    /// through `define_variable` (which may rename it), and every nested
    /// sub-pattern is visited in turn. Non-binding leaf patterns are returned
    /// unchanged.
    fn register_pattern_variables(&mut self, pattern: TypedPattern) -> TypedPattern {
        match pattern {
            // These patterns bind no variables, so nothing to register.
            Pattern::Int { .. }
            | Pattern::Float { .. }
            | Pattern::String { .. }
            | Pattern::Discard { .. }
            | Pattern::Invalid { .. } => pattern,
            Pattern::Variable {
                location,
                name,
                type_,
                origin,
            } => Pattern::Variable {
                location,
                // The one true binding position: define (and maybe rename).
                name: self.define_variable(name),
                type_,
                origin,
            },
            // Size expressions reference already-bound variables rather than
            // binding new ones, so they go through the renaming lookup.
            Pattern::BitArraySize(size) => Pattern::BitArraySize(self.bit_array_size(size)),
            Pattern::Assign {
                name,
                location,
                pattern,
            } => Pattern::Assign {
                // `as name` binds a variable too.
                name: self.define_variable(name),
                location,
                pattern: Box::new(self.register_pattern_variables(*pattern)),
            },
            Pattern::List {
                location,
                elements,
                tail,
                type_,
            } => Pattern::List {
                location,
                elements: elements
                    .into_iter()
                    .map(|element| self.register_pattern_variables(element))
                    .collect(),
                tail: tail.map(|tail| {
                    Box::new(TailPattern {
                        location: tail.location,
                        pattern: self.register_pattern_variables(tail.pattern),
                    })
                }),
                type_,
            },
            Pattern::Constructor {
                location,
                name_location,
                name,
                arguments,
                module,
                constructor,
                spread,
                type_,
            } => Pattern::Constructor {
                location,
                name_location,
                name,
                // Only the argument patterns can bind variables; the
                // constructor name itself is left alone.
                arguments: arguments
                    .into_iter()
                    .map(
                        |CallArg {
                             label,
                             location,
                             value,
                             implicit,
                         }| CallArg {
                            label,
                            location,
                            value: self.register_pattern_variables(value),
                            implicit,
                        },
                    )
                    .collect(),
                module,
                constructor,
                spread,
                type_,
            },
            Pattern::Tuple { location, elements } => Pattern::Tuple {
                location,
                elements: elements
                    .into_iter()
                    .map(|element| self.register_pattern_variables(element))
                    .collect(),
            },
            Pattern::BitArray { location, segments } => Pattern::BitArray {
                location,
                segments: segments
                    .into_iter()
                    .map(|segment| {
                        self.bit_array_segment(segment, Self::register_pattern_variables)
                    })
                    .collect(),
            },
            Pattern::StringPrefix {
                location,
                left_location,
                left_side_assignment,
                right_location,
                left_side_string,
                right_side_assignment,
            } => Pattern::StringPrefix {
                location,
                left_location,
                // Both sides of a string-prefix pattern can bind names.
                left_side_assignment: left_side_assignment
                    .map(|(name, location)| (self.define_variable(name), location)),
                right_location,
                left_side_string,
                right_side_assignment: match right_side_assignment {
                    AssignName::Variable(name) => AssignName::Variable(self.define_variable(name)),
                    AssignName::Discard(name) => AssignName::Discard(name),
                },
            },
        }
    }
fn bit_array_size(&mut self, size: TypedBitArraySize) -> TypedBitArraySize {
match size {
BitArraySize::Int { .. } => size,
BitArraySize::Variable {
location,
name,
constructor,
type_,
} => BitArraySize::Variable {
location,
name: self.variable_name(name),
constructor,
type_,
},
BitArraySize::BinaryOperator {
location,
operator,
left,
right,
} => BitArraySize::BinaryOperator {
location,
operator,
left: Box::new(self.bit_array_size(*left)),
right: Box::new(self.bit_array_size(*right)),
},
BitArraySize::Block { location, inner } => BitArraySize::Block {
location,
inner: Box::new(self.bit_array_size(*inner)),
},
}
}
fn assignment_kind(&mut self, kind: AssignmentKind<TypedExpr>) -> AssignmentKind<TypedExpr> {
match kind {
AssignmentKind::Let | AssignmentKind::Generated => kind,
AssignmentKind::Assert {
location,
assert_keyword_start,
message,
} => AssignmentKind::Assert {
location,
assert_keyword_start,
message: message.map(|expression| self.expression(expression)),
},
}
}
fn boxed_expression(&mut self, boxed: Box<TypedExpr>) -> Box<TypedExpr> {
Box::new(self.expression(*boxed))
}
fn expressions(&mut self, expressions: Vec<TypedExpr>) -> Vec<TypedExpr> {
expressions
.into_iter()
.map(|expression| self.expression(expression))
.collect()
}
    /// Perform inlining over an expression. This function is recursive, as
    /// expressions can be deeply nested. Most expressions just recursively
    /// call this function on each of their component parts, but some have
    /// special handling: local variables may be substituted with an inlined
    /// value or renamed, and calls/pipelines/case/bit-arrays are delegated to
    /// dedicated helpers.
    fn expression(&mut self, mut expression: TypedExpr) -> TypedExpr {
        match expression {
            // These variants are left untouched by this pass.
            TypedExpr::Int { .. }
            | TypedExpr::Float { .. }
            | TypedExpr::String { .. }
            | TypedExpr::Fn { .. }
            | TypedExpr::ModuleSelect { .. }
            | TypedExpr::Invalid { .. } => expression,
            TypedExpr::Var {
                ref constructor,
                ref mut name,
                ..
            } => match &constructor.variant {
                // If this variable can be inlined, replace it with its value.
                // See the `inline_variables` documentation for an explanation.
                ValueConstructorVariant::LocalVariable { .. } => {
                    // We remove the variable as inlined variables can only be
                    // inlined once. `inline_variables` only contains variables
                    // which we have already checked are possible to inline, as
                    // we check for variables which are only used once when converting
                    // to an `InlinableFunction`.
                    match self.inline_variables.remove(name) {
                        Some(inlined_expression) => inlined_expression,
                        // Not an inlined variable: still apply any
                        // conflict-rename recorded for this name.
                        None => match self.renamed_variables.get(name) {
                            Some(new_name) => {
                                *name = new_name.clone();
                                expression
                            }
                            None => expression,
                        },
                    }
                }
                ValueConstructorVariant::ModuleConstant { .. }
                | ValueConstructorVariant::ModuleFn { .. }
                | ValueConstructorVariant::Record { .. } => expression,
            },
            TypedExpr::Block {
                location,
                statements,
            } => TypedExpr::Block {
                location,
                statements: statements.mapped(|statement| self.statement(statement)),
            },
            TypedExpr::NegateBool { location, value } => TypedExpr::NegateBool {
                location,
                value: self.boxed_expression(value),
            },
            TypedExpr::NegateInt { location, value } => TypedExpr::NegateInt {
                location,
                value: self.boxed_expression(value),
            },
            // Pipelines, calls, case expressions and bit arrays have their
            // own helpers below.
            TypedExpr::Pipeline {
                location,
                first_value,
                assignments,
                finally,
                finally_kind,
            } => self.pipeline(location, first_value, assignments, finally, finally_kind),
            TypedExpr::List {
                location,
                type_,
                elements,
                tail,
            } => TypedExpr::List {
                location,
                type_,
                elements: self.expressions(elements),
                tail: tail.map(|boxed_expression| self.boxed_expression(boxed_expression)),
            },
            TypedExpr::Call {
                location,
                type_,
                fun,
                arguments,
            } => self.call(location, type_, fun, arguments),
            TypedExpr::BinOp {
                location,
                type_,
                name,
                name_location,
                left,
                right,
            } => TypedExpr::BinOp {
                location,
                type_,
                name,
                name_location,
                left: self.boxed_expression(left),
                right: self.boxed_expression(right),
            },
            TypedExpr::Case {
                location,
                type_,
                subjects,
                clauses,
                compiled_case,
            } => self.case(location, type_, subjects, clauses, compiled_case),
            TypedExpr::RecordAccess {
                location,
                field_start,
                type_,
                label,
                index,
                record,
                documentation,
            } => TypedExpr::RecordAccess {
                location,
                field_start,
                type_,
                label,
                index,
                record: self.boxed_expression(record),
                documentation,
            },
            TypedExpr::PositionalAccess {
                location,
                type_,
                index,
                record,
            } => TypedExpr::PositionalAccess {
                location,
                type_,
                index,
                record: self.boxed_expression(record),
            },
            TypedExpr::Tuple {
                location,
                type_,
                elements,
            } => TypedExpr::Tuple {
                location,
                type_,
                elements: self.expressions(elements),
            },
            TypedExpr::TupleIndex {
                location,
                type_,
                index,
                tuple,
            } => TypedExpr::TupleIndex {
                location,
                type_,
                index,
                tuple: self.boxed_expression(tuple),
            },
            TypedExpr::Todo {
                location,
                message,
                kind,
                type_,
            } => TypedExpr::Todo {
                location,
                message: message.map(|boxed_expression| self.boxed_expression(boxed_expression)),
                kind,
                type_,
            },
            TypedExpr::Panic {
                location,
                message,
                type_,
            } => TypedExpr::Panic {
                location,
                message: message.map(|boxed_expression| self.boxed_expression(boxed_expression)),
                type_,
            },
            TypedExpr::Echo {
                location,
                type_,
                expression,
                message,
            } => TypedExpr::Echo {
                location,
                expression: expression.map(|expression| self.boxed_expression(expression)),
                message: message.map(|message| self.boxed_expression(message)),
                type_,
            },
            TypedExpr::BitArray {
                location,
                type_,
                segments,
            } => self.bit_array(location, type_, segments),
            TypedExpr::RecordUpdate {
                location,
                type_,
                record_assignment,
                constructor,
                arguments,
            } => TypedExpr::RecordUpdate {
                location,
                type_,
                record_assignment: record_assignment
                    .map(|assignment| Box::new(self.assignment(*assignment))),
                constructor: self.boxed_expression(constructor),
                arguments: self.arguments(arguments),
            },
        }
    }
fn arguments(&mut self, arguments: Vec<TypedCallArg>) -> Vec<TypedCallArg> {
arguments
.into_iter()
.map(
|TypedCallArg {
label,
location,
value,
implicit,
}| TypedCallArg {
label,
location,
value: self.expression(value),
implicit,
},
)
.collect()
}
    /// Where the magic happens. First, we check the left-hand side of the call
    /// to see if it's something we can inline. If not, we continue to walk the
    /// tree like all the other expressions do. If it can be inlined, we follow
    /// a three-step process:
    ///
    /// - Inlining: Here, we replace the reference to the function with an
    ///   anonymous function with the same contents. If the left-hand side is
    ///   already an anonymous function, we skip this step.
    ///
    /// - Beta reduction: The call to the anonymous function is transformed into
    ///   a block with assignments for each argument at the beginning
    ///
    /// - Optimisation: We then recursively optimise the block. This allows us
    ///   to, for example, inline anonymous functions passed to higher-order
    ///   functions.
    ///
    /// Here is an example of inlining `result.map`:
    ///
    /// Initial code:
    /// ```gleam
    /// let x = Ok(10)
    /// result.map(x, fn(x) {
    ///   let y = x + 4
    ///   int.to_string(y)
    /// })
    /// ```
    ///
    /// After inlining:
    /// ```gleam
    /// let x = Ok(10)
    /// fn(result, function) {
    ///   case result {
    ///     Ok(value) -> Ok(function(value))
    ///     Error(error) -> Error(error)
    ///   }
    /// }(x, fn(x) {
    ///   let y = x + 4
    ///   int.to_string(y)
    /// })
    /// ```
    ///
    /// After beta reduction:
    /// ```gleam
    /// let x = Ok(10)
    /// {
    ///   let result = x
    ///   let function = fn(x) {
    ///     let y = x + 4
    ///     int.to_string(y)
    ///   }
    ///   case result {
    ///     Ok(value) -> Ok(function(value))
    ///     Error(error) -> Error(error)
    ///   }
    /// }
    /// ```
    ///
    /// And finally, after the final optimising pass, where this inlining process
    /// is repeated:
    /// ```gleam
    /// let x = Ok(10)
    /// case x {
    ///   Ok(value) -> Ok({
    ///     let y = x + 4
    ///     int.to_string(y)
    ///   })
    ///   Error(error) -> Error(error)
    /// }
    /// ```
    ///
    fn call(
        &mut self,
        location: SrcSpan,
        type_: Arc<Type>,
        function: Box<TypedExpr>,
        arguments: Vec<TypedCallArg>,
    ) -> TypedExpr {
        // Rewrite the arguments first so any inlinable values they contain
        // are resolved before beta reduction.
        let arguments = self.arguments(arguments);
        // First, we traverse the left-hand side of this call. If this is called
        // inside another inlined function, this could potentially inline an
        // argument, allowing further inlining.
        let function = self.expression(*function);
        // If the left-hand side is in a block for some reason, for example
        // `{ fn(x) { x + 1 } }(10)`, we still want to be able to inline it.
        let function = expand_block(function);
        let function = match function {
            TypedExpr::Var {
                ref constructor,
                ref name,
                ..
            } => match &constructor.variant {
                ValueConstructorVariant::ModuleFn { module, .. } => {
                    // If the function is in the list of inlinable functions in
                    // the module it belongs to, we can inline it!
                    if let Some(function) = self
                        .modules
                        .get(module)
                        .and_then(|module| module.inline_functions.get(name))
                    {
                        // First, we do the actual inlining, by converting it to
                        // an anonymous function.
                        let (parameters, body) = function.to_anonymous_function();
                        // Then, we perform beta reduction, inlining the call to
                        // the anonymous function.
                        return self.inline_anonymous_function_call(
                            &parameters,
                            arguments,
                            body,
                            &function.inlinable_parameters,
                        );
                    } else {
                        function
                    }
                }
                // We cannot inline local variables or constants, as we do not
                // have enough information to inline them. Records are not actually
                // function calls, so they also cannot be inlined.
                ValueConstructorVariant::LocalVariable { .. }
                | ValueConstructorVariant::ModuleConstant { .. }
                | ValueConstructorVariant::Record { .. } => function,
            },
            TypedExpr::ModuleSelect {
                ref constructor,
                label: ref name,
                ref module_name,
                ..
            } => match constructor {
                // We use the same logic here as for `TypedExpr::Var` above.
                ModuleValueConstructor::Fn { .. } => {
                    if let Some(function) = self
                        .modules
                        .get(module_name)
                        .and_then(|module| module.inline_functions.get(name))
                    {
                        let (parameters, body) = function.to_anonymous_function();
                        return self.inline_anonymous_function_call(
                            &parameters,
                            arguments,
                            body,
                            &function.inlinable_parameters,
                        );
                    } else {
                        function
                    }
                }
                ModuleValueConstructor::Record { .. } | ModuleValueConstructor::Constant { .. } => {
                    function
                }
            },
            // Direct calls to anonymous functions can always be inlined
            TypedExpr::Fn {
                arguments: parameters,
                body,
                ..
            } => {
                let inlinable_parameters = find_inlinable_parameters(&parameters, &body);
                return self.inline_anonymous_function_call(
                    &parameters,
                    arguments,
                    body,
                    &inlinable_parameters,
                );
            }
            // Any other callee is left as-is; the call is rebuilt below.
            TypedExpr::Int { .. }
            | TypedExpr::Float { .. }
            | TypedExpr::String { .. }
            | TypedExpr::Block { .. }
            | TypedExpr::Pipeline { .. }
            | TypedExpr::List { .. }
            | TypedExpr::Call { .. }
            | TypedExpr::BinOp { .. }
            | TypedExpr::Case { .. }
            | TypedExpr::RecordAccess { .. }
            | TypedExpr::PositionalAccess { .. }
            | TypedExpr::Tuple { .. }
            | TypedExpr::TupleIndex { .. }
            | TypedExpr::Todo { .. }
            | TypedExpr::Panic { .. }
            | TypedExpr::Echo { .. }
            | TypedExpr::BitArray { .. }
            | TypedExpr::RecordUpdate { .. }
            | TypedExpr::NegateBool { .. }
            | TypedExpr::NegateInt { .. }
            | TypedExpr::Invalid { .. } => function,
        };
        TypedExpr::Call {
            location,
            type_,
            fun: Box::new(function),
            arguments,
        }
    }
/// Turn a call to an anonymous function into a block with assignments.
fn inline_anonymous_function_call(
&mut self,
| rust | Apache-2.0 | f424547f02e621f1c5f28749786e05eda7feb098 | 2026-01-04T15:40:22.554517Z | true |
gleam-lang/gleam | https://github.com/gleam-lang/gleam/blob/f424547f02e621f1c5f28749786e05eda7feb098/compiler-core/src/hex.rs | compiler-core/src/hex.rs | use camino::Utf8Path;
use debug_ignore::DebugIgnore;
use flate2::read::GzDecoder;
use futures::future;
use hexpm::{ApiError, version::Version};
use tar::Archive;
use crate::{
Error, Result,
io::{FileSystemReader, FileSystemWriter, HttpClient, TarUnpacker},
manifest::{ManifestPackage, ManifestPackageSource},
paths::{self, ProjectPaths},
};
/// Hex's RSA public key, PEM encoded. Not referenced in this file's visible
/// code — presumably used elsewhere to verify signed registry resources
/// fetched from hex.pm (TODO confirm at the use site).
pub const HEXPM_PUBLIC_KEY: &[u8] = b"-----BEGIN PUBLIC KEY-----
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEApqREcFDt5vV21JVe2QNB
Edvzk6w36aNFhVGWN5toNJRjRJ6m4hIuG4KaXtDWVLjnvct6MYMfqhC79HAGwyF+
IqR6Q6a5bbFSsImgBJwz1oadoVKD6ZNetAuCIK84cjMrEFRkELtEIPNHblCzUkkM
3rS9+DPlnfG8hBvGi6tvQIuZmXGCxF/73hU0/MyGhbmEjIKRtG6b0sJYKelRLTPW
XgK7s5pESgiwf2YC/2MGDXjAJfpfCd0RpLdvd4eRiXtVlE9qO9bND94E7PgQ/xqZ
J1i2xWFndWa6nfFnRxZmCStCOZWYYPlaxr+FZceFbpMwzTNs4g3d4tLNUcbKAIH4
0wIDAQAB
-----END PUBLIC KEY-----
";
/// The name under which this machine's Hex API key is registered,
/// namespaced with a `gleam-` prefix and the machine's hostname.
fn key_name(hostname: &str) -> String {
    let mut name = String::with_capacity("gleam-".len() + hostname.len());
    name.push_str("gleam-");
    name.push_str(hostname);
    name
}
pub async fn publish_package<Http: HttpClient>(
release_tarball: Vec<u8>,
version: String,
api_key: &str,
config: &hexpm::Config,
replace: bool,
http: &Http,
) -> Result<()> {
tracing::info!("Publishing package, replace: {}", replace);
let request = hexpm::api_publish_package_request(release_tarball, api_key, config, replace);
let response = http.send(request).await?;
hexpm::api_publish_package_response(response).map_err(|e| {
if let ApiError::NotReplacing = e {
Error::HexPublishReplaceRequired { version }
} else {
Error::hex(e)
}
})
}
/// Ask the Hex API to transfer ownership of a package to another user,
/// identified by username or email address.
pub async fn transfer_owner<Http: HttpClient>(
    api_key: &str,
    package_name: String,
    new_owner_username_or_email: String,
    config: &hexpm::Config,
    http: &Http,
) -> Result<()> {
    tracing::info!(
        "Transferring ownership of `{}` to {}",
        package_name,
        new_owner_username_or_email
    );
    let response = http
        .send(hexpm::api_transfer_owner_request(
            &package_name,
            &new_owner_username_or_email,
            api_key,
            config,
        ))
        .await?;
    hexpm::api_transfer_owner_response(response).map_err(Error::hex)
}
/// The reason supplied to Hex when retiring a package release. Mirrors
/// `hexpm::RetirementReason`; the strum derives allow parsing each variant
/// from its lowercase name (e.g. on the command line).
#[derive(Debug, strum::EnumString, strum::VariantNames, Clone, Copy, PartialEq, Eq)]
#[strum(serialize_all = "lowercase")]
pub enum RetirementReason {
    Other,
    Invalid,
    Security,
    Deprecated,
    Renamed,
}
impl RetirementReason {
pub fn to_library_enum(&self) -> hexpm::RetirementReason {
match self {
RetirementReason::Other => hexpm::RetirementReason::Other,
RetirementReason::Invalid => hexpm::RetirementReason::Invalid,
RetirementReason::Security => hexpm::RetirementReason::Security,
RetirementReason::Deprecated => hexpm::RetirementReason::Deprecated,
RetirementReason::Renamed => hexpm::RetirementReason::Renamed,
}
}
}
/// Mark a release of a Hex package as retired, giving a reason and an
/// optional explanatory message.
pub async fn retire_release<Http: HttpClient>(
    package: &str,
    version: &str,
    reason: RetirementReason,
    message: Option<&str>,
    api_key: &str,
    config: &hexpm::Config,
    http: &Http,
) -> Result<()> {
    tracing::info!(package=%package, version=%version, "retiring_hex_release");
    let response = http
        .send(hexpm::api_retire_release_request(
            package,
            version,
            reason.to_library_enum(),
            message,
            api_key,
            config,
        ))
        .await?;
    hexpm::api_retire_release_response(response).map_err(Error::hex)
}
/// Mark a previously retired release of a Hex package as no longer retired.
///
/// # Errors
/// Returns an error if the HTTP request fails or the Hex API rejects it.
pub async fn unretire_release<Http: HttpClient>(
    package: &str,
    version: &str,
    api_key: &str,
    config: &hexpm::Config,
    http: &Http,
) -> Result<()> {
    // Fixed log event name: this previously logged "retiring_hex_release"
    // (copied from `retire_release`), making traces of the two operations
    // indistinguishable.
    tracing::info!(package=%package, version=%version, "unretiring_hex_release");
    let request = hexpm::api_unretire_release_request(package, version, api_key, config);
    let response = http.send(request).await?;
    hexpm::api_unretire_release_response(response).map_err(Error::hex)
}
/// Authenticate with Hex using a username and password, creating a new API
/// key named after this machine's hostname.
pub async fn create_api_key<Http: HttpClient>(
    hostname: &str,
    username: &str,
    password: &str,
    config: &hexpm::Config,
    http: &Http,
) -> Result<String> {
    tracing::info!("Creating API key with Hex");
    let name = key_name(hostname);
    let request = hexpm::api_create_api_key_request(username, password, &name, config);
    let response = http.send(request).await?;
    hexpm::api_create_api_key_response(response).map_err(Error::hex)
}
/// Delete this machine's API key (named after its hostname) from Hex.
pub async fn remove_api_key<Http: HttpClient>(
    hostname: &str,
    config: &hexpm::Config,
    auth_key: &str,
    http: &Http,
) -> Result<()> {
    tracing::info!("Deleting API key from Hex");
    let name = key_name(hostname);
    let response = http
        .send(hexpm::api_remove_api_key_request(&name, auth_key, config))
        .await?;
    hexpm::api_remove_api_key_response(response).map_err(Error::hex)
}
/// Downloads Hex package tarballs into the global package cache and
/// extracts them into a project's build directory.
#[derive(Debug)]
pub struct Downloader {
    /// Filesystem read access. `DebugIgnore` keeps the trait object out of
    /// the derived `Debug` output.
    fs_reader: DebugIgnore<Box<dyn FileSystemReader>>,
    /// Filesystem write access.
    fs_writer: DebugIgnore<Box<dyn FileSystemWriter>>,
    /// HTTP client used to talk to the Hex repository.
    http: DebugIgnore<Box<dyn HttpClient>>,
    /// Unpacks downloaded tar archives.
    untar: DebugIgnore<Box<dyn TarUnpacker>>,
    /// Hex API/repository configuration.
    hex_config: hexpm::Config,
    /// Paths of the project the packages are being installed into.
    paths: ProjectPaths,
}
impl Downloader {
    /// Build a `Downloader` from its I/O capabilities, using the default
    /// `hexpm` configuration.
    pub fn new(
        fs_reader: Box<dyn FileSystemReader>,
        fs_writer: Box<dyn FileSystemWriter>,
        http: Box<dyn HttpClient>,
        untar: Box<dyn TarUnpacker>,
        paths: ProjectPaths,
    ) -> Self {
        Self {
            fs_reader: DebugIgnore(fs_reader),
            fs_writer: DebugIgnore(fs_writer),
            http: DebugIgnore(http),
            untar: DebugIgnore(untar),
            hex_config: hexpm::Config::new(),
            paths,
        }
    }

    /// Ensure the package's tarball is present in the global package cache,
    /// downloading it from Hex if it is not already there.
    ///
    /// Returns `Ok(true)` if a download was performed, `Ok(false)` if the
    /// tarball was already cached.
    ///
    /// # Panics
    /// Panics if the package's manifest source is not Hex.
    pub async fn ensure_package_downloaded(
        &self,
        package: &ManifestPackage,
    ) -> Result<bool, Error> {
        let outer_checksum = match &package.source {
            ManifestPackageSource::Hex { outer_checksum } => outer_checksum,
            ManifestPackageSource::Git { .. } | ManifestPackageSource::Local { .. } => {
                panic!("Attempt to download non-hex package from hex")
            }
        };
        let tarball_path = paths::global_package_cache_package_tarball(
            &package.name,
            &package.version.to_string(),
        );
        // Already cached: nothing to do.
        if self.fs_reader.is_file(&tarball_path) {
            tracing::info!(
                package = package.name.as_str(),
                version = %package.version,
                "package_in_cache"
            );
            return Ok(false);
        }
        tracing::info!(
            package = &package.name.as_str(),
            version = %package.version,
            "downloading_package_to_cache"
        );
        let request = hexpm::repository_get_package_tarball_request(
            &package.name,
            &package.version.to_string(),
            None,
            &self.hex_config,
        );
        let response = self.http.send(request).await?;
        // The response helper also verifies the tarball against the
        // checksum recorded in the manifest.
        let tarball = hexpm::repository_get_package_tarball_response(response, &outer_checksum.0)
            .map_err(|error| Error::DownloadPackageError {
                package_name: package.name.to_string(),
                package_version: package.version.to_string(),
                error: error.to_string(),
            })?;
        self.fs_writer.write_bytes(&tarball_path, &tarball)?;
        Ok(true)
    }

    /// Download the package to the cache if needed, then extract it into the
    /// project's build directory. Returns whether an extraction happened.
    pub async fn ensure_package_in_build_directory(
        &self,
        package: &ManifestPackage,
    ) -> Result<bool> {
        let _ = self.ensure_package_downloaded(package).await?;
        self.extract_package_from_cache(&package.name, &package.version)
    }

    // It would be really nice if this was async but the library is sync
    /// Extract a cached package tarball into the project's build packages
    /// directory. Returns `Ok(false)` if the package directory already
    /// exists, `Ok(true)` after a successful extraction.
    pub fn extract_package_from_cache(&self, name: &str, version: &Version) -> Result<bool> {
        let contents_path = Utf8Path::new("contents.tar.gz");
        let destination = self.paths.build_packages_package(name);
        // If the directory already exists then there's nothing for us to do
        if self.fs_reader.is_directory(&destination) {
            tracing::info!(package = name, "Package already in build directory");
            return Ok(false);
        }
        tracing::info!(package = name, "writing_package_to_target");
        let tarball = paths::global_package_cache_package_tarball(name, &version.to_string());
        let reader = self.fs_reader.reader(&tarball)?;
        let mut archive = Archive::new(reader);
        // Find the source code from within the outer tarball
        for entry in self.untar.entries(&mut archive)? {
            let file = entry.map_err(Error::expand_tar)?;
            let path = file.header().path().map_err(Error::expand_tar)?;
            if path.as_ref() == contents_path {
                // Expand this inner source code and write to the file system
                let archive = Archive::new(GzDecoder::new(file));
                let result = self.untar.unpack(&destination, archive);
                // If we failed to expand the tarball remove any source code
                // that was partially written so that we don't mistakenly think
                // the operation succeeded next time we run.
                return match result {
                    Ok(()) => Ok(true),
                    Err(err) => {
                        self.fs_writer.delete_directory(&destination)?;
                        Err(err)
                    }
                };
            }
        }
        Err(Error::ExpandTar {
            error: "Unable to locate Hex package contents.tar.gz".into(),
        })
    }

    /// Download and extract every Hex package in the iterator, skipping the
    /// root project itself. Downloads run concurrently.
    pub async fn download_hex_packages<'a, Packages: Iterator<Item = &'a ManifestPackage>>(
        &self,
        packages: Packages,
        project_name: &str,
    ) -> Result<()> {
        let futures = packages
            .filter(|package| project_name != package.name)
            .map(|package| self.ensure_package_in_build_directory(package));
        // Run the futures to download the packages concurrently
        let results = future::join_all(futures).await;
        // Surface the first error, if any download or extraction failed
        for result in results {
            let _ = result?;
        }
        Ok(())
    }
}
/// Upload a documentation archive for the given package release to Hex.
pub async fn publish_documentation<Http: HttpClient>(
    name: &str,
    version: &Version,
    archive: Vec<u8>,
    api_key: &str,
    config: &hexpm::Config,
    http: &Http,
) -> Result<()> {
    tracing::info!("publishing_documentation");
    let version = version.to_string();
    let request = hexpm::api_publish_docs_request(name, &version, archive, api_key, config)
        .map_err(Error::hex)?;
    let response = http.send(request).await?;
    hexpm::api_publish_docs_response(response).map_err(Error::hex)
}
pub async fn get_package_release<Http: HttpClient>(
name: &str,
version: &Version,
config: &hexpm::Config,
http: &Http,
) -> Result<hexpm::Release<hexpm::ReleaseMeta>> {
let version = version.to_string();
tracing::info!(
name = name,
version = version.as_str(),
"looking_up_package_release"
);
let request = hexpm::api_get_package_release_request(name, &version, None, config);
let response = http.send(request).await?;
hexpm::api_get_package_release_response(response).map_err(Error::hex)
}
| rust | Apache-2.0 | f424547f02e621f1c5f28749786e05eda7feb098 | 2026-01-04T15:40:22.554517Z | false |
gleam-lang/gleam | https://github.com/gleam-lang/gleam/blob/f424547f02e621f1c5f28749786e05eda7feb098/compiler-core/src/paths.rs | compiler-core/src/paths.rs | use crate::build::{Mode, Target};
use camino::{Utf8Path, Utf8PathBuf};
/// Name of the directory used for Gleam's intermediate build artefacts.
/// Not referenced in this file's visible code — used elsewhere in the build
/// system.
pub const ARTEFACT_DIRECTORY_NAME: &str = "_gleam_artefacts";
/// Resolves the well-known files and directories of a Gleam project,
/// all relative to a single project root directory.
#[derive(Debug, Clone)]
pub struct ProjectPaths {
    /// The project's root directory; every path this type produces is
    /// joined onto it.
    root: Utf8PathBuf,
}
impl ProjectPaths {
    /// Create a path resolver rooted at the given project directory.
    pub fn new(root: Utf8PathBuf) -> Self {
        Self { root }
    }

    /// A resolver rooted at the filesystem root (`C:\` on Windows, `/`
    /// elsewhere).
    pub fn at_filesystem_root() -> Self {
        let path = if cfg!(target_family = "windows") {
            r"C:\"
        } else {
            "/"
        };
        Self::new(Utf8PathBuf::from(path))
    }

    /// The project's root directory.
    pub fn root(&self) -> &Utf8Path {
        &self.root
    }

    /// The project's `gleam.toml` configuration file.
    pub fn root_config(&self) -> Utf8PathBuf {
        self.root.join("gleam.toml")
    }

    /// The project's `README.md`.
    pub fn readme(&self) -> Utf8PathBuf {
        self.root.join("README.md")
    }

    /// The dependency manifest, `manifest.toml`.
    pub fn manifest(&self) -> Utf8PathBuf {
        self.root.join("manifest.toml")
    }

    /// The `src` directory holding the project's source modules.
    pub fn src_directory(&self) -> Utf8PathBuf {
        self.root.join("src")
    }

    /// The `test` directory.
    pub fn test_directory(&self) -> Utf8PathBuf {
        self.root.join("test")
    }

    /// The `dev` directory.
    pub fn dev_directory(&self) -> Utf8PathBuf {
        self.root.join("dev")
    }

    /// The `build` directory all compilation output goes into.
    pub fn build_directory(&self) -> Utf8PathBuf {
        self.root.join("build")
    }

    /// Directory that downloaded dependency packages are extracted into.
    pub fn build_packages_directory(&self) -> Utf8PathBuf {
        self.build_directory().join("packages")
    }

    /// The `packages.toml` file inside the build packages directory.
    pub fn build_packages_toml(&self) -> Utf8PathBuf {
        self.build_packages_directory().join("packages.toml")
    }

    /// Directory of a single extracted dependency package.
    pub fn build_packages_package(&self, package_name: &str) -> Utf8PathBuf {
        self.build_packages_directory().join(package_name)
    }

    // build_deps_package_config
    /// The `gleam.toml` of a single extracted dependency package.
    pub fn build_packages_package_config(&self, package_name: &str) -> Utf8PathBuf {
        self.build_packages_package(package_name).join("gleam.toml")
    }

    /// Location of the Hex release tarball produced by `gleam export`.
    pub fn build_export_hex_tarball(&self, package_name: &str, version: &str) -> Utf8PathBuf {
        self.build_directory()
            .join(format!("{package_name}-{version}.tar"))
    }

    /// Build subdirectory for a compilation mode (e.g. dev or prod).
    pub fn build_directory_for_mode(&self, mode: Mode) -> Utf8PathBuf {
        self.build_directory().join(mode.to_string())
    }

    /// Directory an exported Erlang shipment is assembled in.
    pub fn erlang_shipment_directory(&self) -> Utf8PathBuf {
        self.build_directory().join("erlang-shipment")
    }

    /// Directory that generated HTML documentation for a package is
    /// written to. Docs are always built in dev mode.
    pub fn build_documentation_directory(&self, package: &str) -> Utf8PathBuf {
        self.build_directory_for_mode(Mode::Dev)
            .join("docs")
            .join(package)
    }

    /// Build subdirectory for a mode/target combination.
    pub fn build_directory_for_target(&self, mode: Mode, target: Target) -> Utf8PathBuf {
        self.build_directory_for_mode(mode).join(target.to_string())
    }

    /// Note this uses the "application name", not the name of this package.
    /// This is because in BEAM applications one can specify an application
    /// name that is not the same as the Hex package name. Ideally we would
    /// always use the package name, but the BEAM runtime knows nothing
    /// about packages, only applications, so it will look on the filesystem
    /// for the application name when loading it.
    pub fn build_directory_for_package(
        &self,
        mode: Mode,
        target: Target,
        application_name: &str,
    ) -> Utf8PathBuf {
        self.build_directory_for_target(mode, target)
            .join(application_name)
    }

    /// Glob matching every package's `ebin` directory for a mode/target.
    pub fn build_packages_ebins_glob(&self, mode: Mode, target: Target) -> Utf8PathBuf {
        self.build_directory_for_package(mode, target, "*")
            .join("ebin")
    }

    /// A path to a special file that contains the version of gleam that last built
    /// the artifacts. If this file does not match the current version of gleam we
    /// will rebuild from scratch
    pub fn build_gleam_version(&self, mode: Mode, target: Target) -> Utf8PathBuf {
        self.build_directory_for_target(mode, target)
            .join("gleam_version")
    }
}
/// Location in the global package cache of the tarball for the given
/// package name and version.
pub fn global_package_cache_package_tarball(package_name: &str, version: &str) -> Utf8PathBuf {
    let file_name = format!("{package_name}-{version}.tar");
    global_packages_cache().join(file_name)
}
/// File in which the hex.pm credentials are stored.
pub fn global_hexpm_credentials_path() -> Utf8PathBuf {
    let mut path = global_hexpm_cache();
    path.push("credentials");
    path
}
/// The `hex/hexpm` subdirectory of the global Gleam cache.
fn global_hexpm_cache() -> Utf8PathBuf {
    let mut path = default_global_gleam_cache();
    path.push("hex");
    path.push("hexpm");
    path
}
/// Directory the downloaded hex.pm package tarballs are cached in.
fn global_packages_cache() -> Utf8PathBuf {
    let mut path = global_hexpm_cache();
    path.push("packages");
    path
}
/// The `gleam` directory inside the platform's per-user cache directory.
///
/// # Panics
/// Panics if the platform cache directory cannot be determined or is not
/// valid UTF-8.
pub fn default_global_gleam_cache() -> Utf8PathBuf {
    let cache_directory = dirs_next::cache_dir()
        .expect("Failed to determine user cache directory")
        .join("gleam");
    Utf8PathBuf::from_path_buf(cache_directory).expect("Non Utf8 Path")
}
/// Build a relative path that climbs back out of `within`: one `..`
/// component for each component of the given path.
pub fn unnest(within: &Utf8Path) -> Utf8PathBuf {
    let mut path = Utf8PathBuf::new();
    for _component in within.iter() {
        path.push("..");
    }
    path
}
#[test]
fn paths() {
    // Smoke-test that the global cache paths keep their expected layout.
    let cache = default_global_gleam_cache();
    assert!(cache.ends_with("gleam"));
    assert!(global_packages_cache().ends_with("hex/hexpm/packages"));
    let stdlib = global_package_cache_package_tarball("gleam_stdlib", "0.17.1");
    assert!(stdlib.ends_with("hex/hexpm/packages/gleam_stdlib-0.17.1.tar"));
    let elli = global_package_cache_package_tarball("elli", "1.0.0");
    assert!(elli.ends_with("hex/hexpm/packages/elli-1.0.0.tar"));
}
| rust | Apache-2.0 | f424547f02e621f1c5f28749786e05eda7feb098 | 2026-01-04T15:40:22.554517Z | false |
gleam-lang/gleam | https://github.com/gleam-lang/gleam/blob/f424547f02e621f1c5f28749786e05eda7feb098/compiler-core/src/error.rs | compiler-core/src/error.rs | #![allow(clippy::unwrap_used, clippy::expect_used)]
use crate::bit_array::UnsupportedOption;
use crate::build::{Origin, Outcome, Runtime, Target};
use crate::dependency::{PackageFetcher, ResolutionError};
use crate::diagnostic::{Diagnostic, ExtraLabel, Label, Location};
use crate::derivation_tree::DerivationTreePrinter;
use crate::parse::error::ParseErrorDetails;
use crate::strings::{to_snake_case, to_upper_camel_case};
use crate::type_::collapse_links;
use crate::type_::error::{
IncorrectArityContext, InvalidImportKind, MissingAnnotation, ModuleValueUsageContext, Named,
RecordField, UnknownField, UnknownTypeHint, UnsafeRecordUpdateReason,
};
use crate::type_::printer::{Names, Printer};
use crate::type_::{FieldAccessUsage, error::PatternMatchKind};
use crate::{ast::BinOp, parse::error::ParseErrorType, type_::Type};
use crate::{bit_array, diagnostic::Level, type_::UnifyErrorSituation};
use ecow::EcoString;
use hexpm::version::Version;
use itertools::Itertools;
use std::borrow::Cow;
use std::fmt::{Debug, Display};
use std::io::Write;
use std::path::PathBuf;
use std::sync::Arc;
use termcolor::Buffer;
use thiserror::Error;
use vec1::Vec1;
use camino::{Utf8Path, Utf8PathBuf};
/// Alias for identifier-like strings used throughout error construction.
pub type Name = EcoString;
/// Crate-wide result alias whose error type defaults to this crate's
/// [`Error`].
pub type Result<Ok, Err = Error> = std::result::Result<Ok, Err>;
#[cfg(test)]
pub mod tests;
/// `format!`-style macro that passes the formatted string through the
/// `wrap` helper (defined elsewhere in this file — presumably line-wraps
/// long diagnostic text; confirm at its definition).
macro_rules! wrap_format {
    ($($tts:tt)*) => {
        wrap(&format!($($tts)*))
    }
}
/// Details attached to `Error::UnknownImport`; boxed there to keep the
/// error variant small.
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct UnknownImportDetails {
    /// Name of the module that could not be found.
    pub module: Name,
    /// Span of the offending import statement.
    pub location: crate::ast::SrcSpan,
    /// Path of the file containing the import.
    pub path: Utf8PathBuf,
    /// Source text of the importing module.
    pub src: EcoString,
    /// Names of the modules that do exist — presumably used to suggest
    /// alternatives when rendering the diagnostic (not shown here).
    pub modules: Vec<EcoString>,
}
/// Location information for one module participating in an import cycle.
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct ImportCycleLocationDetails {
    /// Span of the import that forms part of the cycle.
    pub location: crate::ast::SrcSpan,
    /// Path of the file the import appears in.
    pub path: Utf8PathBuf,
    /// Source text of that file.
    pub src: EcoString,
}
#[derive(Debug, Eq, PartialEq, Error, Clone)]
pub enum Error {
#[error("failed to parse Gleam source code")]
Parse {
path: Utf8PathBuf,
src: EcoString,
error: Box<crate::parse::error::ParseError>,
},
#[error("type checking failed")]
Type {
path: Utf8PathBuf,
src: EcoString,
errors: Vec1<crate::type_::Error>,
names: Box<Names>,
},
#[error("unknown import {import}")]
UnknownImport {
import: EcoString,
// Boxed to prevent this variant from being overly large
details: Box<UnknownImportDetails>,
},
#[error("duplicate module {module}")]
DuplicateModule {
module: Name,
first: Utf8PathBuf,
second: Utf8PathBuf,
},
#[error("duplicate source file {file}")]
DuplicateSourceFile { file: String },
#[error("duplicate native Erlang module {module}")]
DuplicateNativeErlangModule {
module: Name,
first: Utf8PathBuf,
second: Utf8PathBuf,
},
#[error("gleam module {module} clashes with native file of same name")]
ClashingGleamModuleAndNativeFileName {
module: Name,
gleam_file: Utf8PathBuf,
native_file: Utf8PathBuf,
},
#[error("cyclical module imports")]
ImportCycle {
modules: Vec1<(EcoString, ImportCycleLocationDetails)>,
},
#[error("cyclical package dependencies")]
PackageCycle { packages: Vec<EcoString> },
#[error("{action:?} {path:?} failed: {err:?}")]
FileIo {
kind: FileKind,
action: FileIoAction,
path: Utf8PathBuf,
err: Option<String>,
},
#[error("Non Utf-8 Path: {path}")]
NonUtf8Path { path: PathBuf },
#[error("{error}")]
GitInitialization { error: String },
#[error("io operation failed")]
StandardIo {
action: StandardIoAction,
err: Option<std::io::ErrorKind>,
},
#[error("source code incorrectly formatted")]
Format { problem_files: Vec<Unformatted> },
#[error("Hex error: {0}")]
Hex(String),
#[error("{error}")]
ExpandTar { error: String },
#[error("{err}")]
AddTar { path: Utf8PathBuf, err: String },
#[error("{0}")]
TarFinish(String),
#[error("{0}")]
Gzip(String),
#[error("shell program `{program}` not found")]
ShellProgramNotFound { program: String, os: OS },
#[error("shell program `{program}` failed")]
ShellCommand {
program: String,
reason: ShellCommandFailureReason,
},
#[error("{name} is not a valid project name")]
InvalidProjectName {
name: String,
reason: InvalidProjectNameReason,
},
#[error("{module} is not a valid module name")]
InvalidModuleName { module: String },
#[error("{module} is not module")]
ModuleDoesNotExist {
module: EcoString,
suggestion: Option<EcoString>,
},
#[error("{module} does not have a main function")]
ModuleDoesNotHaveMainFunction { module: EcoString, origin: Origin },
#[error("{module} does not have a public main function")]
MainFunctionIsPrivate { module: EcoString },
#[error("{module}'s main function has the wrong arity so it can not be run")]
MainFunctionHasWrongArity { module: EcoString, arity: usize },
#[error("{module}'s main function does not support the current target")]
MainFunctionDoesNotSupportTarget { module: EcoString, target: Target },
#[error("{input} is not a valid version. {error}")]
InvalidVersionFormat { input: String, error: String },
#[error("incompatible locked version. {error}")]
IncompatibleLockedVersion { error: String },
#[error("project root already exists")]
ProjectRootAlreadyExist { path: String },
#[error("File(s) already exist in {}",
file_names.iter().map(|x| x.as_str()).join(", "))]
OutputFilesAlreadyExist { file_names: Vec<Utf8PathBuf> },
#[error("Packages not exist: {}", packages.iter().join(", "))]
RemovedPackagesNotExist { packages: Vec<String> },
#[error("unable to find project root")]
UnableToFindProjectRoot { path: String },
#[error("gleam.toml version {toml_ver} does not match .app version {app_ver}")]
VersionDoesNotMatch { toml_ver: String, app_ver: String },
#[error("metadata decoding failed")]
MetadataDecodeError { error: Option<String> },
#[error("warnings are not permitted")]
ForbiddenWarnings { count: usize },
#[error("Invalid runtime for {target} target: {invalid_runtime}")]
InvalidRuntime {
target: Target,
invalid_runtime: Runtime,
},
#[error("package downloading failed: {error}")]
DownloadPackageError {
package_name: String,
package_version: String,
error: String,
},
#[error("{0}")]
Http(String),
#[error("Failed to create canonical path for package {0}")]
DependencyCanonicalizationFailed(String),
#[error("Could not find versions that satisfy dependency requirements")]
DependencyResolutionNoSolution {
root_package_name: EcoString,
derivation_tree:
Box<NeverEqual<pubgrub::DerivationTree<String, pubgrub::Ranges<Version>, String>>>,
},
#[error("Dependency resolution failed: {0}")]
DependencyResolutionError(String),
#[error("The package {0} is listed in dependencies and dev-dependencies")]
DuplicateDependency(EcoString),
#[error("Expected package {expected} at path {path} but found {found} instead")]
WrongDependencyProvided {
path: Utf8PathBuf,
expected: String,
found: String,
},
#[error("The package {package} is provided multiple times, as {source_1} and {source_2}")]
ProvidedDependencyConflict {
package: String,
source_1: String,
source_2: String,
},
#[error("The package was missing required fields for publishing")]
MissingHexPublishFields {
description_missing: bool,
licence_missing: bool,
},
#[error("Dependency {package:?} has not been published to Hex")]
PublishNonHexDependencies { package: String },
#[error("The package {package} uses unsupported build tools {build_tools:?}")]
UnsupportedBuildTool {
package: String,
build_tools: Vec<EcoString>,
},
#[error("Opening docs at {path} failed: {error}")]
FailedToOpenDocs { path: Utf8PathBuf, error: String },
#[error(
"The package {package} requires a Gleam version satisfying \
{required_version} and you are using v{gleam_version}"
)]
IncompatibleCompilerVersion {
package: String,
required_version: String,
gleam_version: String,
},
#[error("The --javascript-prelude flag must be given when compiling to JavaScript")]
JavaScriptPreludeRequired,
#[error("The modules {unfinished:?} contain todo expressions and so cannot be published")]
CannotPublishTodo { unfinished: Vec<EcoString> },
#[error("The modules {unfinished:?} contain todo expressions and so cannot be published")]
CannotPublishEcho { unfinished: Vec<EcoString> },
#[error(
"The modules {unfinished:?} contain internal types in their public API so cannot be published"
)]
CannotPublishLeakedInternalType { unfinished: Vec<EcoString> },
#[error("The modules {unfinished:?} are empty and so cannot be published")]
CannotPublishEmptyModules { unfinished: Vec<EcoString> },
#[error("Publishing packages to reserve names is not permitted")]
HexPackageSquatting,
#[error("The package includes the default main function so cannot be published")]
CannotPublishWithDefaultMain { package_name: EcoString },
#[error("Corrupt manifest.toml")]
CorruptManifest,
#[error("The Gleam module {path} would overwrite the Erlang module {name}")]
GleamModuleWouldOverwriteStandardErlangModule { name: EcoString, path: Utf8PathBuf },
#[error("Version already published")]
HexPublishReplaceRequired { version: String },
#[error("The gleam version constraint is wrong and so cannot be published")]
CannotPublishWrongVersion {
minimum_required_version: SmallVersion,
wrongfully_allowed_version: SmallVersion,
},
#[error("Failed to encrypt local Hex API key")]
FailedToEncryptLocalHexApiKey { detail: String },
#[error("Failed to decrypt local Hex API key")]
FailedToDecryptLocalHexApiKey { detail: String },
#[error("Cannot add a package with the same name as a dependency")]
CannotAddSelfAsDependency { name: EcoString },
}
/// Wrapper whose equality comparison always fails, regardless of the
/// wrapped value — used to exempt a field from a containing type's
/// `PartialEq` while still satisfying trait bounds.
///
/// Note: `eq` always returning `false` intentionally violates `Eq`'s
/// reflexivity; do not rely on this type for meaningful comparisons.
#[derive(Debug, Clone)]
pub struct NeverEqual<T>(pub T);

impl<T> PartialEq for NeverEqual<T> {
    fn eq(&self, _: &Self) -> bool {
        false
    }
}

impl<T> Eq for NeverEqual<T> {}
/// This is to make clippy happy and not make the error variant too big by
/// storing an entire `hexpm::version::Version` in the error.
///
/// This is enough to report wrong Gleam compiler versions.
///
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub struct SmallVersion {
    // Components are stored as `u8`; conversion from a full `Version`
    // (see `from_hexpm`) is lossy for components greater than 255.
    major: u8,
    minor: u8,
    patch: u8,
}
impl Display for SmallVersion {
    /// Formats the version as `major.minor.patch`, e.g. `1.2.3`.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Write straight to the formatter rather than allocating an
        // intermediate String with `format!` and copying it across.
        write!(f, "{}.{}.{}", self.major, self.minor, self.patch)
    }
}
impl SmallVersion {
    /// Converts a full `hexpm` version into the compact form.
    ///
    /// NOTE(review): the `as u8` casts silently truncate components
    /// greater than 255. Real compiler versions are far below this, but
    /// the conversion is lossy in general.
    pub fn from_hexpm(version: Version) -> Self {
        Self {
            major: version.major as u8,
            minor: version.minor as u8,
            patch: version.patch as u8,
        }
    }
}
/// The operating system the compiler is running on, e.g. used when
/// reporting that a shell program could not be found.
#[derive(Debug, Clone, Eq, PartialEq, Copy)]
pub enum OS {
    Linux(Distro),
    MacOS,
    Windows,
    Other,
}

/// The Linux distribution in use, when it could be identified.
#[derive(Debug, Clone, Eq, PartialEq, Copy)]
pub enum Distro {
    Ubuntu,
    Debian,
    Other,
}

/// Parses an operating system identifier ("macos", "windows", "linux", …)
/// into an [`OS`] value.
pub fn parse_os(os: &str, distro: &str) -> OS {
    // Linux is special-cased so the distribution can be recorded as well.
    if os == "linux" {
        return OS::Linux(parse_linux_distribution(distro));
    }
    match os {
        "macos" => OS::MacOS,
        "windows" => OS::Windows,
        _ => OS::Other,
    }
}

/// Identifies a Linux distribution from its id string.
pub fn parse_linux_distribution(distro: &str) -> Distro {
    if distro == "ubuntu" {
        Distro::Ubuntu
    } else if distro == "debian" {
        Distro::Debian
    } else {
        Distro::Other
    }
}
/// Why running an external shell command failed.
#[derive(Debug, Eq, PartialEq, Clone)]
pub enum ShellCommandFailureReason {
    /// When we don't have any context about the failure
    Unknown,
    /// When the actual running of the command failed for some reason.
    IoError(std::io::ErrorKind),
    /// When the shell command returned an error status
    ShellCommandError(String),
}
impl Error {
pub fn http<E>(error: E) -> Error
where
E: std::error::Error,
{
Self::Http(error.to_string())
}
pub fn hex<E>(error: E) -> Error
where
E: std::error::Error,
{
Self::Hex(error.to_string())
}
pub fn add_tar<P, E>(path: P, error: E) -> Error
where
P: AsRef<Utf8Path>,
E: std::error::Error,
{
Self::AddTar {
path: path.as_ref().to_path_buf(),
err: error.to_string(),
}
}
pub fn finish_tar<E>(error: E) -> Error
where
E: std::error::Error,
{
Self::TarFinish(error.to_string())
}
pub fn dependency_resolution_failed<T: PackageFetcher>(
error: ResolutionError<'_, T>,
root_package_name: EcoString,
) -> Error {
match error {
ResolutionError::NoSolution(derivation_tree) => Self::DependencyResolutionNoSolution {
root_package_name,
derivation_tree: Box::new(NeverEqual(derivation_tree)),
},
ResolutionError::ErrorRetrievingDependencies {
package,
version,
source,
} => Self::DependencyResolutionError(format!(
"An error occurred while trying to retrieve dependencies of {package}@{version}: {source}",
)),
ResolutionError::ErrorChoosingVersion { package, source } => {
Self::DependencyResolutionError(format!(
"An error occurred while choosing the version of {package}: {source}",
))
}
ResolutionError::ErrorInShouldCancel(err) => Self::DependencyResolutionError(format!(
"Dependency resolution was cancelled. {err}"
)),
}
}
pub fn expand_tar<E>(error: E) -> Error
where
E: std::error::Error,
{
Self::ExpandTar {
error: error.to_string(),
}
}
}
impl<T> From<Error> for Outcome<T, Error> {
    // A bare `Error` always represents a total failure: no partial
    // result is available to carry alongside it.
    fn from(error: Error) -> Self {
        Outcome::TotalFailure(error)
    }
}
// Cap'n Proto errors arise while decoding module metadata (see the
// `MetadataDecodeError` diagnostic), so both error types map onto it.
impl From<capnp::Error> for Error {
    fn from(error: capnp::Error) -> Self {
        Error::MetadataDecodeError {
            error: Some(error.to_string()),
        }
    }
}
impl From<capnp::NotInSchema> for Error {
    fn from(error: capnp::NotInSchema) -> Self {
        Error::MetadataDecodeError {
            error: Some(error.to_string()),
        }
    }
}
/// The specific rule a proposed project name violated. The user-facing
/// explanation for each case is produced by
/// `format_invalid_project_name_error`.
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub enum InvalidProjectNameReason {
    Format,
    FormatNotLowercase,
    GleamPrefix,
    ErlangReservedWord,
    ErlangStandardLibraryModule,
    GleamReservedWord,
    GleamReservedModule,
}
/// Builds the user-facing message explaining why `name` was rejected as a
/// project name, optionally offering `with_suggestion` as an alternative
/// name the user might use instead.
pub fn format_invalid_project_name_error(
    name: &str,
    reason: &InvalidProjectNameReason,
    with_suggestion: &Option<String>,
) -> String {
    // Human-readable explanation for each rejection reason.
    let reason_message = match reason {
        InvalidProjectNameReason::ErlangReservedWord => "is a reserved word in Erlang.",
        InvalidProjectNameReason::ErlangStandardLibraryModule => {
            "is a standard library module in Erlang."
        }
        InvalidProjectNameReason::GleamReservedWord => "is a reserved word in Gleam.",
        InvalidProjectNameReason::GleamReservedModule => "is a reserved module name in Gleam.",
        InvalidProjectNameReason::FormatNotLowercase => {
            "does not have the correct format. Project names \
may only contain lowercase letters."
        }
        InvalidProjectNameReason::Format => {
            "does not have the correct format. Project names \
must start with a lowercase letter and may only contain lowercase letters, \
numbers and underscores."
        }
        InvalidProjectNameReason::GleamPrefix => {
            "has the reserved prefix `gleam_`. \
This prefix is intended for official Gleam packages only."
        }
    };
    // Offer the suggested replacement name if one was computed, otherwise
    // ask the user to pick a different name themselves.
    match with_suggestion {
        Some(suggested_name) => wrap_format!(
            "We were not able to create your project as `{}` {}
Would you like to name your project '{}' instead?",
            name,
            reason_message,
            suggested_name
        ),
        None => wrap_format!(
            "We were not able to create your project as `{}` {}
Please try again with a different project name.",
            name,
            reason_message
        ),
    }
}
/// Whether a standard-stream operation was a read or a write.
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub enum StandardIoAction {
    Read,
    Write,
}

impl StandardIoAction {
    /// Verb phrase used when describing the failed operation to the user.
    fn text(&self) -> &'static str {
        match self {
            Self::Read => "read from",
            Self::Write => "write to",
        }
    }
}
/// The operation being performed on a file when an I/O error occurred.
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub enum FileIoAction {
    Link,
    Open,
    Copy,
    Read,
    Parse,
    Delete,
    // Rename,
    Create,
    WriteTo,
    Canonicalise,
    UpdatePermissions,
    FindParent,
    ReadMetadata,
}

impl FileIoAction {
    /// Verb phrase describing the action, for composing error messages
    /// such as "could not <action> <path>".
    fn text(&self) -> &'static str {
        match self {
            Self::Link => "link",
            Self::Open => "open",
            Self::Copy => "copy",
            Self::Read => "read",
            Self::Parse => "parse",
            Self::Delete => "delete",
            // Self::Rename => "rename",
            Self::Create => "create",
            Self::WriteTo => "write to",
            Self::Canonicalise => "canonicalise",
            Self::UpdatePermissions => "update permissions of",
            Self::FindParent => "find the parent of",
            Self::ReadMetadata => "read metadata of",
        }
    }
}
/// Whether a filesystem path refers to a file or a directory.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum FileKind {
    File,
    Directory,
}

impl FileKind {
    /// The lowercase English noun for this kind, for error messages.
    fn text(&self) -> &'static str {
        match self {
            Self::File => "file",
            Self::Directory => "directory",
        }
    }
}
// https://github.com/rust-lang/rust/blob/03994e498df79aa1f97f7bbcfd52d57c8e865049/compiler/rustc_span/src/edit_distance.rs
/// Computes the Damerau-Levenshtein distance between `a` and `b`: the
/// number of insertions, deletions, substitutions and adjacent
/// transpositions needed to turn one string into the other.
///
/// Returns `None` when the distance exceeds `limit`, otherwise
/// `Some(distance)`.
pub fn edit_distance(a: &str, b: &str, limit: usize) -> Option<usize> {
    let mut a = &a.chars().collect::<Vec<_>>()[..];
    let mut b = &b.chars().collect::<Vec<_>>()[..];
    // Ensure `a` is the longer (or equal-length) string.
    if a.len() < b.len() {
        std::mem::swap(&mut a, &mut b);
    }
    let min_dist = a.len() - b.len();
    // If we know the limit will be exceeded, we can return early.
    if min_dist > limit {
        return None;
    }
    // Strip the common suffix: `split_last` peels matching characters off
    // the *end* of both strings, which cannot change the distance.
    while !b.is_empty() && !a.is_empty() {
        let (b_last, b_rest) = b.split_last().expect("Failed to split 'b' slice");
        let (a_last, a_rest) = a.split_last().expect("Failed to split 'a' slice");
        if b_last == a_last {
            a = a_rest;
            b = b_rest;
        } else {
            break;
        }
    }
    // If either string is empty, the distance is the length of the other.
    // We know that `b` is the shorter string, so we don't need to check `a`.
    if b.is_empty() {
        return Some(min_dist);
    }
    // Three rolling rows of the DP matrix: two rows back (needed for the
    // transposition case), the previous row, and the row being filled.
    let mut prev_prev = vec![usize::MAX; b.len() + 1];
    let mut prev = (0..=b.len()).collect::<Vec<_>>();
    let mut current = vec![0; b.len() + 1];
    // row by row
    for i in 1..=a.len() {
        if let Some(element) = current.get_mut(0) {
            *element = i;
        }
        let a_idx = i - 1;
        // column by column
        for j in 1..=b.len() {
            let b_idx = j - 1;
            // There is no cost to substitute a character with itself.
            let substitution_cost = match (a.get(a_idx), b.get(b_idx)) {
                (Some(&a_char), Some(&b_char)) => {
                    if a_char == b_char {
                        0
                    } else {
                        1
                    }
                }
                _ => panic!("Index out of bounds"),
            };
            let insertion = current.get(j - 1).map_or(usize::MAX, |&x| x + 1);
            if let Some(value) = current.get_mut(j) {
                *value = std::cmp::min(
                    // deletion
                    prev.get(j).map_or(usize::MAX, |&x| x + 1),
                    std::cmp::min(
                        // insertion
                        insertion,
                        // substitution
                        prev.get(j - 1)
                            .map_or(usize::MAX, |&x| x + substitution_cost),
                    ),
                );
            }
            if (i > 1)
                && (j > 1)
                && let (Some(&a_val), Some(&b_val_prev), Some(&a_val_prev), Some(&b_val)) = (
                    a.get(a_idx),
                    b.get(b_idx - 1),
                    a.get(a_idx - 1),
                    b.get(b_idx),
                )
                && (a_val == b_val_prev)
                && (a_val_prev == b_val)
            {
                // transposition
                if let Some(curr) = current.get_mut(j)
                    && let Some(&prev_prev_val) = prev_prev.get(j - 2)
                {
                    *curr = std::cmp::min(*curr, prev_prev_val + 1);
                }
            }
        }
        // Rotate the buffers, reusing the memory.
        [prev_prev, prev, current] = [prev, current, prev_prev];
    }
    // `prev` because we already rotated the buffers.
    let distance = match prev.get(b.len()) {
        Some(&d) => d,
        None => usize::MAX,
    };
    (distance <= limit).then_some(distance)
}
/// Edit distance variant that favours exact substring matches: the raw
/// distance is discounted by the length difference so a full substring
/// match scores very low, while wildly different lengths are penalised.
fn edit_distance_with_substrings(a: &str, b: &str, limit: usize) -> Option<usize> {
    let a_len = a.chars().count();
    let b_len = b.chars().count();
    // When one string is less than half the other's length the two are
    // considered to differ greatly in size.
    let big_len_diff = a_len * 2 < b_len || b_len * 2 < a_len;
    let len_diff = a_len.abs_diff(b_len);
    // Allow the raw distance to exceed `limit` by the length difference,
    // since that difference is discounted below.
    let distance = edit_distance(a, b, limit + len_diff)?;
    // Subtracting the length difference makes an exact substring match 0.
    let base = distance - len_diff;
    let score = if base == 0 && len_diff > 0 && !big_len_diff {
        // Exact substring match, but not a whole-word match: keep non-zero.
        1
    } else if !big_len_diff {
        // Similar lengths: discount half of the length difference.
        base + len_diff.div_ceil(2)
    } else {
        // Very different lengths: charge the full length difference back.
        base + len_diff
    };
    (score <= limit).then_some(score)
}
/// Picks the candidate from `options` closest to `name` and phrases it as
/// a "Did you mean `...`?" hint, or returns `None` if nothing is close
/// enough.
fn did_you_mean(name: &str, options: &[EcoString]) -> Option<String> {
    // With a single candidate there is nothing to rank: suggest it as-is.
    // This seems to solve the `unknown_variable_3` test.
    if let [only] = options {
        return Some(format!("Did you mean `{only}`?"));
    }
    // A case-insensitive exact match beats any distance-based ranking,
    // which matters for very short or single-character names where edit
    // distances are uninformative (see `type_vars_must_be_declared`).
    if let Some(exact) = options.iter().find(|o| o.eq_ignore_ascii_case(name)) {
        return Some(format!("Did you mean `{exact}`?"));
    }
    // Only consider candidates within a third of the name's length
    // (minimum 1), so wildly different names are never suggested.
    let max_distance = std::cmp::max(name.chars().count() / 3, 1);
    options
        .iter()
        .filter(|&option| option != crate::ast::CAPTURE_VARIABLE)
        // Sorting first makes `min_by_key` (which keeps the first minimum)
        // deterministic when several candidates tie.
        .sorted()
        .filter_map(|option| {
            edit_distance_with_substrings(option, name, max_distance)
                .map(|distance| (option, distance))
        })
        .min_by_key(|&(_, distance)| distance)
        .map(|(best, _)| format!("Did you mean `{best}`?"))
}
/// Formats `value` with its English ordinal suffix: 1st, 2nd, 3rd, 4th, …
fn to_ordinal(value: u32) -> String {
    // 11, 12 and 13 (and 111, 112, 113, …) are "teens" and always take
    // `th`, so inspect the tens digit — not the whole `value / 10` — before
    // looking at the final digit.
    match value % 10 {
        _ if (value / 10) % 10 == 1 => format!("{value}th"),
        1 => format!("{value}st"),
        2 => format!("{value}nd"),
        3 => format!("{value}rd"),
        _ => format!("{value}th"),
    }
}
impl Error {
pub fn pretty_string(&self) -> String {
let mut nocolor = Buffer::no_color();
self.pretty(&mut nocolor);
String::from_utf8(nocolor.into_inner()).expect("Error printing produced invalid utf8")
}
    /// Writes every diagnostic for this error into `buffer`, each followed
    /// by a blank line.
    pub fn pretty(&self, buffer: &mut Buffer) {
        for diagnostic in self.to_diagnostics() {
            diagnostic.write(buffer);
            writeln!(buffer).expect("write new line after diagnostic");
        }
    }
pub fn to_diagnostics(&self) -> Vec<Diagnostic> {
use crate::type_::Error as TypeError;
match self {
Error::HexPackageSquatting => {
let text =
"You appear to be attempting to reserve a name on Hex rather than publishing a
working package. This is against the Hex terms of service and can result in
package deletion or account suspension.
"
.into();
vec![Diagnostic {
title: "Invalid Hex package".into(),
text,
level: Level::Error,
location: None,
hint: None,
}]
}
Error::CannotPublishWithDefaultMain { package_name } => {
let text = wrap_format!(
"Packages with the default main function cannot be published
Remove or modify the main function that contains only:
`io.println(\"Hello from {package_name}!\")`"
);
vec![Diagnostic {
title: "Cannot publish with default main function".into(),
text,
level: Level::Error,
location: None,
hint: None,
}]
}
Error::MetadataDecodeError { error } => {
let mut text = "A problem was encountered when decoding the metadata for one \
of the Gleam dependency modules."
.to_string();
if let Some(error) = error {
text.push_str("\nThe error from the decoder library was:\n\n");
text.push_str(error);
}
vec![Diagnostic {
title: "Failed to decode module metadata".into(),
text,
level: Level::Error,
location: None,
hint: None,
}]
}
Error::InvalidProjectName { name, reason } => {
let text = format_invalid_project_name_error(name, reason, &None);
vec![Diagnostic {
title: "Invalid project name".into(),
text,
hint: None,
level: Level::Error,
location: None,
}]
}
Error::InvalidModuleName { module } => vec![Diagnostic {
title: "Invalid module name".into(),
text: format!(
"`{module}` is not a valid module name.
Module names can only contain lowercase letters, underscore, and
forward slash and must not end with a slash."
),
level: Level::Error,
location: None,
hint: None,
}],
Error::ModuleDoesNotExist { module, suggestion } => {
let hint = match suggestion {
Some(suggestion) => format!("Did you mean `{suggestion}`?"),
None => format!("Try creating the file `src/{module}.gleam`."),
};
vec![Diagnostic {
title: "Module does not exist".into(),
text: format!("Module `{module}` was not found."),
level: Level::Error,
location: None,
hint: Some(hint),
}]
}
Error::ModuleDoesNotHaveMainFunction { module, origin } => vec![Diagnostic {
title: "Module does not have a main function".into(),
text: format!(
"`{module}` does not have a main function so the module can not be run."
),
level: Level::Error,
location: None,
hint: Some(format!(
"Add a public `main` function to `{}/{module}.gleam`.",
origin.folder_name()
)),
}],
Error::MainFunctionIsPrivate { module } => vec![Diagnostic {
title: "Module does not have a public main function".into(),
text: wrap_format!(
"`{module}` has a main function, but it is private, so it cannot be run."
),
level: Level::Error,
location: None,
hint: Some(wrap_format!(
"Make the `main` function in the `{module}` module public."
)),
}],
Error::MainFunctionDoesNotSupportTarget { module, target } => vec![Diagnostic {
title: "Target not supported".into(),
text: wrap_format!(
"`{module}` has a main function, but it does not support the {target} \
target, so it cannot be run."
),
level: Level::Error,
location: None,
hint: None,
}],
Error::MainFunctionHasWrongArity { module, arity } => vec![Diagnostic {
title: "Main function has wrong arity".into(),
text: wrap_format!(
"`{module}:main` should have an arity of 0 to be run but its arity is {arity}."
),
level: Level::Error,
location: None,
hint: Some("Change the function signature of main to `pub fn main() {}`.".into()),
}],
Error::ProjectRootAlreadyExist { path } => vec![Diagnostic {
title: "Project folder already exists".into(),
text: format!("Project folder root:\n\n {path}"),
level: Level::Error,
hint: None,
location: None,
}],
Error::OutputFilesAlreadyExist { file_names } => vec![Diagnostic {
title: format!(
"{} already exist{} in target directory",
if file_names.len() == 1 {
"File"
} else {
"Files"
},
if file_names.len() == 1 { "" } else { "s" }
),
text: format!(
"{}
If you want to overwrite these files, delete them and run the command again.
",
file_names
.iter()
.map(|name| format!(" - {}", name.as_str()))
.join("\n")
),
level: Level::Error,
hint: None,
location: None,
}],
Error::RemovedPackagesNotExist { packages } => vec![Diagnostic {
title: "Package not found".into(),
text: format!(
"These packages are not dependencies of your package so they could not
be removed.
{}
",
packages
.iter()
.map(|p| format!(" - {}", p.as_str()))
.join("\n")
),
level: Level::Error,
hint: None,
location: None,
}],
Error::CannotPublishTodo { unfinished } => vec![Diagnostic {
title: "Cannot publish unfinished code".into(),
text: format!(
"These modules contain todo expressions and cannot be published:
{}
Please remove them and try again.
",
unfinished
.iter()
.map(|name| format!(" - {}", name.as_str()))
| rust | Apache-2.0 | f424547f02e621f1c5f28749786e05eda7feb098 | 2026-01-04T15:40:22.554517Z | true |
#![allow(warnings)]
mod elixir_libraries;
mod module_loader;
mod native_file_copier;
pub mod package_compiler;
mod package_loader;
mod project_compiler;
mod telemetry;
#[cfg(test)]
mod tests;
pub use self::package_compiler::PackageCompiler;
pub use self::package_loader::StaleTracker;
pub use self::project_compiler::{Built, Options, ProjectCompiler};
pub use self::telemetry::{NullTelemetry, Telemetry};
use crate::ast::{
self, CallArg, CustomType, DefinitionLocation, TypeAst, TypedArg, TypedConstant,
TypedCustomType, TypedDefinitions, TypedExpr, TypedFunction, TypedImport, TypedModuleConstant,
TypedPattern, TypedRecordConstructor, TypedStatement, TypedTypeAlias,
};
use crate::type_::{Type, TypedCallArg};
use crate::{
ast::{Definition, SrcSpan, TypedModule},
config::{self, PackageConfig},
erlang,
error::{Error, FileIoAction, FileKind},
io::OutputFile,
parse::extra::{Comment, ModuleExtra},
type_,
};
use camino::Utf8PathBuf;
use ecow::EcoString;
use itertools::Itertools;
use serde::{Deserialize, Serialize};
use std::fmt::{Debug, Display};
use std::sync::Arc;
use std::time::SystemTime;
use std::{collections::HashMap, ffi::OsString, fs::DirEntry, iter::Peekable, process};
use strum::{Display, EnumIter, EnumString, EnumVariantNames, VariantNames};
use vec1::Vec1;
/// A compilation target for Gleam code.
#[derive(
    Debug,
    Serialize,
    Deserialize,
    Display,
    EnumString,
    EnumVariantNames,
    EnumIter,
    Clone,
    Copy,
    PartialEq,
    Eq,
)]
#[strum(serialize_all = "lowercase")]
pub enum Target {
    /// Parsed/serialised as "erlang", with "erl" accepted as an alias.
    #[strum(serialize = "erlang", serialize = "erl")]
    #[serde(rename = "erlang", alias = "erl")]
    Erlang,
    /// Parsed/serialised as "javascript", with "js" accepted as an alias.
    #[strum(serialize = "javascript", serialize = "js")]
    #[serde(rename = "javascript", alias = "js")]
    JavaScript,
}
impl Target {
    /// All target names as strings, e.g. for CLI validation or help text.
    pub fn variant_strings() -> Vec<EcoString> {
        Self::VARIANTS.iter().map(|s| (*s).into()).collect()
    }
    /// Returns `true` if the target is [`JavaScript`].
    ///
    /// [`JavaScript`]: Target::JavaScript
    #[must_use]
    pub fn is_javascript(&self) -> bool {
        matches!(self, Self::JavaScript)
    }
    /// Returns `true` if the target is [`Erlang`].
    ///
    /// [`Erlang`]: Target::Erlang
    #[must_use]
    pub fn is_erlang(&self) -> bool {
        matches!(self, Self::Erlang)
    }
}
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
/// This is used to skip compiling the root package when running the main
/// function coming from a dependency. This way a dependency can be run even
/// if there's compilation errors in the root package.
///
pub enum Compile {
    /// The default compiler behaviour, compile all packages.
    ///
    All,
    /// Only compile the dependency packages, skipping the root package.
    ///
    DepsOnly,
}
/// Which packages should have code generated for them during a build.
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub enum Codegen {
    All,
    DepsOnly,
    None,
}

impl Codegen {
    /// Whether code should be generated for a package, given whether that
    /// package is the root package of the project.
    fn should_codegen(&self, is_root_package: bool) -> bool {
        match (self, is_root_package) {
            (Codegen::All, _) => true,
            (Codegen::DepsOnly, root) => !root,
            (Codegen::None, _) => false,
        }
    }
}
/// The JavaScript runtime used to run code built for the JavaScript
/// target.
#[derive(
    Debug, Serialize, Deserialize, Display, EnumString, EnumVariantNames, Clone, Copy, PartialEq, Eq,
)]
pub enum Runtime {
    /// Parsed/serialised as "nodejs", with "node" accepted as an alias.
    #[strum(serialize = "nodejs", serialize = "node")]
    #[serde(rename = "nodejs", alias = "node")]
    NodeJs,
    /// Parsed/serialised as "deno".
    #[strum(serialize = "deno")]
    #[serde(rename = "deno")]
    Deno,
    /// Parsed/serialised as "bun".
    #[strum(serialize = "bun")]
    #[serde(rename = "bun")]
    Bun,
}
impl Default for Runtime {
    // Node.js is the runtime used when none is configured.
    fn default() -> Self {
        Self::NodeJs
    }
}
/// Target-specific options for code generation.
#[derive(Debug)]
pub enum TargetCodegenConfiguration {
    JavaScript {
        /// Whether to emit TypeScript definitions alongside the generated
        /// JavaScript.
        emit_typescript_definitions: bool,
        /// Location of the JavaScript prelude module.
        prelude_location: Utf8PathBuf,
    },
    Erlang {
        /// Configuration for generating the OTP `.app` file, if one should
        /// be generated at all.
        app_file: Option<ErlangAppCodegenConfiguration>,
    },
}
impl TargetCodegenConfiguration {
    /// The [`Target`] this configuration applies to.
    pub fn target(&self) -> Target {
        match self {
            Self::JavaScript { .. } => Target::JavaScript,
            Self::Erlang { .. } => Target::Erlang,
        }
    }
}
/// Options for generating the OTP `.app` file of an Erlang-target build.
#[derive(Debug)]
pub struct ErlangAppCodegenConfiguration {
    /// Whether dev dependencies should be listed in the generated file.
    pub include_dev_deps: bool,
    /// Some packages have a different OTP application name than their package
    /// name, as rebar3 (and Mix?) support this. The .app file must use the OTP
    /// name, not the package name.
    pub package_name_overrides: HashMap<EcoString, EcoString>,
}
/// The mode a build runs in: development, production, or inside the
/// language server.
#[derive(
    Debug,
    Serialize,
    Deserialize,
    Display,
    EnumString,
    EnumVariantNames,
    EnumIter,
    Clone,
    Copy,
    PartialEq,
    // `Eq` added for consistency with `Target`, `Compile` and `Codegen`;
    // all variants are unit variants so the derive is free.
    Eq,
)]
#[strum(serialize_all = "lowercase")]
pub enum Mode {
    Dev,
    Prod,
    Lsp,
}
impl Mode {
    /// Returns `true` if the mode includes development code.
    ///
    pub fn includes_dev_code(&self) -> bool {
        matches!(self, Self::Dev | Self::Lsp)
    }

    /// Returns `true` if dev dependencies are available in this mode.
    pub fn includes_dev_dependencies(&self) -> bool {
        matches!(self, Mode::Dev | Mode::Lsp)
    }
}
#[test]
fn mode_includes_dev_code() {
    assert!(Mode::Dev.includes_dev_code());
    assert!(Mode::Lsp.includes_dev_code());
    assert!(!Mode::Prod.includes_dev_code());
    // Dev dependency availability follows the same rule; cover it too.
    assert!(Mode::Dev.includes_dev_dependencies());
    assert!(Mode::Lsp.includes_dev_dependencies());
    assert!(!Mode::Prod.includes_dev_dependencies());
}
/// A package processed by a build: its configuration and modules.
#[derive(Debug)]
pub struct Package {
    pub config: PackageConfig,
    /// The package's compiled modules.
    pub modules: Vec<Module>,
    // Presumably the names of modules whose artefacts were reused from a
    // previous build rather than recompiled — confirm with the loader.
    pub cached_module_names: Vec<EcoString>,
}
impl Package {
    /// Attaches doc comments and module comments to every module in the
    /// package. See `Module::attach_doc_and_module_comments`.
    pub fn attach_doc_and_module_comments(&mut self) {
        // `&mut self.modules` already yields `&mut Module`, so no `mut`
        // binding is needed on the loop variable.
        for module in &mut self.modules {
            module.attach_doc_and_module_comments();
        }
    }

    /// Consumes the package, returning its modules keyed by module name.
    pub fn into_modules_hashmap(self) -> HashMap<String, Module> {
        self.modules
            .into_iter()
            .map(|m| (m.name.to_string(), m))
            .collect()
    }
}
/// A single Gleam module along with its source text and build metadata.
#[derive(Debug)]
pub struct Module {
    pub name: EcoString,
    /// The full source text of the module.
    pub code: EcoString,
    /// Modification time of the source file.
    pub mtime: SystemTime,
    /// Path the module was read from.
    pub input_path: Utf8PathBuf,
    /// Which source directory the module came from (see `Origin`).
    pub origin: Origin,
    /// The type-checked AST of the module.
    pub ast: TypedModule,
    /// Comment spans and other non-AST information kept from parsing.
    pub extra: ModuleExtra,
    /// Names of modules this module depends on, with the location of each
    /// reference.
    pub dependencies: Vec<(EcoString, SrcSpan)>,
}
#[derive(Debug)]
/// A data structure used to store all definitions in a single homogeneous
/// vector and sort them by their position in order to attach to each the right
/// documentation.
///
enum DocumentableDefinition<'a> {
    Constant(&'a mut TypedModuleConstant),
    TypeAlias(&'a mut TypedTypeAlias),
    CustomType(&'a mut TypedCustomType),
    Function(&'a mut TypedFunction),
    Import(&'a mut TypedImport),
}
impl<'a> DocumentableDefinition<'a> {
    /// The definition's source location, used to sort definitions back
    /// into source order before matching doc comments to them.
    pub fn location(&self) -> SrcSpan {
        match self {
            Self::Constant(module_constant) => module_constant.location,
            Self::TypeAlias(type_alias) => type_alias.location,
            Self::CustomType(custom_type) => custom_type.location,
            Self::Function(function) => function.location,
            Self::Import(import) => import.location,
        }
    }
    /// Attaches documentation (start byte offset, text) to the definition.
    /// Imports do not carry documentation, so it is discarded for them.
    pub fn put_doc(&mut self, new_documentation: (u32, EcoString)) {
        match self {
            Self::Import(_import) => (),
            Self::Function(function) => {
                let _ = function.documentation.replace(new_documentation);
            }
            Self::TypeAlias(type_alias) => {
                let _ = type_alias.documentation.replace(new_documentation);
            }
            Self::CustomType(custom_type) => {
                let _ = custom_type.documentation.replace(new_documentation);
            }
            Self::Constant(constant) => {
                let _ = constant.documentation.replace(new_documentation);
            }
        }
    }
}
impl Module {
    /// The name of the Erlang module generated for this Gleam module
    /// (`/` separators replaced with `@`).
    pub fn erlang_name(&self) -> EcoString {
        module_erlang_name(&self.name)
    }
    /// The relative path of the compiled `.erl` file for this module.
    pub fn compiled_erlang_path(&self) -> Utf8PathBuf {
        let mut path = Utf8PathBuf::from(&module_erlang_name(&self.name));
        assert!(path.set_extension("erl"), "Couldn't set file extension");
        path
    }
    /// Finds the AST node covering the given byte index, if any.
    pub fn find_node(&self, byte_index: u32) -> Option<Located<'_>> {
        self.ast.find_node(byte_index)
    }
    /// Walks the comment data in `self.extra` and attaches module comments
    /// and doc comments to the AST nodes they document.
    pub fn attach_doc_and_module_comments(&mut self) {
        // Module Comments
        self.ast.documentation = self
            .extra
            .module_comments
            .iter()
            .map(|span| Comment::from((span, self.code.as_str())).content.into())
            .collect();
        self.ast.type_info.documentation = self.ast.documentation.clone();
        // Order definitions to avoid misassociating doc comments after the
        // order has changed during compilation.
        let TypedDefinitions {
            imports,
            constants,
            custom_types,
            type_aliases,
            functions,
        } = &mut self.ast.definitions;
        // Definitions are stored per kind; merge them into one sequence
        // sorted by source position so comments can be matched positionally.
        let mut definitions = ((imports.iter_mut()).map(DocumentableDefinition::Import))
            .chain((constants.iter_mut()).map(DocumentableDefinition::Constant))
            .chain((custom_types.iter_mut()).map(DocumentableDefinition::CustomType))
            .chain((type_aliases.iter_mut()).map(DocumentableDefinition::TypeAlias))
            .chain((functions.iter_mut()).map(DocumentableDefinition::Function))
            .sorted_by_key(|definition| definition.location())
            .collect_vec();
        // Doc Comments
        let mut doc_comments = self.extra.doc_comments.iter().peekable();
        for definition in &mut definitions {
            // `doc_comments_before` presumably consumes the doc comments
            // that appear before the given position — confirm in its
            // definition.
            let (docs_start, docs): (u32, Vec<&str>) = doc_comments_before(
                &mut doc_comments,
                &self.extra,
                definition.location().start,
                &self.code,
            );
            if !docs.is_empty() {
                let doc = docs.join("\n").into();
                definition.put_doc((docs_start, doc));
            }
            // Custom types also document their constructors and each
            // constructor's arguments.
            if let DocumentableDefinition::CustomType(CustomType { constructors, .. }) = definition
            {
                for constructor in constructors {
                    let (docs_start, docs): (u32, Vec<&str>) = doc_comments_before(
                        &mut doc_comments,
                        &self.extra,
                        constructor.location.start,
                        &self.code,
                    );
                    if !docs.is_empty() {
                        let doc = docs.join("\n").into();
                        constructor.put_doc((docs_start, doc));
                    }
                    for argument in constructor.arguments.iter_mut() {
                        let (docs_start, docs): (u32, Vec<&str>) = doc_comments_before(
                            &mut doc_comments,
                            &self.extra,
                            argument.location.start,
                            &self.code,
                        );
                        if !docs.is_empty() {
                            let doc = docs.join("\n").into();
                            argument.put_doc((docs_start, doc));
                        }
                    }
                }
            }
        }
    }
}
/// Convert a Gleam module name to its Erlang form: `/` is not valid in an
/// Erlang module name, so nested paths are flattened with `@`
/// (`one/two` -> `one@two`).
pub fn module_erlang_name(gleam_name: &EcoString) -> EcoString {
    let flattened: String = gleam_name.as_str().replace('/', "@");
    flattened.into()
}
#[derive(Debug, Clone, PartialEq)]
/// A single item imported unqualified in the `.{...}` part of an import
/// statement, e.g. `b` or `type A` in `import mod.{type A, b}`.
pub struct UnqualifiedImport<'a> {
    // The imported item's name.
    pub name: &'a EcoString,
    // The module the item is imported from.
    pub module: &'a EcoString,
    // True for `type Name` imports, false for value imports.
    pub is_type: bool,
    // Where the unqualified import appears in the source.
    pub location: &'a SrcSpan,
}
/// The position of a located expression. Used to determine extra context,
/// such as whether to provide label completions if the expression is in
/// argument position.
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum ExpressionPosition<'a> {
    /// A plain expression with no special surrounding context.
    Expression,
    /// The expression appears as an argument (or label) of a call to
    /// `called_function`.
    ArgumentOrLabel {
        called_function: &'a TypedExpr,
        function_arguments: &'a [TypedCallArg],
    },
}
#[derive(Debug, Clone, PartialEq)]
/// Something found at a byte position within a module's AST; this is what
/// `Module::find_node` returns and what powers cursor-based queries.
pub enum Located<'a> {
    Pattern(&'a TypedPattern),
    /// The `..` spread inside a pattern, together with the enclosing pattern.
    PatternSpread {
        spread_location: SrcSpan,
        pattern: &'a TypedPattern,
    },
    Statement(&'a TypedStatement),
    /// An expression, along with where it sits (e.g. in argument position).
    Expression {
        expression: &'a TypedExpr,
        position: ExpressionPosition<'a>,
    },
    VariantConstructorDefinition(&'a TypedRecordConstructor),
    FunctionBody(&'a TypedFunction),
    Arg(&'a TypedArg),
    /// A type annotation and the type it resolved to.
    Annotation {
        ast: &'a TypeAst,
        type_: std::sync::Arc<Type>,
    },
    UnqualifiedImport(UnqualifiedImport<'a>),
    Label(SrcSpan, std::sync::Arc<Type>),
    ModuleName {
        location: SrcSpan,
        name: &'a EcoString,
        layer: ast::Layer,
    },
    Constant(&'a TypedConstant),
    // A module's top level definitions
    ModuleFunction(&'a TypedFunction),
    ModuleConstant(&'a TypedModuleConstant),
    ModuleImport(&'a TypedImport),
    ModuleCustomType(&'a TypedCustomType),
    ModuleTypeAlias(&'a TypedTypeAlias),
}
impl<'a> Located<'a> {
    // Looks up the type constructor for the given type and then create the location.
    fn type_location(
        &self,
        importable_modules: &'a im::HashMap<EcoString, type_::ModuleInterface>,
        type_: std::sync::Arc<Type>,
    ) -> Option<DefinitionLocation> {
        type_constructor_from_modules(importable_modules, type_).map(|t| DefinitionLocation {
            module: Some(t.module.clone()),
            span: t.origin,
        })
    }
    /// Where the thing at this location was defined, if that can be
    /// determined. Variants representing things with no meaningful
    /// definition site (arguments, labels, spreads, ...) return `None`.
    pub fn definition_location(
        &self,
        importable_modules: &'a im::HashMap<EcoString, type_::ModuleInterface>,
    ) -> Option<DefinitionLocation> {
        match self {
            Self::PatternSpread { .. } => None,
            Self::Pattern(pattern) => pattern.definition_location(),
            Self::Statement(statement) => statement.definition_location(),
            // The payload is not needed here; binding it as `statement`
            // produced an `unused_variables` warning.
            Self::FunctionBody(_) => None,
            Self::Expression { expression, .. } => expression.definition_location(),
            // An import points at the start of the imported module itself.
            Self::ModuleImport(import) => Some(DefinitionLocation {
                module: Some(import.module.clone()),
                span: SrcSpan { start: 0, end: 0 },
            }),
            Self::ModuleConstant(constant) => Some(DefinitionLocation {
                module: None,
                span: constant.location,
            }),
            Self::ModuleCustomType(custom_type) => Some(DefinitionLocation {
                module: None,
                span: custom_type.location,
            }),
            Self::ModuleFunction(function) => Some(DefinitionLocation {
                module: None,
                span: function.location,
            }),
            Self::ModuleTypeAlias(type_alias) => Some(DefinitionLocation {
                module: None,
                span: type_alias.location,
            }),
            Self::VariantConstructorDefinition(record) => Some(DefinitionLocation {
                module: None,
                span: record.location,
            }),
            // Resolve the unqualified import against the interface of the
            // module it was imported from, in the type or value namespace
            // as appropriate.
            Self::UnqualifiedImport(UnqualifiedImport {
                module,
                name,
                is_type,
                ..
            }) => importable_modules.get(*module).and_then(|m| {
                if *is_type {
                    m.types.get(*name).map(|t| DefinitionLocation {
                        module: Some((*module).clone()),
                        span: t.origin,
                    })
                } else {
                    m.values.get(*name).map(|v| DefinitionLocation {
                        module: Some((*module).clone()),
                        span: v.definition_location().span,
                    })
                }
            }),
            Self::Arg(_) => None,
            Self::Annotation { type_, .. } => self.type_location(importable_modules, type_.clone()),
            Self::Label(_, _) => None,
            Self::ModuleName { name, .. } => Some(DefinitionLocation {
                module: Some((*name).clone()),
                span: SrcSpan::new(0, 0),
            }),
            Self::Constant(constant) => constant.definition_location(),
        }
    }
    /// The type of the thing at this location, for variants that have one.
    pub(crate) fn type_(&self) -> Option<Arc<Type>> {
        match self {
            Located::Pattern(pattern) => Some(pattern.type_()),
            Located::Statement(statement) => Some(statement.type_()),
            Located::Expression { expression, .. } => Some(expression.type_()),
            Located::Arg(arg) => Some(arg.type_.clone()),
            Located::Label(_, type_) | Located::Annotation { type_, .. } => Some(type_.clone()),
            Located::Constant(constant) => Some(constant.type_()),
            Located::PatternSpread { .. }
            | Located::ModuleConstant(_)
            | Located::ModuleCustomType(_)
            | Located::ModuleFunction(_)
            | Located::ModuleImport(_)
            | Located::ModuleTypeAlias(_)
            | Located::VariantConstructorDefinition(_)
            | Located::FunctionBody(_)
            | Located::UnqualifiedImport(_)
            | Located::ModuleName { .. } => None,
        }
    }
    /// The definition locations of every type reachable from this node's
    /// type (see `type_to_definition_locations`), or `None` if the node has
    /// no type.
    pub fn type_definition_locations(
        &self,
        importable_modules: &im::HashMap<EcoString, type_::ModuleInterface>,
    ) -> Option<Vec<DefinitionLocation>> {
        let type_ = self.type_()?;
        Some(type_to_definition_locations(type_, importable_modules))
    }
}
/// Returns the locations of all the types that one could reach starting from
/// the given type (included). This includes all types that are part of a
/// tuple/function type or that are used as args in a named type.
///
/// For example, given this type `Dict(Int, #(Wibble, Wobble))` all the
/// "reachable" include: `Dict`, `Int`, `Wibble` and `Wobble`.
///
/// This is what powers the "go to type definition" capability of the language
/// server.
///
// NOTE: the explicit `<'a>` lifetime the function previously declared was
// never used in the return type, so it was redundant (clippy
// `needless_lifetimes`) and has been elided.
fn type_to_definition_locations(
    type_: Arc<Type>,
    importable_modules: &im::HashMap<EcoString, type_::ModuleInterface>,
) -> Vec<DefinitionLocation> {
    match type_.as_ref() {
        // For named types we start with the location of the named type itself
        // followed by the locations of all types they reference in their args.
        //
        // For example with a `Dict(Wibble, Wobble)` we'd start with the
        // definition of `Dict`, followed by the definition of `Wibble` and
        // `Wobble`.
        //
        Type::Named {
            module,
            name,
            arguments,
            ..
        } => {
            let Some(module) = importable_modules.get(module) else {
                return vec![];
            };
            // `name` is already a reference; no need to take another one.
            let Some(type_) = module.get_public_type(name) else {
                return vec![];
            };
            let mut locations = vec![DefinitionLocation {
                module: Some(module.name.clone()),
                span: type_.origin,
            }];
            for argument in arguments {
                locations.extend(type_to_definition_locations(
                    argument.clone(),
                    importable_modules,
                ));
            }
            locations
        }
        // For fn types we just get the locations of their arguments and return
        // type.
        //
        Type::Fn { arguments, return_ } => arguments
            .iter()
            .flat_map(|argument| type_to_definition_locations(argument.clone(), importable_modules))
            .chain(type_to_definition_locations(
                return_.clone(),
                importable_modules,
            ))
            .collect_vec(),
        // In case of a var we just follow it and get the locations of the type
        // it points to.
        //
        Type::Var { type_ } => match type_.borrow().clone() {
            type_::TypeVar::Unbound { .. } | type_::TypeVar::Generic { .. } => vec![],
            type_::TypeVar::Link { type_ } => {
                type_to_definition_locations(type_, importable_modules)
            }
        },
        // In case of tuples we get the locations of the wrapped types.
        //
        Type::Tuple { elements } => elements
            .iter()
            .flat_map(|element| type_to_definition_locations(element.clone(), importable_modules))
            .collect_vec(),
    }
}
// Looks up the type constructor for the given type
pub fn type_constructor_from_modules(
    importable_modules: &im::HashMap<EcoString, type_::ModuleInterface>,
    type_: std::sync::Arc<Type>,
) -> Option<&type_::TypeConstructor> {
    // Follow any type variable links so we look at the underlying type.
    let type_ = type_::collapse_links(type_);
    // Only named types have a constructor to look up.
    let Type::Named { name, module, .. } = type_.as_ref() else {
        return None;
    };
    importable_modules.get(module)?.types.get(name)
}
/// Which part of a project a source file came from.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Origin {
    Src,
    Test,
    Dev,
}

impl Origin {
    /// Returns `true` if the origin is [`Src`].
    ///
    /// [`Src`]: Origin::Src
    #[must_use]
    pub fn is_src(&self) -> bool {
        *self == Self::Src
    }

    /// Returns `true` if the origin is [`Test`].
    ///
    /// [`Test`]: Origin::Test
    #[must_use]
    pub fn is_test(&self) -> bool {
        *self == Self::Test
    }

    /// Returns `true` if the origin is [`Dev`].
    ///
    /// [`Dev`]: Origin::Dev
    #[must_use]
    pub fn is_dev(&self) -> bool {
        *self == Self::Dev
    }

    /// Name of the folder containing the origin.
    #[must_use]
    pub fn folder_name(&self) -> &str {
        match self {
            Self::Src => "src",
            Self::Test => "test",
            Self::Dev => "dev",
        }
    }
}
/// Collect the `///` doc comments that immediately precede the item starting
/// at byte offset `byte`, returning the start offset of the earliest comment
/// taken and the text of each comment.
///
/// Doc comments that are separated from the item by a regular `//` comment
/// are consumed but discarded. If no doc comments are found the returned
/// start offset is `u32::MAX` and the vec is empty.
fn doc_comments_before<'a>(
    doc_comments_spans: &mut Peekable<impl Iterator<Item = &'a SrcSpan>>,
    extra: &ModuleExtra,
    byte: u32,
    src: &'a str,
) -> (u32, Vec<&'a str>) {
    let mut comments = vec![];
    // Tracks the smallest comment start seen so far; remains `u32::MAX`
    // when no comment is taken.
    let mut comment_start = u32::MAX;
    while let Some(SrcSpan { start, end }) = doc_comments_spans.peek() {
        // Comments at or beyond `byte` belong to a later item.
        if start > &byte {
            break;
        }
        if extra.has_comment_between(*end, byte) {
            // We ignore doc comments that come before a regular comment.
            _ = doc_comments_spans.next();
            continue;
        }
        let comment = doc_comments_spans
            .next()
            .expect("Comment before accessing next span");
        if comment.start < comment_start {
            comment_start = comment.start;
        }
        comments.push(Comment::from((comment, src)).content)
    }
    (comment_start, comments)
}
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)]
/// A hash of a source file's contents, used to detect whether the source has
/// changed between compilations.
pub struct SourceFingerprint(u64);
impl SourceFingerprint {
    /// Fingerprint the given source text using the fast, non-cryptographic
    /// xxHash3 64-bit hash.
    pub fn new(source: &str) -> Self {
        SourceFingerprint(xxhash_rust::xxh3::xxh3_64(source.as_bytes()))
    }
}
/// Like a `Result`, but the operation can partially succeed or fail.
///
#[derive(Debug)]
pub enum Outcome<T, E> {
    /// The operation was totally successful.
    Ok(T),
    /// The operation produced a usable value, but problems occurred too.
    PartialFailure(T, E),
    /// The operation was entirely unsuccessful.
    TotalFailure(E),
}

impl<T, E> Outcome<T, E>
where
    E: Debug,
{
    #[cfg(test)]
    /// Panic if there's any errors
    pub fn unwrap(self) -> T {
        match self {
            Self::Ok(value) => value,
            Self::PartialFailure(_, errors) => panic!("Error: {:?}", errors),
            Self::TotalFailure(error) => panic!("Error: {:?}", error),
        }
    }

    /// Panic if there's any errors
    pub fn expect(self, e: &'static str) -> T {
        match self {
            Self::Ok(value) => value,
            Self::PartialFailure(_, errors) => panic!("{e}: {:?}", errors),
            Self::TotalFailure(error) => panic!("{e}: {:?}", error),
        }
    }

    /// Collapse into a plain `Result`, dropping any partial value.
    pub fn into_result(self) -> Result<T, E> {
        match self {
            Self::Ok(value) => Ok(value),
            Self::PartialFailure(_, error) | Self::TotalFailure(error) => Err(error),
        }
    }

    /// Transform the successful (or partially successful) value with `f`,
    /// leaving any errors untouched.
    pub fn map<T2>(self, f: impl FnOnce(T) -> T2) -> Outcome<T2, E> {
        match self {
            Self::Ok(value) => Outcome::Ok(f(value)),
            Self::PartialFailure(value, error) => Outcome::PartialFailure(f(value), error),
            Self::TotalFailure(error) => Outcome::TotalFailure(error),
        }
    }
}
| rust | Apache-2.0 | f424547f02e621f1c5f28749786e05eda7feb098 | 2026-01-04T15:40:22.554517Z | false |
gleam-lang/gleam | https://github.com/gleam-lang/gleam/blob/f424547f02e621f1c5f28749786e05eda7feb098/compiler-core/src/dep_tree.rs | compiler-core/src/dep_tree.rs | use ecow::EcoString;
use petgraph::{Direction, algo::Cycle, graph::NodeIndex};
use std::collections::{HashMap, HashSet};
#[cfg(test)]
use pretty_assertions::assert_eq;
/// Take a sequence of values and their deps, and return the values in
/// order so that deps come before the dependants.
///
/// Any deps that are not nodes are ignored and presumed to be nodes
/// that do not need processing.
///
/// Errors if there are duplicate values, unknown deps, or cycles.
///
pub fn toposort_deps(inputs: Vec<(EcoString, Vec<EcoString>)>) -> Result<Vec<EcoString>, Error> {
let mut graph = petgraph::Graph::<(), ()>::with_capacity(inputs.len(), inputs.len() * 5);
let mut values = HashMap::with_capacity(inputs.len());
let mut indexes = HashMap::with_capacity(inputs.len());
for (value, _deps) in &inputs {
let index = graph.add_node(());
let _ = indexes.insert(value.clone(), index);
let _ = values.insert(index, value.clone());
}
for (value, deps) in inputs {
let &from_index = indexes.get(&value).expect("Finding index for value");
for &to_index in deps.into_iter().filter_map(|dep| indexes.get(&dep)) {
let _ = graph.add_edge(from_index, to_index, ());
}
}
match petgraph::algo::toposort(&graph, None) {
Err(e) => Err(Error::Cycle(import_cycle(e, &graph, values))),
Ok(seq) => Ok(seq
.into_iter()
.map(|i| values.remove(&i).expect("Finding value for index"))
.rev()
.collect()),
}
}
/// Given the cycle error reported by petgraph, recover the full path of the
/// cycle and translate each node index back to its value.
fn import_cycle(
    cycle: Cycle<NodeIndex>,
    graph: &petgraph::Graph<(), ()>,
    mut values: HashMap<NodeIndex, EcoString>,
) -> Vec<EcoString> {
    let origin = cycle.node_id();
    let mut path = vec![];
    let _ = find_cycle(origin, origin, graph, &mut path, &mut HashSet::new());

    // Swap each node index in the recovered path for the value it stands for.
    let mut names = Vec::with_capacity(path.len());
    for index in &path {
        let name = values
            .remove(index)
            .expect("dep_tree::import_cycle(): cannot find values for index");
        names.push(name);
    }
    names
}
/// Depth-first search for a path from `parent` back to `origin`, recording
/// the nodes of the cycle into `path` (pushed on the way back out of the
/// recursion, so the path ends up in reverse order).
fn find_cycle(
    origin: NodeIndex,
    parent: NodeIndex,
    graph: &petgraph::Graph<(), ()>,
    path: &mut Vec<NodeIndex>,
    seen: &mut HashSet<NodeIndex>,
) -> bool {
    let _ = seen.insert(parent);
    for neighbour in graph.neighbors_directed(parent, Direction::Outgoing) {
        // Either this neighbour closes the cycle directly, or (if we've not
        // already explored it) the cycle closes somewhere beneath it.
        let closes_cycle = neighbour == origin
            || (!seen.contains(&neighbour) && find_cycle(origin, neighbour, graph, path, seen));
        if closes_cycle {
            path.push(neighbour);
            return true;
        }
    }
    false
}
#[derive(Debug, PartialEq)]
/// Errors that can occur while topologically sorting dependencies.
pub enum Error {
    /// The dependency graph contains a cycle; the payload is the sequence of
    /// values forming it.
    Cycle(Vec<EcoString>),
}
#[cfg(test)]
// Unit tests for `toposort_deps`: ordering behaviour and cycle reporting.
mod tests {
    use super::*;
    #[test]
    fn toposort_deps_test() {
        // All deps are nodes
        assert_eq!(
            toposort_deps(vec![
                ("a".into(), vec!["b".into()]),
                ("c".into(), vec![]),
                ("b".into(), vec!["c".into()])
            ]),
            Ok(vec!["c".into(), "b".into(), "a".into()])
        );
        // No deps
        assert_eq!(
            toposort_deps(vec![
                ("no-deps-1".into(), vec![]),
                ("no-deps-2".into(), vec![])
            ]),
            Ok(vec!["no-deps-1".into(), "no-deps-2".into(),])
        );
        // Some deps are not nodes (and thus are ignored)
        assert_eq!(
            toposort_deps(vec![
                ("a".into(), vec!["b".into(), "z".into()]),
                ("b".into(), vec!["x".into()])
            ]),
            Ok(vec!["b".into(), "a".into()])
        );
    }
    #[test]
    fn cycle_detection() {
        // a ---+
        // ^    |
        // |    v
        // +----+
        assert_eq!(
            toposort_deps(vec![("a".into(), vec!["a".into()])]),
            Err(Error::Cycle(vec!["a".into()]))
        );
        // a -> b -> c
        // ^         v
        // |         |
        // +---------+
        assert_eq!(
            toposort_deps(vec![
                ("a".into(), vec!["b".into()]),
                ("b".into(), vec!["c".into()]),
                ("c".into(), vec!["a".into()]),
            ]),
            Err(Error::Cycle(vec!["c".into(), "b".into(), "a".into()]))
        );
        // a -> b <- e
        // |    |    ^
        // v    v    |
        // f    c -> d
        //
        // Note `f` is not part of the cycle and must not be reported.
        assert_eq!(
            toposort_deps(vec![
                ("a".into(), vec!["b".into()]),
                ("b".into(), vec!["c".into()]),
                ("c".into(), vec!["d".into()]),
                ("d".into(), vec!["e".into()]),
                ("e".into(), vec!["b".into()]),
                ("a".into(), vec!["f".into()]),
            ]),
            Err(Error::Cycle(vec![
                "e".into(),
                "d".into(),
                "c".into(),
                "b".into(),
            ]))
        );
    }
}
| rust | Apache-2.0 | f424547f02e621f1c5f28749786e05eda7feb098 | 2026-01-04T15:40:22.554517Z | false |
gleam-lang/gleam | https://github.com/gleam-lang/gleam/blob/f424547f02e621f1c5f28749786e05eda7feb098/compiler-core/src/format.rs | compiler-core/src/format.rs | #[cfg(test)]
mod tests;
use crate::{
Error, Result,
ast::{
CustomType, Import, ModuleConstant, TypeAlias, TypeAstConstructor, TypeAstFn, TypeAstHole,
TypeAstTuple, TypeAstVar, *,
},
build::Target,
docvec,
io::Utf8Writer,
parse::extra::{Comment, ModuleExtra},
pretty::{self, *},
warning::WarningEmitter,
};
use ecow::{EcoString, eco_format};
use itertools::Itertools;
use std::cmp::Ordering;
use vec1::Vec1;
use crate::type_::Deprecation;
use camino::Utf8Path;
/// Number of spaces used for a single level of indentation in formatted
/// output.
const INDENT: isize = 2;
/// Parse the Gleam source `src` (belonging to file `path`) and pretty-print
/// the formatted module to `writer` with a maximum line width of 80 columns.
///
/// Returns `Error::Parse` if the source is not valid Gleam.
pub fn pretty(writer: &mut impl Utf8Writer, src: &EcoString, path: &Utf8Path) -> Result<()> {
    let parsed = crate::parse::parse_module(path.to_owned(), src, &WarningEmitter::null())
        .map_err(|error| Error::Parse {
            path: path.to_path_buf(),
            src: src.clone(),
            error: Box::new(error),
        })?;
    let intermediate = Intermediate::from_extra(&parsed.extra, src);
    Formatter::with_comments(&intermediate)
        .module(&parsed.module)
        .pretty_print(80, writer)
}
/// Comment and whitespace information extracted from a parsed module,
/// borrowed for the duration of a formatting run.
pub(crate) struct Intermediate<'a> {
    // `//` comments
    comments: Vec<Comment<'a>>,
    // `///` doc comments
    doc_comments: Vec<Comment<'a>>,
    // `////` module-level comments
    module_comments: Vec<Comment<'a>>,
    // Positions of empty lines in the source (taken from `ModuleExtra`)
    empty_lines: &'a [u32],
    // Positions of newlines — see `ModuleExtra` for exact semantics
    new_lines: &'a [u32],
    // Positions of trailing commas — see `ModuleExtra` for exact semantics
    trailing_commas: &'a [u32],
}
impl<'a> Intermediate<'a> {
    /// Build an `Intermediate` from a module's parse extras, resolving every
    /// recorded comment span to its text within `src`.
    pub fn from_extra(extra: &'a ModuleExtra, src: &'a EcoString) -> Intermediate<'a> {
        Intermediate {
            comments: extra
                .comments
                .iter()
                .map(|span| Comment::from((span, src)))
                .collect(),
            doc_comments: extra
                .doc_comments
                .iter()
                .map(|span| Comment::from((span, src)))
                .collect(),
            empty_lines: &extra.empty_lines,
            module_comments: extra
                .module_comments
                .iter()
                .map(|span| Comment::from((span, src)))
                .collect(),
            new_lines: &extra.new_lines,
            trailing_commas: &extra.trailing_commas,
        }
    }
}
#[derive(Debug)]
/// Where a function capture appears, which affects how it is formatted.
enum FnCapturePosition {
    /// The capture is the right-hand side of a `|>` pipe.
    RightHandSideOfPipe,
    /// Any other position.
    EverywhereElse,
}
#[derive(Debug)]
/// One of the pieces making a record update arg list: it could be the starting
/// record being updated, or one of the subsequent arguments.
///
enum RecordUpdatePiece<'a, A> {
    Record(&'a RecordBeingUpdated<A>),
    Argument(&'a RecordUpdateArg<A>),
}
impl<A> HasLocation for RecordUpdatePiece<'_, A> {
    // The source span of the piece, regardless of which kind it is.
    fn location(&self) -> SrcSpan {
        match self {
            RecordUpdatePiece::Record(record) => record.location,
            RecordUpdatePiece::Argument(arg) => arg.location,
        }
    }
}
// Convenience alias for the untyped (pre-analysis) variant used by the
// formatter.
type UntypedRecordUpdatePiece<'a> = RecordUpdatePiece<'a, UntypedExpr>;
/// Hayleigh's bane
///
/// Holds the pending comment and whitespace information for the module being
/// formatted; the `pop_*` methods consume entries as the printer advances
/// through the source.
#[derive(Debug, Clone, Default)]
pub struct Formatter<'a> {
    comments: &'a [Comment<'a>],
    doc_comments: &'a [Comment<'a>],
    module_comments: &'a [Comment<'a>],
    empty_lines: &'a [u32],
    new_lines: &'a [u32],
    trailing_commas: &'a [u32],
}
impl<'comments> Formatter<'comments> {
/// Create a formatter with no comment or whitespace information attached.
pub fn new() -> Self {
    Default::default()
}
/// Create a formatter that re-attaches the comments and whitespace captured
/// in `extra` while printing.
pub(crate) fn with_comments(extra: &'comments Intermediate<'comments>) -> Self {
    Self {
        comments: &extra.comments,
        doc_comments: &extra.doc_comments,
        module_comments: &extra.module_comments,
        empty_lines: extra.empty_lines,
        new_lines: extra.new_lines,
        trailing_commas: extra.trailing_commas,
    }
}
/// True when at least one pending `//` comment starts before `limit`.
fn any_comments(&self, limit: u32) -> bool {
    match self.comments.first() {
        Some(comment) => comment.start < limit,
        None => false,
    }
}
/// True when at least one pending empty line is recorded before `limit`.
fn any_empty_lines(&self, limit: u32) -> bool {
    self.empty_lines
        .first()
        .map(|line| *line < limit)
        .unwrap_or(false)
}
/// Pop comments that occur before a byte-index in the source, consuming
/// and retaining any empty lines contained within.
/// Returns an iterator of comments with their start position.
/// NOTE(review): `None` items appear to stand for retained empty lines —
/// confirm against `comments_before`.
fn pop_comments_with_position(
    &mut self,
    limit: u32,
) -> impl Iterator<Item = (u32, Option<&'comments str>)> + use<'comments> {
    let (popped, rest, empty_lines) =
        comments_before(self.comments, self.empty_lines, limit, true);
    self.comments = rest;
    self.empty_lines = empty_lines;
    popped
}
/// Pop comments that occur before a byte-index in the source, consuming
/// and retaining any empty lines contained within.
/// Same as `pop_comments_with_position` but with positions discarded.
fn pop_comments(
    &mut self,
    limit: u32,
) -> impl Iterator<Item = Option<&'comments str>> + use<'comments> {
    self.pop_comments_with_position(limit)
        .map(|(_position, comment)| comment)
}
/// Pop doc comments that occur before a byte-index in the source, consuming
/// and dropping any empty lines contained within.
fn pop_doc_comments(
    &mut self,
    limit: u32,
) -> impl Iterator<Item = Option<&'comments str>> + use<'comments> {
    let (popped, rest, empty_lines) =
        comments_before(self.doc_comments, self.empty_lines, limit, false);
    self.doc_comments = rest;
    self.empty_lines = empty_lines;
    popped.map(|(_position, comment)| comment)
}
/// Remove between 0 and `limit` empty lines following the current position,
/// returning true if any empty lines were removed.
fn pop_empty_lines(&mut self, limit: u32) -> bool {
    // Count the leading run of recorded empty lines at or before `limit`...
    let popped = self
        .empty_lines
        .iter()
        .take_while(|position| **position <= limit)
        .count();
    // ...and drop them from the slice we are holding.
    self.empty_lines = self
        .empty_lines
        .get(popped..)
        .expect("Pop empty lines slicing");
    popped != 0
}
/// Format a single definition along with its `@target(...)` attribute (if
/// restricted to one target) and any `//` comments preceding it.
fn targeted_definition<'a>(&mut self, definition: &'a TargetedDefinition) -> Document<'a> {
    let target = definition.target;
    let definition = &definition.definition;
    let start = definition.location().start;
    let comments = self.pop_comments_with_position(start);
    let comments = self.printed_documented_comments(comments);
    let document = self.documented_definition(definition);
    // Target-restricted definitions get the matching attribute printed
    // above them.
    let document = match target {
        None => document,
        Some(Target::Erlang) => docvec!["@target(erlang)", line(), document],
        Some(Target::JavaScript) => docvec!["@target(javascript)", line(), document],
    };
    comments.to_doc().append(document.group())
}
/// Format an entire module: `////` module comments first, then the
/// definitions (with consecutive import runs grouped and sorted), then any
/// leftover doc comments and freestanding comments at the bottom.
pub(crate) fn module<'a>(&mut self, module: &'a UntypedModule) -> Document<'a> {
    let mut documents = vec![];
    let mut previous_was_a_definition = false;
    // Here we take consecutive groups of imports so that they can be sorted
    // alphabetically.
    for (is_import_group, definitions) in &module
        .definitions
        .iter()
        .chunk_by(|definition| definition.definition.is_import())
    {
        if is_import_group {
            if previous_was_a_definition {
                documents.push(lines(2));
            }
            documents.append(&mut self.imports(definitions.collect_vec()));
            previous_was_a_definition = false;
        } else {
            for definition in definitions {
                if !documents.is_empty() {
                    documents.push(lines(2));
                }
                documents.push(self.targeted_definition(definition));
            }
            previous_was_a_definition = true;
        }
    }
    let definitions = concat(documents);
    // Now that definitions has been collected, only freestanding comments (//)
    // and doc comments (///) remain. Freestanding comments aren't associated
    // with any statement, and are moved to the bottom of the module.
    let doc_comments = join(
        self.doc_comments
            .iter()
            .map(|comment| "///".to_doc().append(EcoString::from(comment.content))),
        line(),
    );
    let comments = match printed_comments(self.pop_comments(u32::MAX), false) {
        Some(comments) => comments,
        None => nil(),
    };
    let module_comments = if !self.module_comments.is_empty() {
        let comments = self
            .module_comments
            .iter()
            .map(|s| "////".to_doc().append(EcoString::from(s.content)));
        join(comments, line()).append(line())
    } else {
        nil()
    };
    // Only join the sections that actually have content, so we don't emit
    // stray blank lines for absent sections.
    let non_empty = vec![module_comments, definitions, doc_comments, comments]
        .into_iter()
        .filter(|doc| !doc.is_empty());
    join(non_empty, line()).append(line())
}
/// Separates the imports in groups delimited by comments or empty lines and
/// sorts each group alphabetically.
///
/// The formatter needs to play nicely with import groups defined by the
/// programmer. If one puts a comment before an import then that's a clue
/// for the formatter that it has run into a group of related imports.
///
/// So we can't just sort `imports` and format each one, we have to be a
/// bit smarter and see if each import is preceded by a comment.
/// Once we find a comment we know we're done with the current import
/// group and a new one has started.
///
/// ```gleam
/// // This is an import group.
/// import gleam/int
/// import gleam/string
///
/// // This marks the beginning of a new import group that can't
/// // be mushed together with the previous one!
/// import wibble
/// import wobble
/// ```
fn imports<'a>(&mut self, imports: Vec<&'a TargetedDefinition>) -> Vec<Document<'a>> {
    let mut import_groups_docs = vec![];
    let mut current_group = vec![];
    let mut current_group_delimiter = nil();
    for import in imports {
        let start = import.definition.location().start;
        // We need to start a new group if the `import` is preceded by one or
        // more empty lines or a `//` comment.
        let start_new_group = self.any_comments(start) || self.any_empty_lines(start);
        if start_new_group {
            // First we print the previous group and clear it out to start a
            // new empty group containing the import we've just ran into.
            if !current_group.is_empty() {
                import_groups_docs.push(docvec![
                    current_group_delimiter,
                    self.sorted_import_group(&current_group)
                ]);
                current_group.clear();
            }
            // Now that we've taken care of the previous group we can start
            // the new one. We know it's preceded either by an empty line or
            // some comments so we have to be a bit more precise and save the
            // actual delimiter that we're going to put at the top of this
            // group.
            let comments = self.pop_comments(start);
            let _ = self.pop_empty_lines(start);
            current_group_delimiter = printed_comments(comments, true).unwrap_or(nil());
        }
        // Lastly we add the import to the group.
        current_group.push(import);
    }
    // Let's not forget about the last import group!
    if !current_group.is_empty() {
        import_groups_docs.push(docvec![
            current_group_delimiter,
            self.sorted_import_group(&current_group)
        ]);
    }
    // We want all consecutive import groups to be separated by an empty line.
    // This should really be `.intersperse(line())` but I can't do that
    // because of https://github.com/rust-lang/rust/issues/48919.
    Itertools::intersperse(import_groups_docs.into_iter(), lines(2)).collect_vec()
}
/// Prints the imports as a single sorted group of import statements.
///
fn sorted_import_group<'a>(&mut self, imports: &[&'a TargetedDefinition]) -> Document<'a> {
    let imports = imports
        .iter()
        // Sort alphabetically by imported module path.
        .sorted_by(|one, other| match (&one.definition, &other.definition) {
            (Definition::Import(one), Definition::Import(other)) => {
                one.module.cmp(&other.module)
            }
            // It shouldn't really be possible for a non import to be here so
            // we just return a default value.
            _ => Ordering::Equal,
        })
        .map(|import| self.targeted_definition(import));
    // This should really be `.intersperse(line())` but I can't do that
    // because of https://github.com/rust-lang/rust/issues/48919.
    Itertools::intersperse(imports, line())
        .collect_vec()
        .to_doc()
}
/// Format a single module definition: function, type alias, custom type,
/// import, or module constant.
fn definition<'a>(&mut self, statement: &'a UntypedDefinition) -> Document<'a> {
    match statement {
        Definition::Function(function) => self.statement_fn(function),
        Definition::TypeAlias(alias) => self.type_alias(alias),
        Definition::CustomType(ct) => self.custom_type(ct),
        Definition::Import(Import {
            module,
            as_name,
            unqualified_values,
            unqualified_types,
            documentation: _,
            location: _,
            package: _,
        }) => {
            // The `.{type A, b}` part of the import, if there is one. Types
            // and values are each sorted alphabetically, types printed first.
            let second = if unqualified_values.is_empty() && unqualified_types.is_empty() {
                nil()
            } else {
                let unqualified_types = unqualified_types
                    .iter()
                    .sorted_by(|a, b| a.name.cmp(&b.name))
                    .map(|type_| docvec!["type ", type_]);
                let unqualified_values = unqualified_values
                    .iter()
                    .sorted_by(|a, b| a.name.cmp(&b.name))
                    .map(|value| value.to_doc());
                let unqualified = join(
                    unqualified_types.chain(unqualified_values),
                    flex_break(",", ", "),
                );
                let unqualified = break_("", "")
                    .append(unqualified)
                    .nest(INDENT)
                    .append(break_(",", ""))
                    .group();
                ".{".to_doc().append(unqualified).append("}")
            };
            let doc = docvec!["import ", module.as_str(), second];
            let default_module_access_name = module.split('/').next_back().map(EcoString::from);
            match (default_module_access_name, as_name) {
                // If the `as name` is the same as the module name that would be
                // used anyways we won't render it. For example:
                // ```gleam
                // import gleam/int as int
                //                ^^^^^^ this is redundant and removed
                // ```
                (Some(module_name), Some((AssignName::Variable(name), _)))
                    if &module_name == name =>
                {
                    doc
                }
                (_, None) => doc,
                (_, Some((AssignName::Variable(name) | AssignName::Discard(name), _))) => {
                    doc.append(" as ").append(name)
                }
            }
        }
        Definition::ModuleConstant(ModuleConstant {
            publicity,
            name,
            annotation,
            value,
            deprecation,
            documentation: _,
            location: _,
            name_location: _,
            type_: _,
            implementations: _,
        }) => {
            // Attributes (`@internal`, `@deprecated`) come first, then
            // `pub const name[: annotation] = value`.
            let attributes = AttributesPrinter::new()
                .set_internal(*publicity)
                .set_deprecation(deprecation)
                .to_doc();
            let head = attributes
                .append(pub_(*publicity))
                .append("const ")
                .append(name.as_str());
            let head = match annotation {
                None => head,
                Some(t) => head.append(": ").append(self.type_ast(t)),
            };
            head.append(" = ").append(self.const_expr(value))
        }
    }
}
/// Format a constant expression (the right-hand side of a `const`
/// definition), attaching any comments that precede it.
fn const_expr<'a, A, B>(&mut self, value: &'a Constant<A, B>) -> Document<'a> {
    let comments = self.pop_comments(value.location().start);
    let document = match value {
        Constant::Int { value, .. } => self.int(value),
        Constant::Float { value, .. } => self.float(value),
        Constant::String { value, .. } => self.string(value),
        Constant::List {
            elements, location, ..
        } => self.const_list(elements, location),
        Constant::Tuple {
            elements, location, ..
        } => self.const_tuple(elements, location),
        Constant::BitArray {
            segments, location, ..
        } => {
            let segment_docs = segments
                .iter()
                .map(|segment| bit_array_segment(segment, |e| self.const_expr(e)))
                .collect_vec();
            let packing = self.items_sequence_packing(
                segments,
                None,
                |segment| segment.value.can_have_multiple_per_line(),
                *location,
            );
            self.bit_array(segment_docs, packing, location)
        }
        // A record constructor with no arguments prints as a bare name,
        // optionally qualified with its module.
        Constant::Record {
            name,
            arguments,
            module: None,
            ..
        } if arguments.is_empty() => name.to_doc(),
        Constant::Record {
            name,
            arguments,
            module: Some((m, _)),
            ..
        } if arguments.is_empty() => m.to_doc().append(".").append(name.as_str()),
        Constant::Record {
            name,
            arguments,
            module: None,
            location,
            ..
        } => {
            let arguments = arguments
                .iter()
                .map(|argument| self.constant_call_arg(argument))
                .collect_vec();
            name.to_doc()
                .append(self.wrap_arguments(arguments, location.end))
                .group()
        }
        Constant::Record {
            name,
            arguments,
            module: Some((m, _)),
            location,
            ..
        } => {
            let arguments = arguments
                .iter()
                .map(|argument| self.constant_call_arg(argument))
                .collect_vec();
            m.to_doc()
                .append(".")
                .append(name.as_str())
                .append(self.wrap_arguments(arguments, location.end))
                .group()
        }
        Constant::Var {
            name, module: None, ..
        } => name.to_doc(),
        Constant::Var {
            name,
            module: Some((module, _)),
            ..
        } => docvec![module, ".", name],
        Constant::StringConcatenation { left, right, .. } => self
            .const_expr(left)
            .append(break_("", " ").append("<>".to_doc()))
            .nest(INDENT)
            .append(" ")
            .append(self.const_expr(right)),
        Constant::RecordUpdate {
            module,
            name,
            record,
            arguments,
            location,
            ..
        } => self.const_record_update(module, name, record, arguments, location),
        Constant::Invalid { .. } => panic!("invalid constants can not be in an untyped ast"),
    };
    commented(document, comments)
}
/// Format a constant list literal, preserving interior comments and the
/// empty lines the programmer left between elements.
fn const_list<'a, A, B>(
    &mut self,
    elements: &'a [Constant<A, B>],
    location: &SrcSpan,
) -> Document<'a> {
    if elements.is_empty() {
        // We take all comments that come _before_ the end of the list,
        // that is all comments that are inside "[" and "]", if there's
        // any comment we want to put it inside the empty list!
        return match printed_comments(self.pop_comments(location.end), false) {
            None => "[]".to_doc(),
            Some(comments) => "["
                .to_doc()
                .append(break_("", "").nest(INDENT))
                .append(comments)
                .append(break_("", ""))
                .append("]")
                // vvv We want to make sure the comments are on a separate
                //     line from the opening and closing brackets so we
                //     force the breaks to be split on newlines.
                .force_break(),
        };
    }
    let list_packing = self.items_sequence_packing(
        elements,
        None,
        |element| element.can_have_multiple_per_line(),
        *location,
    );
    let comma = match list_packing {
        ItemsPacking::FitMultiplePerLine => flex_break(",", ", "),
        ItemsPacking::FitOnePerLine | ItemsPacking::BreakOnePerLine => break_(",", ", "),
    };
    let mut elements_doc = nil();
    for element in elements.iter() {
        let empty_lines = self.pop_empty_lines(element.location().start);
        let element_doc = self.const_expr(element);
        elements_doc = if elements_doc.is_empty() {
            element_doc
        } else if empty_lines {
            // If there's empty lines before the list item we want to add an
            // empty line here. Notice how we're making sure no nesting is
            // added after the comma, otherwise we would be adding needless
            // whitespace in the empty line!
            docvec![
                elements_doc,
                comma.clone().set_nesting(0),
                line(),
                element_doc
            ]
        } else {
            docvec![elements_doc, comma.clone(), element_doc]
        };
    }
    elements_doc = elements_doc.next_break_fits(NextBreakFitsMode::Disabled);
    let doc = break_("[", "[").append(elements_doc).nest(INDENT);
    // We get all remaining comments that come before the list's closing
    // square bracket.
    // If there's any we add those before the closing square bracket instead
    // of moving those out of the list.
    // Otherwise those would be moved out of the list.
    let comments = self.pop_comments(location.end);
    let doc = match printed_comments(comments, false) {
        None => doc.append(break_(",", "")).append("]"),
        Some(comment) => doc
            .append(break_(",", "").nest(INDENT))
            // ^ See how here we're adding the missing indentation to the
            //   final break so that the final comment is as indented as the
            //   list's items.
            .append(comment)
            .append(line())
            .append("]")
            .force_break(),
    };
    match list_packing {
        ItemsPacking::FitOnePerLine | ItemsPacking::FitMultiplePerLine => doc.group(),
        ItemsPacking::BreakOnePerLine => doc.force_break(),
    }
}
/// Formats a constant tuple `#(...)`, keeping any comments written inside
/// the tuple's parentheses inside the formatted tuple.
pub fn const_tuple<'a, A, B>(
    &mut self,
    elements: &'a [Constant<A, B>],
    location: &SrcSpan,
) -> Document<'a> {
    if elements.is_empty() {
        // We take all comments that come _before_ the end of the tuple,
        // that is all comments that are inside "#(" and ")", if there's
        // any comment we want to put it inside the empty list!
        return match printed_comments(self.pop_comments(location.end), false) {
            None => "#()".to_doc(),
            Some(comments) => "#("
                .to_doc()
                .append(break_("", "").nest(INDENT))
                .append(comments)
                .append(break_("", ""))
                .append(")")
                // vvv We want to make sure the comments are on a separate
                // line from the opening and closing parentheses so we
                // force the breaks to be split on newlines.
                .force_break(),
        };
    }
    let arguments_docs = elements.iter().map(|element| self.const_expr(element));
    let tuple_doc = break_("#(", "#(")
        .append(
            // Disabled so a single multi-line element can't keep the whole
            // tuple packed onto one line.
            join(arguments_docs, break_(",", ", "))
                .next_break_fits(NextBreakFitsMode::Disabled),
        )
        .nest(INDENT);
    // Comments sitting just before the closing paren are kept inside the
    // tuple rather than being moved out after it.
    let comments = self.pop_comments(location.end);
    match printed_comments(comments, false) {
        None => tuple_doc.append(break_(",", "")).append(")").group(),
        Some(comments) => tuple_doc
            .append(break_(",", "").nest(INDENT))
            .append(comments)
            .append(line())
            .append(")")
            .force_break(),
    }
}
/// Renders a definition preceded by its `///` doc comments.
fn documented_definition<'a>(&mut self, s: &'a UntypedDefinition) -> Document<'a> {
    // The doc comments must be popped before the definition is formatted so
    // that comments are consumed in source order.
    let doc_comments = self.doc_comments(s.location().start);
    let definition = self.definition(s).group();
    doc_comments.append(definition).group()
}
/// Renders the `///` doc comments found before position `limit`, or `nil()`
/// when there are none.
fn doc_comments<'a>(&mut self, limit: u32) -> Document<'a> {
    let mut comments = self.pop_doc_comments(limit).peekable();
    match comments.peek() {
        // No doc comments: contribute nothing to the output.
        None => nil(),
        Some(_) => join(
            comments.map(|c| match c {
                Some(c) => "///".to_doc().append(EcoString::from(c)),
                None => unreachable!("empty lines dropped by pop_doc_comments"),
            }),
            line(),
        )
        // Trailing newline separates the comments from the item they document.
        .append(line())
        .force_break(),
    }
}
/// Formats a named type constructor such as `List(a)` or `option.Option(a)`.
fn type_ast_constructor<'a>(
    &mut self,
    module: &'a Option<(EcoString, SrcSpan)>,
    name: &'a str,
    arguments: &'a [TypeAst],
    location: &SrcSpan,
    _name_location: &SrcSpan,
) -> Document<'a> {
    // Qualify the constructor with its module, if it has one.
    let head = match module {
        Some((qualifier, _)) => qualifier.to_doc().append(".").append(name),
        None => name.to_doc(),
    };
    if arguments.is_empty() {
        return head;
    }
    head.append(self.type_arguments(arguments, location))
}
/// Formats a type annotation.
fn type_ast<'a>(&mut self, t: &'a TypeAst) -> Document<'a> {
    match t {
        // A type hole, e.g. `_`.
        TypeAst::Hole(TypeAstHole { name, .. }) => name.to_doc(),
        TypeAst::Constructor(TypeAstConstructor {
            name,
            arguments,
            module,
            location,
            name_location,
            start_parentheses: _,
        }) => self.type_ast_constructor(module, name, arguments, location, name_location),
        // A function type, e.g. `fn(Int) -> Bool`. The return type goes on
        // an indented new line if the whole thing doesn't fit.
        TypeAst::Fn(TypeAstFn {
            arguments,
            return_,
            location,
        }) => "fn"
            .to_doc()
            .append(self.type_arguments(arguments, location))
            .group()
            .append(" ->")
            .append(break_("", " ").append(self.type_ast(return_)).nest(INDENT)),
        // A type variable, e.g. `a`.
        TypeAst::Var(TypeAstVar { name, .. }) => name.to_doc(),
        // A tuple type, e.g. `#(Int, Bool)`.
        TypeAst::Tuple(TypeAstTuple { elements, location }) => {
            "#".to_doc().append(self.type_arguments(elements, location))
        }
    }
    .group()
}
/// Formats a list of type annotations and wraps them in parentheses.
fn type_arguments<'a>(&mut self, arguments: &'a [TypeAst], location: &SrcSpan) -> Document<'a> {
    // Format each argument in source order, then wrap the lot.
    let mut docs = Vec::with_capacity(arguments.len());
    for argument in arguments {
        docs.push(self.type_ast(argument));
    }
    self.wrap_arguments(docs, location.end)
}
/// Formats a type alias definition, e.g. `pub type Names = List(String)`.
pub fn type_alias<'a, A>(&mut self, alias: &'a TypeAlias<A>) -> Document<'a> {
    let TypeAlias {
        alias: name,
        parameters: arguments,
        type_ast: type_,
        publicity,
        deprecation,
        location,
        name_location: _,
        type_: _,
        documentation: _,
    } = alias;
    // Attributes such as `@deprecated` and `@internal` come first.
    let attributes = AttributesPrinter::new()
        .set_deprecation(deprecation)
        .set_internal(*publicity)
        .to_doc();
    let head = docvec![attributes, pub_(*publicity), "type ", name];
    let head = if arguments.is_empty() {
        head
    } else {
        // Type parameters, e.g. the `(a, b)` in `type Pair(a, b)`.
        let arguments = arguments.iter().map(|(_, e)| e.to_doc()).collect_vec();
        head.append(self.wrap_arguments(arguments, location.end).group())
    };
    // The aliased type goes on an indented new line if it doesn't fit.
    head.append(" =")
        .append(line().append(self.type_ast(type_)).group().nest(INDENT))
}
/// Formats a single function argument, with its optional type annotation and
/// any comments that directly precede it.
fn fn_arg<'a, A>(&mut self, arg: &'a Arg<A>) -> Document<'a> {
    // Comments before the argument are attached to it.
    let comments = self.pop_comments(arg.location.start);
    let names = arg.names.to_doc();
    let doc = if let Some(annotation) = &arg.annotation {
        names.append(": ").append(self.type_ast(annotation))
    } else {
        names
    }
    .group();
    commented(doc, comments)
}
/// Formats a top-level function definition: attributes, signature, body, and
/// any trailing comments before the closing brace.
fn statement_fn<'a>(&mut self, function: &'a UntypedFunction) -> Document<'a> {
    let Function {
        location,
        body_start: _,
        end_position,
        name,
        arguments,
        body,
        publicity,
        deprecation,
        return_annotation,
        return_type: _,
        documentation: _,
        external_erlang,
        external_javascript,
        implementations: _,
        purity: _,
    } = function;
    // Attributes such as `@deprecated`, `@internal` and `@external` come
    // before the function head.
    let attributes = AttributesPrinter::new()
        .set_deprecation(deprecation)
        .set_internal(*publicity)
        .set_external_erlang(external_erlang)
        .set_external_javascript(external_javascript)
        .to_doc();
    // Fn name and args
    let arguments = arguments
        .iter()
        .map(|argument| self.fn_arg(argument))
        .collect_vec();
    let signature = pub_(*publicity)
        .append("fn ")
        .append(
            &name
                .as_ref()
                .expect("Function in a statement must be named")
                .1,
        )
        .append(
            self.wrap_arguments(
                arguments,
                // Calculate end location of arguments to not consume comments in
                // return annotation
                return_annotation
                    .as_ref()
                    .map_or(location.end, |ann| ann.location().start),
            ),
        );
    // Add return annotation
    let signature = match &return_annotation {
        Some(anno) => signature.append(" -> ").append(self.type_ast(anno)),
        None => signature,
    }
    .group();
    // A function with no body (e.g. one that is external-only) is just the
    // head, with no braces.
    if body.is_empty() {
        return attributes.append(signature);
    }
    let head = attributes.append(signature);
    // Format body
    let body = self.statements(body);
    // Add any trailing comments
    let body = match printed_comments(self.pop_comments(*end_position), false) {
        Some(comments) => body.append(line()).append(comments),
        None => body,
    };
    // Stick it all together
    head.append(" {")
        .append(line().append(body).nest(INDENT).group())
        .append(line())
        .append("}")
}
fn expr_fn<'a>(
&mut self,
arguments: &'a [UntypedArg],
return_annotation: Option<&'a TypeAst>,
body: &'a Vec1<UntypedStatement>,
location: &SrcSpan,
end_of_head_byte_index: &u32,
) -> Document<'a> {
let arguments_docs = arguments
.iter()
.map(|argument| self.fn_arg(argument))
.collect_vec();
let arguments = self
.wrap_arguments(arguments_docs, *end_of_head_byte_index)
| rust | Apache-2.0 | f424547f02e621f1c5f28749786e05eda7feb098 | 2026-01-04T15:40:22.554517Z | true |
gleam-lang/gleam | https://github.com/gleam-lang/gleam/blob/f424547f02e621f1c5f28749786e05eda7feb098/compiler-core/src/bit_array.rs | compiler-core/src/bit_array.rs | use ecow::EcoString;
use num_bigint::BigInt;
use crate::ast::{self, BitArrayOption, SrcSpan};
use crate::build::Target;
use crate::type_::Type;
use std::sync::Arc;
//
// Public Interface
//
/// Type checks the options of a bit array segment used in an expression and
/// returns the type of value the segment holds.
pub fn type_options_for_value<TypedValue>(
    input_options: &[BitArrayOption<TypedValue>],
    target: Target,
) -> Result<Arc<Type>, Error>
where
    TypedValue: GetLiteralValue,
{
    // Expressions never require an explicit size, so `must_have_size` is false.
    type_options(input_options, TypeOptionsMode::Expression, false, target)
}
/// Type checks the options of a bit array segment used in a pattern and
/// returns the type of value the segment holds. `must_have_size` is set for
/// segments that are required to have an explicit size (all but the final
/// segment of a pattern, per the check in `type_options`).
pub fn type_options_for_pattern<TypedValue>(
    input_options: &[BitArrayOption<TypedValue>],
    must_have_size: bool,
    target: Target,
) -> Result<Arc<Type>, Error>
where
    TypedValue: GetLiteralValue,
{
    type_options(
        input_options,
        TypeOptionsMode::Pattern,
        must_have_size,
        target,
    )
}
/// The options of a single bit array segment, bucketed by category so that
/// conflicting or invalid combinations can be detected. Each field holds the
/// first option seen for that category, if any.
struct SegmentOptionCategories<'a, T> {
    // The value type: int, float, bytes, bits, or one of the utf variants.
    type_: Option<&'a BitArrayOption<T>>,
    // `signed` or `unsigned`.
    signed: Option<&'a BitArrayOption<T>>,
    // `big`, `little`, or `native`.
    endian: Option<&'a BitArrayOption<T>>,
    // `unit(n)`.
    unit: Option<&'a BitArrayOption<T>>,
    // `size(n)`.
    size: Option<&'a BitArrayOption<T>>,
}
impl<T> SegmentOptionCategories<'_, T> {
    /// Creates an empty set of categories with no options recorded.
    fn new() -> Self {
        SegmentOptionCategories {
            type_: None,
            signed: None,
            endian: None,
            unit: None,
            size: None,
        }
    }

    /// Returns the Gleam type of the value this segment holds, based on its
    /// type option. Defaults to `Int` when no type option was given.
    ///
    /// # Panics
    ///
    /// Panics if a non-type option (signedness, endianness, size, or unit)
    /// was stored in the `type_` slot; callers must only store type-kind
    /// options there.
    fn segment_type(&self) -> Arc<Type> {
        use BitArrayOption::*;
        let default = Int {
            location: SrcSpan::default(),
        };
        match self.type_.unwrap_or(&default) {
            Int { .. } => crate::type_::int(),
            Float { .. } => crate::type_::float(),
            Utf8 { .. } | Utf16 { .. } | Utf32 { .. } => crate::type_::string(),
            Bytes { .. } | Bits { .. } => crate::type_::bit_array(),
            Utf8Codepoint { .. } | Utf16Codepoint { .. } | Utf32Codepoint { .. } => {
                crate::type_::utf_codepoint()
            }
            Signed { .. }
            | Unsigned { .. }
            | Big { .. }
            | Little { .. }
            | Native { .. }
            | Size { .. }
            | Unit { .. } => panic!("Tried to type a non type kind BitArray option."),
        }
    }
}
#[derive(Debug, PartialEq, Eq)]
/// Whether we're typing options for a bit array segment that's part of a pattern
/// or an expression.
///
enum TypeOptionsMode {
    /// The segment appears in an expression constructing a bit array.
    Expression,
    /// The segment appears in a pattern matching on a bit array.
    Pattern,
}
/// Type checks the options of a bit array segment, rejecting conflicting or
/// invalid combinations, and returns the Gleam type of the value the segment
/// holds. The first invalid option found is reported with its location.
fn type_options<TypedValue>(
    input_options: &[BitArrayOption<TypedValue>],
    mode: TypeOptionsMode,
    must_have_size: bool,
    target: Target,
) -> Result<Arc<Type>, Error>
where
    TypedValue: GetLiteralValue,
{
    use BitArrayOption::*;

    let mut categories = SegmentOptionCategories::new();
    // Basic category checking: bucket each option into its category,
    // rejecting duplicates within a category and options that the compile
    // target does not support.
    for option in input_options {
        match option {
            Utf8Codepoint { .. } | Utf16Codepoint { .. } | Utf32Codepoint { .. }
                if mode == TypeOptionsMode::Pattern && target == Target::JavaScript =>
            {
                return err(
                    ErrorType::OptionNotSupportedForTarget {
                        target,
                        option: UnsupportedOption::UtfCodepointPattern,
                    },
                    option.location(),
                );
            }
            Bytes { .. }
            | Int { .. }
            | Float { .. }
            | Bits { .. }
            | Utf8 { .. }
            | Utf16 { .. }
            | Utf32 { .. }
            | Utf8Codepoint { .. }
            | Utf16Codepoint { .. }
            | Utf32Codepoint { .. } => {
                if let Some(previous) = categories.type_ {
                    return err(
                        ErrorType::ConflictingTypeOptions {
                            existing_type: previous.label(),
                        },
                        option.location(),
                    );
                } else {
                    categories.type_ = Some(option);
                }
            }
            Signed { .. } | Unsigned { .. } => {
                if let Some(previous) = categories.signed {
                    return err(
                        ErrorType::ConflictingSignednessOptions {
                            existing_signed: previous.label(),
                        },
                        option.location(),
                    );
                } else {
                    categories.signed = Some(option);
                }
            }
            Native { .. } if target == Target::JavaScript => {
                return err(
                    ErrorType::OptionNotSupportedForTarget {
                        target,
                        option: UnsupportedOption::NativeEndianness,
                    },
                    option.location(),
                );
            }
            Big { .. } | Little { .. } | Native { .. } => {
                if let Some(previous) = categories.endian {
                    return err(
                        ErrorType::ConflictingEndiannessOptions {
                            existing_endianness: previous.label(),
                        },
                        option.location(),
                    );
                } else {
                    categories.endian = Some(option);
                }
            }
            Size { .. } => {
                if categories.size.is_some() {
                    return err(ErrorType::ConflictingSizeOptions, option.location());
                } else {
                    categories.size = Some(option);
                }
            }
            Unit { .. } => {
                if categories.unit.is_some() {
                    return err(ErrorType::ConflictingUnitOptions, option.location());
                } else {
                    categories.unit = Some(option);
                }
            }
        };
    }

    // Some options are not allowed in value mode
    if mode == TypeOptionsMode::Expression {
        match categories {
            SegmentOptionCategories {
                signed: Some(opt), ..
            }
            | SegmentOptionCategories {
                type_: Some(opt @ Bytes { .. }),
                ..
            } => return err(ErrorType::OptionNotAllowedInValue, opt.location()),
            _ => (),
        }
    }

    // All but the last segment in a pattern must have an exact size
    if must_have_size
        && let SegmentOptionCategories {
            type_: Some(opt @ (Bytes { .. } | Bits { .. })),
            size: None,
            ..
        } = categories
    {
        return err(ErrorType::SegmentMustHaveSize, opt.location());
    }

    // Endianness is only valid for int, utf16, utf16_codepoint, utf32,
    // utf32_codepoint and float
    match categories {
        SegmentOptionCategories {
            type_:
                None
                | Some(
                    Int { .. }
                    | Utf16 { .. }
                    | Utf32 { .. }
                    | Utf16Codepoint { .. }
                    | Utf32Codepoint { .. }
                    | Float { .. },
                ),
            ..
        } => {}
        SegmentOptionCategories {
            endian: Some(endian),
            ..
        } => return err(ErrorType::InvalidEndianness, endian.location()),
        _ => {}
    }

    // signed and unsigned can only be used with int types
    match categories {
        SegmentOptionCategories {
            type_: None | Some(Int { .. }),
            ..
        } => {}
        SegmentOptionCategories {
            type_: Some(opt),
            signed: Some(sign),
            ..
        } => {
            return err(
                ErrorType::SignednessUsedOnNonInt { type_: opt.label() },
                sign.location(),
            );
        }
        _ => {}
    }

    // utf8, utf16, utf32 exclude unit and size
    match categories {
        SegmentOptionCategories {
            type_: Some(type_),
            unit: Some(_),
            ..
        } if is_unicode(type_) => {
            return err(
                ErrorType::TypeDoesNotAllowUnit {
                    type_: type_.label(),
                },
                type_.location(),
            );
        }
        SegmentOptionCategories {
            type_: Some(type_),
            size: Some(_),
            ..
        } if is_unicode(type_) => {
            return err(
                ErrorType::TypeDoesNotAllowSize {
                    type_: type_.label(),
                },
                type_.location(),
            );
        }
        _ => {}
    }

    // if unit specified, size must be specified
    if let SegmentOptionCategories {
        unit: Some(unit),
        size: None,
        ..
    } = categories
    {
        return err(ErrorType::UnitMustHaveSize, unit.location());
    }

    // float only 16/32/64
    if let SegmentOptionCategories {
        type_: Some(Float { .. }),
        size: Some(size),
        ..
    } = categories
        && let Some(abox) = size.value()
    {
        match abox.as_int_literal() {
            // Non-literal sizes can't be checked at compile time.
            None => (),
            Some(value) if value == 16.into() || value == 32.into() || value == 64.into() => (),
            _ => return err(ErrorType::FloatWithSize, size.location()),
        }
    }

    // Segment patterns with a zero or negative constant size must be rejected,
    // we know they will never match!
    // A negative size is still allowed in expressions as it will just result
    // in an empty segment.
    if let (Some(size @ Size { value, .. }), TypeOptionsMode::Pattern) = (categories.size, mode) {
        match value.as_int_literal() {
            Some(n) if n <= BigInt::ZERO => {
                return err(ErrorType::ConstantSizeNotPositive, size.location());
            }
            Some(_) | None => (),
        }
    }

    Ok(categories.segment_type())
}
/// Implemented by typed values that may be known integer literals, letting
/// the checker inspect constant sizes in bit array options.
pub trait GetLiteralValue {
    /// Returns the value as an integer literal, or `None` if it is not one.
    fn as_int_literal(&self) -> Option<BigInt>;
}
impl GetLiteralValue for ast::TypedPattern {
    fn as_int_literal(&self) -> Option<BigInt> {
        match self {
            // Both a literal int pattern and a literal int used as a bit
            // array size count as integer literals.
            ast::Pattern::Int { int_value, .. }
            | ast::Pattern::BitArraySize(ast::BitArraySize::Int { int_value, .. }) => {
                Some(int_value.clone())
            }
            // All other variants are spelled out (no `_` arm) so adding a
            // new pattern kind forces this match to be revisited.
            ast::Pattern::Float { .. }
            | ast::Pattern::String { .. }
            | ast::Pattern::Variable { .. }
            | ast::Pattern::BitArraySize(_)
            | ast::Pattern::Assign { .. }
            | ast::Pattern::Discard { .. }
            | ast::Pattern::List { .. }
            | ast::Pattern::Constructor { .. }
            | ast::Pattern::Tuple { .. }
            | ast::Pattern::BitArray { .. }
            | ast::Pattern::StringPrefix { .. }
            | ast::Pattern::Invalid { .. } => None,
        }
    }
}
/// Returns true when the option is one of the utf8/utf16/utf32 type options
/// (including the codepoint variants).
fn is_unicode<T>(opt: &BitArrayOption<T>) -> bool {
    use BitArrayOption::*;
    match opt {
        Utf8 { .. }
        | Utf16 { .. }
        | Utf32 { .. }
        | Utf8Codepoint { .. }
        | Utf16Codepoint { .. }
        | Utf32Codepoint { .. } => true,
        _ => false,
    }
}
/// Convenience constructor for an `Err` carrying a bit array option error at
/// the given source location.
fn err<A>(error: ErrorType, location: SrcSpan) -> Result<A, Error> {
    Err(Error { location, error })
}
#[derive(Debug)]
/// A bit array segment option error together with the source location it
/// should be reported at.
pub struct Error {
    pub location: SrcSpan,
    pub error: ErrorType,
}
#[derive(Debug, PartialEq, Eq, Clone)]
/// The ways in which a bit array segment's options can be invalid. The
/// `existing_*` fields carry the label of the previously-seen conflicting
/// option so it can be named in the error message.
pub enum ErrorType {
    ConflictingEndiannessOptions {
        existing_endianness: EcoString,
    },
    ConflictingSignednessOptions {
        existing_signed: EcoString,
    },
    ConflictingSizeOptions,
    ConflictingTypeOptions {
        existing_type: EcoString,
    },
    ConflictingUnitOptions,
    FloatWithSize,
    InvalidEndianness,
    OptionNotAllowedInValue,
    SegmentMustHaveSize,
    SignednessUsedOnNonInt {
        type_: EcoString,
    },
    TypeDoesNotAllowSize {
        type_: EcoString,
    },
    TypeDoesNotAllowUnit {
        type_: EcoString,
    },
    UnitMustHaveSize,
    VariableUtfSegmentInPattern,
    ConstantSizeNotPositive,
    OptionNotSupportedForTarget {
        target: Target,
        option: UnsupportedOption,
    },
}
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
/// A bit array option that is valid in general but not supported on a
/// particular compile target (currently JavaScript).
pub enum UnsupportedOption {
    /// Matching on utf codepoints in a pattern.
    UtfCodepointPattern,
    /// The `native` endianness option.
    NativeEndianness,
}
| rust | Apache-2.0 | f424547f02e621f1c5f28749786e05eda7feb098 | 2026-01-04T15:40:22.554517Z | false |
gleam-lang/gleam | https://github.com/gleam-lang/gleam/blob/f424547f02e621f1c5f28749786e05eda7feb098/compiler-core/src/codegen.rs | compiler-core/src/codegen.rs | use crate::{
Result,
build::{
ErlangAppCodegenConfiguration, Module, module_erlang_name, package_compiler::StdlibPackage,
},
config::PackageConfig,
erlang,
io::FileSystemWriter,
javascript::{self, ModuleConfig},
line_numbers::LineNumbers,
};
use ecow::EcoString;
use erlang::escape_atom_string;
use itertools::Itertools;
use std::fmt::Debug;
use camino::Utf8Path;
/// A code generator that creates a .erl Erlang module and record header files
/// for each Gleam module in the package.
#[derive(Debug)]
pub struct Erlang<'a> {
    /// Directory the generated `.erl` modules are written to.
    build_directory: &'a Utf8Path,
    /// Directory the generated `.hrl` record headers are written to.
    include_directory: &'a Utf8Path,
}
impl<'a> Erlang<'a> {
    pub fn new(build_directory: &'a Utf8Path, include_directory: &'a Utf8Path) -> Self {
        Self {
            build_directory,
            include_directory,
        }
    }

    /// Generates the `.erl` module and `.hrl` record headers for every
    /// module in the package.
    pub fn render<Writer: FileSystemWriter>(
        &self,
        writer: Writer,
        modules: &[Module],
        root: &Utf8Path,
    ) -> Result<()> {
        for module in modules {
            let erl_name = module.erlang_name();
            self.erlang_module(&writer, module, &erl_name, root)?;
            self.erlang_record_headers(&writer, module, &erl_name)?;
        }
        Ok(())
    }

    /// Renders one Gleam module to `<build_directory>/<erl_name>.erl`.
    fn erlang_module<Writer: FileSystemWriter>(
        &self,
        writer: &Writer,
        module: &Module,
        erl_name: &str,
        root: &Utf8Path,
    ) -> Result<()> {
        let name = format!("{erl_name}.erl");
        let path = self.build_directory.join(&name);
        // Line numbers let generated code reference original source positions.
        let line_numbers = LineNumbers::new(&module.code);
        let output = erlang::module(&module.ast, &line_numbers, root);
        tracing::debug!(name = ?name, "Generated Erlang module");
        writer.write(&path, &output?)
    }

    /// Writes an `<erl_name>_<record>.hrl` header into the include directory
    /// for each record defined by the module.
    fn erlang_record_headers<Writer: FileSystemWriter>(
        &self,
        writer: &Writer,
        module: &Module,
        erl_name: &str,
    ) -> Result<()> {
        for (name, text) in erlang::records(&module.ast) {
            let name = format!("{erl_name}_{name}.hrl");
            tracing::debug!(name = ?name, "Generated Erlang header");
            writer.write(&self.include_directory.join(name), &text)?;
        }
        Ok(())
    }
}
/// A code generator that creates a .app Erlang application file for the package
#[derive(Debug)]
pub struct ErlangApp<'a> {
    /// Directory the `.app` file is written to.
    output_directory: &'a Utf8Path,
    /// Options controlling dev-dependency inclusion and package renames.
    config: &'a ErlangAppCodegenConfiguration,
}
impl<'a> ErlangApp<'a> {
    pub fn new(output_directory: &'a Utf8Path, config: &'a ErlangAppCodegenConfiguration) -> Self {
        Self {
            output_directory,
            config,
        }
    }

    /// Writes the `<name>.app` Erlang application resource file listing the
    /// package's modules, applications (dependencies), and optional start
    /// module.
    pub fn render<Writer: FileSystemWriter>(
        &self,
        writer: Writer,
        config: &PackageConfig,
        modules: &[Module],
        native_modules: Vec<EcoString>,
    ) -> Result<()> {
        // Renders one `{key, value},` entry of the .app proplist.
        fn tuple(key: &str, value: &str) -> String {
            format!(" {{{key}, {value}}},\n")
        }
        let path = self.output_directory.join(format!("{}.app", &config.name));
        // The `mod` entry is only emitted when a start module is configured.
        let start_module = match config.erlang.application_start_module.as_ref() {
            None => "".into(),
            Some(module) => {
                let module = module_erlang_name(module);
                let argument = match config.erlang.application_start_argument.as_ref() {
                    Some(argument) => argument.as_str(),
                    None => "[]",
                };
                tuple("mod", &format!("{{'{module}', {argument}}}"))
            }
        };
        let modules = modules
            .iter()
            .map(|m| m.erlang_name())
            .chain(native_modules)
            .unique()
            .sorted()
            .map(escape_atom_string)
            .join(",\n ");
        // TODO: When precompiling for production (i.e. as a precompiled hex
        // package) we will need to exclude the dev deps.
        let applications = config
            .dependencies
            .keys()
            .chain(
                config
                    .dev_dependencies
                    .keys()
                    // Constant predicate: takes either all dev deps or none,
                    // depending on the configuration flag.
                    .take_while(|_| self.config.include_dev_deps),
            )
            // TODO: test this!
            .map(|name| self.config.package_name_overrides.get(name).unwrap_or(name))
            .chain(config.erlang.extra_applications.iter())
            .sorted()
            .join(",\n ");
        let text = format!(
            r#"{{application, {package}, [
{start_module} {{vsn, "{version}"}},
{{applications, [{applications}]}},
{{description, "{description}"}},
{{modules, [{modules}]}},
{{registered, []}}
]}}.
"#,
            applications = applications,
            description = config.description,
            modules = modules,
            package = config.name,
            start_module = start_module,
            version = config.version,
        );
        writer.write(&path, &text)
    }
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
/// Whether to emit `.d.mts` TypeScript declaration files alongside the
/// generated JavaScript.
pub enum TypeScriptDeclarations {
    None,
    Emit,
}
#[derive(Debug)]
pub struct JavaScript<'a> {
    /// Directory the generated `.mjs` (and `.d.mts`) files are written to.
    output_directory: &'a Utf8Path,
    /// Path of the JavaScript prelude that `gleam.mjs` re-exports.
    prelude_location: &'a Utf8Path,
    project_root: &'a Utf8Path,
    /// Whether TypeScript declaration files should also be emitted.
    typescript: TypeScriptDeclarations,
}
impl<'a> JavaScript<'a> {
    pub fn new(
        output_directory: &'a Utf8Path,
        typescript: TypeScriptDeclarations,
        prelude_location: &'a Utf8Path,
        project_root: &'a Utf8Path,
    ) -> Self {
        Self {
            prelude_location,
            output_directory,
            project_root,
            typescript,
        }
    }

    /// Generates a `.mjs` file (and optionally a `.d.mts` declaration) for
    /// each module, plus the `gleam.mjs` prelude re-export.
    pub fn render(
        &self,
        writer: &impl FileSystemWriter,
        modules: &[Module],
        stdlib_package: StdlibPackage,
    ) -> Result<()> {
        for module in modules {
            let js_name = module.name.clone();
            if self.typescript == TypeScriptDeclarations::Emit {
                self.ts_declaration(writer, module, &js_name)?;
            }
            self.js_module(writer, module, &js_name, stdlib_package)?
        }
        self.write_prelude(writer)?;
        Ok(())
    }

    /// Writes `gleam.mjs` (and `gleam.d.mts` when TypeScript output is
    /// enabled) re-exporting the prelude. Existing files are not rewritten.
    fn write_prelude(&self, writer: &impl FileSystemWriter) -> Result<()> {
        let rexport = format!("export * from \"{}\";\n", self.prelude_location);
        let prelude_path = &self.output_directory.join("gleam.mjs");
        // This check skips unnecessary `gleam.mjs` writes which confuse
        // watchers and HMR build tools
        if !writer.exists(prelude_path) {
            writer.write(prelude_path, &rexport)?;
        }
        if self.typescript == TypeScriptDeclarations::Emit {
            let rexport = format!(
                "export * from \"{}\";\nexport type * from \"{}\";\n",
                self.prelude_location,
                self.prelude_location.as_str().replace(".mjs", ".d.mts")
            );
            let prelude_declaration_path = &self.output_directory.join("gleam.d.mts");
            // Type declaration may trigger badly configured watchers
            if !writer.exists(prelude_declaration_path) {
                writer.write(prelude_declaration_path, &rexport)?;
            }
        }
        Ok(())
    }

    /// Writes the `<js_name>.d.mts` TypeScript declaration for a module.
    fn ts_declaration(
        &self,
        writer: &impl FileSystemWriter,
        module: &Module,
        js_name: &str,
    ) -> Result<()> {
        let name = format!("{js_name}.d.mts");
        let path = self.output_directory.join(name);
        let output = javascript::ts_declaration(&module.ast);
        tracing::debug!(name = ?js_name, "Generated TS declaration");
        writer.write(&path, &output)
    }

    /// Writes the `<js_name>.mjs` JavaScript module.
    fn js_module(
        &self,
        writer: &impl FileSystemWriter,
        module: &Module,
        js_name: &str,
        stdlib_package: StdlibPackage,
    ) -> Result<()> {
        let name = format!("{js_name}.mjs");
        let path = self.output_directory.join(name);
        let line_numbers = LineNumbers::new(&module.code);
        let output = javascript::module(ModuleConfig {
            module: &module.ast,
            line_numbers: &line_numbers,
            path: &module.input_path,
            project_root: self.project_root,
            src: &module.code,
            typescript: self.typescript,
            stdlib_package,
        });
        tracing::debug!(name = ?js_name, "Generated js module");
        writer.write(&path, &output)
    }
}
| rust | Apache-2.0 | f424547f02e621f1c5f28749786e05eda7feb098 | 2026-01-04T15:40:22.554517Z | false |
gleam-lang/gleam | https://github.com/gleam-lang/gleam/blob/f424547f02e621f1c5f28749786e05eda7feb098/compiler-core/src/strings.rs | compiler-core/src/strings.rs | use ecow::EcoString;
use itertools::Itertools;
use crate::ast::Endianness;
/// Converts any escape sequences from the given string to their correct
/// bytewise UTF-8 representation and returns the resulting string.
///
/// Recognised escapes are `\n`, `\r`, `\f`, `\t`, `\"`, `\\` and Unicode
/// escapes of the form `\u{XXXX}`. For any other `\c` the backslash is
/// dropped and `c` is kept as-is.
pub fn convert_string_escape_chars(str: &EcoString) -> EcoString {
    let mut filtered_str = EcoString::new();
    let mut str_iter = str.chars().peekable();
    loop {
        match str_iter.next() {
            Some('\\') => match str_iter.next() {
                // Check for Unicode escape sequence, e.g. \u{00012FF}
                Some('u') => {
                    if str_iter.peek() != Some(&'{') {
                        // Invalid Unicode escape sequence: the backslash is
                        // dropped (like other escapes) and the `u` kept.
                        filtered_str.push('u');
                        continue;
                    }
                    // Consume the left brace after peeking
                    let _ = str_iter.next();
                    let codepoint_str = str_iter
                        .peeking_take_while(char::is_ascii_hexdigit)
                        .collect::<String>();
                    if codepoint_str.is_empty() || str_iter.peek() != Some(&'}') {
                        // Invalid Unicode escape sequence
                        filtered_str.push_str("u{");
                        filtered_str.push_str(&codepoint_str);
                        continue;
                    }
                    let codepoint = u32::from_str_radix(&codepoint_str, 16)
                        .ok()
                        .and_then(char::from_u32);
                    if let Some(codepoint) = codepoint {
                        // Consume the right brace after peeking
                        let _ = str_iter.next();
                        // Consider this codepoint's length instead of
                        // that of the Unicode escape sequence itself
                        filtered_str.push(codepoint);
                    } else {
                        // Invalid Unicode escape sequence
                        // (codepoint value not in base 16 or too large)
                        // The closing `}` is not consumed here, so it stays
                        // in the output on the next iteration.
                        filtered_str.push_str("u{");
                        filtered_str.push_str(&codepoint_str);
                    }
                }
                Some('n') => filtered_str.push('\n'),
                Some('r') => filtered_str.push('\r'),
                Some('f') => filtered_str.push('\u{C}'),
                Some('t') => filtered_str.push('\t'),
                Some('"') => filtered_str.push('\"'),
                Some('\\') => filtered_str.push('\\'),
                // Unknown escape: drop the backslash, keep the character.
                Some(c) => filtered_str.push(c),
                // A trailing lone backslash is dropped.
                None => break,
            },
            Some(c) => filtered_str.push(c),
            None => break,
        }
    }
    filtered_str
}
/// Converts a string to snake_case: `_` and space act as word separators,
/// and an uppercase letter starts a new word. Words are joined with `_` and
/// lowercased; a leading separator or capital adds no leading underscore.
pub fn to_snake_case(string: &str) -> EcoString {
    let mut result = EcoString::with_capacity(string.len());
    let mut at_word_boundary = true;
    for character in string.chars() {
        // Explicit separators start a new word and are not copied over.
        if character == '_' || character == ' ' {
            at_word_boundary = true;
            continue;
        }
        // An uppercase letter also starts a new word.
        if character.is_uppercase() {
            at_word_boundary = true;
        }
        if at_word_boundary {
            // Join words with an underscore, except at the very start.
            if !result.is_empty() {
                result.push('_');
            }
            at_word_boundary = false;
        }
        result.push(character.to_ascii_lowercase());
    }
    result
}
/// Removes underscores, uppercasing the character that follows each one.
/// A trailing underscore is dropped entirely.
///
/// NOTE(review): the very first character is copied as-is rather than
/// uppercased — callers appear to rely on this; confirm before changing.
pub fn to_upper_camel_case(string: &str) -> EcoString {
    let mut result = EcoString::with_capacity(string.len());
    let mut characters = string.chars();
    loop {
        match characters.next() {
            None => break,
            Some('_') => match characters.next() {
                // A trailing underscore produces nothing.
                None => break,
                Some(next) => result.push(next.to_ascii_uppercase()),
            },
            Some(other) => result.push(other),
        }
    }
    result
}
/// Converts a string into its UTF-16 representation in bytes, with the
/// requested byte order.
pub fn string_to_utf16_bytes(string: &str, endianness: Endianness) -> Vec<u8> {
    let mut bytes = Vec::with_capacity(string.len() * 2);
    let mut buffer = [0u16; 2];
    for character in string.chars() {
        // A char encodes to one or two 16-bit code units.
        for unit in character.encode_utf16(&mut buffer) {
            let pair = match endianness {
                Endianness::Big => unit.to_be_bytes(),
                Endianness::Little => unit.to_le_bytes(),
            };
            bytes.extend_from_slice(&pair);
        }
    }
    bytes
}
/// Converts a string into its UTF-32 representation in bytes, with the
/// requested byte order.
pub fn string_to_utf32_bytes(string: &str, endianness: Endianness) -> Vec<u8> {
    let mut bytes = Vec::with_capacity(string.len() * 4);
    for character in string.chars() {
        // Each char is a single 32-bit code unit in UTF-32.
        let codepoint = character as u32;
        let encoded = match endianness {
            Endianness::Big => codepoint.to_be_bytes(),
            Endianness::Little => codepoint.to_le_bytes(),
        };
        bytes.extend_from_slice(&encoded);
    }
    bytes
}
/// Gets the number of UTF-16 code units it would take to encode the given
/// string.
pub fn length_utf16(string: &str) -> usize {
    string.chars().map(char::len_utf16).sum()
}
/// Gets the number of UTF-32 code units (Unicode scalar values) in a string.
pub fn length_utf32(string: &str) -> usize {
    string.chars().fold(0, |count, _| count + 1)
}
| rust | Apache-2.0 | f424547f02e621f1c5f28749786e05eda7feb098 | 2026-01-04T15:40:22.554517Z | false |
gleam-lang/gleam | https://github.com/gleam-lang/gleam/blob/f424547f02e621f1c5f28749786e05eda7feb098/compiler-core/src/requirement.rs | compiler-core/src/requirement.rs | use std::fmt;
use std::str::FromStr;
use crate::Error;
use crate::error::Result;
use crate::io::make_relative;
use camino::{Utf8Path, Utf8PathBuf};
use ecow::EcoString;
use hexpm::version::Range;
use serde::Deserialize;
use serde::de::{self, Deserializer, MapAccess, Visitor};
use serde::ser::{Serialize, SerializeMap, Serializer};
#[derive(Deserialize, Debug, PartialEq, Eq, Clone)]
#[serde(untagged, remote = "Self", deny_unknown_fields)]
/// A dependency requirement as written in `gleam.toml`: a Hex version
/// range, a local path, or a git repository at a specific ref.
///
/// `remote = "Self"` makes the derive generate a helper used by the manual
/// `Deserialize` impl below, which also accepts a bare version string.
pub enum Requirement {
    Hex {
        #[serde(deserialize_with = "deserialise_range")]
        version: Range,
    },
    Path {
        path: Utf8PathBuf,
    },
    Git {
        git: EcoString,
        #[serde(rename = "ref")]
        ref_: EcoString,
    },
}
impl Requirement {
    /// Creates a Hex requirement from a version range string, returning an
    /// error if the range syntax is invalid.
    pub fn hex(range: &str) -> Result<Requirement> {
        Ok(Requirement::Hex {
            version: Range::new(range.to_string()).map_err(|e| Error::InvalidVersionFormat {
                input: range.to_string(),
                error: e.to_string(),
            })?,
        })
    }

    /// Creates a local path requirement.
    pub fn path(path: &str) -> Requirement {
        Requirement::Path { path: path.into() }
    }

    /// Creates a git requirement for the given repository URL and ref.
    pub fn git(url: &str, ref_: &str) -> Requirement {
        Requirement::Git {
            git: url.into(),
            ref_: ref_.into(),
        }
    }

    /// Renders the requirement as a TOML inline table. Paths are made
    /// relative to `root_path` and normalised to forward slashes.
    pub fn to_toml(&self, root_path: &Utf8Path) -> String {
        match self {
            Requirement::Hex { version: range } => {
                format!(r#"{{ version = "{range}" }}"#)
            }
            Requirement::Path { path } => {
                format!(
                    r#"{{ path = "{}" }}"#,
                    make_relative(root_path, path).as_str().replace('\\', "/")
                )
            }
            Requirement::Git { git: url, ref_ } => {
                format!(r#"{{ git = "{url}", ref = "{ref_}" }}"#)
            }
        }
    }
}
// Serialization
impl Serialize for Requirement {
    /// Serialises as a map: one entry for `Hex`/`Path`, two (`git` + `ref`)
    /// for `Git`.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        // NOTE(review): the length hint is `Some(1)` even for `Git`, which
        // writes two entries. Most serde formats ignore the hint — confirm
        // if targeting one that doesn't.
        let mut map = serializer.serialize_map(Some(1))?;
        match self {
            Requirement::Hex { version: range } => map.serialize_entry("version", range)?,
            Requirement::Path { path } => map.serialize_entry("path", path)?,
            Requirement::Git { git: url, ref_ } => {
                map.serialize_entry("git", url)?;
                map.serialize_entry("ref", ref_)?;
            }
        }
        map.end()
    }
}
// Deserialization
/// Deserialises a version requirement string (e.g. `"~> 1.0"`) into a
/// `Range`, turning parse failures into serde errors.
fn deserialise_range<'de, D>(deserializer: D) -> Result<Range, D::Error>
where
    D: Deserializer<'de>,
{
    let version = String::deserialize(deserializer)?;
    Range::new(version).map_err(de::Error::custom)
}
#[derive(Debug, Copy, Clone)]
// NOTE(review): no uses of this type are visible in this file — presumably a
// placeholder or used elsewhere; confirm before removing.
pub struct Void;
impl FromStr for Requirement {
    type Err = Error;

    /// Parses a bare string (e.g. `"~> 1.0"`) as a Hex version requirement.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        Requirement::hex(s)
    }
}
/// Serde visitor that accepts either the bare version-string shorthand
/// (`dep = "~> 1.0"`) or a full table (`dep = { path = "..." }`).
struct RequirementVisitor;

impl<'de> Visitor<'de> for RequirementVisitor {
    type Value = Requirement;

    fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
        formatter.write_str("string or map")
    }

    /// A bare string is shorthand for a Hex version requirement.
    fn visit_str<E>(self, value: &str) -> Result<Self::Value, E>
    where
        E: de::Error,
    {
        match value.parse::<Requirement>() {
            Ok(value) => Ok(value),
            Err(error) => Err(de::Error::custom(error)),
        }
    }

    /// A map is handled by the helper generated by the enum's derived
    /// (`remote = "Self"`) implementation.
    fn visit_map<M>(self, visitor: M) -> Result<Self::Value, M::Error>
    where
        M: MapAccess<'de>,
    {
        Requirement::deserialize(de::value::MapAccessDeserializer::new(visitor))
    }
}
impl<'de> Deserialize<'de> for Requirement {
    fn deserialize<D>(deserializer: D) -> Result<Requirement, D::Error>
    where
        D: Deserializer<'de>,
    {
        // `deserialize_any` lets the visitor dispatch on whether the value
        // is a string shorthand or a table.
        deserializer.deserialize_any(RequirementVisitor)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use std::collections::HashMap;

    // Each supported requirement form round-trips from TOML.
    #[test]
    fn read_requirement() {
        let toml = r#"
short = "~> 0.5"
hex = { version = "~> 1.0.0" }
local = { path = "/path/to/package" }
github = { git = "https://github.com/gleam-lang/otp.git", ref = "4d34935" }
"#;
        let deps: HashMap<String, Requirement> = toml::from_str(toml).unwrap();
        assert_eq!(deps["short"], Requirement::hex("~> 0.5").unwrap());
        assert_eq!(deps["hex"], Requirement::hex("~> 1.0.0").unwrap());
        assert_eq!(deps["local"], Requirement::path("/path/to/package"));
        assert_eq!(
            deps["github"],
            Requirement::git("https://github.com/gleam-lang/otp.git", "4d34935")
        );
    }

    // An invalid range surfaces as a deserialisation error; the exact
    // message is pinned with a snapshot.
    #[test]
    fn read_wrong_version() {
        let toml = r#"
short = ">= 2.0 and < 3.0.0"
"#;
        let error =
            toml::from_str::<HashMap<String, Requirement>>(toml).expect_err("invalid version");
        insta::assert_snapshot!(error.to_string());
    }
}
| rust | Apache-2.0 | f424547f02e621f1c5f28749786e05eda7feb098 | 2026-01-04T15:40:22.554517Z | false |
gleam-lang/gleam | https://github.com/gleam-lang/gleam/blob/f424547f02e621f1c5f28749786e05eda7feb098/compiler-core/src/docs.rs | compiler-core/src/docs.rs | mod printer;
mod source_links;
#[cfg(test)]
mod tests;
use std::{collections::HashMap, time::SystemTime};
use camino::Utf8PathBuf;
use hexpm::version::Version;
use printer::Printer;
use crate::{
build::{Module, Package},
config::{DocsPage, PackageConfig},
docs::source_links::SourceLinker,
io::{Content, FileSystemReader, OutputFile},
package_interface::PackageInterface,
paths::ProjectPaths,
type_::{self},
version::COMPILER_VERSION,
};
use askama::Template;
use ecow::EcoString;
use itertools::Itertools;
use serde::{Deserialize, Serialize};
use serde_json::to_string as serde_to_string;
#[derive(PartialEq, Eq, Copy, Clone, Debug)]
/// Why documentation is being generated: for publishing to Hex, or for a
/// local build.
pub enum DocContext {
    HexPublish,
    Build,
}
#[derive(PartialEq, Debug, Serialize, Deserialize)]
/// Wrapper that (de)serialises the package config under a `gleam.toml` key.
pub struct PackageInformation {
    #[serde(rename = "gleam.toml")]
    package_config: PackageConfig,
}
/// Like `ManifestPackage`, but lighter and cheaper to clone as it is all that
/// we need for printing documentation.
#[derive(Debug, Clone)]
pub struct Dependency {
    /// The resolved version of the dependency.
    pub version: Version,
    /// The source the dependency was resolved from.
    pub kind: DependencyKind,
}
#[derive(Debug, Clone, Copy)]
/// The source a dependency was resolved from.
pub enum DependencyKind {
    Hex,
    Path,
    Git,
}
#[derive(Debug)]
/// Everything needed to render documentation for a package.
pub struct DocumentationConfig<'a> {
    pub package_config: &'a PackageConfig,
    /// The package's dependencies, keyed by name.
    pub dependencies: HashMap<EcoString, Dependency>,
    /// The package's analysed modules.
    pub analysed: &'a [Module],
    /// Extra documentation pages (e.g. README) to include.
    pub docs_pages: &'a [DocsPage],
    pub rendering_timestamp: SystemTime,
    pub context: DocContext,
}
/// Renders the complete HTML documentation for a package: user pages, one
/// page per non-internal `src` module, static CSS/JS/font assets, and the
/// `search-data.json` index consumed by the in-page search.
pub fn generate_html<IO: FileSystemReader>(
    paths: &ProjectPaths,
    config: DocumentationConfig<'_>,
    fs: IO,
) -> Vec<OutputFile> {
    let DocumentationConfig {
        package_config: config,
        dependencies,
        analysed,
        docs_pages,
        rendering_timestamp,
        context: is_hex_publish,
    } = config;
    // Only src modules that are not marked internal get a documentation page.
    let modules = analysed
        .iter()
        .filter(|module| module.origin.is_src())
        .filter(|module| !config.is_internal_module(&module.name));
    // Seconds since the Unix epoch, passed to the templates as a string.
    let rendering_timestamp = rendering_timestamp
        .duration_since(SystemTime::UNIX_EPOCH)
        .expect("get current timestamp")
        .as_secs()
        .to_string();
    // Define user-supplied (or README) pages
    let pages: Vec<_> = docs_pages
        .iter()
        .map(|page| Link {
            name: page.title.to_string(),
            path: page.path.to_string(),
        })
        .collect();
    let doc_links = config.links.iter().map(|doc_link| Link {
        name: doc_link.title.to_string(),
        path: doc_link.href.to_string(),
    });
    let repo_link = config
        .repository
        .as_ref()
        .map(|r| r.url())
        .map(|path| Link {
            name: "Repository".into(),
            path,
        });
    let host = if is_hex_publish == DocContext::HexPublish {
        "https://hexdocs.pm"
    } else {
        ""
    };
    // Only add the Hex link when publishing to Hex, see:
    // https://github.com/gleam-lang/gleam/issues/3020
    let links: Vec<_> = match is_hex_publish {
        DocContext::HexPublish => doc_links
            .chain(repo_link)
            .chain([Link {
                name: "Hex".into(),
                path: format!("https://hex.pm/packages/{0}", config.name).to_string(),
            }])
            .collect(),
        DocContext::Build => doc_links.chain(repo_link).collect(),
    };
    let mut files = vec![];
    let mut search_items = vec![];
    // Sidebar links to every module page, with <wbr /> allowing the browser
    // to break long module paths.
    let modules_links: Vec<_> = modules
        .clone()
        .map(|m| {
            let path = [&m.name, ".html"].concat();
            Link {
                path,
                name: m.name.split('/').join("<wbr />/"),
            }
        })
        .sorted()
        .collect();
    // Generate user-supplied (or README) pages
    for page in docs_pages {
        let content = fs.read(&page.source).unwrap_or_default();
        let rendered_content = render_markdown(&content, MarkdownSource::Standalone);
        let unnest = page_unnest(&page.path);
        let page_path_without_ext = page.path.split('.').next().unwrap_or("");
        let page_title = match page_path_without_ext {
            // The index page, such as README, should not include its own title
            "index" => format!("{} · v{}", config.name, config.version),
            // Other pages include their title
            _other => format!("{} · {} · v{}", page.title, config.name, config.version),
        };
        let page_meta_description = match page_path_without_ext {
            "index" => config.description.to_string().clone(),
            _other => "".to_owned(),
        };
        let path = Utf8PathBuf::from(&page.path);
        let temp = PageTemplate {
            gleam_version: COMPILER_VERSION,
            links: &links,
            pages: &pages,
            modules: &modules_links,
            project_name: &config.name,
            page_title: &page_title,
            page_meta_description: &page_meta_description,
            file_path: &path.clone(),
            project_version: &config.version.to_string(),
            content: rendered_content,
            rendering_timestamp: &rendering_timestamp,
            host,
            unnest: &unnest,
        };
        files.push(OutputFile {
            path,
            content: Content::Text(temp.render().expect("Page template rendering")),
        });
        search_items.push(search_item_for_page(&config.name, &page.path, content))
    }
    // Generate module documentation pages
    for module in modules {
        let name = module.name.clone();
        let unnest = page_unnest(&module.name);
        // Read module src & create line number lookup structure
        let source_links = SourceLinker::new(paths, config, module);
        let documentation_content = module.ast.documentation.iter().join("\n");
        let rendered_documentation =
            render_markdown(&documentation_content, MarkdownSource::Comment);
        let mut printer = Printer::new(
            module.ast.type_info.package.clone(),
            module.name.clone(),
            &module.ast.names,
            &dependencies,
        );
        let types = printer.type_definitions(&source_links, &module.ast.definitions);
        let values = printer.value_definitions(&source_links, &module.ast.definitions);
        // Every type, value, and the module itself becomes a search item.
        types
            .iter()
            .for_each(|type_| search_items.push(search_item_for_type(&module.name, type_)));
        values
            .iter()
            .for_each(|value| search_items.push(search_item_for_value(&module.name, value)));
        search_items.push(search_item_for_module(module));
        let page_title = format!("{} · {} · v{}", name, config.name, config.version);
        let page_meta_description = "";
        let path = Utf8PathBuf::from(format!("{}.html", module.name));
        let template = ModuleTemplate {
            gleam_version: COMPILER_VERSION,
            host,
            unnest,
            links: &links,
            pages: &pages,
            documentation: rendered_documentation,
            modules: &modules_links,
            project_name: &config.name,
            page_title: &page_title,
            page_meta_description,
            module_name: EcoString::from(&name),
            file_path: &path.clone(),
            project_version: &config.version.to_string(),
            types,
            values,
            rendering_timestamp: &rendering_timestamp,
        };
        files.push(OutputFile {
            path,
            content: Content::Text(
                template
                    .render()
                    .expect("Module documentation template rendering"),
            ),
        });
    }
    // Render static assets
    files.push(OutputFile {
        path: Utf8PathBuf::from("css/atom-one-light.min.css"),
        content: Content::Text(
            std::include_str!("../templates/docs-css/atom-one-light.min.css").to_string(),
        ),
    });
    files.push(OutputFile {
        path: Utf8PathBuf::from("css/atom-one-dark.min.css"),
        content: Content::Text(
            std::include_str!("../templates/docs-css/atom-one-dark.min.css").to_string(),
        ),
    });
    files.push(OutputFile {
        path: Utf8PathBuf::from("css/index.css"),
        content: Content::Text(std::include_str!("../templates/docs-css/index.css").to_string()),
    });
    // highlightjs:
    files.push(OutputFile {
        path: Utf8PathBuf::from("js/highlight.min.js"),
        content: Content::Text(
            std::include_str!("../templates/docs-js/highlight.min.js").to_string(),
        ),
    });
    files.push(OutputFile {
        path: Utf8PathBuf::from("js/highlightjs-gleam.js"),
        content: Content::Text(
            std::include_str!("../templates/docs-js/highlightjs-gleam.js").to_string(),
        ),
    });
    files.push(OutputFile {
        path: Utf8PathBuf::from("js/highlightjs-erlang.min.js"),
        content: Content::Text(
            std::include_str!("../templates/docs-js/highlightjs-erlang.min.js").to_string(),
        ),
    });
    files.push(OutputFile {
        path: Utf8PathBuf::from("js/highlightjs-elixir.min.js"),
        content: Content::Text(
            std::include_str!("../templates/docs-js/highlightjs-elixir.min.js").to_string(),
        ),
    });
    files.push(OutputFile {
        path: Utf8PathBuf::from("js/highlightjs-javascript.min.js"),
        content: Content::Text(
            std::include_str!("../templates/docs-js/highlightjs-javascript.min.js").to_string(),
        ),
    });
    files.push(OutputFile {
        path: Utf8PathBuf::from("js/highlightjs-typescript.min.js"),
        content: Content::Text(
            std::include_str!("../templates/docs-js/highlightjs-typescript.min.js").to_string(),
        ),
    });
    // lunr.min.js, search-data.json and index.js
    files.push(OutputFile {
        path: Utf8PathBuf::from("js/lunr.min.js"),
        content: Content::Text(std::include_str!("../templates/docs-js/lunr.min.js").to_string()),
    });
    let search_data_json = serde_to_string(&SearchData {
        items: search_items,
        programming_language: SearchProgrammingLanguage::Gleam,
    })
    .expect("search index serialization");
    files.push(OutputFile {
        path: Utf8PathBuf::from("search-data.json"),
        content: Content::Text(search_data_json.to_string()),
    });
    files.push(OutputFile {
        path: Utf8PathBuf::from("js/index.js"),
        content: Content::Text(std::include_str!("../templates/docs-js/index.js").to_string()),
    });
    // web fonts:
    files.push(OutputFile {
        path: Utf8PathBuf::from("fonts/karla-v23-regular-latin-ext.woff2"),
        content: Content::Binary(
            include_bytes!("../templates/docs-fonts/karla-v23-regular-latin-ext.woff2").to_vec(),
        ),
    });
    files.push(OutputFile {
        path: Utf8PathBuf::from("fonts/karla-v23-regular-latin.woff2"),
        content: Content::Binary(
            include_bytes!("../templates/docs-fonts/karla-v23-regular-latin.woff2").to_vec(),
        ),
    });
    files.push(OutputFile {
        path: Utf8PathBuf::from("fonts/karla-v23-bold-latin-ext.woff2"),
        content: Content::Binary(
            include_bytes!("../templates/docs-fonts/karla-v23-bold-latin-ext.woff2").to_vec(),
        ),
    });
    files.push(OutputFile {
        path: Utf8PathBuf::from("fonts/karla-v23-bold-latin.woff2"),
        content: Content::Binary(
            include_bytes!("../templates/docs-fonts/karla-v23-bold-latin.woff2").to_vec(),
        ),
    });
    files.push(OutputFile {
        path: Utf8PathBuf::from("fonts/ubuntu-mono-v15-regular-cyrillic-ext.woff2"),
        content: Content::Binary(
            include_bytes!("../templates/docs-fonts/ubuntu-mono-v15-regular-cyrillic-ext.woff2")
                .to_vec(),
        ),
    });
    files.push(OutputFile {
        path: Utf8PathBuf::from("fonts/ubuntu-mono-v15-regular-cyrillic.woff2"),
        content: Content::Binary(
            include_bytes!("../templates/docs-fonts/ubuntu-mono-v15-regular-cyrillic.woff2")
                .to_vec(),
        ),
    });
    files.push(OutputFile {
        path: Utf8PathBuf::from("fonts/ubuntu-mono-v15-regular-greek-ext.woff2"),
        content: Content::Binary(
            include_bytes!("../templates/docs-fonts/ubuntu-mono-v15-regular-greek-ext.woff2")
                .to_vec(),
        ),
    });
    files.push(OutputFile {
        path: Utf8PathBuf::from("fonts/ubuntu-mono-v15-regular-greek.woff2"),
        content: Content::Binary(
            include_bytes!("../templates/docs-fonts/ubuntu-mono-v15-regular-greek.woff2").to_vec(),
        ),
    });
    files.push(OutputFile {
        path: Utf8PathBuf::from("fonts/ubuntu-mono-v15-regular-latin-ext.woff2"),
        content: Content::Binary(
            include_bytes!("../templates/docs-fonts/ubuntu-mono-v15-regular-latin-ext.woff2")
                .to_vec(),
        ),
    });
    files.push(OutputFile {
        path: Utf8PathBuf::from("fonts/ubuntu-mono-v15-regular-latin.woff2"),
        content: Content::Binary(
            include_bytes!("../templates/docs-fonts/ubuntu-mono-v15-regular-latin.woff2").to_vec(),
        ),
    });
    files
}
/// Builds the search-index entry for a standalone documentation page.
fn search_item_for_page(package: &str, path: &str, content: String) -> SearchItem {
    let title = package.to_string();
    SearchItem {
        type_: SearchItemType::Page,
        parent_title: title.clone(),
        title,
        content,
        reference: path.to_string(),
    }
}
/// Builds the search-index entry for a type definition, folding the
/// documentation of every constructor and constructor argument into the
/// searchable text.
fn search_item_for_type(module: &str, type_: &TypeDefinition<'_>) -> SearchItem {
    let mut constructor_texts = Vec::new();
    for constructor in &type_.constructors {
        let mut argument_texts = Vec::new();
        for argument in &constructor.arguments {
            argument_texts.push(format!(
                "{}\n{}",
                argument.name, argument.text_documentation
            ));
        }
        constructor_texts.push(format!(
            "{}\n{}\n{}",
            constructor.raw_definition,
            constructor.text_documentation,
            argument_texts.join("\n"),
        ));
    }
    let content = format!(
        "{}\n{}\n{}\n{}",
        type_.raw_definition,
        type_.text_documentation,
        constructor_texts.join("\n"),
        import_synonyms(module, type_.name),
    );
    SearchItem {
        type_: SearchItemType::Type,
        parent_title: module.to_string(),
        title: type_.name.to_string(),
        content,
        reference: format!("{}.html#{}", module, type_.name),
    }
}
/// Builds the search-index entry for a module value (function or constant).
fn search_item_for_value(module: &str, value: &DocsValues<'_>) -> SearchItem {
    let content = format!(
        "{}\n{}\n{}",
        value.raw_definition,
        value.text_documentation,
        import_synonyms(module, value.name),
    );
    let reference = format!("{}.html#{}", module, value.name);
    SearchItem {
        type_: SearchItemType::Value,
        parent_title: module.to_string(),
        title: value.name.to_string(),
        content,
        reference,
    }
}
/// Builds the search-index entry for a module itself, indexed by its
/// module-level documentation.
fn search_item_for_module(module: &Module) -> SearchItem {
    let name = module.name.to_string();
    let documentation = module.ast.documentation.iter().join("\n");
    SearchItem {
        type_: SearchItemType::Module,
        parent_title: name.clone(),
        title: name,
        content: documentation,
        reference: format!("{}.html", module.name),
    }
}
/// Serialises the package's public interface to JSON at the given path.
pub fn generate_json_package_interface(
    path: Utf8PathBuf,
    package: &Package,
    cached_modules: &im::HashMap<EcoString, type_::ModuleInterface>,
) -> OutputFile {
    let interface = PackageInterface::from_package(package, cached_modules);
    let json = serde_json::to_string(&interface).expect("JSON module interface serialisation");
    OutputFile {
        path,
        content: Content::Text(json),
    }
}
/// Serialises the package's `gleam.toml` configuration to JSON at the given
/// path.
pub fn generate_json_package_information(path: Utf8PathBuf, config: PackageConfig) -> OutputFile {
    let content = Content::Text(package_information_as_json(config));
    OutputFile { path, content }
}
/// Pretty-prints the package configuration as JSON, nested under a
/// `"gleam.toml"` key via `PackageInformation`.
fn package_information_as_json(config: PackageConfig) -> String {
    let information = PackageInformation {
        package_config: config,
    };
    serde_json::to_string_pretty(&information).expect("JSON module information serialisation")
}
/// Computes the relative prefix (`.`, `..`, `../..`, …) needed to reach the
/// documentation root from a page located at `path`.
fn page_unnest(path: &str) -> String {
    let trimmed = path.strip_prefix('/').unwrap_or(path);
    // One ".." per directory level below the root.
    let depth = trimmed.split('/').count() - 1;
    if depth == 0 {
        ".".into()
    } else {
        vec![".."; depth].join("/")
    }
}
#[test]
fn page_unnest_test() {
    // (path, expected relative prefix) pairs covering both page paths and
    // module names.
    let cases = [
        // Pages
        ("wibble.html", "."),
        ("/wibble.html", "."),
        ("/wibble/woo.html", ".."),
        ("/wibble/wobble/woo.html", "../.."),
        // Modules
        ("string", "."),
        ("gleam/string", ".."),
        ("gleam/string/inspect", "../.."),
    ];
    for (path, expected) in cases {
        assert_eq!(page_unnest(path), expected);
    }
}
/// Produces alternative spellings (`parent.child` and `parent child`) so that
/// a search for a qualified name still finds the item.
fn import_synonyms(parent: &str, child: &str) -> String {
    let mut text = String::from("Synonyms:\n");
    text.push_str(parent);
    text.push('.');
    text.push_str(child);
    text.push('\n');
    text.push_str(parent);
    text.push(' ');
    text.push_str(child);
    text
}
/// Extracts a plain-text version of a documentation comment for search
/// indexing, stripping code-fence markers.
fn text_documentation(doc: &Option<(u32, EcoString)>) -> String {
    // TODO: parse markdown properly and extract the text nodes
    match doc {
        Some((_, text)) => text.to_string().replace("```gleam", "").replace("```", ""),
        None => String::new(),
    }
}
/// Renders a documentation comment to HTML, or returns an empty string when
/// there is no documentation.
fn markdown_documentation(doc: &Option<(u32, EcoString)>) -> String {
    match doc.as_ref() {
        Some((_, text)) => render_markdown(text, MarkdownSource::Comment),
        None => String::new(),
    }
}
/// An enum to represent the source of a Markdown string to render.
enum MarkdownSource {
    /// A Markdown string that comes from the documentation of a
    /// definition/module. This means that each line is going to be preceded by
    /// a whitespace (the space after `///`), which `render_markdown` strips.
    Comment,
    /// A Markdown string coming from a standalone file like a README.md,
    /// rendered as-is.
    Standalone,
}
/// Renders Markdown to HTML with all pulldown-cmark extensions enabled.
fn render_markdown(text: &str, source: MarkdownSource) -> String {
    let text = match source {
        MarkdownSource::Standalone => text.into(),
        // Doc comments start with "///\s", which can confuse the markdown parser
        // and prevent tables from rendering correctly, so remove that first space.
        MarkdownSource::Comment => {
            let stripped: Vec<&str> = text
                .split('\n')
                .map(|line| line.strip_prefix(' ').unwrap_or(line))
                .collect();
            stripped.join("\n")
        }
    };
    let mut rendered = String::with_capacity(text.len() * 3 / 2);
    let parser = pulldown_cmark::Parser::new_ext(&text, pulldown_cmark::Options::all());
    pulldown_cmark::html::push_html(&mut rendered, parser);
    rendered
}
/// A name/URL pair rendered as a link in the documentation page templates.
#[derive(PartialEq, Eq, PartialOrd, Ord, Clone)]
struct Link {
    name: String,
    path: String,
}
/// A constructor of a custom type as shown on a module documentation page.
#[derive(PartialEq, Eq, PartialOrd, Ord, Debug)]
struct TypeConstructor {
    definition: String,
    // Plain-text definition; folded into the search index by
    // `search_item_for_type`.
    raw_definition: String,
    documentation: String,
    // Documentation stripped to plain text for search indexing.
    text_documentation: String,
    arguments: Vec<TypeConstructorArg>,
}
/// A labelled argument of a type constructor, with rendered and plain-text
/// documentation.
#[derive(PartialEq, Eq, PartialOrd, Ord, Debug)]
struct TypeConstructorArg {
    name: String,
    doc: String,
    text_documentation: String,
}
/// A type definition as shown on a module documentation page.
#[derive(PartialEq, Eq, PartialOrd, Ord, Debug)]
struct TypeDefinition<'a> {
    name: &'a str,
    definition: String,
    // Plain-text definition; folded into the search index.
    raw_definition: String,
    documentation: String,
    constructors: Vec<TypeConstructor>,
    text_documentation: String,
    // Link to the definition in the source repository.
    source_url: String,
    deprecation_message: String,
    opaque: bool,
}
/// A module value (function or constant) as shown on a module documentation
/// page.
#[derive(PartialEq, Eq, PartialOrd, Ord)]
struct DocsValues<'a> {
    name: &'a str,
    definition: String,
    // Plain-text definition; folded into the search index.
    raw_definition: String,
    documentation: String,
    text_documentation: String,
    // Link to the definition in the source repository.
    source_url: String,
    deprecation_message: String,
}
/// Askama template for a standalone documentation page (README or a
/// user-supplied extra page).
#[derive(Template)]
#[template(path = "documentation_page.html")]
struct PageTemplate<'a> {
    gleam_version: &'a str,
    // Relative prefix (".", "..", …) back to the documentation root.
    unnest: &'a str,
    host: &'a str,
    page_title: &'a str,
    page_meta_description: &'a str,
    file_path: &'a Utf8PathBuf,
    project_name: &'a str,
    project_version: &'a str,
    pages: &'a [Link],
    links: &'a [Link],
    modules: &'a [Link],
    // Pre-rendered HTML body of the page.
    content: String,
    rendering_timestamp: &'a str,
}
/// Askama template for a module's documentation page.
#[derive(Template)]
#[template(path = "documentation_module.html")]
struct ModuleTemplate<'a> {
    gleam_version: &'a str,
    // Relative prefix (".", "..", …) back to the documentation root.
    unnest: String,
    host: &'a str,
    page_title: &'a str,
    page_meta_description: &'a str,
    file_path: &'a Utf8PathBuf,
    module_name: EcoString,
    project_name: &'a str,
    project_version: &'a str,
    pages: &'a [Link],
    links: &'a [Link],
    modules: &'a [Link],
    types: Vec<TypeDefinition<'a>>,
    values: Vec<DocsValues<'a>>,
    // Pre-rendered HTML of the module-level documentation.
    documentation: String,
    rendering_timestamp: &'a str,
}
/// Search data for use by Hexdocs search, as well as the search built-in to
/// generated documentation. Serialised to `search-data.json`.
#[derive(Serialize, PartialEq, Eq, PartialOrd, Ord, Clone)]
struct SearchData {
    items: Vec<SearchItem>,
    #[serde(rename = "proglang")]
    programming_language: SearchProgrammingLanguage,
}
/// A single item that can appear as a search result. The serde renames match
/// the field names the search JavaScript expects.
#[derive(Serialize, PartialEq, Eq, PartialOrd, Ord, Clone)]
struct SearchItem {
    /// The type of item this is: Value, Type, Module, or other Page
    #[serde(rename = "type")]
    type_: SearchItemType,
    /// The title of the module or package containing this search item
    #[serde(rename = "parentTitle")]
    parent_title: String,
    /// The title of this item
    title: String,
    /// Markdown text which describes this item, containing documentation from
    /// doc comments, as well as rendered definitions of types and values.
    #[serde(rename = "doc")]
    content: String,
    /// The relative URL to the documentation for this search item, for example
    /// `gleam/option.html#Option`
    #[serde(rename = "ref")]
    reference: String,
}
/// The kind of documentation entity a search result points at.
#[derive(Serialize, PartialEq, Eq, PartialOrd, Ord, Clone)]
#[serde(rename_all = "lowercase")]
enum SearchItemType {
    Value,
    Module,
    Page,
    Type,
}
/// The language tag stored in the search data. Hexdocs uses this field for
/// other BEAM languages too, hence the commented-out variants.
#[derive(Serialize, PartialEq, Eq, PartialOrd, Ord, Clone)]
#[serde(rename_all = "lowercase")]
enum SearchProgrammingLanguage {
    // Elixir,
    // Erlang,
    Gleam,
}
#[test]
fn package_config_to_json() {
    // A gleam.toml exercising every configuration section, round-tripped
    // through the exported JSON format.
    let input = r#"
name = "my_project"
version = "1.0.0"
licences = ["Apache-2.0", "MIT"]
description = "Pretty complex config"
target = "erlang"
repository = { type = "github", user = "example", repo = "my_dep" }
links = [{ title = "Home page", href = "https://example.com" }]
internal_modules = ["my_app/internal"]
gleam = ">= 0.30.0"
[dependencies]
gleam_stdlib = ">= 0.18.0 and < 2.0.0"
my_other_project = { path = "../my_other_project" }
[dev-dependencies]
gleeunit = ">= 1.0.0 and < 2.0.0"
[documentation]
pages = [{ title = "My Page", path = "my-page.html", source = "./path/to/my-page.md" }]
[erlang]
application_start_module = "my_app/application"
extra_applications = ["inets", "ssl"]
[javascript]
typescript_declarations = true
runtime = "node"
[javascript.deno]
allow_all = false
allow_ffi = true
allow_env = ["DATABASE_URL"]
allow_net = ["example.com:443"]
allow_read = ["./database.sqlite"]
"#;
    let config = toml::from_str::<PackageConfig>(&input).unwrap();
    let info = PackageInformation {
        package_config: config.clone(),
    };
    // The snapshot pins the exact JSON shape of the exported information.
    let json = package_information_as_json(config);
    let output = format!("--- GLEAM.TOML\n{input}\n\n--- EXPORTED JSON\n\n{json}");
    insta::assert_snapshot!(output);
    // The JSON must deserialise back to an identical configuration.
    let roundtrip: PackageInformation = serde_json::from_str(&json).unwrap();
    assert_eq!(info, roundtrip);
}
#[test]
fn barebones_package_config_to_json() {
    // A minimal gleam.toml: every optional field should get a sensible
    // default in the exported JSON, pinned by the snapshot.
    let input = r#"
name = "my_project"
version = "1.0.0"
"#;
    let config = toml::from_str::<PackageConfig>(&input).unwrap();
    let json = package_information_as_json(config);
    let output = format!("--- GLEAM.TOML\n{input}\n\n--- EXPORTED JSON\n\n{json}");
    insta::assert_snapshot!(output);
}
| rust | Apache-2.0 | f424547f02e621f1c5f28749786e05eda7feb098 | 2026-01-04T15:40:22.554517Z | false |
gleam-lang/gleam | https://github.com/gleam-lang/gleam/blob/f424547f02e621f1c5f28749786e05eda7feb098/compiler-core/src/graph.rs | compiler-core/src/graph.rs | //! General functions for working with graphs.
use petgraph::{Direction, prelude::NodeIndex, stable_graph::StableGraph};
/// Sort a graph into a sequence from the leaves to the roots.
///
/// Nodes are returned in their smallest possible groups, which is either a
/// leaf or a cycle.
///
/// This function is implemented using `pop_leaf_or_cycle`.
///
pub fn into_dependency_order<N, E>(mut graph: StableGraph<N, E>) -> Vec<Vec<NodeIndex>> {
    // Remove all self-edges from the graph first: `pop_leaf_or_cycle` panics
    // on them.
    graph.retain_edges(|graph, edge| {
        graph
            .edge_endpoints(edge)
            .map(|(from, to)| from != to)
            .unwrap_or(false)
    });
    let mut groups = vec![];
    let mut current = pop_leaf_or_cycle(&mut graph);
    while !current.is_empty() {
        groups.push(current);
        current = pop_leaf_or_cycle(&mut graph);
    }
    groups
}
/// The same as `leaf_or_cycle` but removes the nodes from the graph.
/// See the docs there for more details.
///
/// # Panics
///
/// Panics if the graph contains a self-edge.
///
fn pop_leaf_or_cycle<N, E>(graph: &mut StableGraph<N, E>) -> Vec<NodeIndex> {
    let nodes = leaf_or_cycle(graph);
    nodes.iter().for_each(|node| {
        _ = graph.remove_node(*node);
    });
    nodes
}
/// Return a leaf from the graph. If there are no leaves then the largest cycle
/// is returned instead.
///
/// If there are no leaves or cycles then an empty vector is returned.
///
/// The nodes returned are not removed from the graph.
///
/// # Panics
///
/// Panics if the graph contains a self-edge.
///
fn leaf_or_cycle<N, E>(graph: &StableGraph<N, E>) -> Vec<NodeIndex> {
    if graph.node_count() == 0 {
        return vec![];
    }
    // Find a leaf, returning one if found.
    // A leaf here is a node with no outgoing edges, i.e. it depends on
    // nothing else.
    for node in graph.node_indices() {
        let mut outgoing = graph.neighbors_directed(node, Direction::Outgoing);
        let referenced = outgoing.next();
        if referenced == Some(node) {
            panic!("Self edge found in graph");
        }
        // This is a leaf.
        if referenced.is_none() {
            return vec![node];
        }
    }
    // No leaves were found, so find a cycle.
    // We use a toposort to find the start of the cycle.
    let start = petgraph::algo::toposort(&graph, None)
        .expect_err("Non-empty graph has no leaves or cycles")
        .node_id();
    // Then traverse the graph to find nodes in the cycle.
    // This traverses all possible paths to find a cycle, this can likely be
    // optimised. There's not a large number of functions in a module however so
    // this is tolerable in this specific instance.
    #[derive(Debug)]
    enum Step {
        // Marker pushed before a node's children so we know when to pop the
        // node off the current path again.
        Backtrack,
        Next(NodeIndex),
    }
    let mut path = vec![];
    let mut stack = vec![Step::Next(start)];
    let mut cycles = vec![];
    while let Some(step) = stack.pop() {
        let node = match step {
            // We have processed all the nodes in the branch so backtrack,
            // popping the node off the path.
            Step::Backtrack => {
                _ = path.pop();
                continue;
            }
            Step::Next(node) => node,
        };
        if path.contains(&node) {
            continue;
        }
        // Add this node to the path and record the point at which we need to
        // backtrack in order to go back up the tree.
        stack.push(Step::Backtrack);
        path.push(node);
        // Check each child & add them to the stack if they are not the target.
        for node in graph.neighbors_directed(node, Direction::Outgoing) {
            if node == start {
                // Reaching the start node again means the current path is a
                // cycle through `start`.
                cycles.push(path.clone());
            } else {
                stack.push(Step::Next(node));
            }
        }
    }
    // The largest cycle is returned so all mutually-dependent nodes end up in
    // one group.
    cycles
        .into_iter()
        .max_by_key(|x| x.len())
        .expect("Could not find cycle for toposort returned start node")
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn leaf_or_cycle_empty() {
        // An empty graph yields no groups.
        let mut graph: StableGraph<(), ()> = StableGraph::new();
        assert!(pop_leaf_or_cycle(&mut graph).is_empty());
    }

    #[test]
    fn leaf_or_cycle_1() {
        let mut graph: StableGraph<(), ()> = StableGraph::new();
        let a = graph.add_node(());
        assert_eq!(into_dependency_order(graph), vec![vec![a]]);
    }

    #[test]
    fn leaf_or_cycle_2() {
        // Two unconnected nodes come out as two separate groups.
        let mut graph: StableGraph<(), ()> = StableGraph::new();
        let a = graph.add_node(());
        let b = graph.add_node(());
        assert_eq!(into_dependency_order(graph), vec![vec![a], vec![b]]);
    }

    #[test]
    fn leaf_or_cycle_3() {
        let mut graph: StableGraph<(), ()> = StableGraph::new();
        // Here a depends on b so b must come before a
        let a = graph.add_node(());
        let b = graph.add_node(());
        let c = graph.add_node(());
        _ = graph.add_edge(a, b, ());
        assert_eq!(
            into_dependency_order(graph),
            vec![vec![b], vec![a], vec![c]]
        );
    }

    #[test]
    fn leaf_or_cycle_4() {
        // a depends on both b and c, so both come before a.
        let mut graph: StableGraph<(), ()> = StableGraph::new();
        let a = graph.add_node(());
        let b = graph.add_node(());
        let c = graph.add_node(());
        _ = graph.add_edge(a, b, ());
        _ = graph.add_edge(a, c, ());
        assert_eq!(
            into_dependency_order(graph),
            vec![vec![b], vec![c], vec![a]]
        );
    }

    #[test]
    fn leaf_or_cycle_5() {
        // a and b form a cycle so they come out as one group.
        let mut graph: StableGraph<(), ()> = StableGraph::new();
        let a = graph.add_node(());
        let b = graph.add_node(());
        let c = graph.add_node(());
        _ = graph.add_edge(a, b, ());
        _ = graph.add_edge(b, a, ());
        assert_eq!(into_dependency_order(graph), vec![vec![c], vec![b, a]]);
    }

    #[test]
    fn leaf_or_cycle_6() {
        // a, b, and c form a three-node cycle which d depends on.
        let mut graph: StableGraph<(), ()> = StableGraph::new();
        let a = graph.add_node(());
        let b = graph.add_node(());
        let c = graph.add_node(());
        let d = graph.add_node(());
        _ = graph.add_edge(a, b, ());
        _ = graph.add_edge(b, c, ());
        _ = graph.add_edge(c, a, ());
        _ = graph.add_edge(d, a, ());
        assert_eq!(into_dependency_order(graph), vec![vec![c, a, b], vec![d]]);
    }

    #[test]
    fn leaf_or_cycle_7() {
        let mut graph: StableGraph<(), ()> = StableGraph::new();
        let a = graph.add_node(());
        let b = graph.add_node(());
        _ = graph.add_edge(a, a, ());
        _ = graph.add_edge(a, b, ());
        _ = graph.add_edge(b, b, ());
        // Here there are no true leafs, only cycles. However, b is in a loop
        // with itself so counts as a leaf as far as we are concerned.
        assert_eq!(into_dependency_order(graph), vec![vec![b], vec![a]]);
    }

    #[test]
    fn leaf_or_cycle_8() {
        let mut graph: StableGraph<(), ()> = StableGraph::new();
        let a = graph.add_node(());
        let b = graph.add_node(());
        _ = graph.add_edge(a, a, ());
        _ = graph.add_edge(a, b, ());
        _ = graph.add_edge(b, b, ());
        _ = graph.add_edge(b, b, ());
        _ = graph.add_edge(b, b, ());
        // Here there are no true leafs, only cycles. However, b is in a loop
        // with itself so counts as a leaf as far as we are concerned.
        // This is different from the previous test as there are multiple self
        // references for node b.
        assert_eq!(into_dependency_order(graph), vec![vec![b], vec![a]]);
    }

    #[test]
    fn leaf_or_cycle_9() {
        let mut graph: StableGraph<(), ()> = StableGraph::new();
        let a = graph.add_node(());
        let b = graph.add_node(());
        let c = graph.add_node(());
        _ = graph.add_edge(a, a, ());
        _ = graph.add_edge(a, b, ());
        _ = graph.add_edge(b, b, ());
        _ = graph.add_edge(b, c, ());
        _ = graph.add_edge(c, b, ());
        _ = graph.add_edge(c, c, ());
        // Here there are no true leafs, only cycles. However, b is in a loop
        // with itself so counts as a leaf as far as we are concerned.
        // This is different from the previous test as there are multiple self
        // references for node b.
        assert_eq!(into_dependency_order(graph), vec![vec![c, b], vec![a]]);
    }
}
| rust | Apache-2.0 | f424547f02e621f1c5f28749786e05eda7feb098 | 2026-01-04T15:40:22.554517Z | false |
gleam-lang/gleam | https://github.com/gleam-lang/gleam/blob/f424547f02e621f1c5f28749786e05eda7feb098/compiler-core/src/erlang.rs | compiler-core/src/erlang.rs | // TODO: Refactor this module to be methods on structs rather than free
// functions with a load of arguments. See the JavaScript code generator and the
// formatter for examples.
mod pattern;
#[cfg(test)]
mod tests;
use crate::build::{Target, module_erlang_name};
use crate::erlang::pattern::{PatternPrinter, StringPatternAssignment};
use crate::strings::{convert_string_escape_chars, to_snake_case};
use crate::type_::is_prelude_module;
use crate::{
Result,
ast::{Function, *},
docvec,
line_numbers::LineNumbers,
pretty::*,
type_::{
ModuleValueConstructor, PatternConstructor, Type, TypeVar, TypedCallArg, ValueConstructor,
ValueConstructorVariant,
},
};
use camino::Utf8Path;
use ecow::{EcoString, eco_format};
use itertools::Itertools;
use regex::{Captures, Regex};
use std::collections::HashSet;
use std::sync::OnceLock;
use std::{collections::HashMap, ops::Deref, str::FromStr, sync::Arc};
use vec1::Vec1;
// Number of spaces for one level of indentation in generated Erlang.
const INDENT: isize = 4;
// Maximum line width the pretty printer aims for.
const MAX_COLUMNS: isize = 80;
/// Converts a Gleam module name (`one/two`) into its Erlang module atom
/// (`one@two`).
fn module_name_atom(module: &str) -> Document<'static> {
    let erlang_name = module.replace('/', "@");
    atom_string(erlang_name.into())
}
/// Code-generation state for a single function being printed as Erlang.
#[derive(Debug, Clone)]
struct Env<'a> {
    module: &'a str,
    function: &'a str,
    line_numbers: &'a LineNumbers,
    // Set when a printed function carries documentation, so the module
    // emits the `?DOC`/`?MODULEDOC` macro definitions.
    needs_function_docs: bool,
    // Set when `echo` was used, so the echo template is appended to the
    // module.
    echo_used: bool,
    // Variables visible in the current lexical scope, mapped to the numeric
    // suffix currently used for that name.
    current_scope_vars: im::HashMap<String, usize>,
    // Every variable name seen anywhere in the Erlang function so far; used
    // to pick a fresh suffix when a name is rebound.
    erl_function_scope_vars: im::HashMap<String, usize>,
}
impl<'env> Env<'env> {
    /// Creates a fresh environment for printing one function. The discard
    /// name `_` is pre-registered with suffix 0 so it always prints bare.
    pub fn new(module: &'env str, function: &'env str, line_numbers: &'env LineNumbers) -> Self {
        let vars: im::HashMap<_, _> = std::iter::once(("_".into(), 0)).collect();
        Self {
            current_scope_vars: vars.clone(),
            erl_function_scope_vars: vars,
            needs_function_docs: false,
            echo_used: false,
            line_numbers,
            function,
            module,
        }
    }

    /// Returns the Erlang variable for `name` in the current scope,
    /// registering it with suffix 0 if it has not been seen yet. Rebound
    /// names print with a `@n` suffix (e.g. `X@1`) to stay unique, since
    /// Erlang variables cannot be reassigned.
    pub fn local_var_name<'a>(&mut self, name: &str) -> Document<'a> {
        match self.current_scope_vars.get(name) {
            None => {
                let _ = self.current_scope_vars.insert(name.to_string(), 0);
                let _ = self.erl_function_scope_vars.insert(name.to_string(), 0);
                variable_name(name).to_doc()
            }
            Some(0) => variable_name(name).to_doc(),
            Some(n) => {
                use std::fmt::Write;
                let mut name = variable_name(name);
                write!(name, "@{n}").expect("pushing number suffix to name");
                name.to_doc()
            }
        }
    }

    /// Registers a rebinding of `name`: bumps the suffix to one past the
    /// highest used anywhere in this Erlang function, records it in the
    /// current scope, and returns the new variable.
    pub fn next_local_var_name<'a>(&mut self, name: &str) -> Document<'a> {
        let next = self.erl_function_scope_vars.get(name).map_or(0, |i| i + 1);
        let _ = self.erl_function_scope_vars.insert(name.to_string(), next);
        let _ = self.current_scope_vars.insert(name.to_string(), next);
        self.local_var_name(name)
    }
}
/// Returns `(constructor name, record definition)` pairs for every public,
/// used custom-type constructor whose arguments are all labelled, so that an
/// Erlang `-record` header can be generated for it.
pub fn records(module: &TypedModule) -> Vec<(&str, String)> {
    module
        .definitions
        .custom_types
        .iter()
        // Only public types that are actually used get record headers.
        .filter(|custom_type| {
            custom_type.publicity.is_public()
                && !module
                    .unused_definition_positions
                    .contains(&custom_type.location.start)
        })
        .flat_map(|custom_type| &custom_type.constructors)
        // Constructors with no arguments are plain atoms, not records.
        .filter(|constructor| !constructor.arguments.is_empty())
        .filter_map(|constructor| {
            constructor
                .arguments
                .iter()
                .map(
                    |RecordConstructorArg {
                         label,
                         ast: _,
                         location: _,
                         type_,
                         ..
                     }| {
                        label
                            .as_ref()
                            .map(|(_, label)| (label.as_str(), type_.clone()))
                    },
                )
                // `collect::<Option<_>>` drops constructors with any
                // unlabelled argument, as records need named fields.
                .collect::<Option<Vec<_>>>()
                .map(|fields| (constructor.name.as_str(), fields))
        })
        .map(|(name, fields)| (name, record_definition(name, &fields)))
        .collect()
}
/// Renders a single Erlang `-record(name, {field :: type, …}).` definition
/// for a constructor with the given labelled fields.
pub fn record_definition(name: &str, fields: &[(&str, Arc<Type>)]) -> String {
    let name = to_snake_case(name);
    // Type variables are printed as `any()` since record headers have no
    // way to bind them.
    let type_printer = TypePrinter::new("").var_as_any();
    let fields = fields.iter().map(move |(name, type_)| {
        let type_ = type_printer.print(type_);
        docvec![atom_string((*name).into()), " :: ", type_.group()]
    });
    let fields = break_("", "")
        .append(join(fields, break_(",", ", ")))
        .nest(INDENT)
        .append(break_("", ""))
        .group();
    docvec!["-record(", atom_string(name), ", {", fields, "}).", line()]
        .to_pretty_string(MAX_COLUMNS)
}
/// Renders a type-checked module to Erlang source text.
pub fn module<'a>(
    module: &'a TypedModule,
    line_numbers: &'a LineNumbers,
    root: &'a Utf8Path,
) -> Result<String> {
    let document = module_document(module, line_numbers, root)?;
    Ok(document.to_pretty_string(MAX_COLUMNS))
}
/// Builds the pretty-printer document for a whole Erlang module: the
/// `-module` header, compile attributes, exports, type definitions, and the
/// function bodies.
fn module_document<'a>(
    module: &'a TypedModule,
    line_numbers: &'a LineNumbers,
    root: &'a Utf8Path,
) -> Result<Document<'a>> {
    let mut exports = vec![];
    let mut type_defs = vec![];
    let mut type_exports = vec![];
    let header = "-module("
        .to_doc()
        .append(module.erlang_name())
        .append(").")
        .append(line());
    // We need to know which private functions are referenced in importable
    // constants so that we can export them anyway in the generated Erlang.
    // This is because otherwise when the constant is used in another module it
    // would result in an error as it tries to reference this private function.
    let overridden_publicity = find_private_functions_referenced_in_importable_constants(module);
    for function in &module.definitions.functions {
        register_function_exports(function, &mut exports, &overridden_publicity);
    }
    for custom_type in &module.definitions.custom_types {
        register_custom_type_exports(custom_type, &mut type_exports, &mut type_defs, &module.name);
    }
    // A module with nothing to export is just the header.
    let exports = match (!exports.is_empty(), !type_exports.is_empty()) {
        (false, false) => return Ok(header),
        (true, false) => "-export(["
            .to_doc()
            .append(join(exports, ", ".to_doc()))
            .append("]).")
            .append(lines(2)),
        (true, true) => "-export(["
            .to_doc()
            .append(join(exports, ", ".to_doc()))
            .append("]).")
            .append(line())
            .append("-export_type([")
            .to_doc()
            .append(join(type_exports, ", ".to_doc()))
            .append("]).")
            .append(lines(2)),
        (false, true) => "-export_type(["
            .to_doc()
            .append(join(type_exports, ", ".to_doc()))
            .append("]).")
            .append(lines(2)),
    };
    let type_defs = if type_defs.is_empty() {
        nil()
    } else {
        join(type_defs, lines(2)).append(lines(2))
    };
    // The source path is embedded relative to the project root, with
    // backslashes escaped for Windows paths.
    let src_path_full = &module.type_info.src_path;
    let src_path_relative = EcoString::from(
        src_path_full
            .strip_prefix(root)
            .unwrap_or(src_path_full)
            .as_str(),
    )
    .replace("\\", "\\\\");
    let mut needs_function_docs = false;
    let mut echo_used = false;
    let mut statements = vec![];
    for function in &module.definitions.functions {
        if let Some((statement_document, env)) = module_function(
            function,
            &module.name,
            module.type_info.is_internal,
            line_numbers,
            src_path_relative.clone(),
            &module.unused_definition_positions,
        ) {
            needs_function_docs = needs_function_docs || env.needs_function_docs;
            echo_used = echo_used || env.echo_used;
            statements.push(statement_document);
        }
    }
    let module_doc = if module.type_info.is_internal {
        Some(hidden_module_doc().append(lines(2)))
    } else if module.documentation.is_empty() {
        None
    } else {
        Some(module_doc(&module.documentation).append(lines(2)))
    };
    // We're going to need the documentation directives if any of the module's
    // functions need it, or if the module has a module comment that we want to
    // include in the generated Erlang source, or if the module is internal.
    let needs_doc_directive = needs_function_docs || module_doc.is_some();
    let documentation_directive = if needs_doc_directive {
        "-if(?OTP_RELEASE >= 27).
-define(MODULEDOC(Str), -moduledoc(Str)).
-define(DOC(Str), -doc(Str)).
-else.
-define(MODULEDOC(Str), -compile([])).
-define(DOC(Str), -compile([])).
-endif."
            .to_doc()
            .append(lines(2))
    } else {
        nil()
    };
    let module = docvec![
        header,
        "-compile([no_auto_import, nowarn_unused_vars, nowarn_unused_function, nowarn_nomatch, inline]).",
        line(),
        "-define(FILEPATH, \"",
        src_path_relative,
        "\").",
        line(),
        exports,
        documentation_directive,
        module_doc,
        type_defs,
        join(statements, lines(2)),
    ];
    // The echo support code is only appended when echo was actually used.
    let module = if echo_used {
        module
            .append(lines(2))
            .append(std::include_str!("../templates/echo.erl").to_doc())
    } else {
        module
    };
    Ok(module.append(line()))
}
/// Adds an `name/arity` entry to `exports` for this function if it should be
/// exported from the generated Erlang module.
///
/// A function is exported when it has an Erlang implementation and is either
/// importable, or is a private function whose publicity was overridden because
/// it is referenced from an importable constant (`overridden_publicity`).
fn register_function_exports(
    function: &TypedFunction,
    exports: &mut Vec<Document<'_>>,
    overridden_publicity: &im::HashSet<EcoString>,
) {
    let Function {
        publicity,
        name: Some((_, name)),
        arguments,
        implementations,
        ..
    } = function
    else {
        // Unnamed functions cannot be exported.
        return;
    };
    // If the function isn't for this target then don't attempt to export it
    if implementations.supports(Target::Erlang)
        && (publicity.is_importable() || overridden_publicity.contains(name))
    {
        let function_name = escape_erlang_existing_name(name);
        exports.push(
            atom_string(function_name.into())
                .append("/")
                .append(arguments.len()),
        )
    }
}
/// Registers a custom type in the module's `-export_type` list and appends its
/// `-type`/`-opaque` definition document to `type_defs`.
fn register_custom_type_exports(
    custom_type: &TypedCustomType,
    type_exports: &mut Vec<Document<'_>>,
    type_defs: &mut Vec<Document<'_>>,
    module_name: &str,
) {
    let TypedCustomType {
        name,
        constructors,
        opaque,
        typed_parameters,
        external_erlang,
        ..
    } = custom_type;
    // Erlang doesn't allow phantom type variables in type definitions but Gleam
    // does, so we check the type declaration against its constructors and
    // generate a phantom value that uses the unused type variables.
    let type_var_usages = collect_type_var_usages(HashMap::new(), typed_parameters);
    let mut constructor_var_usages = HashMap::new();
    for c in constructors {
        constructor_var_usages =
            collect_type_var_usages(constructor_var_usages, c.arguments.iter().map(|a| &a.type_));
    }
    // A type variable is phantom when it appears in the type's parameters but
    // in none of its constructors' arguments.
    let phantom_vars: Vec<_> = type_var_usages
        .keys()
        .filter(|&id| !constructor_var_usages.contains_key(id))
        .sorted()
        .map(|&id| Type::Var {
            type_: Arc::new(std::cell::RefCell::new(TypeVar::Generic { id })),
        })
        .collect();
    // Extra `{gleam_phantom, Var1, ...}` constructor mentioning the otherwise
    // unused type variables, so the Erlang definition is valid.
    let phantom_vars_constructor = if !phantom_vars.is_empty() {
        let type_printer = TypePrinter::new(module_name);
        Some(tuple(
            std::iter::once("gleam_phantom".to_doc())
                .chain(phantom_vars.iter().map(|pv| type_printer.print(pv))),
        ))
    } else {
        None
    };
    // Type Exports
    type_exports.push(
        erl_safe_type_name(to_snake_case(name))
            .to_doc()
            .append("/")
            .append(typed_parameters.len()),
    );
    // Type definitions
    let definition = if constructors.is_empty() {
        // No constructors: either map directly onto an external Erlang type,
        // or fall back to `any()` (plus the phantom constructor if needed).
        if let Some((module, external_type, _location)) = external_erlang {
            let printer = TypePrinter::new(module_name);
            docvec![
                module,
                ":",
                external_type,
                "(",
                join(
                    typed_parameters
                        .iter()
                        .map(|parameter| printer.print(parameter)),
                    ", ".to_doc()
                ),
                ")"
            ]
        } else {
            let constructors = std::iter::once("any()".to_doc()).chain(phantom_vars_constructor);
            join(constructors, break_(" |", " | "))
        }
    } else {
        // Each constructor becomes either a bare atom or a tagged tuple.
        let constructors = constructors
            .iter()
            .map(|constructor| {
                let name = atom_string(to_snake_case(&constructor.name));
                if constructor.arguments.is_empty() {
                    name
                } else {
                    let type_printer = TypePrinter::new(module_name);
                    let arguments = constructor
                        .arguments
                        .iter()
                        .map(|argument| type_printer.print(&argument.type_));
                    tuple(std::iter::once(name).chain(arguments))
                }
            })
            .chain(phantom_vars_constructor);
        join(constructors, break_(" |", " | "))
    }
    .nest(INDENT);
    let type_printer = TypePrinter::new(module_name);
    let params = join(
        typed_parameters
            .iter()
            .map(|type_| type_printer.print(type_)),
        ", ".to_doc(),
    );
    let doc = if *opaque { "-opaque " } else { "-type " }
        .to_doc()
        .append(erl_safe_type_name(to_snake_case(name)))
        .append("(")
        .append(params)
        .append(") :: ")
        .append(definition)
        .group()
        .append(".");
    type_defs.push(doc);
}
/// Generates the Erlang document for a single module function, together with
/// the `Env` used while generating it (so the caller can inspect flags such as
/// `needs_function_docs` and `echo_used`).
///
/// Returns `None` when nothing should be generated: the function is unused,
/// is a private external function, or has no Erlang implementation.
fn module_function<'a>(
    function: &'a TypedFunction,
    module: &'a str,
    is_internal_module: bool,
    line_numbers: &'a LineNumbers,
    src_path: EcoString,
    unused_definition_positions: &HashSet<u32>,
) -> Option<(Document<'a>, Env<'a>)> {
    // We don't generate any code for unused functions.
    if unused_definition_positions.contains(&function.location.start) {
        return None;
    }
    // Private external functions don't need to render anything, the underlying
    // Erlang implementation is used directly at the call site.
    if function.external_erlang.is_some() && function.publicity.is_private() {
        return None;
    }
    // If the function has no suitable Erlang implementation then there is nothing
    // to generate for it.
    if !function.implementations.supports(Target::Erlang) {
        return None;
    }
    let (_, function_name) = function
        .name
        .as_ref()
        .expect("A module's function must be named");
    let function_name = escape_erlang_existing_name(function_name);
    let file_attribute = file_attribute(src_path, function, line_numbers);
    let mut env = Env::new(module, function_name, line_numbers);
    // Collect type variables over the return type and all argument types so
    // shared variables print consistently in the spec.
    let var_usages = collect_type_var_usages(
        HashMap::new(),
        std::iter::once(&function.return_type).chain(function.arguments.iter().map(|a| &a.type_)),
    );
    let type_printer = TypePrinter::new(module).with_var_usages(&var_usages);
    let arguments_spec = function
        .arguments
        .iter()
        .map(|a| type_printer.print(&a.type_));
    let return_spec = type_printer.print(&function.return_type);
    let spec = fun_spec(function_name, arguments_spec, return_spec);
    let arguments = if function.external_erlang.is_some() {
        external_fun_arguments(&function.arguments, &mut env)
    } else {
        fun_arguments(&function.arguments, &mut env)
    };
    // External functions delegate to `module:function(Args)`, anything else
    // renders its Gleam body as a statement sequence.
    let body = function
        .external_erlang
        .as_ref()
        .map(|(module, function, _location)| {
            docvec![
                atom(module),
                ":",
                atom(escape_erlang_existing_name(function)),
                arguments.clone()
            ]
        })
        .unwrap_or_else(|| statement_sequence(&function.body, &mut env));
    let attributes = file_attribute;
    let attributes = if is_internal_module || function.publicity.is_internal() {
        // If a function is marked as internal or comes from an internal module
        // we want to hide its documentation in the Erlang shell!
        // So the doc directive will look like this: `-doc(false).`
        env.needs_function_docs = true;
        docvec![attributes, line(), hidden_function_doc()]
    } else {
        match &function.documentation {
            Some((_, documentation)) => {
                env.needs_function_docs = true;
                let doc_lines = documentation
                    .trim_end()
                    .split('\n')
                    .map(EcoString::from)
                    .collect_vec();
                docvec![attributes, line(), function_doc(&doc_lines)]
            }
            _ => attributes,
        }
    };
    Some((
        docvec![
            attributes,
            line(),
            spec,
            atom_string(escape_erlang_existing_name(function_name).into()),
            arguments,
            " ->",
            line().append(body).nest(INDENT).group(),
            ".",
        ],
        env,
    ))
}
fn file_attribute<'a>(
path: EcoString,
function: &'a TypedFunction,
line_numbers: &'a LineNumbers,
) -> Document<'a> {
let line = line_numbers.line_number(function.location.start);
docvec!["-file(\"", path, "\", ", line, ")."]
}
/// Which documentation attribute macro to emit: `?MODULEDOC` for module docs
/// or `?DOC` for function docs.
enum DocCommentKind {
    Module,
    Function,
}
/// The payload of a doc attribute: either the documentation lines themselves,
/// or `false` to explicitly hide the item's documentation.
enum DocCommentContent<'a> {
    String(&'a Vec<EcoString>),
    False,
}
/// Emits `?MODULEDOC(false).`, hiding the module's documentation.
fn hidden_module_doc<'a>() -> Document<'a> {
    doc_attribute(DocCommentKind::Module, DocCommentContent::False)
}
/// Emits `?MODULEDOC("...").` with the given documentation lines.
fn module_doc<'a>(content: &Vec<EcoString>) -> Document<'a> {
    doc_attribute(DocCommentKind::Module, DocCommentContent::String(content))
}
/// Emits `?DOC(false).`, hiding a function's documentation.
fn hidden_function_doc<'a>() -> Document<'a> {
    doc_attribute(DocCommentKind::Function, DocCommentContent::False)
}
/// Emits `?DOC("...").` with the given documentation lines.
fn function_doc<'a>(content: &Vec<EcoString>) -> Document<'a> {
    doc_attribute(DocCommentKind::Function, DocCommentContent::String(content))
}
/// Renders a `?MODULEDOC(...)` / `?DOC(...)` attribute. Multi-line content is
/// emitted as one escaped `"...\n"` literal per line, nested and indented
/// under the opening parenthesis.
fn doc_attribute<'a>(kind: DocCommentKind, content: DocCommentContent<'_>) -> Document<'a> {
    let prefix = match kind {
        DocCommentKind::Module => "?MODULEDOC",
        DocCommentKind::Function => "?DOC",
    };
    match content {
        DocCommentContent::False => prefix.to_doc().append("(false)."),
        DocCommentContent::String(doc_lines) => {
            let is_multiline_doc_comment = doc_lines.len() > 1;
            // Escape backslashes and quotes in each line, then join the
            // per-line string literals with line breaks.
            let doc_lines = join(
                doc_lines.iter().map(|line| {
                    let line = line.replace("\\", "\\\\").replace("\"", "\\\"");
                    docvec!["\"", line, "\\n\""]
                }),
                line(),
            );
            if is_multiline_doc_comment {
                let nested_documentation = docvec![line(), doc_lines].nest(INDENT);
                docvec![prefix, "(", nested_documentation, line(), ")."]
            } else {
                docvec![prefix, "(", doc_lines, ")."]
            }
        }
    }
}
/// Generates the argument list for a function with an external Erlang
/// implementation. Every argument gets a real variable name — even discarded
/// ones — because the arguments are forwarded to the external function.
fn external_fun_arguments<'a>(arguments: &'a [TypedArg], env: &mut Env<'a>) -> Document<'a> {
    wrap_arguments(arguments.iter().map(|argument| {
        let name = match &argument.names {
            ArgNames::Discard { name, .. }
            | ArgNames::LabelledDiscard { name, .. }
            | ArgNames::Named { name, .. }
            | ArgNames::NamedLabelled { name, .. } => name,
        };
        // Names made only of underscores get a synthetic name instead.
        let is_discarded = name.chars().all(|c| c == '_');
        if is_discarded {
            env.next_local_var_name("argument")
        } else {
            env.next_local_var_name(name)
        }
    }))
}
/// Generates the argument list for an ordinary function: named arguments get
/// fresh local variable names, discarded ones become `_`.
fn fun_arguments<'a>(arguments: &'a [TypedArg], env: &mut Env<'a>) -> Document<'a> {
    let argument_docs = arguments.iter().map(|argument| match &argument.names {
        ArgNames::Named { name, .. } | ArgNames::NamedLabelled { name, .. } => {
            env.next_local_var_name(name)
        }
        ArgNames::Discard { .. } | ArgNames::LabelledDiscard { .. } => "_".to_doc(),
    });
    wrap_arguments(argument_docs)
}
/// Wraps a sequence of argument documents in parentheses, allowing the group
/// to break over multiple (indented) lines when it does not fit.
fn wrap_arguments<'a, I>(arguments: I) -> Document<'a>
where
    I: IntoIterator<Item = Document<'a>>,
{
    let joined = join(arguments, break_(",", ", "));
    let inner = break_("", "").append(joined).nest(INDENT);
    inner
        .append(break_("", ""))
        .surround("(", ")")
        .group()
}
fn fun_spec<'a>(
name: &'a str,
arguments: impl IntoIterator<Item = Document<'a>>,
return_: Document<'a>,
) -> Document<'a> {
"-spec "
.to_doc()
.append(atom(name))
.append(wrap_arguments(arguments))
.append(" -> ")
.append(return_)
.append(".")
.append(line())
.group()
}
/// Renders an owned string as an Erlang atom document, quoting it when
/// required (see `escape_atom_string`).
fn atom_string(value: EcoString) -> Document<'static> {
    escape_atom_string(value).to_doc()
}
/// Regex matching atoms that are valid without quoting: a lowercase letter
/// followed by lowercase letters, digits, `_` or `@`. Compiled once, cached.
fn atom_pattern() -> &'static Regex {
    static ATOM_PATTERN: OnceLock<Regex> = OnceLock::new();
    ATOM_PATTERN.get_or_init(|| Regex::new(r"^[a-z][a-z0-9_@]*$").expect("atom RE regex"))
}
fn atom(value: &str) -> Document<'_> {
if is_erlang_reserved_word(value) {
// Escape because of keyword collision
eco_format!("'{value}'").to_doc()
} else if atom_pattern().is_match(value) {
// No need to escape
EcoString::from(value).to_doc()
} else {
// Escape because of characters contained
eco_format!("'{value}'").to_doc()
}
}
/// Returns the string as a valid Erlang atom, wrapping it in single quotes
/// when it is a reserved word or contains characters not allowed in an
/// unquoted atom.
pub fn escape_atom_string(value: EcoString) -> EcoString {
    if is_erlang_reserved_word(&value) {
        // Escape because of keyword collision
        eco_format!("'{value}'")
    } else if atom_pattern().is_match(&value) {
        // No need to escape
        value
    } else {
        // Escape because of characters contained
        eco_format!("'{value}'")
    }
}
/// Regex matching a run of backslashes followed by `u`, used to translate
/// Gleam's `\u` escape into Erlang's `\x`. Compiled once, cached.
fn unicode_escape_sequence_pattern() -> &'static Regex {
    static PATTERN: OnceLock<Regex> = OnceLock::new();
    PATTERN.get_or_init(|| {
        Regex::new(r#"(\\+)(u)"#).expect("Unicode escape sequence regex cannot be constructed")
    })
}
/// Converts Gleam's `\u` unicode escape sequences in a string literal into
/// Erlang's `\x` form, leaving escaped backslashes (`\\u`) untouched.
fn string_inner(value: &str) -> Document<'_> {
    let content = unicode_escape_sequence_pattern()
        // `\\u`-s should not be affected, so that "\\u..." is not converted to
        // "\\x...". That's why capturing groups is used to exclude cases that
        // shouldn't be replaced.
        .replace_all(value, |caps: &Captures<'_>| {
            let slashes = caps.get(1).map_or("", |m| m.as_str());
            // An even number of slashes means the `u` itself is not escaped.
            if slashes.len().is_multiple_of(2) {
                format!("{slashes}u")
            } else {
                format!("{slashes}x")
            }
        });
    EcoString::from(content).to_doc()
}
/// Renders a string literal as an Erlang UTF-8 binary: `<<"..."/utf8>>`.
fn string(value: &str) -> Document<'_> {
    string_inner(value).surround("<<\"", "\"/utf8>>")
}
/// Returns the length in UTF-8 bytes of a string literal once its escape
/// sequences have been converted to the characters they stand for.
fn string_length_utf8_bytes(string: &EcoString) -> usize {
    let converted = convert_string_escape_chars(string);
    converted.len()
}
/// Renders a sequence of documents as an Erlang tuple: `{a, b, c}`.
fn tuple<'a>(elements: impl IntoIterator<Item = Document<'a>>) -> Document<'a> {
    let contents = join(elements, break_(",", ", ")).nest(INDENT);
    contents.surround("{", "}").group()
}
/// Renders the segments of a constant string concatenation as an Erlang bit
/// array expression: `<<a, b>>`.
fn const_string_concatenate_bit_array<'a>(
    elements: impl IntoIterator<Item = Document<'a>>,
) -> Document<'a> {
    let contents = join(elements, break_(",", ", ")).nest(INDENT);
    contents.surround("<<", ">>").group()
}
/// Renders a constant string concatenation (`left <> right`) as a two-segment
/// Erlang bit array.
fn const_string_concatenate<'a>(
    left: &'a TypedConstant,
    right: &'a TypedConstant,
    env: &mut Env<'a>,
) -> Document<'a> {
    let segments = [
        const_string_concatenate_argument(left, env),
        const_string_concatenate_argument(right, env),
    ];
    const_string_concatenate_bit_array(segments)
}
/// Renders a nested constant string concatenation as bare segments, without
/// the surrounding `<<`/`>>` (used when flattening nested concatenations).
fn const_string_concatenate_inner<'a>(
    left: &'a TypedConstant,
    right: &'a TypedConstant,
    env: &mut Env<'a>,
) -> Document<'a> {
    let left_doc = const_string_concatenate_argument(left, env);
    let right_doc = const_string_concatenate_argument(right, env);
    join([left_doc, right_doc], break_(",", ", "))
}
/// Renders one side of a constant string concatenation as a bit array
/// segment: string literals (and constants resolving to them) become
/// `"..."/utf8` segments, nested concatenations are flattened, everything
/// else falls back to `const_inline`.
fn const_string_concatenate_argument<'a>(
    value: &'a TypedConstant,
    env: &mut Env<'a>,
) -> Document<'a> {
    match value {
        Constant::String { value, .. } => docvec!['"', string_inner(value), "\"/utf8"],
        Constant::Var {
            constructor: Some(constructor),
            ..
        } => match &constructor.variant {
            // A constant referencing another constant that is a string
            // literal gets inlined as a literal segment.
            ValueConstructorVariant::ModuleConstant {
                literal: Constant::String { value, .. },
                ..
            } => docvec!['"', string_inner(value), "\"/utf8"],
            ValueConstructorVariant::ModuleConstant {
                literal: Constant::StringConcatenation { left, right, .. },
                ..
            } => const_string_concatenate_inner(left, right, env),
            ValueConstructorVariant::LocalVariable { .. }
            | ValueConstructorVariant::ModuleConstant { .. }
            | ValueConstructorVariant::ModuleFn { .. }
            | ValueConstructorVariant::Record { .. } => const_inline(value, env),
        },
        Constant::StringConcatenation { left, right, .. } => {
            const_string_concatenate_inner(left, right, env)
        }
        Constant::Int { .. }
        | Constant::Float { .. }
        | Constant::Tuple { .. }
        | Constant::List { .. }
        | Constant::Record { .. }
        | Constant::RecordUpdate { .. }
        | Constant::BitArray { .. }
        | Constant::Var { .. }
        | Constant::Invalid { .. } => const_inline(value, env),
    }
}
/// Renders a string concatenation expression (`left <> right`) as a
/// two-segment Erlang bit array.
fn string_concatenate<'a>(
    left: &'a TypedExpr,
    right: &'a TypedExpr,
    env: &mut Env<'a>,
) -> Document<'a> {
    let segments = [
        string_concatenate_argument(left, env),
        string_concatenate_argument(right, env),
    ];
    bit_array(segments)
}
/// Renders one side of `<>` as a bit array segment: string literals (and
/// constants resolving to them) become `"..."/utf8`, local variables and
/// nested concatenations become `.../binary`, anything else is wrapped as
/// `(...)/binary`.
fn string_concatenate_argument<'a>(value: &'a TypedExpr, env: &mut Env<'a>) -> Document<'a> {
    match value {
        TypedExpr::Var {
            constructor:
                ValueConstructor {
                    variant:
                        ValueConstructorVariant::ModuleConstant {
                            literal: Constant::String { value, .. },
                            ..
                        },
                    ..
                },
            ..
        }
        | TypedExpr::String { value, .. } => docvec!['"', string_inner(value), "\"/utf8"],
        TypedExpr::Var {
            name,
            constructor:
                ValueConstructor {
                    variant: ValueConstructorVariant::LocalVariable { .. },
                    ..
                },
            ..
        } => docvec![env.local_var_name(name), "/binary"],
        TypedExpr::BinOp {
            name: BinOp::Concatenate,
            ..
        } => docvec![expr(value, env), "/binary"],
        TypedExpr::Int { .. }
        | TypedExpr::Float { .. }
        | TypedExpr::Block { .. }
        | TypedExpr::Pipeline { .. }
        | TypedExpr::Var { .. }
        | TypedExpr::Fn { .. }
        | TypedExpr::List { .. }
        | TypedExpr::Call { .. }
        | TypedExpr::BinOp { .. }
        | TypedExpr::Case { .. }
        | TypedExpr::RecordAccess { .. }
        | TypedExpr::PositionalAccess { .. }
        | TypedExpr::ModuleSelect { .. }
        | TypedExpr::Tuple { .. }
        | TypedExpr::TupleIndex { .. }
        | TypedExpr::Todo { .. }
        | TypedExpr::Panic { .. }
        | TypedExpr::Echo { .. }
        | TypedExpr::BitArray { .. }
        | TypedExpr::RecordUpdate { .. }
        | TypedExpr::NegateBool { .. }
        | TypedExpr::NegateInt { .. }
        | TypedExpr::Invalid { .. } => docvec!["(", maybe_block_expr(value, env), ")/binary"],
    }
}
/// Renders a sequence of segment documents as an Erlang bit array: `<<a, b>>`.
fn bit_array<'a>(elements: impl IntoIterator<Item = Document<'a>>) -> Document<'a> {
    let contents = join(elements, break_(",", ", ")).nest(INDENT);
    contents.surround("<<", ">>").group()
}
/// Renders a single constant bit array segment (one `Value:Size/Options`
/// entry inside `<<...>>`).
fn const_segment<'a>(
    value: &'a TypedConstant,
    options: &'a [TypedConstantBitArraySegmentOption],
    env: &mut Env<'a>,
) -> Document<'a> {
    let value_is_a_string_literal = matches!(value, Constant::String { .. });
    let create_document = |env: &mut Env<'a>| {
        match value {
            // Skip the normal <<value/utf8>> surrounds
            Constant::String { value, .. } => value.to_doc().surround("\"", "\""),
            // As normal
            Constant::Int { .. } | Constant::Float { .. } | Constant::BitArray { .. } => {
                const_inline(value, env)
            }
            // Wrap anything else in parentheses
            Constant::Tuple { .. }
            | Constant::List { .. }
            | Constant::Record { .. }
            | Constant::RecordUpdate { .. }
            | Constant::Var { .. }
            | Constant::StringConcatenation { .. }
            | Constant::Invalid { .. } => const_inline(value, env).surround("(", ")"),
        }
    };
    // A size expression that isn't an integer literal gets parenthesised.
    let size = |value: &'a TypedConstant, env: &mut Env<'a>| {
        if let Constant::Int { .. } = value {
            Some(":".to_doc().append(const_inline(value, env)))
        } else {
            Some(
                ":".to_doc()
                    .append(const_inline(value, env).surround("(", ")")),
            )
        }
    };
    let unit = |value: &'a u8| Some(eco_format!("unit:{value}").to_doc());
    bit_array_segment(
        create_document,
        options,
        size,
        unit,
        value_is_a_string_literal,
        false,
        env,
    )
}
/// Whether a statement sits in tail position of its enclosing sequence.
/// Forwarded to assignment generation, which handles the two cases
/// differently.
enum Position {
    Tail,
    NotTail,
}
/// Generates the document for a single statement, dispatching on its kind.
/// `position` is only consumed by assignment generation.
fn statement<'a>(
    statement: &'a TypedStatement,
    env: &mut Env<'a>,
    position: Position,
) -> Document<'a> {
    match statement {
        Statement::Expression(e) => expr(e, env),
        Statement::Assignment(a) => assignment(a, env, position),
        // A `use` statement compiles as the call it desugars to.
        Statement::Use(use_) => expr(&use_.call, env),
        Statement::Assert(a) => assert(a, env),
    }
}
/// Renders a single expression bit array segment (one `Value:Size/Options`
/// entry inside `<<...>>`).
fn expr_segment<'a>(
    value: &'a TypedExpr,
    options: &'a [BitArrayOption<TypedExpr>],
    env: &mut Env<'a>,
) -> Document<'a> {
    let value_is_a_string_literal = matches!(value, TypedExpr::String { .. });
    let create_document = |env: &mut Env<'a>| {
        match value {
            // Skip the normal <<value/utf8>> surrounds and set the string literal flag
            TypedExpr::String { value, .. } => string_inner(value).surround("\"", "\""),
            // As normal
            TypedExpr::Int { .. }
            | TypedExpr::Float { .. }
            | TypedExpr::Var { .. }
            | TypedExpr::BitArray { .. } => expr(value, env),
            // Wrap anything else in parentheses
            TypedExpr::Block { .. }
            | TypedExpr::Pipeline { .. }
            | TypedExpr::Fn { .. }
            | TypedExpr::List { .. }
            | TypedExpr::Call { .. }
            | TypedExpr::BinOp { .. }
            | TypedExpr::Case { .. }
            | TypedExpr::RecordAccess { .. }
            | TypedExpr::PositionalAccess { .. }
            | TypedExpr::ModuleSelect { .. }
            | TypedExpr::Tuple { .. }
            | TypedExpr::TupleIndex { .. }
            | TypedExpr::Todo { .. }
            | TypedExpr::Panic { .. }
            | TypedExpr::Echo { .. }
            | TypedExpr::RecordUpdate { .. }
            | TypedExpr::NegateBool { .. }
            | TypedExpr::NegateInt { .. }
            | TypedExpr::Invalid { .. } => expr(value, env).surround("(", ")"),
        }
    };
    let size = |expression: &'a TypedExpr, env: &mut Env<'a>| {
        // Integer literal sizes are emitted directly (underscores stripped).
        if let TypedExpr::Int { value, .. } = expression {
            let v = value.replace("_", "");
            let v = u64::from_str(&v).unwrap_or(0);
            Some(eco_format!(":{v}").to_doc())
        } else {
            let inner_expr = maybe_block_expr(expression, env).surround("(", ")");
            // The value of size must be a non-negative integer, we use lists:max here to ensure
            // it is at least 0.
            let value_guard = ":(lists:max(["
                .to_doc()
                .append(inner_expr)
                .append(", 0]))")
                .group();
            Some(value_guard)
        }
    };
    let unit = |value: &'a u8| Some(eco_format!("unit:{value}").to_doc());
    bit_array_segment(
        create_document,
        options,
        size,
        unit,
        value_is_a_string_literal,
        false,
        env,
    )
}
fn bit_array_segment<'a, Value: 'a, CreateDoc, SizeToDoc, UnitToDoc, State>(
mut create_document: CreateDoc,
options: &'a [BitArrayOption<Value>],
mut size_to_doc: SizeToDoc,
mut unit_to_doc: UnitToDoc,
value_is_a_string_literal: bool,
value_is_a_discard: bool,
state: &mut State,
) -> Document<'a>
where
CreateDoc: FnMut(&mut State) -> Document<'a>,
SizeToDoc: FnMut(&'a Value, &mut State) -> Option<Document<'a>>,
| rust | Apache-2.0 | f424547f02e621f1c5f28749786e05eda7feb098 | 2026-01-04T15:40:22.554517Z | true |
gleam-lang/gleam | https://github.com/gleam-lang/gleam/blob/f424547f02e621f1c5f28749786e05eda7feb098/compiler-core/src/derivation_tree.rs | compiler-core/src/derivation_tree.rs | use crate::error::wrap;
use ecow::EcoString;
use hexpm::version::Version;
use im::HashSet;
use itertools::Itertools;
use petgraph::Direction;
use petgraph::algo::all_simple_paths;
use petgraph::graph::NodeIndex;
use petgraph::prelude::StableGraph;
use pubgrub::External;
use pubgrub::{DerivationTree, Derived, Ranges};
use std::collections::HashMap;
use std::hash::RandomState;
use std::ops::Bound::{Excluded, Included, Unbounded};
use std::sync::Arc;
/// Like `format!`, but the resulting string is passed through
/// `crate::error::wrap` to wrap it to the standard error message width.
macro_rules! wrap_format {
    ($($tts:tt)*) => {
        wrap(&format!($($tts)*))
    }
}
/// Makes a best effort at turning a derivation tree into a nice readable error
/// message.
///
pub struct DerivationTreePrinter {
    /// The (simplified) pubgrub derivation tree explaining why version
    /// resolution failed.
    derivation_tree: DerivationTree<String, Ranges<Version>, String>,
    /// The name of the root package for which we're trying to add new
    /// dependencies. This is the starting point we use to find and report
    /// dependency conflicts!
    root_package_name: EcoString,
    /// The graph of dependencies built from the derivation tree. The nodes are
    /// packages and the arcs connecting them represent a dependency:
    ///
    /// ```txt
    /// wibble ---- (range1, range2) ---> wobble
    /// ```
    ///
    /// Means "package wibble with version `range1` requires package wobble
    /// with version `range2`".
    ///
    dependencies: StableGraph<String, (Ranges<Version>, Ranges<Version>)>,
    /// A map going from package name to its index in the dependencies graph.
    ///
    nodes: HashMap<String, NodeIndex>,
}
impl DerivationTreePrinter {
    /// Creates a printer for the given derivation tree, first simplifying the
    /// tree and then flattening it into a package dependency graph.
    pub fn new(
        root_package_name: EcoString,
        mut derivation_tree: DerivationTree<String, Ranges<Version>, String>,
    ) -> Self {
        // We start by trying to simplify the derivation tree as much as
        // possible.
        derivation_tree.collapse_no_versions();
        simplify_derivation_tree(&mut derivation_tree);
        let mut dependencies = StableGraph::new();
        let mut nodes = HashMap::new();
        build_dependencies_graph(&derivation_tree, &mut dependencies, &mut nodes);
        DerivationTreePrinter {
            root_package_name,
            derivation_tree,
            dependencies,
            nodes,
        }
    }
    /// Produces the error message: the pretty explanation when our heuristic
    /// works, otherwise a generic fallback.
    pub fn print(&self) -> String {
        self.pretty_explanation()
            .unwrap_or_else(|| self.fallback_explanation())
    }
    /// Tries to print a pretty explanation for the given resolution tree.
    /// If for some reason our heuristic to produce a nice error message fails
    /// we return `None` so we can still produce a good enough error message!
    ///
    fn pretty_explanation(&self) -> Option<String> {
        let root_package_index = self.nodes.get(self.root_package_name.as_str())?;
        let unresolvable_nodes = self.find_unresolvable_nodes();
        if unresolvable_nodes.is_empty() {
            return None;
        }
        let mut unresolvable = vec![];
        for unresolvable_node in unresolvable_nodes {
            // Every dependency chain from the root package down to the
            // unresolvable one is printed as its own bullet list.
            let paths = all_simple_paths::<Vec<_>, _, RandomState>(
                &self.dependencies,
                *root_package_index,
                unresolvable_node,
                0,
                None,
            );
            let package = self
                .dependencies
                .node_weight(unresolvable_node)
                .expect("package is in the graph");
            let heading = format!("There's no compatible version of `{package}`:");
            let explanation = paths.sorted().map(|path| self.pretty_path(path)).join("\n");
            unresolvable.push(format!("{heading}\n{explanation}"));
        }
        Some(unresolvable.join("\n\n"))
    }
    /// Renders one root-to-conflict dependency path as a bullet list of
    /// "x requires y <range>" lines.
    fn pretty_path(&self, path: Vec<NodeIndex>) -> String {
        let (you, dependee, rest) = match path.as_slice() {
            [you, dependee, rest @ ..] => (you, dependee, rest),
            _ => panic!("path with less than two nodes"),
        };
        let dependee_name = self
            .dependencies
            .node_weight(*dependee)
            .expect("path node is in the graph")
        let (_, dependee_range) = self
            .ranges_between(you, dependee)
            .expect("path edge is in the graph");
        let mut message = format!(
            " - You require {dependee_name} {}",
            pretty_range(dependee_range)
        );
        let mut previous = dependee;
        for next in rest {
            let previous_name = self
                .dependencies
                .node_weight(*previous)
                .expect("path node is in the graph");
            let next_name = self
                .dependencies
                .node_weight(*next)
                .expect("path node is in the graph");
            let (_, next_range) = self
                .ranges_between(previous, next)
                .expect("path edge is in the graph");
            message.push_str(&format!(
                "\n - {previous_name} requires {next_name} {}",
                pretty_range(next_range)
            ));
            previous = next;
        }
        message
    }
    /// A package is considered unresolvable when more than one package
    /// depends on it (more than one incoming edge in the dependency graph).
    fn find_unresolvable_nodes(&self) -> Vec<NodeIndex> {
        self.dependencies
            .node_indices()
            .filter(|node_index| {
                self.dependencies
                    .neighbors_directed(*node_index, Direction::Incoming)
                    .count()
                    > 1
            })
            .sorted()
            .collect_vec()
    }
    /// Returns the `(range_of_one, range_of_other)` pair stored on the edge
    /// from `one` to `other`, if such an edge exists.
    fn ranges_between(
        &self,
        one: &NodeIndex,
        other: &NodeIndex,
    ) -> Option<(&Ranges<Version>, &Ranges<Version>)> {
        let edge = self.dependencies.find_edge(*one, *other)?;
        self.dependencies
            .edge_weight(edge)
            .map(|(one, other)| (one, other))
    }
    /// A good enough explanation in case we're not able to produce anything
    /// nicer.
    fn fallback_explanation(&self) -> String {
        let mut conflicting_packages = HashSet::new();
        collect_conflicting_packages(&self.derivation_tree, &mut conflicting_packages);
        wrap_format!(
            "Unable to find compatible versions for \
            the version constraints in your gleam.toml. \
            The conflicting packages are:

{}
            ",
            conflicting_packages
                .into_iter()
                .map(|s| format!("- {s}"))
                .join("\n")
        )
    }
}
/// Flattens a pubgrub derivation tree into a dependency graph: each node is a
/// package name and an edge `a -> b` carries the pair of version ranges
/// `(range_of_a, range_of_b_required_by_a)`, unioned across all the clauses
/// mentioning that same pair of packages.
///
/// Improvements over the previous version: the duplicated node-lookup code is
/// factored into a helper, and the needless `peekable().peek()` on an
/// iterator that is only advanced once is replaced with `next()`.
fn build_dependencies_graph(
    derivation_tree: &DerivationTree<String, Ranges<Version>, String>,
    graph: &mut StableGraph<String, (Ranges<Version>, Ranges<Version>)>,
    nodes: &mut HashMap<String, NodeIndex<u32>>,
) {
    // Looks up the node for a package, inserting it into the graph (and the
    // name -> index map) the first time it is seen.
    fn find_or_insert_node(
        graph: &mut StableGraph<String, (Ranges<Version>, Ranges<Version>)>,
        nodes: &mut HashMap<String, NodeIndex<u32>>,
        package: &String,
    ) -> NodeIndex<u32> {
        match nodes.get(package) {
            Some(index) => *index,
            None => {
                let index = graph.add_node(package.clone());
                let _ = nodes.insert(package.clone(), index);
                index
            }
        }
    }

    match derivation_tree {
        DerivationTree::External(External::FromDependencyOf(
            one,
            range_one,
            other,
            range_other,
        )) => {
            let one_index = find_or_insert_node(graph, nodes, one);
            let other_index = find_or_insert_node(graph, nodes, other);
            // If the two packages are already connected, merge the new ranges
            // into the existing edge instead of keeping parallel edges.
            let edge_weight = match graph.edges_connecting(one_index, other_index).next() {
                Some(edge) => {
                    let (old_range_one, old_range_other) = edge.weight();
                    (
                        range_one.union(old_range_one),
                        range_other.union(old_range_other),
                    )
                }
                None => (range_one.clone(), range_other.clone()),
            };
            let _ = graph.update_edge(one_index, other_index, edge_weight);
        }
        // Other external causes carry no dependency information.
        DerivationTree::External(_) => (),
        // Derived nodes: recurse into both sub-causes.
        DerivationTree::Derived(Derived { cause1, cause2, .. }) => {
            build_dependencies_graph(cause1, graph, nodes);
            build_dependencies_graph(cause2, graph, nodes);
        }
    }
}
/// This function collapses adjacent levels of a derivation tree that are all
/// relative to the same dependency.
///
/// By default a derivation tree might have many nodes for a specific package,
/// each node referring to a specific version range. For example:
///
/// - package_wibble `>= 1.0.0 and < 1.1.0` requires package_wobble `>= 1.1.0`
/// - package_wibble `>= 1.1.0 and < 1.2.0` requires package_wobble `>= 1.2.0`
/// - package_wibble `1.1.0` requires package_wobble `>= 1.1.0`
///
/// This level of fine-grained detail would be quite overwhelming in the vast
/// majority of cases so we're fine with collapsing all these details into a
/// single node taking the union of all the ranges that are there:
///
/// - package_wibble `>= 1.0.0 and < 1.2.0` requires package_wobble `>= 1.1.0`
///
/// This way we can print an error message that is way more concise and still
/// informative about what went wrong, at the cost of losing some precision
/// about the exact version ranges involved in each individual clause.
///
fn simplify_derivation_tree(derivation_tree: &mut DerivationTree<String, Ranges<Version>, String>) {
    match derivation_tree {
        DerivationTree::External(_) => {}
        DerivationTree::Derived(derived) => {
            // Simplify bottom-up: children first, then try to collapse this
            // node itself.
            simplify_derivation_tree(Arc::make_mut(&mut derived.cause1));
            simplify_derivation_tree(Arc::make_mut(&mut derived.cause2));
            simplify_derivation_tree_outer(derivation_tree);
        }
    }
}
/// Collapses a derived node whose two causes are both external dependency
/// clauses relating the same pair of packages into a single external clause
/// with the union of both sides' ranges. See `simplify_derivation_tree`.
fn simplify_derivation_tree_outer(
    derivation_tree: &mut DerivationTree<String, Ranges<Version>, String>,
) {
    match derivation_tree {
        DerivationTree::External(_) => {}
        DerivationTree::Derived(derived) => {
            match (
                Arc::make_mut(&mut derived.cause1),
                Arc::make_mut(&mut derived.cause2),
            ) {
                (
                    DerivationTree::External(External::FromDependencyOf(
                        package,
                        package_range,
                        required_package,
                        required_package_range,
                    )),
                    DerivationTree::External(External::FromDependencyOf(
                        maybe_package,
                        other_package_range,
                        maybe_required_package,
                        other_required_package_range,
                    )),
                ) if package == maybe_package && required_package == maybe_required_package => {
                    *derivation_tree = DerivationTree::External(External::FromDependencyOf(
                        package.clone(),
                        package_range.union(other_package_range),
                        required_package.clone(),
                        required_package_range.union(other_required_package_range),
                    ))
                }
                // Causes about different package pairs cannot be merged.
                _ => {}
            }
        }
    }
}
/// Walks the derivation tree gathering the names of every package mentioned
/// by one of its external causes into `conflicting_packages`.
fn collect_conflicting_packages<'dt>(
    derivation_tree: &'dt DerivationTree<String, Ranges<Version>, String>,
    conflicting_packages: &mut HashSet<&'dt String>,
) {
    match derivation_tree {
        DerivationTree::External(External::NotRoot(package, _))
        | DerivationTree::External(External::NoVersions(package, _))
        | DerivationTree::External(External::Custom(package, _, _)) => {
            let _ = conflicting_packages.insert(package);
        }
        DerivationTree::External(External::FromDependencyOf(package, _, dep_package, _)) => {
            // Both sides of a dependency clause are part of the conflict.
            let _ = conflicting_packages.insert(package);
            let _ = conflicting_packages.insert(dep_package);
        }
        DerivationTree::Derived(derived) => {
            collect_conflicting_packages(&derived.cause1, conflicting_packages);
            collect_conflicting_packages(&derived.cause2, conflicting_packages);
        }
    }
}
/// Formats a version range in a human readable way (e.g.
/// `>= 1.0.0 and < 2.0.0`), joining disjoint intervals with `or`.
fn pretty_range(range: &Ranges<Version>) -> String {
    range
        .iter()
        .map(|(lower, upper)| match (lower, upper) {
            // A closed interval with equal bounds is a single exact version.
            (Included(lower), Included(upper)) if lower == upper => format!("{lower}"),
            (Included(lower), Included(upper)) => format!(">= {lower} and <= {upper}"),
            (Included(lower), Excluded(upper)) => format!(">= {lower} and < {upper}"),
            (Excluded(lower), Included(upper)) => format!("> {lower} and <= {upper}"),
            (Excluded(lower), Excluded(upper)) => format!("> {lower} and < {upper}"),
            (Included(version), Unbounded) => format!(">= {version}"),
            (Excluded(version), Unbounded) => format!("> {version}"),
            (Unbounded, Included(version)) => format!("<= {version}"),
            (Unbounded, Excluded(version)) => format!("< {version}"),
            (Unbounded, Unbounded) => "".into(),
        })
        .join(" or ")
}
| rust | Apache-2.0 | f424547f02e621f1c5f28749786e05eda7feb098 | 2026-01-04T15:40:22.554517Z | false |
gleam-lang/gleam | https://github.com/gleam-lang/gleam/blob/f424547f02e621f1c5f28749786e05eda7feb098/compiler-core/src/metadata.rs | compiler-core/src/metadata.rs | //! Seriaisation and deserialisation of Gleam compiler metadata into binary files
//! using the Cap'n Proto schema.
mod module_decoder;
mod module_encoder;
#[cfg(test)]
mod tests;
pub use self::{module_decoder::ModuleDecoder, module_encoder::ModuleEncoder};
| rust | Apache-2.0 | f424547f02e621f1c5f28749786e05eda7feb098 | 2026-01-04T15:40:22.554517Z | false |
gleam-lang/gleam | https://github.com/gleam-lang/gleam/blob/f424547f02e621f1c5f28749786e05eda7feb098/compiler-core/src/ast_folder.rs | compiler-core/src/ast_folder.rs | use ecow::EcoString;
use itertools::Itertools;
use num_bigint::BigInt;
use vec1::Vec1;
use crate::{
analyse::Inferred,
ast::{
Assert, AssignName, Assignment, BinOp, BitArraySize, CallArg, Constant, Definition,
FunctionLiteralKind, InvalidExpression, Pattern, RecordBeingUpdated, RecordUpdateArg,
SrcSpan, Statement, TailPattern, TargetedDefinition, TodoKind, TypeAst, TypeAstConstructor,
TypeAstFn, TypeAstHole, TypeAstTuple, TypeAstVar, UntypedArg, UntypedAssert,
UntypedAssignment, UntypedClause, UntypedConstant, UntypedConstantBitArraySegment,
UntypedCustomType, UntypedDefinition, UntypedExpr, UntypedExprBitArraySegment,
UntypedFunction, UntypedImport, UntypedModule, UntypedModuleConstant, UntypedPattern,
UntypedPatternBitArraySegment, UntypedRecordUpdateArg, UntypedStatement,
UntypedTailPattern, UntypedTypeAlias, UntypedUse, UntypedUseAssignment, Use, UseAssignment,
},
build::Target,
parse::LiteralFloatValue,
type_::error::VariableOrigin,
};
#[allow(dead_code)]
pub trait UntypedModuleFolder: TypeAstFolder + UntypedExprFolder {
    /// You probably don't want to override this method.
    ///
    /// Folds every definition in the module: each one is first passed to its
    /// matching `fold_*` hook and then walked so its nested nodes get folded
    /// too. Each definition keeps its compilation target.
    fn fold_module(&mut self, mut module: UntypedModule) -> UntypedModule {
        module.definitions = module
            .definitions
            .into_iter()
            .map(|definition| {
                let TargetedDefinition { definition, target } = definition;
                match definition {
                    Definition::Function(function) => {
                        let function = self.fold_function_definition(function, target);
                        let definition = self.walk_function_definition(function);
                        TargetedDefinition { definition, target }
                    }
                    Definition::TypeAlias(type_alias) => {
                        let type_alias = self.fold_type_alias(type_alias, target);
                        let definition = self.walk_type_alias(type_alias);
                        TargetedDefinition { definition, target }
                    }
                    Definition::CustomType(custom_type) => {
                        let custom_type = self.fold_custom_type(custom_type, target);
                        let definition = self.walk_custom_type(custom_type);
                        TargetedDefinition { definition, target }
                    }
                    Definition::Import(import) => {
                        let import = self.fold_import(import, target);
                        let definition = self.walk_import(import);
                        TargetedDefinition { definition, target }
                    }
                    Definition::ModuleConstant(constant) => {
                        let constant = self.fold_module_constant(constant, target);
                        let definition = self.walk_module_constant(constant);
                        TargetedDefinition { definition, target }
                    }
                }
            })
            .collect();
        module
    }
/// You probably don't want to override this method.
fn walk_function_definition(&mut self, mut function: UntypedFunction) -> UntypedDefinition {
function.body = function
.body
.into_iter()
.map(|statement| self.fold_statement(statement))
.collect_vec();
function.return_annotation = function
.return_annotation
.map(|type_| self.fold_type(type_));
function.arguments = function
.arguments
.into_iter()
.map(|mut argument| {
argument.annotation = argument.annotation.map(|type_| self.fold_type(type_));
argument
})
.collect();
Definition::Function(function)
}
/// You probably don't want to override this method.
fn walk_type_alias(&mut self, mut type_alias: UntypedTypeAlias) -> UntypedDefinition {
type_alias.type_ast = self.fold_type(type_alias.type_ast);
Definition::TypeAlias(type_alias)
}
/// You probably don't want to override this method.
fn walk_custom_type(&mut self, mut custom_type: UntypedCustomType) -> UntypedDefinition {
custom_type.constructors = custom_type
.constructors
.into_iter()
.map(|mut constructor| {
constructor.arguments = constructor
.arguments
.into_iter()
.map(|mut argument| {
argument.ast = self.fold_type(argument.ast);
argument
})
.collect();
constructor
})
.collect();
Definition::CustomType(custom_type)
}
/// You probably don't want to override this method.
fn walk_import(&mut self, i: UntypedImport) -> UntypedDefinition {
Definition::Import(i)
}
/// You probably don't want to override this method.
fn walk_module_constant(&mut self, mut constant: UntypedModuleConstant) -> UntypedDefinition {
constant.annotation = constant.annotation.map(|type_| self.fold_type(type_));
constant.value = Box::new(self.fold_constant(*constant.value));
Definition::ModuleConstant(constant)
}
    /// Override to replace a function definition before it is walked.
    /// The default implementation returns it unchanged.
    fn fold_function_definition(
        &mut self,
        function: UntypedFunction,
        _target: Option<Target>,
    ) -> UntypedFunction {
        function
    }
fn fold_type_alias(
&mut self,
function: UntypedTypeAlias,
_target: Option<Target>,
) -> UntypedTypeAlias {
function
}
    /// Override to replace a custom type definition before it is walked.
    /// The default implementation returns it unchanged.
    fn fold_custom_type(
        &mut self,
        custom_type: UntypedCustomType,
        _target: Option<Target>,
    ) -> UntypedCustomType {
        custom_type
    }
    /// Override to replace an import before it is walked.
    /// The default implementation returns it unchanged.
    fn fold_import(&mut self, import: UntypedImport, _target: Option<Target>) -> UntypedImport {
        import
    }
    /// Override to replace a module constant before it is walked.
    /// The default implementation returns it unchanged.
    fn fold_module_constant(
        &mut self,
        constant: UntypedModuleConstant,
        _target: Option<Target>,
    ) -> UntypedModuleConstant {
        constant
    }
}
#[allow(dead_code)]
pub trait TypeAstFolder {
    /// Visit a node and potentially replace it with another node using the
    /// `fold_*` methods. Afterwards, the `walk` method is called on the new
    /// node to continue traversing.
    ///
    /// You probably don't want to override this method.
    fn fold_type(&mut self, type_: TypeAst) -> TypeAst {
        let updated = self.update_type(type_);
        self.walk_type(updated)
    }
    /// Dispatches to the `fold_*` hook matching the node's variant.
    ///
    /// You probably don't want to override this method.
    fn update_type(&mut self, type_: TypeAst) -> TypeAst {
        match type_ {
            TypeAst::Constructor(node) => self.fold_type_constructor(node),
            TypeAst::Fn(node) => self.fold_type_fn(node),
            TypeAst::Var(node) => self.fold_type_var(node),
            TypeAst::Tuple(node) => self.fold_type_tuple(node),
            TypeAst::Hole(node) => self.fold_type_hole(node),
        }
    }
    /// Recurses into the node's children, folding each contained type.
    ///
    /// You probably don't want to override this method.
    fn walk_type(&mut self, type_: TypeAst) -> TypeAst {
        match type_ {
            TypeAst::Constructor(mut node) => {
                node.arguments = self.fold_all_types(node.arguments);
                TypeAst::Constructor(node)
            }
            TypeAst::Fn(mut node) => {
                node.arguments = self.fold_all_types(node.arguments);
                node.return_ = Box::new(self.fold_type(*node.return_));
                TypeAst::Fn(node)
            }
            TypeAst::Tuple(mut node) => {
                node.elements = self.fold_all_types(node.elements);
                TypeAst::Tuple(node)
            }
            // Leaves: nothing to recurse into.
            TypeAst::Var(_) | TypeAst::Hole(_) => type_,
        }
    }
    /// Folds every type in the vector, preserving order.
    ///
    /// You probably don't want to override this method.
    fn fold_all_types(&mut self, types: Vec<TypeAst>) -> Vec<TypeAst> {
        let mut folded = Vec::with_capacity(types.len());
        for type_ in types {
            folded.push(self.fold_type(type_));
        }
        folded
    }
    /// Override to replace constructor type nodes. Defaults to the identity.
    fn fold_type_constructor(&mut self, constructor: TypeAstConstructor) -> TypeAst {
        TypeAst::Constructor(constructor)
    }
    /// Override to replace function type nodes. Defaults to the identity.
    fn fold_type_fn(&mut self, function: TypeAstFn) -> TypeAst {
        TypeAst::Fn(function)
    }
    /// Override to replace tuple type nodes. Defaults to the identity.
    fn fold_type_tuple(&mut self, tuple: TypeAstTuple) -> TypeAst {
        TypeAst::Tuple(tuple)
    }
    /// Override to replace type variable nodes. Defaults to the identity.
    fn fold_type_var(&mut self, var: TypeAstVar) -> TypeAst {
        TypeAst::Var(var)
    }
    /// Override to replace type hole nodes. Defaults to the identity.
    fn fold_type_hole(&mut self, hole: TypeAstHole) -> TypeAst {
        TypeAst::Hole(hole)
    }
}
#[allow(dead_code)]
pub trait UntypedExprFolder: TypeAstFolder + UntypedConstantFolder + PatternFolder {
/// Visit a node and potentially replace it with another node using the
/// `fold_*` methods. Afterwards, the `walk` method is called on the new
/// node to continue traversing.
///
/// You probably don't want to override this method.
fn fold_expr(&mut self, expression: UntypedExpr) -> UntypedExpr {
let expression = self.update_expr(expression);
self.walk_expr(expression)
}
    /// Dispatches to the `fold_*` hook matching the expression's variant,
    /// destructuring the node and passing its fields along unchanged.
    ///
    /// You probably don't want to override this method.
    fn update_expr(&mut self, expression: UntypedExpr) -> UntypedExpr {
        match expression {
            UntypedExpr::Var { location, name } => self.fold_var(location, name),
            UntypedExpr::Int {
                location,
                value,
                int_value,
            } => self.fold_int(location, value, int_value),
            UntypedExpr::Float {
                location,
                value,
                float_value,
            } => self.fold_float(location, value, float_value),
            UntypedExpr::String { location, value } => self.fold_string(location, value),
            UntypedExpr::Block {
                location,
                statements,
            } => self.fold_block(location, statements),
            UntypedExpr::Fn {
                location,
                end_of_head_byte_index,
                kind,
                arguments,
                body,
                return_annotation,
            } => self.fold_fn(
                location,
                end_of_head_byte_index,
                kind,
                arguments,
                body,
                return_annotation,
            ),
            UntypedExpr::List {
                location,
                elements,
                tail,
            } => self.fold_list(location, elements, tail),
            UntypedExpr::Call {
                location,
                fun,
                arguments,
            } => self.fold_call(location, fun, arguments),
            UntypedExpr::BinOp {
                location,
                name,
                name_location,
                left,
                right,
            } => self.fold_bin_op(location, name, name_location, left, right),
            UntypedExpr::PipeLine { expressions } => self.fold_pipe_line(expressions),
            UntypedExpr::Case {
                location,
                subjects,
                clauses,
            } => self.fold_case(location, subjects, clauses),
            UntypedExpr::FieldAccess {
                location,
                label_location,
                label,
                container,
            } => self.fold_field_access(location, label_location, label, container),
            UntypedExpr::Tuple { location, elements } => self.fold_tuple(location, elements),
            UntypedExpr::TupleIndex {
                location,
                index,
                tuple,
            } => self.fold_tuple_index(location, index, tuple),
            UntypedExpr::Todo {
                kind,
                location,
                message,
            } => self.fold_todo(kind, location, message),
            UntypedExpr::Echo {
                location,
                keyword_end,
                expression,
                message,
            } => self.fold_echo(location, keyword_end, expression, message),
            UntypedExpr::Panic { location, message } => self.fold_panic(location, message),
            UntypedExpr::BitArray { location, segments } => self.fold_bit_array(location, segments),
            UntypedExpr::RecordUpdate {
                location,
                constructor,
                record,
                arguments,
            } => self.fold_record_update(location, constructor, record, arguments),
            UntypedExpr::NegateBool { location, value } => self.fold_negate_bool(location, value),
            UntypedExpr::NegateInt { location, value } => self.fold_negate_int(location, value),
        }
    }
    /// Recurses into the updated expression, folding every child expression,
    /// pattern and type annotation it contains.
    ///
    /// You probably don't want to override this method.
    fn walk_expr(&mut self, expression: UntypedExpr) -> UntypedExpr {
        match expression {
            // Leaf variants are returned as-is.
            // NOTE(review): `NegateInt` and `NegateBool` carry an inner
            // expression that is not recursed into here — confirm this is
            // intentional.
            UntypedExpr::Int { .. }
            | UntypedExpr::Var { .. }
            | UntypedExpr::Float { .. }
            | UntypedExpr::String { .. }
            | UntypedExpr::NegateInt { .. }
            | UntypedExpr::NegateBool { .. } => expression,
            UntypedExpr::Todo {
                kind,
                location,
                message,
            } => UntypedExpr::Todo {
                kind,
                location,
                message: message.map(|msg_expr| Box::new(self.fold_expr(*msg_expr))),
            },
            UntypedExpr::Panic { location, message } => UntypedExpr::Panic {
                location,
                message: message.map(|msg_expr| Box::new(self.fold_expr(*msg_expr))),
            },
            UntypedExpr::Echo {
                location,
                expression,
                keyword_end,
                message,
            } => UntypedExpr::Echo {
                location,
                keyword_end,
                expression: expression.map(|expression| Box::new(self.fold_expr(*expression))),
                message: message.map(|message| Box::new(self.fold_expr(*message))),
            },
            UntypedExpr::Block {
                location,
                statements,
            } => {
                let statements = statements.mapped(|s| self.fold_statement(s));
                UntypedExpr::Block {
                    location,
                    statements,
                }
            }
            // Anonymous functions fold their arguments, then the return
            // annotation, then the body statements.
            UntypedExpr::Fn {
                location,
                kind,
                end_of_head_byte_index,
                arguments,
                body,
                return_annotation,
            } => {
                let arguments = arguments.into_iter().map(|a| self.fold_arg(a)).collect();
                let return_annotation = return_annotation.map(|type_| self.fold_type(type_));
                let body = body.mapped(|s| self.fold_statement(s));
                UntypedExpr::Fn {
                    location,
                    end_of_head_byte_index,
                    kind,
                    arguments,
                    body,
                    return_annotation,
                }
            }
            UntypedExpr::List {
                location,
                elements,
                tail,
            } => {
                let elements = elements
                    .into_iter()
                    .map(|element| self.fold_expr(element))
                    .collect();
                let tail = tail.map(|e| Box::new(self.fold_expr(*e)));
                UntypedExpr::List {
                    location,
                    elements,
                    tail,
                }
            }
            // Calls fold the callee before the argument values.
            UntypedExpr::Call {
                location,
                fun,
                arguments,
            } => {
                let fun = Box::new(self.fold_expr(*fun));
                let arguments = arguments
                    .into_iter()
                    .map(|mut a| {
                        a.value = self.fold_expr(a.value);
                        a
                    })
                    .collect();
                UntypedExpr::Call {
                    location,
                    fun,
                    arguments,
                }
            }
            UntypedExpr::BinOp {
                location,
                name,
                name_location,
                left,
                right,
            } => {
                let left = Box::new(self.fold_expr(*left));
                let right = Box::new(self.fold_expr(*right));
                UntypedExpr::BinOp {
                    location,
                    name,
                    name_location,
                    left,
                    right,
                }
            }
            UntypedExpr::PipeLine { expressions } => {
                let expressions = expressions.mapped(|e| self.fold_expr(e));
                UntypedExpr::PipeLine { expressions }
            }
            // Case expressions fold the subjects, then for each clause the
            // patterns (including alternative patterns) before the clause body.
            UntypedExpr::Case {
                location,
                subjects,
                clauses,
            } => {
                let subjects = subjects.into_iter().map(|e| self.fold_expr(e)).collect();
                let clauses = clauses.map(|clauses| {
                    clauses
                        .into_iter()
                        .map(|mut c| {
                            c.pattern = c
                                .pattern
                                .into_iter()
                                .map(|p| self.fold_pattern(p))
                                .collect();
                            c.alternative_patterns = c
                                .alternative_patterns
                                .into_iter()
                                .map(|p| p.into_iter().map(|p| self.fold_pattern(p)).collect())
                                .collect();
                            c.then = self.fold_expr(c.then);
                            c
                        })
                        .collect()
                });
                UntypedExpr::Case {
                    location,
                    subjects,
                    clauses,
                }
            }
            UntypedExpr::FieldAccess {
                location,
                label_location,
                label,
                container,
            } => {
                let container = Box::new(self.fold_expr(*container));
                UntypedExpr::FieldAccess {
                    location,
                    label_location,
                    label,
                    container,
                }
            }
            UntypedExpr::Tuple { location, elements } => {
                let elements = elements
                    .into_iter()
                    .map(|element| self.fold_expr(element))
                    .collect();
                UntypedExpr::Tuple { location, elements }
            }
            UntypedExpr::TupleIndex {
                location,
                index,
                tuple,
            } => {
                let tuple = Box::new(self.fold_expr(*tuple));
                UntypedExpr::TupleIndex {
                    location,
                    index,
                    tuple,
                }
            }
            UntypedExpr::BitArray { location, segments } => {
                let segments = segments
                    .into_iter()
                    .map(|mut s| {
                        s.value = Box::new(self.fold_expr(*s.value));
                        s
                    })
                    .collect();
                UntypedExpr::BitArray { location, segments }
            }
            // NOTE(review): only the constructor and the argument values are
            // folded here; the `record` being updated is passed through
            // untouched — confirm this is intentional.
            UntypedExpr::RecordUpdate {
                location,
                constructor,
                record,
                arguments,
            } => {
                let constructor = Box::new(self.fold_expr(*constructor));
                let arguments = arguments
                    .into_iter()
                    .map(|mut a| {
                        a.value = self.fold_expr(a.value);
                        a
                    })
                    .collect();
                UntypedExpr::RecordUpdate {
                    location,
                    constructor,
                    record,
                    arguments,
                }
            }
        }
    }
/// You probably don't want to override this method.
fn fold_arg(&mut self, arg: UntypedArg) -> UntypedArg {
let UntypedArg {
location,
names,
annotation,
type_,
} = arg;
let annotation = annotation.map(|type_| self.fold_type(type_));
UntypedArg {
location,
names,
annotation,
type_,
}
}
/// You probably don't want to override this method.
fn fold_statement(&mut self, statement: UntypedStatement) -> UntypedStatement {
let statement = self.update_statement(statement);
self.walk_statement(statement)
}
/// You probably don't want to override this method.
fn update_statement(&mut self, statement: UntypedStatement) -> UntypedStatement {
match statement {
Statement::Expression(expression) => Statement::Expression(expression),
Statement::Assignment(assignment) => {
Statement::Assignment(Box::new(self.fold_assignment(*assignment)))
}
Statement::Use(use_) => Statement::Use(self.fold_use(use_)),
Statement::Assert(assert) => Statement::Assert(self.fold_assert(assert)),
}
}
    /// Recurses into the statement's children: patterns, type annotations and
    /// the contained expressions.
    ///
    /// You probably don't want to override this method.
    fn walk_statement(&mut self, statement: UntypedStatement) -> UntypedStatement {
        match statement {
            Statement::Expression(expression) => Statement::Expression(self.fold_expr(expression)),
            Statement::Assignment(assignment) => {
                let Assignment {
                    location,
                    value,
                    pattern,
                    kind,
                    annotation,
                    compiled_case,
                } = *assignment;
                // The pattern and annotation are folded before the assigned
                // value.
                let pattern = self.fold_pattern(pattern);
                let annotation = annotation.map(|type_| self.fold_type(type_));
                let value = self.fold_expr(value);
                Statement::Assignment(Box::new(Assignment {
                    location,
                    value,
                    pattern,
                    kind,
                    annotation,
                    compiled_case,
                }))
            }
            Statement::Use(Use {
                location,
                right_hand_side_location,
                assignments_location,
                call,
                assignments,
            }) => {
                // The use assignments (and their patterns) are folded before
                // the right-hand-side call expression.
                let assignments = assignments
                    .into_iter()
                    .map(|assignment| {
                        let mut use_ = self.fold_use_assignment(assignment);
                        use_.pattern = self.fold_pattern(use_.pattern);
                        use_
                    })
                    .collect();
                let call = Box::new(self.fold_expr(*call));
                Statement::Use(Use {
                    location,
                    right_hand_side_location,
                    assignments_location,
                    call,
                    assignments,
                })
            }
            Statement::Assert(Assert {
                location,
                value,
                message,
            }) => {
                let value = self.fold_expr(value);
                let message = message.map(|message| self.fold_expr(message));
                Statement::Assert(Assert {
                    location,
                    value,
                    message,
                })
            }
        }
    }
/// You probably don't want to override this method.
fn fold_use_assignment(&mut self, use_: UntypedUseAssignment) -> UntypedUseAssignment {
let UseAssignment {
location,
pattern,
annotation,
} = use_;
let annotation = annotation.map(|type_| self.fold_type(type_));
UseAssignment {
location,
pattern,
annotation,
}
}
    /// Override to replace integer literals. The default rebuilds the node unchanged.
    fn fold_int(&mut self, location: SrcSpan, value: EcoString, int_value: BigInt) -> UntypedExpr {
        UntypedExpr::Int {
            location,
            value,
            int_value,
        }
    }
    /// Override to replace float literals. The default rebuilds the node unchanged.
    fn fold_float(
        &mut self,
        location: SrcSpan,
        value: EcoString,
        float_value: LiteralFloatValue,
    ) -> UntypedExpr {
        UntypedExpr::Float {
            location,
            value,
            float_value,
        }
    }
    /// Override to replace string literals. The default rebuilds the node unchanged.
    fn fold_string(&mut self, location: SrcSpan, value: EcoString) -> UntypedExpr {
        UntypedExpr::String { location, value }
    }
    /// Override to replace blocks. The default rebuilds the node unchanged.
    fn fold_block(&mut self, location: SrcSpan, statements: Vec1<UntypedStatement>) -> UntypedExpr {
        UntypedExpr::Block {
            location,
            statements,
        }
    }
    /// Override to replace variable references. The default rebuilds the node unchanged.
    fn fold_var(&mut self, location: SrcSpan, name: EcoString) -> UntypedExpr {
        UntypedExpr::Var { location, name }
    }
    /// Override to replace anonymous functions. The default rebuilds the node unchanged.
    fn fold_fn(
        &mut self,
        location: SrcSpan,
        end_of_head_byte_index: u32,
        kind: FunctionLiteralKind,
        arguments: Vec<UntypedArg>,
        body: Vec1<UntypedStatement>,
        return_annotation: Option<TypeAst>,
    ) -> UntypedExpr {
        UntypedExpr::Fn {
            location,
            end_of_head_byte_index,
            kind,
            arguments,
            body,
            return_annotation,
        }
    }
    /// Override to replace list literals. The default rebuilds the node unchanged.
    fn fold_list(
        &mut self,
        location: SrcSpan,
        elements: Vec<UntypedExpr>,
        tail: Option<Box<UntypedExpr>>,
    ) -> UntypedExpr {
        UntypedExpr::List {
            location,
            elements,
            tail,
        }
    }
    /// Override to replace function calls. The default rebuilds the node unchanged.
    fn fold_call(
        &mut self,
        location: SrcSpan,
        fun: Box<UntypedExpr>,
        arguments: Vec<CallArg<UntypedExpr>>,
    ) -> UntypedExpr {
        UntypedExpr::Call {
            location,
            fun,
            arguments,
        }
    }
    /// Override to replace binary operator expressions. The default rebuilds the node unchanged.
    fn fold_bin_op(
        &mut self,
        location: SrcSpan,
        name: BinOp,
        name_location: SrcSpan,
        left: Box<UntypedExpr>,
        right: Box<UntypedExpr>,
    ) -> UntypedExpr {
        UntypedExpr::BinOp {
            location,
            name,
            name_location,
            left,
            right,
        }
    }
    /// Override to replace pipelines. The default rebuilds the node unchanged.
    fn fold_pipe_line(&mut self, expressions: Vec1<UntypedExpr>) -> UntypedExpr {
        UntypedExpr::PipeLine { expressions }
    }
    /// Override to replace case expressions. The default rebuilds the node unchanged.
    fn fold_case(
        &mut self,
        location: SrcSpan,
        subjects: Vec<UntypedExpr>,
        clauses: Option<Vec<UntypedClause>>,
    ) -> UntypedExpr {
        UntypedExpr::Case {
            location,
            subjects,
            clauses,
        }
    }
    /// Override to replace field accesses. The default rebuilds the node unchanged.
    fn fold_field_access(
        &mut self,
        location: SrcSpan,
        label_location: SrcSpan,
        label: EcoString,
        container: Box<UntypedExpr>,
    ) -> UntypedExpr {
        UntypedExpr::FieldAccess {
            location,
            label_location,
            label,
            container,
        }
    }
    /// Override to replace tuple literals. The default rebuilds the node unchanged.
    fn fold_tuple(&mut self, location: SrcSpan, elements: Vec<UntypedExpr>) -> UntypedExpr {
        UntypedExpr::Tuple { location, elements }
    }
    /// Override to replace tuple index accesses. The default rebuilds the node unchanged.
    fn fold_tuple_index(
        &mut self,
        location: SrcSpan,
        index: u64,
        tuple: Box<UntypedExpr>,
    ) -> UntypedExpr {
        UntypedExpr::TupleIndex {
            location,
            index,
            tuple,
        }
    }
    /// Override to replace `todo` expressions. The default rebuilds the node unchanged.
    fn fold_todo(
        &mut self,
        kind: TodoKind,
        location: SrcSpan,
        message: Option<Box<UntypedExpr>>,
    ) -> UntypedExpr {
        UntypedExpr::Todo {
            kind,
            location,
            message,
        }
    }
    /// Override to replace `echo` expressions. The default rebuilds the node unchanged.
    fn fold_echo(
        &mut self,
        location: SrcSpan,
        keyword_end: u32,
        expression: Option<Box<UntypedExpr>>,
        message: Option<Box<UntypedExpr>>,
    ) -> UntypedExpr {
        UntypedExpr::Echo {
            location,
            keyword_end,
            expression,
            message,
        }
    }
    /// Override to replace `panic` expressions. The default rebuilds the node unchanged.
    fn fold_panic(&mut self, location: SrcSpan, message: Option<Box<UntypedExpr>>) -> UntypedExpr {
        UntypedExpr::Panic { location, message }
    }
    /// Override to replace bit array literals. The default rebuilds the node unchanged.
    fn fold_bit_array(
        &mut self,
        location: SrcSpan,
        segments: Vec<UntypedExprBitArraySegment>,
    ) -> UntypedExpr {
        UntypedExpr::BitArray { location, segments }
    }
    /// Override to replace record updates. The default rebuilds the node unchanged.
    fn fold_record_update(
        &mut self,
        location: SrcSpan,
        constructor: Box<UntypedExpr>,
        record: RecordBeingUpdated<UntypedExpr>,
        arguments: Vec<UntypedRecordUpdateArg>,
    ) -> UntypedExpr {
        UntypedExpr::RecordUpdate {
            location,
            constructor,
            record,
            arguments,
        }
    }
    /// Override to replace boolean negations. The default rebuilds the node unchanged.
    fn fold_negate_bool(&mut self, location: SrcSpan, value: Box<UntypedExpr>) -> UntypedExpr {
        UntypedExpr::NegateBool { location, value }
    }
    /// Override to replace integer negations. The default rebuilds the node unchanged.
    fn fold_negate_int(&mut self, location: SrcSpan, value: Box<UntypedExpr>) -> UntypedExpr {
        UntypedExpr::NegateInt { location, value }
    }
    /// Override to replace assignments before they are walked. Defaults to the identity.
    fn fold_assignment(&mut self, assignment: UntypedAssignment) -> UntypedAssignment {
        assignment
    }
    /// Override to replace `use` statements before they are walked. Defaults to the identity.
    fn fold_use(&mut self, use_: UntypedUse) -> UntypedUse {
        use_
    }
    /// Override to replace `assert` statements before they are walked. Defaults to the identity.
    fn fold_assert(&mut self, assert: UntypedAssert) -> UntypedAssert {
        assert
    }
}
#[allow(dead_code)]
pub trait UntypedConstantFolder {
/// You probably don't want to override this method.
fn fold_constant(&mut self, constant: UntypedConstant) -> UntypedConstant {
let constant = self.update_constant(constant);
self.walk_constant(constant)
}
/// You probably don't want to override this method.
fn update_constant(&mut self, constant: UntypedConstant) -> UntypedConstant {
match constant {
Constant::Int {
location,
value,
int_value,
} => self.fold_constant_int(location, value, int_value),
Constant::Float {
location,
value,
float_value,
} => self.fold_constant_float(location, value, float_value),
Constant::String { location, value } => self.fold_constant_string(location, value),
Constant::Tuple {
location,
elements,
type_: (),
} => self.fold_constant_tuple(location, elements),
Constant::List {
location,
elements,
type_: (),
} => self.fold_constant_list(location, elements),
Constant::Record {
location,
module,
name,
arguments,
tag: (),
type_: (),
field_map: _,
record_constructor: _,
} => self.fold_constant_record(location, module, name, arguments),
Constant::RecordUpdate {
location,
constructor_location,
module,
name,
record,
arguments,
tag: (),
type_: (),
field_map: _,
} => self.fold_constant_record_update(
location,
constructor_location,
module,
name,
record,
arguments,
),
Constant::BitArray { location, segments } => {
self.fold_constant_bit_array(location, segments)
}
Constant::Var {
location,
module,
name,
constructor: _,
type_: (),
} => self.fold_constant_var(location, module, name),
Constant::StringConcatenation {
location,
left,
right,
} => self.fold_constant_string_concatenation(location, left, right),
Constant::Invalid {
location,
type_: (),
extra_information,
} => self.fold_constant_invalid(location, extra_information),
}
}
| rust | Apache-2.0 | f424547f02e621f1c5f28749786e05eda7feb098 | 2026-01-04T15:40:22.554517Z | true |
gleam-lang/gleam | https://github.com/gleam-lang/gleam/blob/f424547f02e621f1c5f28749786e05eda7feb098/compiler-core/src/pretty.rs | compiler-core/src/pretty.rs | //! This module implements the functionality described in
//! ["Strictly Pretty" (2000) by Christian Lindig][0], with a few
//! extensions.
//!
//! This module is heavily influenced by Elixir's Inspect.Algebra and
//! JavaScript's Prettier.
//!
//! [0]: http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.34.2200
//!
//! ## Extensions
//!
//! - `ForcedBreak` from Elixir.
//! - `FlexBreak` from Elixir.
//!
//! The way this module works is fairly simple conceptually, however the actual
//! behaviour in practice can be hard to wrap one's head around.
//!
//! The basic premise is the `Document` type, which is a tree structure,
//! containing some text as well as information on how it can be formatted.
//! Once the document is constructed, it can be printed using the
//! `to_pretty_string` function.
//!
//! It will then traverse the tree, and construct
//! a string, attempting to wrap lines so that they do not exceed the line length
//! limit specified. Where and when it wraps lines is determined by the structure
//! of the `Document` itself.
//!
#![allow(clippy::wrong_self_convention)]
#[cfg(test)]
mod tests;
use ecow::{EcoString, eco_format};
use itertools::Itertools;
use num_bigint::BigInt;
use unicode_segmentation::UnicodeSegmentation;
use crate::{Result, io::Utf8Writer};
/// Join multiple documents together in a vector. This macro calls the `to_doc`
/// method on each element, providing a concise way to write a document sequence.
/// For example:
///
/// ```rust:norun
/// docvec!["Hello", line(), "world!"]
/// ```
///
/// Note: each document in a docvec is not separated in any way: the formatter
/// will never break a line unless a `Document::Break` or `Document::Line`
/// is used. Therefore, `docvec!["a", "b", "c"]` is equivalent to
/// `"abc".to_doc()`.
///
#[macro_export]
macro_rules! docvec {
    // No arguments: an empty document sequence.
    () => {
        Document::Vec(Vec::new())
    };
    // One or more expressions (optional trailing comma): each is converted
    // with `to_doc` and collected into a `Document::Vec`.
    ($($x:expr),+ $(,)?) => {
        Document::Vec(vec![$($x.to_doc()),+])
    };
}
/// Coerce a value into a Document.
/// Note we do not implement this for String as a slight pressure to favour str
/// over String.
pub trait Documentable<'a> {
    /// Convert this value into a pretty-printable `Document`.
    fn to_doc(self) -> Document<'a>;
}
impl<'a> Documentable<'a> for char {
    fn to_doc(self) -> Document<'a> {
        Document::eco_string(eco_format!("{self}"))
    }
}
impl<'a> Documentable<'a> for &'a str {
    fn to_doc(self) -> Document<'a> {
        Document::str(self)
    }
}
impl<'a> Documentable<'a> for EcoString {
    fn to_doc(self) -> Document<'a> {
        Document::eco_string(self)
    }
}
impl<'a> Documentable<'a> for &EcoString {
    fn to_doc(self) -> Document<'a> {
        Document::eco_string(self.clone())
    }
}
impl<'a> Documentable<'a> for isize {
    fn to_doc(self) -> Document<'a> {
        Document::eco_string(eco_format!("{self}"))
    }
}
impl<'a> Documentable<'a> for i64 {
    fn to_doc(self) -> Document<'a> {
        Document::eco_string(eco_format!("{self}"))
    }
}
impl<'a> Documentable<'a> for usize {
    fn to_doc(self) -> Document<'a> {
        Document::eco_string(eco_format!("{self}"))
    }
}
// Debug formatting is used for floats so that whole values keep their decimal
// point (e.g. `1.0` rather than `1`).
impl<'a> Documentable<'a> for f64 {
    fn to_doc(self) -> Document<'a> {
        Document::eco_string(eco_format!("{self:?}"))
    }
}
// This previously used Debug formatting (`{self:?}`), unlike every other
// integer implementation. For integers `Debug` and `Display` produce
// identical output, so this switches to `Display` purely for consistency —
// rendered output is unchanged.
impl<'a> Documentable<'a> for u64 {
    fn to_doc(self) -> Document<'a> {
        Document::eco_string(eco_format!("{self}"))
    }
}
impl<'a> Documentable<'a> for u32 {
    fn to_doc(self) -> Document<'a> {
        Document::eco_string(eco_format!("{self}"))
    }
}
impl<'a> Documentable<'a> for u16 {
    fn to_doc(self) -> Document<'a> {
        Document::eco_string(eco_format!("{self}"))
    }
}
impl<'a> Documentable<'a> for u8 {
    fn to_doc(self) -> Document<'a> {
        Document::eco_string(eco_format!("{self}"))
    }
}
impl<'a> Documentable<'a> for BigInt {
    fn to_doc(self) -> Document<'a> {
        Document::eco_string(eco_format!("{self}"))
    }
}
// A document is trivially documentable as itself.
impl<'a> Documentable<'a> for Document<'a> {
    fn to_doc(self) -> Document<'a> {
        self
    }
}
impl<'a> Documentable<'a> for Vec<Document<'a>> {
    fn to_doc(self) -> Document<'a> {
        Document::Vec(self)
    }
}
// `None` renders as the empty document.
impl<'a, D: Documentable<'a>> Documentable<'a> for Option<D> {
    fn to_doc(self) -> Document<'a> {
        self.map(Documentable::to_doc).unwrap_or_else(nil)
    }
}
/// Joins an iterator into a single document, in the same way as `docvec!`.
pub fn concat<'a>(docs: impl IntoIterator<Item = Document<'a>>) -> Document<'a> {
    let mut sequence = Vec::new();
    sequence.extend(docs);
    Document::Vec(sequence)
}
/// Joins an iterator into a single document, interspersing each element with
/// another document. This is useful for example in argument lists, where a
/// list of arguments must all be separated with a comma.
pub fn join<'a>(
docs: impl IntoIterator<Item = Document<'a>>,
separator: Document<'a>,
) -> Document<'a> {
concat(Itertools::intersperse(docs.into_iter(), separator))
}
/// A pretty printable document. A tree structure, made up of text and other
/// elements which determine how it can be formatted.
///
/// The variants of this enum should probably not be constructed directly,
/// rather use the helper functions of the same names to construct them.
/// For example, use `line()` instead of `Document::Line(1)`.
///
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum Document<'a> {
    /// A mandatory linebreak. This is always printed as a string of newlines,
    /// equal in length to the number specified.
    Line(usize),
    /// Forces the breaks of the wrapped document to be considered as not
    /// fitting on a single line. Used in combination with a `Group` it can be
    /// used to force its `Break`s to always break.
    ForceBroken(Box<Self>),
    /// Ignore the next break, forcing it to render as unbroken.
    NextBreakFits(Box<Self>, NextBreakFitsMode),
    /// A document after which the formatter can insert a newline. This determines
    /// where line breaks can occur, outside of hardcoded `Line`s.
    /// See `break_` and `flex_break` for usage.
    Break {
        broken: &'a str,
        unbroken: &'a str,
        kind: BreakKind,
    },
    /// Join multiple documents together. The documents are not separated in any
    /// way: the formatter will only print newlines if `Document::Break` or
    /// `Document::Line` is used.
    Vec(Vec<Self>),
    /// Nests the given document by the given indent, depending on the specified
    /// condition. See `Document::nest`, `Document::set_nesting` and
    /// `Document::nest_if_broken` for usages.
    Nest(isize, NestMode, NestCondition, Box<Self>),
    /// Groups a document. When pretty printing a group, the formatter will
    /// first attempt to fit the entire group on one line. If it fails, all
    /// `break_` documents in the group will render broken.
    ///
    /// Nested groups are handled separately to their parents, so if the
    /// outermost group is broken, any sub-groups might be rendered broken
    /// or unbroken, depending on whether they fit on a single line.
    Group(Box<Self>),
    /// Renders a string slice. This will always render the string verbatim,
    /// without any line breaks or other modifications to it.
    Str {
        string: &'a str,
        /// The number of extended grapheme clusters in the string.
        /// This is what the pretty printer uses as the width of the string as it
        /// is closest to what a human would consider the "length" of a string.
        ///
        /// Since computing the number of grapheme clusters requires walking over
        /// the string we precompute it to avoid iterating through a string over
        /// and over again in the pretty printing algorithm.
        ///
        graphemes: isize,
    },
    /// Renders an `EcoString`. This will always render the string verbatim,
    /// without any line breaks or other modifications to it.
    EcoString {
        string: EcoString,
        /// The number of extended grapheme clusters in the string.
        /// This is what the pretty printer uses as the width of the string as it
        /// is closest to what a human would consider the "length" of a string.
        ///
        /// Since computing the number of grapheme clusters requires walking over
        /// the string we precompute it to avoid iterating through a string over
        /// and over again in the pretty printing algorithm.
        ///
        graphemes: isize,
    },
    /// A string that is not taken into account when determining line length.
    /// This is useful for additional formatting text which won't be rendered
    /// in the final output, such as ANSI codes or HTML elements.
    ZeroWidthString { string: EcoString },
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum Mode {
    /// The mode used when a group doesn't fit on a single line: when `Broken`
    /// the `Break`s inside it will be rendered as newlines, splitting the
    /// group.
    Broken,
    /// The default mode used when a group can fit on a single line: all its
    /// `Break`s will be rendered as their unbroken string and kept on a single
    /// line.
    Unbroken,
    /// This mode is used by the `NextBreakFits` document to force a break to be
    /// considered as broken.
    ForcedBroken,
    /// This mode is used to disable a `NextBreakFits` document.
    ForcedUnbroken,
}
/// A flag that can be used to enable or disable a `NextBreakFits` document.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum NextBreakFitsMode {
    /// The `NextBreakFits` wrapper takes effect.
    Enabled,
    /// The `NextBreakFits` wrapper is ignored.
    Disabled,
}
/// A flag that can be used to conditionally disable a `Nest` document.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum NestCondition {
    /// Always applies the nesting. This is a sensible default that will
    /// work for most of the cases.
    Always,
    /// Only applies the nesting if the wrapping `Group` couldn't fit on a
    /// single line and has been broken.
    IfBroken,
}
/// Used to change the way nesting of documents work.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum NestMode {
    /// If the nesting mode is `Increase`, the current indentation will be
    /// increased by the specified value.
    Increase,
    /// If the nesting mode is `Set`, the current indentation is going to be set
    /// to exactly the specified value.
    ///
    /// For example, with `doc.nest(2).set_nesting(0)`:
    /// "wibble
    /// wobble <- no indentation is added!
    /// wubble"
    Set,
}
fn fits(
limit: isize,
mut current_width: isize,
mut docs: im::Vector<(isize, Mode, &Document<'_>)>,
) -> bool {
// The `fits` function is going to take each document from the `docs` queue
// and check if those can fit on a single line. In order to do so documents
// are going to be pushed in front of this queue and have to be accompanied
// by additional information:
// - the document indentation, that can be increased by the `Nest` block.
// - the current mode; this is needed to know if a group is being broken or
// not and treat `Break`s differently as a consequence. You can see how
// the behaviour changes in [ref:break-fit].
//
// The loop might be broken earlier without checking all documents under one
// of two conditions:
// - the documents exceed the line `limit` and surely won't fit
// [ref:document-unfit].
// - the documents are sure to fit the line - for example, if we meet a
// broken `Break` [ref:break-fit] or a newline [ref:newline-fit].
loop {
// [tag:document-unfit] If we've exceeded the maximum width allowed for
// a line, it means that the document won't fit on a single line, we can
// break the loop.
if current_width > limit {
return false;
};
// We start by checking the first document of the queue. If there's no
// documents then we can safely say that it fits (if reached this point
// it means that the limit wasn't exceeded).
let (indent, mode, document) = match docs.pop_front() {
Some(x) => x,
None => return true,
};
match document {
// If a document is marked as `ForceBroken` we can immediately say
// that it doesn't fit, so that every break is going to be
// forcefully broken.
Document::ForceBroken(doc) => match mode {
// If the mode is `ForcedBroken` it means that we have to ignore
// this break [ref:forced-broken], so we go check the inner
// document ignoring the effects of this one.
Mode::ForcedBroken => docs.push_front((indent, mode, doc)),
Mode::Broken | Mode::Unbroken | Mode::ForcedUnbroken => return false,
},
// [tag:newline-fit] When we run into a line we know that the
// document has a bit that fits in the current line; if it didn't
// fit (that is, it exceeded the maximum allowed width) the loop
// would have been broken by one of the earlier checks.
Document::Line(_) => return true,
// If the nesting level is increased we go on checking the wrapped
// document and increase its indentation level based on the nesting
// condition.
Document::Nest(i, nest_mode, condition, doc) => match condition {
NestCondition::IfBroken => docs.push_front((indent, mode, doc)),
NestCondition::Always => {
let new_indent = match nest_mode {
NestMode::Increase => indent + i,
NestMode::Set => *i,
};
docs.push_front((new_indent, mode, doc))
}
},
// As a general rule, a group fits if it can stay on a single line
// without its breaks being broken down.
Document::Group(doc) => match mode {
// If an outer group was broken, we still try to fit the inner
// group on a single line, that's why for the inner document
// we change the mode back to `Unbroken`.
Mode::Broken => docs.push_front((indent, Mode::Unbroken, doc)),
// Any other mode is preserved as-is: if the mode is forced it
// has to be left unchanged, and if the mode is already unbroken
// there's no need to change it.
Mode::Unbroken | Mode::ForcedBroken | Mode::ForcedUnbroken => {
docs.push_front((indent, mode, doc))
}
},
// When we run into a string we increase the current_width; looping
// back we will check if we've exceeded the maximum allowed width.
Document::Str { graphemes, .. } | Document::EcoString { graphemes, .. } => {
current_width += graphemes
}
// Zero width strings do nothing: they do not contribute to line length
Document::ZeroWidthString { .. } => {}
// If we get to a break we need to first see if it has to be
// rendered as its unbroken or broken string, depending on the mode.
Document::Break { unbroken, .. } => match mode {
// [tag:break-fit] If the break has to be broken we're done!
// We haven't exceeded the maximum length (otherwise the loop
// iteration would have stopped with one of the earlier checks),
// and - since it needs to be broken - we'll have to go on a new
// line anyway.
// This means that the document inspected so far will fit on a
// single line, thus we return true.
Mode::Broken | Mode::ForcedBroken => return true,
// If the break is not broken then it will be rendered inline as
// its unbroken string, so we treat it exactly as if it were a
// normal string.
Mode::Unbroken | Mode::ForcedUnbroken => current_width += unbroken.len() as isize,
},
// The `NextBreakFits` can alter the current mode to `ForcedBroken`
// or `ForcedUnbroken` based on its enabled flag.
Document::NextBreakFits(doc, enabled) => match enabled {
// [tag:disable-next-break] If it is disabled then we check the
// wrapped document changing the mode to `ForcedUnbroken`.
NextBreakFitsMode::Disabled => docs.push_front((indent, Mode::ForcedUnbroken, doc)),
NextBreakFitsMode::Enabled => match mode {
// If we're in `ForcedUnbroken` mode it means that the check
// was disabled by a document wrapping this one
// [ref:disable-next-break]; that's why we do nothing and
// check the wrapped document as if it were a normal one.
Mode::ForcedUnbroken => docs.push_front((indent, mode, doc)),
// [tag:forced-broken] Any other mode is turned into
// `ForcedBroken` so that when we run into a break, the
// response to the question "Does the document fit?" will be
// yes [ref:break-fit].
// This is why this is called `NextBreakFit` I think.
Mode::Broken | Mode::Unbroken | Mode::ForcedBroken => {
docs.push_front((indent, Mode::ForcedBroken, doc))
}
},
},
// If there's a sequence of documents we will check each one, one
// after the other to see if - as a whole - they can fit on a single
// line.
Document::Vec(vec) => {
// The array needs to be reversed to preserve the order of the
// documents since each one is pushed _to the front_ of the
// queue of documents to check.
for doc in vec.iter().rev() {
docs.push_front((indent, mode, doc));
}
}
}
}
}
/// The kind of line break this `Document::Break` is.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum BreakKind {
    /// A `flex_break`: each flex break individually decides whether to split,
    /// based on whether the documents following it fit on the current line.
    Flex,
    /// A `break_`: rendered broken or unbroken together with every other
    /// strict break of the enclosing group.
    Strict,
}
fn format(
    writer: &mut impl Utf8Writer,
    limit: isize,
    mut width: isize,
    mut docs: im::Vector<(isize, Mode, &Document<'_>)>,
) -> Result<()> {
    // Writes the spaces needed to reach the given indentation level.
    // The formatter needs to do this after every newline it emits (for hard
    // `Line`s and for broken `Break`s alike), so the loop is shared here
    // instead of being copy-pasted into each of those branches.
    fn write_indentation(writer: &mut impl Utf8Writer, indent: isize) -> Result<()> {
        for _ in 0..indent {
            writer.str_write(" ")?;
        }
        Ok(())
    }

    // As long as there are documents to print we'll take each one by one and
    // output the corresponding string to the given writer.
    //
    // Each document in the `docs` queue also has an accompanying indentation
    // and mode:
    // - the indentation is used to keep track of the current indentation,
    //   you might notice in [ref:format-nest] that it adds documents to the
    //   queue increasing their current indentation.
    // - the mode is used to keep track of the state of the documents inside a
    //   group. For example, if a group doesn't fit on a single line its
    //   documents will be split into multiple lines and the mode set to
    //   `Broken` to keep track of this.
    while let Some((indent, mode, document)) = docs.pop_front() {
        match document {
            // When we run into a line we print the given number of newlines and
            // add the indentation required by the given document.
            Document::Line(i) => {
                for _ in 0..*i {
                    writer.str_write("\n")?;
                }
                write_indentation(writer, indent)?;
                width = indent;
            }
            // Flex breaks are NOT conditional to the mode: if the mode is
            // already `Unbroken`, then the break is left unbroken (like strict
            // breaks); any other mode is ignored.
            // A flexible break will only be split if the following documents
            // can't fit on the same line; otherwise, it is just displayed as an
            // unbroken `Break`.
            Document::Break {
                broken,
                unbroken,
                kind: BreakKind::Flex,
            } => {
                let unbroken_width = width + unbroken.len() as isize;
                // Every time we need to check again if the remaining piece can
                // fit. If it does, the flexible break is not broken.
                if mode == Mode::Unbroken || fits(limit, unbroken_width, docs.clone()) {
                    writer.str_write(unbroken)?;
                    width = unbroken_width;
                } else {
                    writer.str_write(broken)?;
                    writer.str_write("\n")?;
                    write_indentation(writer, indent)?;
                    width = indent;
                }
            }
            // Strict breaks are conditional to the mode. They differ from
            // flexible break because, if a group gets split - that is the mode
            // is `Broken` or `ForceBroken` - ALL of the breaks in that group
            // will be split. You can notice the difference with flexible breaks
            // because here we only check the mode and then take action; before
            // we would try and see if the remaining documents fit on a single
            // line before deciding if the (flexible) break can be split or not.
            Document::Break {
                broken,
                unbroken,
                kind: BreakKind::Strict,
            } => match mode {
                // If the mode requires the break to be broken, then its broken
                // string is printed, then we start a newline and indent it
                // according to the current indentation level.
                Mode::Broken | Mode::ForcedBroken => {
                    writer.str_write(broken)?;
                    writer.str_write("\n")?;
                    write_indentation(writer, indent)?;
                    width = indent;
                }
                // If the mode doesn't require the break to be broken, then its
                // unbroken string is printed as if it were a normal string;
                // also updating the width of the current line.
                Mode::Unbroken | Mode::ForcedUnbroken => {
                    writer.str_write(unbroken)?;
                    width += unbroken.len() as isize
                }
            },
            // Strings are printed as they are and the current width is
            // increased accordingly.
            Document::EcoString { string, graphemes } => {
                width += graphemes;
                writer.str_write(string)?;
            }
            Document::Str { string, graphemes } => {
                width += graphemes;
                writer.str_write(string)?;
            }
            Document::ZeroWidthString { string } => {
                // We write the string, but do not increment the length
                writer.str_write(string)?;
            }
            // If multiple documents need to be printed, then they are all
            // pushed to the front of the queue and will be printed one by one.
            Document::Vec(vec) => {
                // Just like `fits`, the elements will be pushed _on the front_
                // of the queue. In order to keep their original order they need
                // to be pushed in reverse order.
                for doc in vec.iter().rev() {
                    docs.push_front((indent, mode, doc));
                }
            }
            // A `Nest` document doesn't result in anything being printed, its
            // only effect is to increase the current nesting level for the
            // wrapped document [tag:format-nest].
            Document::Nest(i, nest_mode, condition, doc) => match (condition, mode) {
                // The nesting is only applied under two conditions:
                // - either the nesting condition is `Always`.
                // - or the condition is `IfBroken` and the group was actually
                //   broken (that is, the current mode is `Broken`).
                (NestCondition::Always, _) | (NestCondition::IfBroken, Mode::Broken) => {
                    let new_indent = match nest_mode {
                        NestMode::Increase => indent + i,
                        NestMode::Set => *i,
                    };
                    docs.push_front((new_indent, mode, doc))
                }
                // If none of the above conditions is met, then the nesting is
                // not applied.
                _ => docs.push_front((indent, mode, doc)),
            },
            Document::Group(doc) => {
                // When we see a group we first try and see if it can fit on a
                // single line without breaking any break; that is why we use
                // the `Unbroken` mode here: we want to try to fit everything on
                // a single line.
                let group_docs = im::vector![(indent, Mode::Unbroken, doc.as_ref())];
                if fits(limit, width, group_docs) {
                    // If everything can stay on a single line we print the
                    // wrapped document with the `Unbroken` mode, leaving all
                    // the group's break as unbroken.
                    docs.push_front((indent, Mode::Unbroken, doc));
                } else {
                    // Otherwise, we need to break the group. We print the
                    // wrapped document changing its mode to `Broken` so that
                    // all its breaks will be split on newlines.
                    docs.push_front((indent, Mode::Broken, doc));
                }
            }
            // `ForceBroken` and `NextBreakFits` only change the way the `fit`
            // function works but do not actually change the formatting of a
            // document by themselves. That's why when we run into those we
            // just go on printing the wrapped document without altering the
            // current mode.
            Document::ForceBroken(document) | Document::NextBreakFits(document, _) => {
                docs.push_front((indent, mode, document));
            }
        }
    }
    Ok(())
}
/// Renders an empty document: produces no output at all.
pub fn nil<'a>() -> Document<'a> {
    Document::Vec(Vec::new())
}
/// Renders exactly one newline, unconditionally (unlike a `Break`, which
/// only becomes a newline when its group is broken).
pub fn line<'a>() -> Document<'a> {
    Document::Line(1)
}
/// Renders `i` consecutive newlines, unconditionally.
pub fn lines<'a>(i: usize) -> Document<'a> {
    Document::Line(i)
}
/// A point at which the formatter is allowed to insert a newline. Together
/// with hardcoded `Line`s, these are the only places line breaks can occur.
///
/// When the formatter decides a group cannot fit on one line, every strict
/// break inside that group is rendered broken; otherwise they are all
/// rendered unbroken.
///
/// A broken `Break` prints its `broken` string followed by a newline, while
/// an unbroken one prints just its `unbroken` string.
///
/// For example:
/// ```rust:norun
/// let document = docvec!["Hello", break_("", ", "), "world!"];
/// assert_eq!(document.to_pretty_string(20), "Hello, world!");
/// assert_eq!(document.to_pretty_string(10), "Hello\nworld!");
/// ```
///
pub fn break_<'a>(broken: &'a str, unbroken: &'a str) -> Document<'a> {
    Document::Break {
        kind: BreakKind::Strict,
        broken,
        unbroken,
    }
}
/// A possible line-break point, like `break_()`, with one key difference:
/// when a group is broken, all strict breaks break together, whereas each
/// `flex_break` decides on its own whether to break, based on whether what
/// follows still fits on the current line.
///
/// For example:
/// ```rust:norun
/// let with_breaks = docvec!["Hello", break_("", ", "), "pretty", break_("", ", "), "printed!"];
/// assert_eq!(with_breaks.to_pretty_string(20), "Hello\npretty\nprinted!");
///
/// let with_flex_breaks = docvec!["Hello", flex_break("", ", "), "pretty", flex_break("", ", "), "printed!"];
/// assert_eq!(with_flex_breaks.to_pretty_string(20), "Hello, pretty\nprinted!");
/// ```
///
pub fn flex_break<'a>(broken: &'a str, unbroken: &'a str) -> Document<'a> {
    Document::Break {
        kind: BreakKind::Flex,
        broken,
        unbroken,
    }
}
/// A string that is not counted towards line length when the formatter
/// measures whether a document fits. Useful for text that occupies no
/// visible horizontal space in the output, such as ANSI colour codes or
/// HTML elements.
///
/// For example:
/// ```rust:norun
/// let document = docvec!["Hello", zero_width_string("This is a very long string"), break_("", ""), "world"];
/// assert_eq!(document.to_pretty_string(20), "HelloThis is a very long stringworld");
/// ```
///
pub fn zero_width_string<'a>(string: EcoString) -> Document<'a> {
    Document::ZeroWidthString { string }
}
impl<'a> Document<'a> {
/// Creates a document from a string slice.
pub fn str(string: &'a str) -> Self {
Document::Str {
graphemes: string.graphemes(true).count() as isize,
string,
}
}
/// Creates a document from an owned `EcoString`.
pub fn eco_string(string: EcoString) -> Self {
Document::EcoString {
graphemes: string.graphemes(true).count() as isize,
string,
}
}
/// Groups a document. When pretty printing a group, the formatter will
/// first attempt to fit the entire group on one line. If it fails, all
/// `break_` documents in the group will render broken.
///
/// Nested groups are handled separately to their parents, so if the
/// outermost group is broken, any sub-groups might be rendered broken
/// or unbroken, depending on whether they fit on a single line.
pub fn group(self) -> Self {
Self::Group(Box::new(self))
}
/// Sets the indentation level of a document.
pub fn set_nesting(self, indent: isize) -> Self {
Self::Nest(indent, NestMode::Set, NestCondition::Always, Box::new(self))
}
/// Nests a document by a certain indentation. When rending linebreaks, the
/// formatter will print a new line followed by the current indentation.
pub fn nest(self, indent: isize) -> Self {
Self::Nest(
indent,
NestMode::Increase,
NestCondition::Always,
Box::new(self),
)
}
/// Nests a document by a certain indentation, but only if the current
/// group is broken.
pub fn nest_if_broken(self, indent: isize) -> Self {
Self::Nest(
indent,
NestMode::Increase,
NestCondition::IfBroken,
Box::new(self),
)
}
/// Forces all `break_` and `flex_break` documents in the current group
/// to render broken.
pub fn force_break(self) -> Self {
Self::ForceBroken(Box::new(self))
}
/// Force the next `Break` to render unbroken, regardless of whether it
/// fits on the line or not.
pub fn next_break_fits(self, mode: NextBreakFitsMode) -> Self {
Self::NextBreakFits(Box::new(self), mode)
}
/// Appends one document to another. Equivalent to `docvec![self, second]`,
/// except that it `self` is already a `Document::Vec`, it will append
/// directly to it instead of allocating a new vector.
///
/// Useful when chaining multiple documents together in a fashion where
/// they cannot be put all into one `docvec!` macro.
pub fn append(self, second: impl Documentable<'a>) -> Self {
match self {
Self::Vec(mut vec) => {
vec.push(second.to_doc());
Self::Vec(vec)
}
Self::Line(..)
| Self::ForceBroken(..)
| Self::NextBreakFits(..)
| Self::Break { .. }
| Self::Nest(..)
| Self::Group(..)
| Self::Str { .. }
| Self::EcoString { .. }
| Self::ZeroWidthString { .. } => Self::Vec(vec![self, second.to_doc()]),
}
}
/// Prints a document into a `String`, attempting to limit lines to `limit`
/// characters in length.
pub fn to_pretty_string(self, limit: isize) -> String {
let mut buffer = String::new();
self.pretty_print(limit, &mut buffer)
.expect("Writing to string buffer failed");
buffer
}
/// Surrounds a document in two delimiters. Equivalent to
/// `docvec![option, self, closed]`.
| rust | Apache-2.0 | f424547f02e621f1c5f28749786e05eda7feb098 | 2026-01-04T15:40:22.554517Z | true |
gleam-lang/gleam | https://github.com/gleam-lang/gleam/blob/f424547f02e621f1c5f28749786e05eda7feb098/compiler-core/src/diagnostic.rs | compiler-core/src/diagnostic.rs | use std::collections::HashMap;
use camino::Utf8PathBuf;
pub use codespan_reporting::diagnostic::{LabelStyle, Severity};
use codespan_reporting::{diagnostic::Label as CodespanLabel, files::SimpleFiles};
use ecow::EcoString;
use termcolor::Buffer;
use crate::ast::SrcSpan;
/// Severity of a diagnostic; mapped onto codespan's `Severity` when the
/// diagnostic is rendered with source context.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Level {
    Error,
    Warning,
}
/// A message attached to a span of source code within a diagnostic.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Label {
    /// Optional message shown alongside the highlighted span.
    pub text: Option<String>,
    /// The source span (start..end offsets) this label points at.
    pub span: SrcSpan,
}
impl Label {
    /// Converts this label into a codespan label for the given file id,
    /// attaching the message text when one is present.
    fn to_codespan_label(&self, fileid: usize, style: LabelStyle) -> CodespanLabel<usize> {
        let range = (self.span.start as usize)..(self.span.end as usize);
        let label = CodespanLabel::new(style, fileid, range);
        if let Some(text) = &self.text {
            label.with_message(text.clone())
        } else {
            label
        }
    }
}
/// An additional (secondary) label on a diagnostic, possibly pointing into
/// a different file than the main label.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct ExtraLabel {
    /// Source text and path of the file this label points into; when `None`
    /// the main diagnostic's source and path are used instead.
    pub src_info: Option<(EcoString, Utf8PathBuf)>,
    pub label: Label,
}
/// The source-code context of a diagnostic: the file it occurred in, its
/// primary label, and any secondary labels.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Location {
    /// Full source text of the file the diagnostic points into.
    pub src: EcoString,
    /// Path of that file, used as its display name.
    pub path: Utf8PathBuf,
    /// The primary label of the diagnostic.
    pub label: Label,
    /// Secondary labels, possibly in other files.
    pub extra_labels: Vec<ExtraLabel>,
}
// TODO: split this into locationed diagnostics and locationless diagnostics
/// A compiler diagnostic (error or warning), ready to be rendered for the
/// user, optionally annotated with source-code locations.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Diagnostic {
    pub title: String,
    pub text: String,
    pub level: Level,
    /// When present, the diagnostic is rendered with source context;
    /// otherwise only a coloured title line is printed.
    pub location: Option<Location>,
    pub hint: Option<String>,
}
impl Diagnostic {
    /// Renders this diagnostic into the buffer: the located (or plain) title
    /// first, then the body text, then an optional hint.
    pub fn write(&self, buffer: &mut Buffer) {
        use std::io::Write;
        if let Some(location) = &self.location {
            self.write_span(location, buffer);
        } else {
            self.write_title(buffer);
        }
        if !self.text.is_empty() {
            writeln!(buffer, "{}", self.text).expect("write text");
        }
        if let Some(hint) = &self.hint {
            writeln!(buffer, "Hint: {hint}").expect("write hint");
        }
    }

    /// Renders the diagnostic with source-code context via codespan,
    /// registering each referenced file exactly once.
    fn write_span(&self, location: &Location, buffer: &mut Buffer) {
        let mut files = SimpleFiles::new();
        // Maps a file path to the id codespan assigned it, so files that are
        // referenced by several labels are only added once.
        let mut file_map = HashMap::new();

        let main_path = location.path.as_str();
        let main_src = location.src.as_str();
        let main_file_id = files.add(main_path, main_src);
        let _ = file_map.insert(main_path, main_file_id);

        let mut labels = vec![
            location
                .label
                .to_codespan_label(main_file_id, LabelStyle::Primary),
        ];
        for extra in &location.extra_labels {
            // Extra labels without their own source info point into the main
            // file of the diagnostic.
            let (src, path) = match &extra.src_info {
                Some((src, path)) => (src.as_str(), path.as_str()),
                None => (main_src, main_path),
            };
            let file_id = *file_map.entry(path).or_insert_with(|| files.add(path, src));
            labels.push(
                extra
                    .label
                    .to_codespan_label(file_id, LabelStyle::Secondary),
            );
        }

        let severity = match self.level {
            Level::Error => Severity::Error,
            Level::Warning => Severity::Warning,
        };
        let diagnostic = codespan_reporting::diagnostic::Diagnostic::new(severity)
            .with_message(&self.title)
            .with_labels(labels);
        let config = codespan_reporting::term::Config::default();
        codespan_reporting::term::emit(buffer, &config, &files, &diagnostic)
            .expect("write_diagnostic");
    }

    /// Renders just a coloured `error:`/`warning:` title line, used when the
    /// diagnostic has no source location.
    fn write_title(&self, buffer: &mut Buffer) {
        use std::io::Write;
        use termcolor::{Color, ColorSpec, WriteColor};
        let (kind, colour) = match self.level {
            Level::Error => ("error", Color::Red),
            Level::Warning => ("warning", Color::Yellow),
        };
        buffer
            .set_color(ColorSpec::new().set_bold(true).set_fg(Some(colour)))
            .expect("write_title_color1");
        write!(buffer, "{kind}").expect("write_title_kind");
        buffer
            .set_color(ColorSpec::new().set_bold(true))
            .expect("write_title_color2");
        write!(buffer, ": {}\n\n", self.title).expect("write_title_title");
        buffer
            .set_color(&ColorSpec::new())
            .expect("write_title_reset");
    }
}
| rust | Apache-2.0 | f424547f02e621f1c5f28749786e05eda7feb098 | 2026-01-04T15:40:22.554517Z | false |
gleam-lang/gleam | https://github.com/gleam-lang/gleam/blob/f424547f02e621f1c5f28749786e05eda7feb098/compiler-core/src/exhaustiveness/printer.rs | compiler-core/src/exhaustiveness/printer.rs | use std::collections::HashMap;
use ecow::EcoString;
use crate::type_::printer::{NameContextInformation, Names};
use super::{Variable, missing_patterns::Term};
/// Pretty-prints exhaustiveness `Term`s as Gleam patterns, consulting
/// `Names` to decide how each constructor should be (un)qualified.
#[derive(Debug)]
pub struct Printer<'a> {
    names: &'a Names,
}
impl<'a> Printer<'a> {
    /// Creates a printer that resolves constructor names through `names`.
    pub fn new(names: &'a Names) -> Self {
        Printer { names }
    }
    /// Prints the pattern for each match subject, separated by `", "`.
    ///
    /// `mapping` maps a variable id to the index of its `Term` in `terms`;
    /// subjects with no mapped term are printed as the `_` catch-all.
    pub fn print_terms(
        &self,
        subjects: &[Variable],
        terms: &[Term],
        mapping: &HashMap<usize, usize>,
    ) -> EcoString {
        let mut buffer = EcoString::new();
        for (i, subject) in subjects.iter().enumerate() {
            if i != 0 {
                buffer.push_str(", ");
            }
            match mapping.get(&subject.id) {
                Some(&index) => {
                    let term = terms.get(index).expect("Term must exist");
                    self.print(term, terms, mapping, &mut buffer);
                }
                None => buffer.push('_'),
            }
        }
        buffer
    }
    /// Recursively prints a single term into `buffer`, looking up nested
    /// terms for fields/elements through `mapping`.
    fn print(
        &self,
        term: &Term,
        terms: &[Term],
        mapping: &HashMap<usize, usize>,
        buffer: &mut EcoString,
    ) {
        match term {
            Term::Variant {
                name,
                module,
                fields,
                ..
            } => {
                // Decide how the constructor should be printed: qualified
                // with a module, unqualified, or (when the module is not
                // imported) qualified with the module path's last segment.
                let (module, name) = match self.names.named_constructor(module, name) {
                    NameContextInformation::Qualified(module, name) => (Some(module), name),
                    NameContextInformation::Unqualified(name) => (None, name),
                    NameContextInformation::Unimported(module, name) => {
                        (module.split('/').next_back(), name)
                    }
                };
                if let Some(module) = module {
                    buffer.push_str(module);
                    buffer.push('.');
                }
                buffer.push_str(name);
                if fields.is_empty() {
                    return;
                }
                buffer.push('(');
                for (i, field) in fields.iter().enumerate() {
                    if i != 0 {
                        buffer.push_str(", ");
                    }
                    let mut has_label = false;
                    if let Some(label) = &field.label {
                        buffer.push_str(label);
                        buffer.push(':');
                        has_label = true;
                    }
                    if let Some(&idx) = mapping.get(&field.variable.id) {
                        let term = terms.get(idx).expect("Term must exist");
                        match term {
                            // If it is an infinite term and this field is labelled, it is generally
                            // more useful to print just the label using label shorthand syntax.
                            // For example, printing `Person(name:, age:)` instead of
                            // `Person(name: _, age: _)`.
                            Term::Infinite { .. } if has_label => {}
                            Term::Infinite { .. }
                            | Term::Variant { .. }
                            | Term::Tuple { .. }
                            | Term::EmptyList { .. }
                            | Term::List { .. } => {
                                // If this field has a label, the current buffer looks like `label:`,
                                // so we want to print a space before printing the pattern for it.
                                // If there is no label, we don't need to print the space.
                                if has_label {
                                    buffer.push(' ');
                                }
                                self.print(term, terms, mapping, buffer);
                            }
                        }
                    } else if !has_label {
                        // No term is known for this unlabelled field: print
                        // the catch-all.
                        buffer.push('_');
                    }
                }
                buffer.push(')');
            }
            Term::Tuple { elements, .. } => {
                buffer.push_str("#(");
                for (i, variable) in elements.iter().enumerate() {
                    if i != 0 {
                        buffer.push_str(", ");
                    }
                    if let Some(&idx) = mapping.get(&variable.id) {
                        self.print(
                            terms.get(idx).expect("Term must exist"),
                            terms,
                            mapping,
                            buffer,
                        );
                    } else {
                        buffer.push('_');
                    }
                }
                buffer.push(')');
            }
            Term::Infinite { .. } => buffer.push('_'),
            Term::EmptyList { .. } => buffer.push_str("[]"),
            Term::List { .. } => {
                buffer.push('[');
                self.print_list(term, terms, mapping, buffer);
                buffer.push(']');
            }
        }
    }
    /// Prints the elements of a list term (without the surrounding `[` `]`),
    /// following the `rest` links; an unmapped rest variable is printed as
    /// the `..` remainder pattern.
    fn print_list(
        &self,
        term: &Term,
        terms: &[Term],
        mapping: &HashMap<usize, usize>,
        buffer: &mut EcoString,
    ) {
        match term {
            Term::Infinite { .. } | Term::Variant { .. } | Term::Tuple { .. } => buffer.push('_'),
            Term::EmptyList { .. } => {}
            Term::List { first, rest, .. } => {
                if let Some(&idx) = mapping.get(&first.id) {
                    self.print(
                        terms.get(idx).expect("Term must exist"),
                        terms,
                        mapping,
                        buffer,
                    )
                } else {
                    buffer.push('_');
                }
                if let Some(&idx) = mapping.get(&rest.id) {
                    let term = terms.get(idx).expect("Term must exist");
                    match term {
                        Term::EmptyList { .. } => {}
                        Term::Variant { .. }
                        | Term::Tuple { .. }
                        | Term::Infinite { .. }
                        | Term::List { .. } => {
                            buffer.push_str(", ");
                            self.print_list(term, terms, mapping, buffer)
                        }
                    }
                } else {
                    buffer.push_str(", ..");
                }
            }
        }
    }
}
#[cfg(test)]
mod tests {
    // Tests for `Printer`: each builds a small set of `Term`s by hand,
    // derives the variable-id -> term-index mapping with `get_mapping`, and
    // asserts on the rendered pattern string.
    use ecow::EcoString;
    use super::Printer;
    use std::{collections::HashMap, sync::Arc};
    use crate::{
        ast::SrcSpan,
        exhaustiveness::{
            Variable,
            missing_patterns::{Term, VariantField},
        },
        type_::{Type, printer::Names},
    };
    /// Create a variable with a dummy type, for ease of writing tests
    fn make_variable(id: usize) -> Variable {
        Variable {
            id,
            type_: Arc::new(Type::Tuple {
                elements: Vec::new(),
            }),
        }
    }
    /// Builds a `VariantField` with an optional label for a variable.
    fn field(variable: Variable, label: Option<&str>) -> VariantField {
        VariantField {
            variable,
            label: label.map(EcoString::from),
        }
    }
    /// Maps each term's variable id to its index in `terms`, mirroring what
    /// the missing-pattern generator produces.
    fn get_mapping(terms: &[Term]) -> HashMap<usize, usize> {
        let mut mapping: HashMap<usize, usize> = HashMap::new();
        for (index, term) in terms.iter().enumerate() {
            _ = mapping.insert(term.variable().id, index);
        }
        mapping
    }
    #[test]
    fn test_value_in_current_module() {
        let mut names = Names::new();
        names.named_constructor_in_scope("module".into(), "Wibble".into(), "Wibble".into());
        let printer = Printer::new(&names);
        let subjects = &[make_variable(0)];
        let term = Term::Variant {
            variable: subjects[0].clone(),
            name: "Wibble".into(),
            module: "module".into(),
            fields: Vec::new(),
        };
        let terms = &[term];
        let mapping = get_mapping(terms);
        assert_eq!(printer.print_terms(subjects, terms, &mapping), "Wibble");
    }
    #[test]
    fn test_value_in_current_module_with_arguments() {
        let mut names = Names::new();
        names.named_constructor_in_scope("module".into(), "Wibble".into(), "Wibble".into());
        let printer = Printer::new(&names);
        let var1 = make_variable(1);
        let var2 = make_variable(2);
        let subjects = &[make_variable(0)];
        let term = Term::Variant {
            variable: subjects[0].clone(),
            name: "Wibble".into(),
            module: "module".into(),
            fields: vec![field(var1.clone(), None), field(var2.clone(), None)],
        };
        let terms = &[
            term,
            Term::EmptyList { variable: var1 },
            Term::Infinite { variable: var2 },
        ];
        let mapping = get_mapping(terms);
        assert_eq!(
            printer.print_terms(subjects, terms, &mapping),
            "Wibble([], _)"
        );
    }
    #[test]
    fn test_value_in_current_module_with_labelled_arguments() {
        let mut names = Names::new();
        names.named_constructor_in_scope("module".into(), "Wibble".into(), "Wibble".into());
        let printer = Printer::new(&names);
        let var1 = make_variable(1);
        let var2 = make_variable(2);
        let subjects = &[make_variable(0)];
        let term = Term::Variant {
            variable: subjects[0].clone(),
            name: "Wibble".into(),
            module: "module".into(),
            fields: vec![
                field(var1.clone(), Some("list")),
                field(var2.clone(), Some("other")),
            ],
        };
        let terms = &[
            term,
            Term::EmptyList { variable: var1 },
            Term::Infinite { variable: var2 },
        ];
        let mapping = get_mapping(terms);
        assert_eq!(
            printer.print_terms(subjects, terms, &mapping),
            "Wibble(list: [], other:)"
        );
    }
    #[test]
    fn test_module_alias() {
        let mut names = Names::new();
        assert!(
            names
                .imported_module("mod".into(), "shapes".into(), SrcSpan::new(50, 60))
                .is_none()
        );
        let printer = Printer::new(&names);
        let subjects = &[make_variable(0)];
        let term = Term::Variant {
            variable: subjects[0].clone(),
            name: "Rectangle".into(),
            module: "mod".into(),
            fields: Vec::new(),
        };
        let terms = &[term];
        let mapping = get_mapping(terms);
        assert_eq!(
            printer.print_terms(subjects, terms, &mapping),
            "shapes.Rectangle"
        );
    }
    #[test]
    fn test_unqualified_value() {
        let mut names = Names::new();
        names.named_constructor_in_scope("regex".into(), "Regex".into(), "Regex".into());
        let printer = Printer::new(&names);
        let arg = make_variable(1);
        let subjects = &[make_variable(0)];
        let term = Term::Variant {
            variable: subjects[0].clone(),
            name: "Regex".into(),
            module: "regex".into(),
            fields: vec![field(arg.clone(), None)],
        };
        let terms = &[term, Term::Infinite { variable: arg }];
        let mapping = get_mapping(terms);
        assert_eq!(printer.print_terms(subjects, terms, &mapping), "Regex(_)");
    }
    #[test]
    fn test_unqualified_value_with_alias() {
        let mut names = Names::new();
        names.named_constructor_in_scope("regex".into(), "Regex".into(), "Reg".into());
        names.named_constructor_in_scope("gleam".into(), "None".into(), "None".into());
        let printer = Printer::new(&names);
        let arg = make_variable(1);
        let subjects = &[make_variable(0)];
        let term = Term::Variant {
            variable: subjects[0].clone(),
            name: "Regex".into(),
            module: "regex".into(),
            fields: vec![field(arg.clone(), None)],
        };
        let terms = &[
            term,
            Term::Variant {
                variable: arg,
                name: "None".into(),
                module: "gleam".into(),
                fields: vec![],
            },
        ];
        let mapping = get_mapping(terms);
        assert_eq!(printer.print_terms(subjects, terms, &mapping), "Reg(None)");
    }
    #[test]
    fn test_list_pattern() {
        let mut names = Names::new();
        names.named_constructor_in_scope("module".into(), "Type".into(), "Type".into());
        let printer = Printer::new(&names);
        let var1 = make_variable(1);
        let var2 = make_variable(2);
        let var3 = make_variable(3);
        let subjects = &[make_variable(0)];
        let term = Term::List {
            variable: subjects[0].clone(),
            first: var1.clone(),
            rest: var2.clone(),
        };
        let terms = &[
            term,
            Term::Variant {
                variable: var1,
                name: "Type".into(),
                module: "module".into(),
                fields: Vec::new(),
            },
            Term::List {
                variable: var2,
                first: var3.clone(),
                rest: make_variable(4),
            },
            Term::Infinite { variable: var3 },
        ];
        let mapping = get_mapping(terms);
        assert_eq!(
            printer.print_terms(subjects, terms, &mapping),
            "[Type, _, ..]"
        );
    }
    #[test]
    fn test_multi_pattern() {
        let mut names = Names::new();
        names.named_constructor_in_scope("gleam".into(), "Ok".into(), "Ok".into());
        names.named_constructor_in_scope("gleam".into(), "False".into(), "False".into());
        let printer = Printer::new(&names);
        let subjects = &[make_variable(0), make_variable(1), make_variable(2)];
        let terms = &[
            Term::Variant {
                variable: subjects[0].clone(),
                name: "Ok".into(),
                module: "gleam".into(),
                fields: vec![field(make_variable(3), None)],
            },
            Term::Variant {
                variable: subjects[2].clone(),
                name: "False".into(),
                module: "gleam".into(),
                fields: Vec::new(),
            },
        ];
        let mapping = get_mapping(terms);
        assert_eq!(
            printer.print_terms(subjects, terms, &mapping),
            "Ok(_), _, False"
        );
    }
}
| rust | Apache-2.0 | f424547f02e621f1c5f28749786e05eda7feb098 | 2026-01-04T15:40:22.554517Z | false |
gleam-lang/gleam | https://github.com/gleam-lang/gleam/blob/f424547f02e621f1c5f28749786e05eda7feb098/compiler-core/src/exhaustiveness/missing_patterns.rs | compiler-core/src/exhaustiveness/missing_patterns.rs | use super::{CompileCaseResult, Decision, FallbackCheck, RuntimeCheck, Variable, printer::Printer};
use crate::type_::environment::Environment;
use ecow::EcoString;
use indexmap::IndexSet;
use std::collections::HashMap;
/// Returns the patterns (rendered as strings) that the match expression
/// does not cover.
pub fn missing_patterns(
    result: &CompileCaseResult,
    environment: &Environment<'_>,
) -> Vec<EcoString> {
    let subjects = &result.compiled_case.subject_variables;
    let mut generator = MissingPatternsGenerator::new(subjects, environment);
    generator.add_missing_patterns(&result.compiled_case.tree);
    // Preserves insertion order, since `missing` is an `IndexSet`.
    Vec::from_iter(generator.missing)
}
/// A single field of a `Term::Variant`, with its (optional) label.
#[derive(Debug, Clone)]
pub struct VariantField {
    /// The field's label, when the constructor argument is labelled.
    pub label: Option<EcoString>,
    /// The variable this field's value is bound to.
    pub variable: Variable,
}
/// Information about a single constructor/value (aka term) being tested, used
/// to build a list of names of missing patterns.
#[derive(Debug)]
pub enum Term {
    /// A named custom-type constructor with its fields.
    Variant {
        variable: Variable,
        name: EcoString,
        module: EcoString,
        fields: Vec<VariantField>,
    },
    /// A tuple with one variable per element.
    Tuple {
        variable: Variable,
        elements: Vec<Variable>,
    },
    /// A value from a type with (practically) infinitely many values
    /// (ints, floats, strings, bit arrays); printed as the `_` catch-all.
    Infinite {
        variable: Variable,
    },
    /// The empty list `[]`.
    EmptyList {
        variable: Variable,
    },
    /// A non-empty list with a first element and a rest.
    List {
        variable: Variable,
        first: Variable,
        rest: Variable,
    },
}
impl Term {
    /// The variable this term was produced for.
    pub fn variable(&self) -> &Variable {
        // Every variant carries a `variable` field, so a single or-pattern
        // covers them all.
        match self {
            Term::Variant { variable, .. }
            | Term::Tuple { variable, .. }
            | Term::Infinite { variable }
            | Term::EmptyList { variable }
            | Term::List { variable, .. } => variable,
        }
    }
}
/// Walks a compiled decision tree collecting, for every reachable `Fail`
/// node, the pattern that would have matched it.
struct MissingPatternsGenerator<'a, 'env> {
    /// The match expression's subject variables, one pattern is printed per
    /// subject.
    subjects: &'a Vec<Variable>,
    /// Stack of terms for the decision-tree path currently being explored.
    terms: Vec<Term>,
    /// The missing patterns collected so far (deduplicated, insertion order).
    missing: IndexSet<EcoString>,
    environment: &'a Environment<'env>,
    printer: Printer<'a>,
}
impl<'a, 'env> MissingPatternsGenerator<'a, 'env> {
fn new(subjects: &'a Vec<Variable>, environment: &'a Environment<'env>) -> Self {
MissingPatternsGenerator {
subjects,
terms: vec![],
missing: IndexSet::new(),
environment,
printer: Printer::new(&environment.names),
}
}
    /// Renders the current term stack into a pattern string, one pattern per
    /// subject, using the variable-id -> term-index `mapping`.
    fn print_terms(&self, mapping: HashMap<usize, usize>) -> EcoString {
        self.printer
            .print_terms(self.subjects, &self.terms, &mapping)
    }
    /// Recursively walks the decision tree; every reachable `Fail` node
    /// contributes one missing pattern built from the current term stack.
    fn add_missing_patterns(&mut self, node: &Decision) {
        match node {
            // A `Run` node is a matched case: nothing is missing here.
            Decision::Run { .. } => {}
            // A guard may evaluate to false, so only the `if_false` branch
            // can expose missing patterns.
            Decision::Guard { if_false, .. } => self.add_missing_patterns(if_false),
            Decision::Fail => {
                // At this point the terms stack looks something like this:
                // `[term, term + arguments, term, ...]`. To construct a pattern
                // name from this stack, we first map all variables to their
                // term indexes. This is needed because when a term defines
                // arguments, the terms for those arguments don't necessarily
                // appear in order in the term stack.
                //
                // This mapping is then used when (recursively) generating a
                // pattern name.
                //
                // This approach could probably be done more efficiently, so if
                // you're reading this and happen to know of a way, please
                // submit a merge request :)
                let mut mapping = HashMap::new();
                for (index, step) in self.terms.iter().enumerate() {
                    _ = mapping.insert(step.variable().id, index);
                }
                let pattern = self.print_terms(mapping);
                _ = self.missing.insert(pattern);
            }
            Decision::Switch {
                var,
                choices,
                fallback,
                fallback_check,
            } => {
                // Explore every explicit choice, then the fallback branch
                // under whichever check(s) lead to it.
                for (check, body) in choices {
                    self.add_missing_patterns_after_check(var, check, body);
                }
                match fallback_check.as_ref() {
                    FallbackCheck::InfiniteCatchAll => {
                        self.add_missing_patterns(fallback);
                    }
                    FallbackCheck::RuntimeCheck { check } => {
                        self.add_missing_patterns_after_check(var, check, fallback)
                    }
                    FallbackCheck::CatchAll { ignored_checks } => {
                        for check in ignored_checks {
                            self.add_missing_patterns_after_check(var, check, fallback);
                        }
                    }
                };
            }
        }
    }
fn add_missing_patterns_after_check(
&mut self,
var: &Variable,
check: &RuntimeCheck,
body: &Decision,
) {
let term = self.check_to_term(var.clone(), check);
self.terms.push(term);
self.add_missing_patterns(body);
_ = self.terms.pop();
}
fn check_to_term(&self, variable: Variable, check: &RuntimeCheck) -> Term {
match check {
RuntimeCheck::Int { .. }
| RuntimeCheck::Float { .. }
| RuntimeCheck::String { .. }
| RuntimeCheck::BitArray { .. }
| RuntimeCheck::StringPrefix { .. } => Term::Infinite { variable },
RuntimeCheck::Tuple { elements, .. } => Term::Tuple {
variable,
elements: elements.clone(),
},
RuntimeCheck::Variant {
index,
fields,
labels,
..
} => {
let (module, name) = variable
.type_
.named_type_name()
.expect("Should be a named type");
let name = self
.environment
.get_constructors_for_type(&module, &name)
.expect("Custom type constructor must have custom type kind")
.variants
.get(*index)
.expect("Custom type constructor exist for type")
.name
.clone();
let fields = fields
.iter()
.enumerate()
.map(|(index, variable)| VariantField {
label: labels.get(&index).cloned(),
variable: variable.clone(),
})
.collect();
Term::Variant {
variable,
name,
module,
fields,
}
}
RuntimeCheck::NonEmptyList { first, rest } => Term::List {
variable,
first: first.clone(),
rest: rest.clone(),
},
RuntimeCheck::EmptyList => Term::EmptyList { variable },
}
}
}
| rust | Apache-2.0 | f424547f02e621f1c5f28749786e05eda7feb098 | 2026-01-04T15:40:22.554517Z | false |
gleam-lang/gleam | https://github.com/gleam-lang/gleam/blob/f424547f02e621f1c5f28749786e05eda7feb098/compiler-core/src/parse/lexer.rs | compiler-core/src/parse/lexer.rs | use ecow::EcoString;
use crate::ast::SrcSpan;
use crate::parse::LiteralFloatValue;
use crate::parse::error::{LexicalError, LexicalErrorType};
use crate::parse::token::Token;
use std::char;
use super::error::InvalidUnicodeEscapeError;
/// A hand-written lexer over a stream of `(byte offset, char)` pairs.
#[derive(Debug)]
pub struct Lexer<T: Iterator<Item = (u32, char)>> {
    /// Remaining input characters.
    chars: T,
    /// Tokens lexed but not yet handed to the consumer.
    pending: Vec<Spanned>,
    /// One-character lookahead: the character currently under inspection.
    chr0: Option<char>,
    /// Two-character lookahead.
    chr1: Option<char>,
    // NOTE(review): presumably the byte offsets corresponding to chr0/chr1;
    // they are maintained by next_char/get_pos, which are outside this view —
    // confirm there.
    loc0: u32,
    loc1: u32,
}
/// A token with its start and end byte offsets: `(start, token, end)`.
pub type Spanned = (u32, Token, u32);
/// The result of lexing a single token.
pub type LexResult = Result<Spanned, LexicalError>;
/// Returns the keyword token for `word`, or `None` if it is not a reserved
/// word of the language.
pub fn str_to_keyword(word: &str) -> Option<Token> {
    // Alphabetical keywords:
    let token = match word {
        "as" => Token::As,
        "assert" => Token::Assert,
        "auto" => Token::Auto,
        "case" => Token::Case,
        "const" => Token::Const,
        "delegate" => Token::Delegate,
        "derive" => Token::Derive,
        "echo" => Token::Echo,
        "else" => Token::Else,
        "fn" => Token::Fn,
        "if" => Token::If,
        "implement" => Token::Implement,
        "import" => Token::Import,
        "let" => Token::Let,
        "macro" => Token::Macro,
        "opaque" => Token::Opaque,
        "panic" => Token::Panic,
        "pub" => Token::Pub,
        "test" => Token::Test,
        "todo" => Token::Todo,
        "type" => Token::Type,
        "use" => Token::Use,
        _ => return None,
    };
    Some(token)
}
/// Builds the token stream for `source`. Positions are `u32` byte offsets,
/// and all newline styles are normalised to `\n` before lexing.
pub fn make_tokenizer(source: &str) -> impl Iterator<Item = LexResult> + '_ {
    let positioned = source.char_indices().map(|(index, c)| (index as u32, c));
    Lexer::new(NewlineHandler::new(positioned))
}
// The newline handler is an iterator which collapses different newline
// types into \n always.
#[derive(Debug)]
pub struct NewlineHandler<T: Iterator<Item = (u32, char)>> {
    /// The underlying character stream.
    source: T,
    /// One-pair lookahead: the next pair to be yielded.
    chr0: Option<(u32, char)>,
    /// Two-pair lookahead, needed to spot `\r\n` sequences.
    chr1: Option<(u32, char)>,
}
impl<T> NewlineHandler<T>
where
    T: Iterator<Item = (u32, char)>,
{
    /// Creates a handler with its two-pair lookahead primed.
    pub fn new(source: T) -> Self {
        let mut handler = NewlineHandler {
            source,
            chr0: None,
            chr1: None,
        };
        // Shift twice so chr0/chr1 hold the first two pairs of the stream.
        let _ = handler.shift();
        let _ = handler.shift();
        handler
    }
    /// Advances the lookahead window by one, returning the pair that fell
    /// out of it.
    fn shift(&mut self) -> Option<(u32, char)> {
        let previous = std::mem::replace(&mut self.chr0, self.chr1);
        self.chr1 = self.source.next();
        previous
    }
}
impl<T> Iterator for NewlineHandler<T>
where
    T: Iterator<Item = (u32, char)>,
{
    type Item = (u32, char);
    fn next(&mut self) -> Option<Self::Item> {
        // Normalise both Windows (`\r\n`) and classic Mac (`\r`) line
        // endings to `\n`, keeping the byte position of the `\r`.
        if let Some((position, '\r')) = self.chr0 {
            if matches!(self.chr1, Some((_, '\n'))) {
                // Drop the `\r` of a `\r\n` pair so only one `\n` remains.
                let _ = self.shift();
            }
            self.chr0 = Some((position, '\n'));
        }
        self.shift()
    }
}
impl<T> Lexer<T>
where
T: Iterator<Item = (u32, char)>,
{
pub fn new(input: T) -> Self {
let mut lxr = Lexer {
chars: input,
pending: Vec::new(),
chr0: None,
chr1: None,
loc0: 0,
loc1: 0,
};
let _ = lxr.next_char();
let _ = lxr.next_char();
lxr
}
// This is the main entry point. Call this function to retrieve the next token.
// This function is used by the iterator implementation.
fn inner_next(&mut self) -> LexResult {
// top loop, keep on processing, until we have something pending.
while self.pending.is_empty() {
self.consume_normal()?;
}
Ok(self.pending.remove(0))
}
// Take a look at the next character, if any, and decide upon the next steps.
fn consume_normal(&mut self) -> Result<(), LexicalError> {
// Check if we have some character:
if let Some(c) = self.chr0 {
let mut check_for_minus = false;
if self.is_upname_start(c) {
let name = self.lex_upname()?;
self.emit(name)
} else if self.is_name_start(c) {
check_for_minus = true;
let name = self.lex_name()?;
self.emit(name);
} else if self.is_number_start(c, self.chr1) {
check_for_minus = true;
let num = self.lex_number()?;
self.emit(num);
} else {
self.consume_character(c)?;
}
if check_for_minus {
// We want to lex `1-1` and `x-1` as `1 - 1` and `x - 1`
if Some('-') == self.chr0 && self.is_number_start('-', self.chr1) {
self.eat_single_char(Token::Minus);
}
}
} else {
// We reached end of file.
let tok_pos = self.get_pos();
self.emit((tok_pos, Token::EndOfFile, tok_pos));
}
Ok(())
}
    /// Lexes the token that starts with the punctuation/operator/whitespace
    /// character `c` (which is the current `chr0`), emitting it onto the
    /// pending queue or returning a lexical error.
    fn consume_character(&mut self, c: char) -> Result<(), LexicalError> {
        match c {
            '@' => {
                self.eat_single_char(Token::At);
            }
            '"' => {
                let string = self.lex_string()?;
                self.emit(string);
            }
            // `=`, `==`, or the dedicated `===` error.
            '=' => {
                let tok_start = self.get_pos();
                let _ = self.next_char();
                match self.chr0 {
                    Some('=') => {
                        let _ = self.next_char();
                        let tok_end = self.get_pos();
                        // `===` gets its own error to help users coming from
                        // languages that have a triple-equal operator.
                        if let Some('=') = self.chr0 {
                            return Err(LexicalError {
                                error: LexicalErrorType::InvalidTripleEqual,
                                location: SrcSpan {
                                    start: tok_start,
                                    end: tok_end + 1,
                                },
                            });
                        };
                        self.emit((tok_start, Token::EqualEqual, tok_end));
                    }
                    _ => {
                        let tok_end = self.get_pos();
                        self.emit((tok_start, Token::Equal, tok_end));
                    }
                }
            }
            // `+` or the float variant `+.`.
            '+' => {
                let tok_start = self.get_pos();
                let _ = self.next_char();
                if let Some('.') = self.chr0 {
                    let _ = self.next_char();
                    let tok_end = self.get_pos();
                    self.emit((tok_start, Token::PlusDot, tok_end));
                } else {
                    let tok_end = self.get_pos();
                    self.emit((tok_start, Token::Plus, tok_end));
                }
            }
            // `*` or the float variant `*.`.
            '*' => {
                let tok_start = self.get_pos();
                let _ = self.next_char();
                match self.chr0 {
                    Some('.') => {
                        let _ = self.next_char();
                        let tok_end = self.get_pos();
                        self.emit((tok_start, Token::StarDot, tok_end));
                    }
                    _ => {
                        let tok_end = self.get_pos();
                        self.emit((tok_start, Token::Star, tok_end));
                    }
                }
            }
            // `/`, the float variant `/.`, or a `//` comment.
            '/' => {
                let tok_start = self.get_pos();
                let _ = self.next_char();
                match self.chr0 {
                    Some('.') => {
                        let _ = self.next_char();
                        let tok_end = self.get_pos();
                        self.emit((tok_start, Token::SlashDot, tok_end));
                    }
                    Some('/') => {
                        let _ = self.next_char();
                        let comment = self.lex_comment();
                        self.emit(comment);
                    }
                    _ => {
                        let tok_end = self.get_pos();
                        self.emit((tok_start, Token::Slash, tok_end));
                    }
                }
            }
            '%' => {
                self.eat_single_char(Token::Percent);
            }
            // `||`, the pipe `|>`, or `|`.
            '|' => {
                let tok_start = self.get_pos();
                let _ = self.next_char();
                if let Some('|') = self.chr0 {
                    let _ = self.next_char();
                    let tok_end = self.get_pos();
                    self.emit((tok_start, Token::VbarVbar, tok_end));
                } else if let Some('>') = self.chr0 {
                    let _ = self.next_char();
                    let tok_end = self.get_pos();
                    self.emit((tok_start, Token::Pipe, tok_end));
                } else {
                    let tok_end = self.get_pos();
                    self.emit((tok_start, Token::Vbar, tok_end));
                }
            }
            // Only `&&` is valid; a lone `&` is an error.
            '&' => {
                let tok_start = self.get_pos();
                let _ = self.next_char();
                if let Some('&') = self.chr0 {
                    let _ = self.next_char();
                    let tok_end = self.get_pos();
                    self.emit((tok_start, Token::AmperAmper, tok_end));
                } else {
                    return Err(LexicalError {
                        error: LexicalErrorType::UnrecognizedToken { tok: '&' },
                        location: SrcSpan {
                            start: tok_start,
                            end: tok_start,
                        },
                    });
                }
            }
            // `-`, the float variant `-.`, or the arrow `->`.
            '-' => {
                let tok_start = self.get_pos();
                let _ = self.next_char();
                match self.chr0 {
                    Some('.') => {
                        let _ = self.next_char();
                        let tok_end = self.get_pos();
                        self.emit((tok_start, Token::MinusDot, tok_end));
                    }
                    Some('>') => {
                        let _ = self.next_char();
                        let tok_end = self.get_pos();
                        self.emit((tok_start, Token::RArrow, tok_end));
                    }
                    _ => {
                        let tok_end = self.get_pos();
                        self.emit((tok_start, Token::Minus, tok_end));
                    }
                }
            }
            // `!=` or `!`.
            '!' => {
                let tok_start = self.get_pos();
                let _ = self.next_char();
                if let Some('=') = self.chr0 {
                    let _ = self.next_char();
                    let tok_end = self.get_pos();
                    self.emit((tok_start, Token::NotEqual, tok_end));
                } else {
                    let tok_end = self.get_pos();
                    self.emit((tok_start, Token::Bang, tok_end));
                }
            }
            '(' => {
                self.eat_single_char(Token::LeftParen);
            }
            ')' => {
                self.eat_single_char(Token::RightParen);
            }
            '[' => {
                self.eat_single_char(Token::LeftSquare);
            }
            ']' => {
                self.eat_single_char(Token::RightSquare);
            }
            '{' => {
                self.eat_single_char(Token::LeftBrace);
            }
            '}' => {
                self.eat_single_char(Token::RightBrace);
            }
            ':' => {
                self.eat_single_char(Token::Colon);
            }
            // `<>`, `<<`, `<.`, `<-`, `<=`, `<=.`, or `<`.
            '<' => {
                let tok_start = self.get_pos();
                let _ = self.next_char();
                match self.chr0 {
                    Some('>') => {
                        let _ = self.next_char();
                        let tok_end = self.get_pos();
                        self.emit((tok_start, Token::LtGt, tok_end));
                    }
                    Some('<') => {
                        let _ = self.next_char();
                        let tok_end = self.get_pos();
                        self.emit((tok_start, Token::LtLt, tok_end));
                    }
                    Some('.') => {
                        let _ = self.next_char();
                        let tok_end = self.get_pos();
                        self.emit((tok_start, Token::LessDot, tok_end));
                    }
                    Some('-') => {
                        let _ = self.next_char();
                        let tok_end = self.get_pos();
                        self.emit((tok_start, Token::LArrow, tok_end));
                    }
                    Some('=') => {
                        let _ = self.next_char();
                        match self.chr0 {
                            Some('.') => {
                                let _ = self.next_char();
                                let tok_end = self.get_pos();
                                self.emit((tok_start, Token::LessEqualDot, tok_end));
                            }
                            _ => {
                                let tok_end = self.get_pos();
                                self.emit((tok_start, Token::LessEqual, tok_end));
                            }
                        }
                    }
                    _ => {
                        let tok_end = self.get_pos();
                        self.emit((tok_start, Token::Less, tok_end));
                    }
                }
            }
            // `>>`, `>.`, `>=`, `>=.`, or `>`.
            '>' => {
                let tok_start = self.get_pos();
                let _ = self.next_char();
                match self.chr0 {
                    Some('>') => {
                        let _ = self.next_char();
                        let tok_end = self.get_pos();
                        self.emit((tok_start, Token::GtGt, tok_end));
                    }
                    Some('.') => {
                        let _ = self.next_char();
                        let tok_end = self.get_pos();
                        self.emit((tok_start, Token::GreaterDot, tok_end));
                    }
                    Some('=') => {
                        let _ = self.next_char();
                        match self.chr0 {
                            Some('.') => {
                                let _ = self.next_char();
                                let tok_end = self.get_pos();
                                self.emit((tok_start, Token::GreaterEqualDot, tok_end));
                            }
                            _ => {
                                let tok_end = self.get_pos();
                                self.emit((tok_start, Token::GreaterEqual, tok_end));
                            }
                        }
                    }
                    _ => {
                        let tok_end = self.get_pos();
                        self.emit((tok_start, Token::Greater, tok_end));
                    }
                }
            }
            ',' => {
                self.eat_single_char(Token::Comma);
            }
            // `..`, or `.` possibly followed by tuple index access (`x.0`).
            '.' => {
                let tok_start = self.get_pos();
                let _ = self.next_char();
                if let Some('.') = &self.chr0 {
                    let _ = self.next_char();
                    let tok_end = self.get_pos();
                    self.emit((tok_start, Token::DotDot, tok_end));
                } else {
                    let tok_end = self.get_pos();
                    self.emit((tok_start, Token::Dot, tok_end));
                    self.maybe_lex_dot_access()?;
                }
            }
            '#' => {
                self.eat_single_char(Token::Hash);
            }
            // Whitespace is skipped, but a newline produces a token.
            '\n' | ' ' | '\t' | '\x0C' => {
                let tok_start = self.get_pos();
                let _ = self.next_char();
                let tok_end = self.get_pos();
                if c == '\n' {
                    self.emit((tok_start, Token::NewLine, tok_end));
                }
            }
            // Anything else is not part of the language.
            c => {
                let location = self.get_pos();
                return Err(LexicalError {
                    error: LexicalErrorType::UnrecognizedToken { tok: c },
                    location: SrcSpan {
                        start: location,
                        end: location,
                    },
                });
            }
        }
        Ok(())
    }
// Lexer helper functions:
// this can be either a reserved word, or a name
fn lex_name(&mut self) -> LexResult {
let mut name = String::new();
let start_pos = self.get_pos();
while self.is_name_continuation() {
name.push(self.next_char().expect("lex_name continue"))
}
let end_pos = self.get_pos();
match str_to_keyword(&name) {
Some(tok) => Ok((start_pos, tok, end_pos)),
_ => {
if name.starts_with('_') {
Ok((start_pos, Token::DiscardName { name: name.into() }, end_pos))
} else {
Ok((start_pos, Token::Name { name: name.into() }, end_pos))
}
}
}
}
// A type name or constructor
fn lex_upname(&mut self) -> LexResult {
let mut name = String::new();
let start_pos = self.get_pos();
while self.is_name_continuation() {
name.push(self.next_char().expect("lex_upname upname"));
}
let end_pos = self.get_pos();
match str_to_keyword(&name) {
Some(tok) => Ok((start_pos, tok, end_pos)),
_ => Ok((start_pos, Token::UpName { name: name.into() }, end_pos)),
}
}
fn lex_number(&mut self) -> LexResult {
let start_pos = self.get_pos();
let num = if self.chr0 == Some('0') {
if self.chr1 == Some('x') || self.chr1 == Some('X') {
// Hex!
let _ = self.next_char();
let _ = self.next_char();
self.lex_number_radix(start_pos, 16, "0x")?
} else if self.chr1 == Some('o') || self.chr1 == Some('O') {
// Octal!
let _ = self.next_char();
let _ = self.next_char();
self.lex_number_radix(start_pos, 8, "0o")?
} else if self.chr1 == Some('b') || self.chr1 == Some('B') {
// Binary!
let _ = self.next_char();
let _ = self.next_char();
self.lex_number_radix(start_pos, 2, "0b")?
} else {
self.lex_decimal_number()?
}
} else {
self.lex_decimal_number()?
};
if Some('_') == self.chr0 {
let location = self.get_pos();
Err(LexicalError {
error: LexicalErrorType::NumTrailingUnderscore,
location: SrcSpan {
start: location,
end: location,
},
})
} else {
Ok(num)
}
}
// Lex a hex/octal/decimal/binary number without a decimal point.
fn lex_number_radix(&mut self, start_pos: u32, radix: u32, prefix: &str) -> LexResult {
let num = self.radix_run(radix);
if num.is_empty() {
let location = self.get_pos() - 1;
Err(LexicalError {
error: LexicalErrorType::RadixIntNoValue,
location: SrcSpan {
start: location,
end: location,
},
})
} else if radix < 16 && Lexer::<T>::is_digit_of_radix(self.chr0, 16) {
let location = self.get_pos();
Err(LexicalError {
error: LexicalErrorType::DigitOutOfRadix,
location: SrcSpan {
start: location,
end: location,
},
})
} else {
let value = format!("{prefix}{num}");
let int_value = super::parse_int_value(&value).expect("int value to parse as bigint");
let end_pos = self.get_pos();
Ok((
start_pos,
Token::Int {
value: value.into(),
int_value,
},
end_pos,
))
}
}
    // Lex a normal number, that is, no octal, hex or binary number.
    // This function cannot be reached without the head of the stream being either 0-9 or '-', 0-9
    fn lex_decimal_number(&mut self) -> LexResult {
        self.lex_decimal_or_int_number(true)
    }
    // Lex an integer in decimal notation only (no `.` accepted), used for
    // tuple index access such as `tuple.0`.
    fn lex_int_number(&mut self) -> LexResult {
        self.lex_decimal_or_int_number(false)
    }
    /// Lexes a decimal int or, when `can_lex_decimal` is true, possibly a
    /// float (including scientific notation such as `1.5e-3`).
    fn lex_decimal_or_int_number(&mut self, can_lex_decimal: bool) -> LexResult {
        let start_pos = self.get_pos();
        let mut value = String::new();
        // consume negative sign
        if self.chr0 == Some('-') {
            value.push(self.next_char().expect("lex_normal_number negative"));
        }
        // consume first run of digits
        value.push_str(&self.radix_run(10));
        // If float:
        if can_lex_decimal && self.chr0 == Some('.') {
            value.push(self.next_char().expect("lex_normal_number float"));
            value.push_str(&self.radix_run(10));
            // If scientific:
            if self.chr0 == Some('e') {
                value.push(self.next_char().expect("lex_normal_number scientific"));
                if self.chr0 == Some('-') {
                    value.push(
                        self.next_char()
                            .expect("lex_normal_number scientific negative"),
                    );
                }
                // An exponent marker with no digits after it (`1.0e`) is an
                // error.
                let exponent_run = self.radix_run(10);
                if exponent_run.is_empty() {
                    return Err(LexicalError {
                        error: LexicalErrorType::MissingExponent,
                        location: SrcSpan::new(start_pos, self.get_pos()),
                    });
                }
                value.push_str(&exponent_run);
            }
            let end_pos = self.get_pos();
            let float_value =
                LiteralFloatValue::parse(&value).expect("float value to parse as non-NaN f64");
            Ok((
                start_pos,
                Token::Float {
                    value: value.into(),
                    float_value,
                },
                end_pos,
            ))
        } else {
            let int_value = super::parse_int_value(&value).expect("int value to parse as bigint");
            let end_pos = self.get_pos();
            Ok((
                start_pos,
                Token::Int {
                    value: value.into(),
                    int_value,
                },
                end_pos,
            ))
        }
    }
// Maybe lex dot access that comes after name token.
fn maybe_lex_dot_access(&mut self) -> Result<(), LexicalError> {
// It can be nested like: `tuple.1.2.3.4`
loop {
if matches!(self.chr0, Some('0'..='9')) {
let number = self.lex_int_number()?;
self.emit(number);
} else {
break;
}
}
Ok(())
}
// Consume a sequence of numbers with the given radix,
// the digits can be decorated with underscores
// like this: '1_2_3_4' == '1234'
fn radix_run(&mut self, radix: u32) -> String {
let mut value_text = String::new();
loop {
if let Some(c) = self.take_number(radix) {
value_text.push(c);
} else if self.chr0 == Some('_') && Lexer::<T>::is_digit_of_radix(self.chr1, radix) {
value_text.push('_');
let _ = self.next_char();
} else {
break;
}
}
value_text
}
// Consume a single character with the given radix.
fn take_number(&mut self, radix: u32) -> Option<char> {
let take_char = Lexer::<T>::is_digit_of_radix(self.chr0, radix);
if take_char {
Some(self.next_char().expect("take_number next char"))
} else {
None
}
}
// Test if a digit is of a certain radix.
fn is_digit_of_radix(c: Option<char>, radix: u32) -> bool {
match radix {
2 | 8 | 10 | 16 => c.filter(|c| c.is_digit(radix)).is_some(),
other => panic!("Radix not implemented: {other}"),
}
}
    // There are 3 kinds of comments
    // 2 slash, normal
    // 3 slash, document
    // 4 slash, module
    // this function is entered after 2 slashes
    fn lex_comment(&mut self) -> Spanned {
        enum Kind {
            Comment,
            Doc,
            ModuleDoc,
        }
        // Count any extra slashes to decide which kind of comment this is.
        let kind = match (self.chr0, self.chr1) {
            (Some('/'), Some('/')) => {
                let _ = self.next_char();
                let _ = self.next_char();
                Kind::ModuleDoc
            }
            (Some('/'), _) => {
                let _ = self.next_char();
                Kind::Doc
            }
            _ => Kind::Comment,
        };
        // The comment's text runs to the end of the line (or input).
        let mut content = EcoString::new();
        let start_pos = self.get_pos();
        while Some('\n') != self.chr0 {
            match self.chr0 {
                Some(c) => content.push(c),
                None => break,
            }
            let _ = self.next_char();
        }
        let end_pos = self.get_pos();
        // Only doc comments carry their content; the others are markers.
        let token = match kind {
            Kind::Comment => Token::CommentNormal,
            Kind::Doc => Token::CommentDoc { content },
            Kind::ModuleDoc => Token::CommentModule,
        };
        (start_pos, token, end_pos)
    }
fn lex_string(&mut self) -> LexResult {
let start_pos = self.get_pos();
// advance past the first quote
let _ = self.next_char();
let mut string_content = String::new();
loop {
match self.next_char() {
Some('\\') => {
let slash_pos = self.get_pos() - 1;
if let Some(c) = self.chr0 {
match c {
'f' | 'n' | 'r' | 't' | '"' | '\\' => {
let _ = self.next_char();
string_content.push('\\');
string_content.push(c);
}
'u' => {
let _ = self.next_char();
if self.chr0 != Some('{') {
return Err(LexicalError {
error: LexicalErrorType::InvalidUnicodeEscape(
InvalidUnicodeEscapeError::MissingOpeningBrace,
),
location: SrcSpan {
start: self.get_pos() - 1,
end: self.get_pos(),
},
});
}
// All digits inside \u{...}.
let mut hex_digits = String::new();
loop {
let _ = self.next_char();
let Some(chr) = self.chr0 else {
break;
};
// Don't break early when we've reached 6 digits to ensure a
// useful error message
if chr == '}' {
break;
}
hex_digits.push(chr);
if !chr.is_ascii_hexdigit() {
return Err(LexicalError {
error: LexicalErrorType::InvalidUnicodeEscape(
InvalidUnicodeEscapeError::ExpectedHexDigitOrCloseBrace,
),
location: SrcSpan {
start: self.get_pos(),
end: self.get_pos() + 1,
},
});
}
}
if self.chr0 != Some('}') {
return Err(LexicalError {
error: LexicalErrorType::InvalidUnicodeEscape(
InvalidUnicodeEscapeError::ExpectedHexDigitOrCloseBrace,
),
location: SrcSpan {
start: self.get_pos() - 1,
end: self.get_pos(),
},
});
}
let _ = self.next_char();
if !(1..=6).contains(&hex_digits.len()) {
return Err(LexicalError {
error: LexicalErrorType::InvalidUnicodeEscape(
InvalidUnicodeEscapeError::InvalidNumberOfHexDigits,
),
location: SrcSpan {
start: slash_pos,
end: self.get_pos(),
},
});
}
// Checks for i >= 0x110000 || (i >= 0xD800 && i < 0xE000),
// where i is the unicode codepoint.
if char::from_u32(u32::from_str_radix(&hex_digits, 16).expect(
"Cannot parse codepoint number in Unicode escape sequence",
))
.is_none()
{
return Err(LexicalError {
error: LexicalErrorType::InvalidUnicodeEscape(
InvalidUnicodeEscapeError::InvalidCodepoint,
),
location: SrcSpan {
start: slash_pos,
end: self.get_pos(),
},
});
}
string_content.push_str("\\u{");
string_content.push_str(&hex_digits);
string_content.push('}');
}
_ => {
return Err(LexicalError {
error: LexicalErrorType::BadStringEscape,
location: SrcSpan {
start: slash_pos,
end: slash_pos + 1,
},
});
}
}
} else {
return Err(LexicalError {
error: LexicalErrorType::BadStringEscape,
location: SrcSpan {
start: slash_pos,
end: slash_pos,
},
});
}
}
Some('"') => break,
Some(c) => string_content.push(c),
None => {
return Err(LexicalError {
error: LexicalErrorType::UnexpectedStringEnd,
location: SrcSpan {
start: start_pos,
| rust | Apache-2.0 | f424547f02e621f1c5f28749786e05eda7feb098 | 2026-01-04T15:40:22.554517Z | true |
gleam-lang/gleam | https://github.com/gleam-lang/gleam/blob/f424547f02e621f1c5f28749786e05eda7feb098/compiler-core/src/parse/tests.rs | compiler-core/src/parse/tests.rs | use crate::ast::SrcSpan;
use crate::parse::error::{
InvalidUnicodeEscapeError, LexicalError, LexicalErrorType, ParseError, ParseErrorType,
};
use crate::parse::lexer::make_tokenizer;
use crate::parse::token::Token;
use crate::warning::WarningEmitter;
use camino::Utf8PathBuf;
use ecow::EcoString;
use itertools::Itertools;
use pretty_assertions::assert_eq;
/// Asserts that parsing `$src` as a statement sequence fails.
///
/// The two-argument form compares the produced `ParseError` for exact
/// equality; the one-argument form snapshot-tests the rendered error message.
macro_rules! assert_error {
    ($src:expr, $error:expr $(,)?) => {
        let result = crate::parse::parse_statement_sequence($src).expect_err("should not parse");
        assert_eq!(($src, $error), ($src, result),);
    };
    ($src:expr) => {
        let error = $crate::parse::tests::expect_error($src);
        let output = format!("----- SOURCE CODE\n{}\n\n----- ERROR\n{}", $src, error);
        insta::assert_snapshot!(insta::internals::AutoName, output, $src);
    };
}
/// Asserts that parsing `$src` as a whole module fails, snapshot-testing the
/// rendered error message.
macro_rules! assert_module_error {
    ($src:expr) => {
        let error = $crate::parse::tests::expect_module_error($src);
        let output = format!("----- SOURCE CODE\n{}\n\n----- ERROR\n{}", $src, error);
        insta::assert_snapshot!(insta::internals::AutoName, output, $src);
    };
}
/// Asserts that `$src` parses as a whole module, snapshot-testing the debug
/// representation of the resulting AST.
macro_rules! assert_parse_module {
    ($src:expr) => {
        let result = crate::parse::parse_module(
            camino::Utf8PathBuf::from("test/path"),
            $src,
            &crate::warning::WarningEmitter::null(),
        )
        .expect("should parse");
        insta::assert_snapshot!(insta::internals::AutoName, &format!("{:#?}", result), $src);
    };
}
/// Asserts that `$src` parses as a statement sequence, snapshot-testing the
/// debug representation of the resulting AST.
macro_rules! assert_parse {
    ($src:expr) => {
        let result = crate::parse::parse_statement_sequence($src).expect("should parse");
        insta::assert_snapshot!(insta::internals::AutoName, &format!("{:#?}", result), $src);
    };
}
pub fn expect_module_error(src: &str) -> String {
let result =
crate::parse::parse_module(Utf8PathBuf::from("test/path"), src, &WarningEmitter::null())
.expect_err("should not parse");
let error = crate::error::Error::Parse {
src: src.into(),
path: Utf8PathBuf::from("/src/parse/error.gleam"),
error: Box::new(result),
};
error.pretty_string()
}
pub fn expect_error(src: &str) -> String {
let result = crate::parse::parse_statement_sequence(src).expect_err("should not parse");
let error = crate::error::Error::Parse {
src: src.into(),
path: Utf8PathBuf::from("/src/parse/error.gleam"),
error: Box::new(result),
};
error.pretty_string()
}
// Lexical errors produced by malformed int literals.
#[test]
fn int_tests() {
    // bad binary digit
    assert_error!(
        "0b012",
        ParseError {
            error: ParseErrorType::LexError {
                error: LexicalError {
                    error: LexicalErrorType::DigitOutOfRadix,
                    location: SrcSpan { start: 4, end: 4 },
                }
            },
            location: SrcSpan { start: 4, end: 4 },
        }
    );
    // bad octal digit
    assert_error!(
        "0o12345678",
        ParseError {
            error: ParseErrorType::LexError {
                error: LexicalError {
                    error: LexicalErrorType::DigitOutOfRadix,
                    location: SrcSpan { start: 9, end: 9 },
                }
            },
            location: SrcSpan { start: 9, end: 9 },
        }
    );
    // no int value
    assert_error!(
        "0x",
        ParseError {
            error: ParseErrorType::LexError {
                error: LexicalError {
                    error: LexicalErrorType::RadixIntNoValue,
                    location: SrcSpan { start: 1, end: 1 },
                }
            },
            location: SrcSpan { start: 1, end: 1 },
        }
    );
    // trailing underscore
    assert_error!(
        "1_000_",
        ParseError {
            error: ParseErrorType::LexError {
                error: LexicalError {
                    error: LexicalErrorType::NumTrailingUnderscore,
                    location: SrcSpan { start: 5, end: 5 },
                }
            },
            location: SrcSpan { start: 5, end: 5 },
        }
    );
}
// Lexical errors produced by malformed escape sequences in string literals.
#[test]
fn string_bad_character_escape() {
    assert_error!(
        r#""\g""#,
        ParseError {
            error: ParseErrorType::LexError {
                error: LexicalError {
                    error: LexicalErrorType::BadStringEscape,
                    location: SrcSpan { start: 1, end: 2 },
                }
            },
            location: SrcSpan { start: 1, end: 2 },
        }
    );
}
// The escaped backslash must not hide the invalid `\g` that follows it.
#[test]
fn string_bad_character_escape_leading_backslash() {
    assert_error!(
        r#""\\\g""#,
        ParseError {
            error: ParseErrorType::LexError {
                error: LexicalError {
                    error: LexicalErrorType::BadStringEscape,
                    location: SrcSpan { start: 3, end: 4 },
                }
            },
            location: SrcSpan { start: 3, end: 4 },
        }
    );
}
#[test]
fn string_freestanding_unicode_escape_sequence() {
    assert_error!(
        r#""\u""#,
        ParseError {
            error: ParseErrorType::LexError {
                error: LexicalError {
                    error: LexicalErrorType::InvalidUnicodeEscape(
                        InvalidUnicodeEscapeError::MissingOpeningBrace,
                    ),
                    location: SrcSpan { start: 2, end: 3 },
                }
            },
            location: SrcSpan { start: 2, end: 3 },
        }
    );
}
#[test]
fn string_unicode_escape_sequence_no_braces() {
    assert_error!(
        r#""\u65""#,
        ParseError {
            error: ParseErrorType::LexError {
                error: LexicalError {
                    error: LexicalErrorType::InvalidUnicodeEscape(
                        InvalidUnicodeEscapeError::MissingOpeningBrace,
                    ),
                    location: SrcSpan { start: 2, end: 3 },
                }
            },
            location: SrcSpan { start: 2, end: 3 },
        }
    );
}
#[test]
fn string_unicode_escape_sequence_invalid_hex() {
    assert_error!(
        r#""\u{z}""#,
        ParseError {
            error: ParseErrorType::LexError {
                error: LexicalError {
                    error: LexicalErrorType::InvalidUnicodeEscape(
                        InvalidUnicodeEscapeError::ExpectedHexDigitOrCloseBrace,
                    ),
                    location: SrcSpan { start: 4, end: 5 },
                }
            },
            location: SrcSpan { start: 4, end: 5 },
        }
    );
}
#[test]
fn string_unclosed_unicode_escape_sequence() {
    assert_error!(
        r#""\u{039a""#,
        ParseError {
            error: ParseErrorType::LexError {
                error: LexicalError {
                    error: LexicalErrorType::InvalidUnicodeEscape(
                        InvalidUnicodeEscapeError::ExpectedHexDigitOrCloseBrace,
                    ),
                    location: SrcSpan { start: 8, end: 9 },
                }
            },
            location: SrcSpan { start: 8, end: 9 },
        }
    );
}
// `\u{...}` requires between 1 and 6 hex digits.
#[test]
fn string_empty_unicode_escape_sequence() {
    assert_error!(
        r#""\u{}""#,
        ParseError {
            error: ParseErrorType::LexError {
                error: LexicalError {
                    error: LexicalErrorType::InvalidUnicodeEscape(
                        InvalidUnicodeEscapeError::InvalidNumberOfHexDigits,
                    ),
                    location: SrcSpan { start: 1, end: 5 },
                }
            },
            location: SrcSpan { start: 1, end: 5 },
        }
    );
}
#[test]
fn string_overlong_unicode_escape_sequence() {
    assert_error!(
        r#""\u{0011f601}""#,
        ParseError {
            error: ParseErrorType::LexError {
                error: LexicalError {
                    error: LexicalErrorType::InvalidUnicodeEscape(
                        InvalidUnicodeEscapeError::InvalidNumberOfHexDigits,
                    ),
                    location: SrcSpan { start: 1, end: 13 },
                }
            },
            location: SrcSpan { start: 1, end: 13 },
        }
    );
}
// 0x110000 is one past the highest valid Unicode codepoint.
#[test]
fn string_invalid_unicode_escape_sequence() {
    assert_error!(
        r#""\u{110000}""#,
        ParseError {
            error: ParseErrorType::LexError {
                error: LexicalError {
                    error: LexicalErrorType::InvalidUnicodeEscape(
                        InvalidUnicodeEscapeError::InvalidCodepoint,
                    ),
                    location: SrcSpan { start: 1, end: 11 },
                }
            },
            location: SrcSpan { start: 1, end: 11 },
        }
    );
}
// Parse errors for bit array options and the `===` operator.
#[test]
fn bit_array() {
    // non int value in bit array unit option
    assert_error!(
        "let x = <<1:unit(0)>> x",
        ParseError {
            error: ParseErrorType::InvalidBitArrayUnit,
            location: SrcSpan { start: 17, end: 18 }
        }
    );
}
// A unit larger than the valid range is also rejected.
#[test]
fn bit_array1() {
    assert_error!(
        "let x = <<1:unit(257)>> x",
        ParseError {
            error: ParseErrorType::InvalidBitArrayUnit,
            location: SrcSpan { start: 17, end: 20 }
        }
    );
}
#[test]
fn bit_array2() {
    // patterns cannot be nested
    assert_error!(
        "case <<>> { <<<<1>>:bits>> -> 1 }",
        ParseError {
            error: ParseErrorType::NestedBitArrayPattern,
            location: SrcSpan { start: 14, end: 19 }
        }
    );
}
// https://github.com/gleam-lang/gleam/issues/3125
#[test]
fn triple_equals() {
    assert_error!(
        "let wobble:Int = 32
        wobble === 42",
        ParseError {
            error: ParseErrorType::LexError {
                error: LexicalError {
                    error: LexicalErrorType::InvalidTripleEqual,
                    location: SrcSpan { start: 35, end: 38 },
                }
            },
            location: SrcSpan { start: 35, end: 38 },
        }
    );
}
// Parse errors around assignments that are missing their `let`, plus a few
// other malformed constructs.
#[test]
fn triple_equals_with_whitespace() {
    assert_error!(
        "let wobble:Int = 32
        wobble == = 42",
        ParseError {
            error: ParseErrorType::OpNakedRight,
            location: SrcSpan { start: 35, end: 37 },
        }
    );
}
// https://github.com/gleam-lang/gleam/issues/1231
#[test]
fn pointless_spread() {
    assert_error!(
        "let xs = [] [..xs]",
        ParseError {
            error: ParseErrorType::ListSpreadWithoutElements,
            location: SrcSpan { start: 12, end: 18 },
        }
    );
}
// https://github.com/gleam-lang/gleam/issues/1613
#[test]
fn anonymous_function_labeled_arguments() {
    assert_error!(
        "let anon_subtract = fn (minuend a: Int, subtrahend b: Int) -> Int {
        a - b
    }",
        ParseError {
            location: SrcSpan { start: 24, end: 31 },
            error: ParseErrorType::UnexpectedLabel
        }
    );
}
#[test]
fn no_let_binding() {
    assert_error!(
        "wibble = 32",
        ParseError {
            location: SrcSpan { start: 7, end: 8 },
            error: ParseErrorType::NoLetBinding
        }
    );
}
#[test]
fn no_let_binding1() {
    assert_error!(
        "wibble:Int = 32",
        ParseError {
            location: SrcSpan { start: 6, end: 7 },
            error: ParseErrorType::NoLetBinding
        }
    );
}
#[test]
fn no_let_binding2() {
    assert_error!(
        "let wobble:Int = 32
        wobble = 42",
        ParseError {
            location: SrcSpan { start: 35, end: 36 },
            error: ParseErrorType::NoLetBinding
        }
    );
}
#[test]
fn no_let_binding3() {
    assert_error!(
        "[x] = [2]",
        ParseError {
            location: SrcSpan { start: 4, end: 5 },
            error: ParseErrorType::NoLetBinding
        }
    );
}
#[test]
fn with_let_binding3() {
    // The same with `let assert` must parse:
    assert_parse!("let assert [x] = [2]");
}
#[test]
fn with_let_binding3_and_annotation() {
    assert_parse!("let assert [x]: List(Int) = [2]");
}
// A `let` binding without an `=` and a value is an error.
#[test]
fn no_eq_after_binding() {
    assert_error!(
        "let wibble",
        ParseError {
            location: SrcSpan { start: 4, end: 10 },
            error: ParseErrorType::ExpectedEqual
        }
    );
}
#[test]
fn no_eq_after_binding1() {
    assert_error!(
        "let wibble
        wibble = 4",
        ParseError {
            location: SrcSpan { start: 4, end: 10 },
            error: ParseErrorType::ExpectedEqual
        }
    );
}
// Snapshot tests for how `echo` parses in various positions: alone, with a
// message, inside pipelines, and combined with `panic`/`assert`.
#[test]
fn echo_followed_by_expression_ends_where_expression_ends() {
    assert_parse!("echo wibble");
}
#[test]
fn echo_with_simple_expression_1() {
    assert_parse!("echo wibble as message");
}
#[test]
fn echo_with_simple_expression_2() {
    assert_parse!("echo wibble as \"message\"");
}
#[test]
fn echo_with_complex_expression() {
    assert_parse!("echo wibble as { this <> complex }");
}
#[test]
fn echo_with_no_expressions_after_it() {
    assert_parse!("echo");
}
#[test]
fn echo_with_no_expressions_after_it_but_a_message() {
    assert_parse!("echo as message");
}
#[test]
fn echo_with_block() {
    assert_parse!("echo { 1 + 1 }");
}
#[test]
fn echo_has_lower_precedence_than_binop() {
    assert_parse!("echo 1 + 1");
}
#[test]
fn echo_in_a_pipeline() {
    assert_parse!("[] |> echo |> wibble");
}
#[test]
fn echo_has_lower_precedence_than_pipeline() {
    assert_parse!("echo wibble |> wobble |> woo");
}
#[test]
fn echo_cannot_have_an_expression_in_a_pipeline() {
    // So this is actually two pipelines!
    assert_parse!("[] |> echo fun |> wibble");
}
#[test]
fn panic_with_echo() {
    assert_parse!("panic as echo \"string\"");
}
#[test]
fn panic_with_echo_and_message() {
    assert_parse!("panic as echo wibble as message");
}
#[test]
fn echo_with_panic() {
    assert_parse!("echo panic as \"a\"");
}
#[test]
fn echo_with_panic_and_message() {
    assert_parse!("echo panic as \"a\"");
}
#[test]
fn echo_with_panic_and_messages() {
    assert_parse!("echo panic as \"a\" as \"b\"");
}
#[test]
fn echo_with_assert_and_message_1() {
    assert_parse!("assert 1 == echo 2 as this_belongs_to_echo");
}
#[test]
fn echo_with_assert_and_message_2() {
    assert_parse!("assert echo True as this_belongs_to_echo");
}
#[test]
fn echo_with_assert_and_messages_1() {
    assert_parse!("assert 1 == echo 2 as this_belongs_to_echo as this_belongs_to_assert");
}
#[test]
fn echo_with_assert_and_messages_2() {
    assert_parse!("assert echo True as this_belongs_to_echo as this_belongs_to_assert");
}
#[test]
fn echo_with_assert_and_messages_3() {
    assert_parse!("assert echo 1 == 2 as this_belongs_to_echo as this_belongs_to_assert");
}
#[test]
fn echo_with_let_assert_and_message() {
    assert_parse!("let assert 1 = echo 2 as this_belongs_to_echo");
}
#[test]
fn echo_with_let_assert_and_messages() {
    assert_parse!("let assert 1 = echo 1 as this_belongs_to_echo as this_belongs_to_assert");
}
#[test]
fn repeated_echos() {
    assert_parse!("echo echo echo 1");
}
#[test]
fn echo_at_start_of_pipeline_wraps_the_whole_thing() {
    assert_parse!("echo 1 |> wibble |> wobble");
}
// Single-argument `assert_error!` variants of the `NoLetBinding` and
// `ExpectedEqual` cases above — presumably these snapshot the full rendered
// diagnostic rather than matching a `ParseError` value.
#[test]
fn no_let_binding_snapshot_1() {
assert_error!("wibble = 4");
}
#[test]
fn no_let_binding_snapshot_2() {
assert_error!("wibble:Int = 4");
}
#[test]
fn no_let_binding_snapshot_3() {
assert_error!(
"let wobble:Int = 32
wobble = 42"
);
}
#[test]
fn no_eq_after_binding_snapshot_1() {
assert_error!("let wibble");
}
#[test]
fn no_eq_after_binding_snapshot_2() {
assert_error!(
"let wibble
wibble = 4"
);
}
// Concat-pattern restrictions, spread/semicolon errors, and block parsing.
#[test]
fn discard_left_hand_side_of_concat_pattern() {
assert_error!(
r#"
case "" {
_ <> rest -> rest
}
"#
);
}
#[test]
fn assign_left_hand_side_of_concat_pattern() {
// The left hand side of `<>` in a pattern must be a string literal,
// not a variable.
assert_error!(
r#"
case "" {
first <> rest -> rest
}
"#
);
}
// https://github.com/gleam-lang/gleam/issues/1890
#[test]
fn valueless_list_spread_expression() {
assert_error!(r#"let x = [1, 2, 3, ..]"#);
}
// https://github.com/gleam-lang/gleam/issues/2035
#[test]
fn semicolons() {
assert_error!(r#"{ 2 + 3; - -5; }"#);
}
#[test]
fn bare_expression() {
assert_parse!(r#"1"#);
}
// https://github.com/gleam-lang/gleam/issues/1991
#[test]
fn block_of_one() {
assert_parse!(r#"{ 1 }"#);
}
// https://github.com/gleam-lang/gleam/issues/1991
#[test]
fn block_of_two() {
assert_parse!(r#"{ 1 2 }"#);
}
// https://github.com/gleam-lang/gleam/issues/1991
#[test]
fn nested_block() {
assert_parse!(r#"{ 1 { 1.0 2.0 } 3 }"#);
}
// https://github.com/gleam-lang/gleam/issues/1831
#[test]
fn argument_scope() {
// `let` is a statement, so it cannot appear as an operand of `+`.
assert_error!(
"
1 + let a = 5
a
"
);
}
// `@external` / `@target` attribute validation: duplicate externals for one
// target, unknown or missing target names, and unknown attributes.
#[test]
fn multiple_external_for_same_project_erlang() {
assert_module_error!(
r#"
@external(erlang, "one", "two")
@external(erlang, "three", "four")
pub fn one(x: Int) -> Int {
todo
}
"#
);
}
#[test]
fn multiple_external_for_same_project_javascript() {
assert_module_error!(
r#"
@external(javascript, "one", "two")
@external(javascript, "three", "four")
pub fn one(x: Int) -> Int {
todo
}
"#
);
}
#[test]
fn unknown_external_target() {
assert_module_error!(
r#"
@external(erl, "one", "two")
pub fn one(x: Int) -> Int {
todo
}"#
);
}
#[test]
fn unknown_target() {
assert_module_error!(
r#"
@target(abc)
pub fn one() {}"#
);
}
#[test]
fn missing_target() {
assert_module_error!(
r#"
@target()
pub fn one() {}"#
);
}
#[test]
fn missing_target_and_bracket() {
assert_module_error!(
r#"
@target(
pub fn one() {}"#
);
}
#[test]
fn unknown_attribute() {
assert_module_error!(
r#"@go_faster()
pub fn main() { 1 }"#
);
}
#[test]
fn incomplete_function() {
assert_error!("fn()");
}
// `@deprecated` / `@internal` attribute rules, attributes with no (or the
// wrong kind of) definition following them, const restrictions, and
// importing types with `type` in the unqualified-import list.
#[test]
fn multiple_deprecation_attributes() {
assert_module_error!(
r#"
@deprecated("1")
@deprecated("2")
pub fn main() -> Nil {
Nil
}
"#
);
}
#[test]
fn deprecation_without_message() {
assert_module_error!(
r#"
@deprecated()
pub fn main() -> Nil {
Nil
}
"#
);
}
#[test]
fn multiple_internal_attributes() {
assert_module_error!(
r#"
@internal
@internal
pub fn main() -> Nil {
Nil
}
"#
);
}
#[test]
fn attributes_with_no_definition() {
assert_module_error!(
r#"
@deprecated("1")
@target(erlang)
"#
);
}
#[test]
fn external_attribute_with_custom_type() {
assert_parse_module!(
r#"
@external(erlang, "gleam_stdlib", "dict")
@external(javascript, "./gleam_stdlib.d.ts", "Dict")
pub type Dict(key, value)
"#
);
}
#[test]
fn external_attribute_with_non_fn_definition() {
assert_module_error!(
r#"
@external(erlang, "module", "fun")
pub type Fun = Fun
"#
);
}
#[test]
fn attributes_with_improper_definition() {
assert_module_error!(
r#"
@deprecated("1")
@external(erlang, "module", "fun")
"#
);
}
#[test]
fn const_with_function_call() {
// Constants cannot be initialised with a function call.
assert_module_error!(
r#"
pub fn wibble() { 123 }
const wib: Int = wibble()
"#
);
}
#[test]
fn const_with_function_call_with_args() {
assert_module_error!(
r#"
pub fn wibble() { 123 }
const wib: Int = wibble(1, "wobble")
"#
);
}
#[test]
fn import_type() {
assert_parse_module!(r#"import wibble.{type Wobble, Wobble, type Wabble}"#);
}
// Words reserved for possible future use (and `echo`/`test` etc.) cannot be
// used as constant names.
#[test]
fn reserved_auto() {
assert_module_error!(r#"const auto = 1"#);
}
#[test]
fn reserved_delegate() {
assert_module_error!(r#"const delegate = 1"#);
}
#[test]
fn reserved_derive() {
assert_module_error!(r#"const derive = 1"#);
}
#[test]
fn reserved_else() {
assert_module_error!(r#"const else = 1"#);
}
#[test]
fn reserved_implement() {
assert_module_error!(r#"const implement = 1"#);
}
#[test]
fn reserved_macro() {
assert_module_error!(r#"const macro = 1"#);
}
#[test]
fn reserved_test() {
assert_module_error!(r#"const test = 1"#);
}
#[test]
fn reserved_echo() {
assert_module_error!(r#"const echo = 1"#);
}
#[test]
fn capture_with_name() {
// The argument hole in a function capture must be a bare `_`, not a
// named discard.
assert_module_error!(
r#"
pub fn main() {
add(_name, 1)
}
fn add(x, y) {
x + y
}
"#
);
}
// List spread (`..`) restrictions in list expressions: a spread may only
// appear once, at the end, and must have a tail value.
#[test]
fn list_spread_with_no_tail_in_the_middle_of_a_list() {
assert_module_error!(
r#"
pub fn main() -> Nil {
let xs = [1, 2, 3]
[1, 2, .., 3 + 3, 4]
}
"#
);
}
#[test]
fn list_spread_followed_by_extra_items() {
assert_module_error!(
r#"
pub fn main() -> Nil {
let xs = [1, 2, 3]
[1, 2, ..xs, 3 + 3, 4]
}
"#
);
}
#[test]
fn list_spread_followed_by_extra_item_and_another_spread() {
assert_module_error!(
r#"
pub fn main() -> Nil {
let xs = [1, 2, 3]
let ys = [5, 6, 7]
[..xs, 4, ..ys]
}
"#
);
}
#[test]
fn list_spread_followed_by_other_spread() {
assert_module_error!(
r#"
pub fn main() -> Nil {
let xs = [1, 2, 3]
let ys = [5, 6, 7]
[1, ..xs, ..ys]
}
"#
);
}
#[test]
fn list_spread_as_first_item_followed_by_other_items() {
assert_module_error!(
r#"
pub fn main() -> Nil {
let xs = [1, 2, 3]
[..xs, 3 + 3, 4]
}
"#
);
}
// Tests for nested tuples and structs in tuples
// https://github.com/gleam-lang/gleam/issues/1980
// Chained tuple index access (`tup.0.1`) must parse both with and without
// explicit block braces around the inner access.
#[test]
fn nested_tuples() {
assert_parse!(
r#"
let tup = #(#(5, 6))
{tup.0}.1
"#
);
}
#[test]
fn nested_tuples_no_block() {
assert_parse!(
r#"
let tup = #(#(5, 6))
tup.0.1
"#
);
}
#[test]
fn deeply_nested_tuples() {
assert_parse!(
r#"
let tup = #(#(#(#(4))))
{{{tup.0}.0}.0}.0
"#
);
}
#[test]
fn deeply_nested_tuples_no_block() {
assert_parse!(
r#"
let tup = #(#(#(#(4))))
tup.0.0.0.0
"#
);
}
#[test]
fn inner_single_quote_parses() {
assert_parse!(
r#"
let a = "inner 'quotes'"
"#
);
}
// Single-quoted strings, `@internal` on already-private definitions
// (redundant), and module-access patterns in case clauses.
#[test]
fn string_single_char_suggestion() {
// Single quotes are not string delimiters in Gleam; the diagnostic
// should suggest the correct syntax.
assert_module_error!(
"
pub fn main() {
let a = 'example'
}
"
);
}
#[test]
fn private_internal_const() {
assert_module_error!(
"
@internal
const wibble = 1
"
);
}
#[test]
fn private_internal_type_alias() {
assert_module_error!(
"
@internal
type Alias = Int
"
);
}
#[test]
fn private_internal_function() {
assert_module_error!(
"
@internal
fn wibble() { todo }
"
);
}
#[test]
fn private_internal_type() {
assert_module_error!(
"
@internal
type Wibble {
Wibble
}
"
);
}
#[test]
fn wrong_record_access_pattern() {
// `module.thing`-style access is not a valid pattern.
assert_module_error!(
"
pub fn main() {
case wibble {
wibble.thing -> 1
}
}
"
);
}
// Keywords and other invalid tokens appearing where an expression or a
// pattern is expected (tuples, bit arrays, case subjects/clauses, `use`,
// and `let` patterns) must all produce module errors.
#[test]
fn tuple_invalid_expr() {
assert_module_error!(
"
fn main() {
#(1, 2, const)
}
"
);
}
#[test]
fn bit_array_invalid_segment() {
assert_module_error!(
"
fn main() {
<<72, 101, 108, 108, 111, 44, 32, 74, 111, 101, const>>
}
"
);
}
#[test]
fn case_invalid_expression() {
assert_module_error!(
"
fn main() {
case 1, type {
_, _ -> 0
}
}
"
);
}
#[test]
fn case_invalid_case_pattern() {
assert_module_error!(
"
fn main() {
case 1 {
-> -> 0
}
}
"
);
}
#[test]
fn case_clause_no_subject() {
assert_module_error!(
"
fn main() {
case 1 {
-> 1
_ -> 2
}
}
"
);
}
#[test]
fn case_alternative_clause_no_subject() {
// A `|` alternative with nothing before the arrow is an error too.
assert_module_error!(
"
fn main() {
case 1 {
1 | -> 1
_ -> 1
}
}
"
);
}
#[test]
fn use_invalid_assignments() {
assert_module_error!(
"
fn main() {
use fn <- result.try(get_username())
}
"
);
}
#[test]
fn assignment_pattern_invalid_tuple() {
assert_module_error!(
"
fn main() {
let #(a, case, c) = #(1, 2, 3)
}
"
);
}
#[test]
fn assignment_pattern_invalid_bit_segment() {
assert_module_error!(
"
fn main() {
let <<b1, pub>> = <<24, 3>>
}
"
);
}
#[test]
fn case_list_pattern_after_spread() {
// List patterns cannot match elements after the spread.
assert_module_error!(
"
fn main() {
case somelist {
[..rest, last] -> 1
_ -> 2
}
}
"
);
}
// Invalid custom type definitions: bad constructors, record syntax used
// where a constructor is expected, and invalid names/arguments.
#[test]
fn type_invalid_constructor() {
assert_module_error!(
"
type A {
A(String)
type
}
"
);
}
// Tests whether diagnostic presents an example of how to formulate a proper
// record constructor based off a common user error pattern.
// https://github.com/gleam-lang/gleam/issues/3324
#[test]
fn type_invalid_record_constructor() {
assert_module_error!(
"
pub type User {
name: String,
}
"
);
}
#[test]
fn type_invalid_record_constructor_without_field_type() {
assert_module_error!(
"
pub opaque type User {
name
}
"
);
}
#[test]
fn type_invalid_record_constructor_invalid_field_type() {
assert_module_error!(
r#"
type User {
name: "Test User",
}
"#
);
}
#[test]
fn type_invalid_type_name() {
assert_module_error!(
"
type A(a, type) {
A
}
"
);
}
#[test]
fn type_invalid_constructor_arg() {
assert_module_error!(
"
type A {
A(type: String)
}
"
);
}
#[test]
fn type_invalid_record() {
assert_module_error!(
"
type A {
One
Two
3
}
"
);
}
#[test]
fn function_type_invalid_param_type() {
assert_module_error!(
"
fn f(g: fn(Int, 1) -> Int) -> Int {
g(0, 1)
}
"
);
}
// Invalid values inside function signatures and constant definitions, plus
// a record access with a missing label (which must still parse, so the
// language server can offer completions after the dot).
#[test]
fn function_invalid_signature() {
assert_module_error!(
r#"
fn f(a, "b") -> String {
a <> b
}
"#
);
}
#[test]
fn const_invalid_tuple() {
assert_module_error!(
"
const a = #(1, 2, <-)
"
);
}
#[test]
fn const_invalid_list() {
assert_module_error!(
"
const a = [1, 2, <-]
"
);
}
#[test]
fn const_invalid_bit_array_segment() {
assert_module_error!(
"
const a = <<1, 2, <->>
"
);
}
#[test]
fn const_invalid_record_constructor() {
assert_module_error!(
"
type A {
A(String, Int)
}
const a = A(\"a\", let)
"
);
}
// record access should parse even if there is no label written
#[test]
fn record_access_no_label() {
assert_parse_module!(
"
type Wibble {
Wibble(wibble: String)
}
fn wobble() {
Wibble(\"a\").
}
"
);
}
// The lexer emits explicit `NewLine` tokens. Each item in the expected
// array is `Ok((start_offset, token, end_offset))` with byte offsets into
// the source string.
#[test]
fn newline_tokens() {
assert_eq!(
make_tokenizer("1\n\n2\n").collect_vec(),
[
Ok((
0,
Token::Int {
value: "1".into(),
int_value: 1.into()
},
1
)),
Ok((1, Token::NewLine, 2)),
Ok((2, Token::NewLine, 3)),
Ok((
3,
Token::Int {
value: "2".into(),
int_value: 2.into()
},
4
)),
Ok((4, Token::NewLine, 5))
]
);
}
// https://github.com/gleam-lang/gleam/issues/1756
// Case clause guards: arithmetic is allowed, but function calls are not.
// Also covers constant string concatenation with `<>`.
#[test]
fn arithmetic_in_guards() {
assert_parse!(
"
case 2, 3 {
x, y if x + y == 1 -> True
}"
);
}
#[test]
fn const_string_concat() {
assert_parse_module!(
"
const cute = \"cute\"
const cute_bee = cute <> \"bee\"
"
);
}
#[test]
fn const_string_concat_naked_right() {
// `<>` with no right-hand operand in a constant is an error.
assert_module_error!(
"
const no_cute_bee = \"cute\" <>
"
);
}
#[test]
fn function_call_in_case_clause_guard() {
assert_error!(
r#"
let my_string = "hello"
case my_string {
_ if length(my_string) > 2 -> io.debug("doesn't work')
}"#
);
}
#[test]
fn dot_access_function_call_in_case_clause_guard() {
assert_error!(
r#"
let my_string = "hello"
case my_string {
_ if string.length(my_string) > 2 -> io.debug("doesn't work')
}"#
);
}
#[test]
fn invalid_left_paren_in_case_clause_guard() {
assert_error!(
r#"
let my_string = "hello"
case my_string {
_ if string.length( > 2 -> io.debug("doesn't work')
}"#
);
}
// Malformed label shorthand syntax (`label:`) in call arguments and in
// constructor patterns must all be rejected.
#[test]
fn invalid_label_shorthand() {
assert_module_error!(
"
pub fn main() {
wibble(:)
}
"
);
}
#[test]
fn invalid_label_shorthand_2() {
assert_module_error!(
"
pub fn main() {
wibble(:,)
}
"
);
}
#[test]
fn invalid_label_shorthand_3() {
assert_module_error!(
"
pub fn main() {
wibble(:arg)
}
"
);
}
#[test]
fn invalid_label_shorthand_4() {
assert_module_error!(
"
pub fn main() {
wibble(arg::)
}
"
);
}
#[test]
fn invalid_label_shorthand_5() {
assert_module_error!(
"
pub fn main() {
wibble(arg::arg)
}
"
);
}
#[test]
fn invalid_pattern_label_shorthand() {
assert_module_error!(
"
pub fn main() {
let Wibble(:) = todo
}
"
);
}
#[test]
fn invalid_pattern_label_shorthand_2() {
assert_module_error!(
"
pub fn main() {
let Wibble(:arg) = todo
}
"
);
}
#[test]
fn invalid_pattern_label_shorthand_3() {
assert_module_error!(
"
pub fn main() {
let Wibble(arg::) = todo
}
"
);
}
#[test]
fn invalid_pattern_label_shorthand_4() {
assert_module_error!(
"
pub fn main() {
let Wibble(arg: arg:) = todo
}
"
);
}
#[test]
fn invalid_pattern_label_shorthand_5() {
assert_module_error!(
"
pub fn main() {
let Wibble(arg1: arg2:) = todo
}
"
);
}
/// Parses `src` as a module and returns the doc comment attached to its
/// first definition.
///
/// Panics if parsing fails, the module has no definitions, or the first
/// definition carries no doc comment.
fn first_parsed_docstring(src: &str) -> EcoString {
let module =
crate::parse::parse_module(Utf8PathBuf::from("test/path"), src, &WarningEmitter::null())
.expect("should parse");
let first_definition = module
.module
.definitions
.first()
.expect("parsed a definition");
first_definition
.definition
.get_doc()
.expect("definition without doc")
}
// A doc comment (`///`) separated from a definition by a regular comment
// (`//`) must NOT be attached to that definition; only the doc comment
// immediately preceding it is kept.
#[test]
fn doc_comment_before_comment_is_not_attached_to_following_function() {
assert_eq!(
first_parsed_docstring(
r#"
/// Not included!
// pub fn call()
/// Doc!
pub fn wibble() {}
"#
),
" Doc!\n"
)
}
#[test]
fn doc_comment_before_comment_is_not_attached_to_following_type() {
assert_eq!(
first_parsed_docstring(
r#"
/// Not included!
// pub fn call()
/// Doc!
pub type Wibble
"#
),
" Doc!\n"
)
}
#[test]
fn doc_comment_before_comment_is_not_attached_to_following_type_alias() {
assert_eq!(
first_parsed_docstring(
r#"
/// Not included!
// pub fn call()
/// Doc!
pub type Wibble = Int
"#
),
" Doc!\n"
)
}
#[test]
fn doc_comment_before_comment_is_not_attached_to_following_constant() {
assert_eq!(
first_parsed_docstring(
r#"
/// Not included!
// pub fn call()
/// Doc!
pub const wibble = 1
"#
),
" Doc!\n"
);
}
// Definitions in the wrong position (named functions inside functions or
// types), leading-underscore names used as values, `if` written where a
// Gleam `case` is needed, constructors called with no arguments, and
// tuples written without the leading `#`.
#[test]
fn non_module_level_function_with_a_name() {
assert_module_error!(
r#"
pub fn main() {
fn my() { 1 }
}
"#
);
}
#[test]
fn error_message_on_variable_starting_with_underscore() {
// https://github.com/gleam-lang/gleam/issues/3504
assert_module_error!(
"
pub fn main() {
let val = _func_starting_with_underscore(1)
}"
);
}
#[test]
fn non_module_level_function_with_not_a_name() {
assert_module_error!(
r#"
pub fn main() {
fn @() { 1 } // wrong token and not a name
}
"#
);
}
#[test]
fn error_message_on_variable_starting_with_underscore2() {
// https://github.com/gleam-lang/gleam/issues/3504
assert_module_error!(
"
pub fn main() {
case 1 {
1 -> _with_underscore(1)
}
}"
);
}
#[test]
fn function_inside_a_type() {
assert_module_error!(
r#"
type Wibble {
fn wobble() {}
}
"#
);
}
#[test]
fn pub_function_inside_a_type() {
assert_module_error!(
r#"
type Wibble {
pub fn wobble() {}
}
"#
);
}
#[test]
fn if_like_expression() {
assert_module_error!(
r#"
pub fn main() {
let a = if wibble {
wobble
}
}
"#
);
}
// https://github.com/gleam-lang/gleam/issues/3730
#[test]
fn missing_constructor_arguments() {
assert_module_error!(
"
pub type A {
A(Int)
}
const a = A()
"
);
}
// https://github.com/gleam-lang/gleam/issues/3796
#[test]
fn missing_type_constructor_arguments_in_type_definition() {
assert_module_error!(
"
pub type A() {
A(Int)
}
"
);
}
#[test]
fn tuple_without_hash() {
assert_module_error!(
r#"
pub fn main() {
let triple = (1, 2.2, "three")
io.debug(triple)
let (a, *, *) = triple
io.debug(a)
io.debug(triple.1)
}
"#
);
}
// `@deprecated` on a single type variant is allowed once, but not twice.
// Also: a float literal with an empty exponent (`1.32e`) is a lex error.
#[test]
fn deprecation_attribute_on_type_variant() {
assert_parse_module!(
r#"
type Wibble {
@deprecated("1")
Wibble1
Wibble2
}
"#
);
}
#[test]
fn float_empty_exponent() {
assert_error!("1.32e");
}
#[test]
fn multiple_deprecation_attribute_on_type_variant() {
assert_module_error!(
r#"
type Wibble {
@deprecated("1")
@deprecated("2")
Wibble1
Wibble2
}
"#
);
}
#[test]
fn target_attribute_on_type_variant() {
| rust | Apache-2.0 | f424547f02e621f1c5f28749786e05eda7feb098 | 2026-01-04T15:40:22.554517Z | true |
gleam-lang/gleam | https://github.com/gleam-lang/gleam/blob/f424547f02e621f1c5f28749786e05eda7feb098/compiler-core/src/parse/error.rs | compiler-core/src/parse/error.rs | use crate::ast::{SrcSpan, TypeAst};
use crate::diagnostic::{ExtraLabel, Label};
use crate::error::wrap;
use crate::parse::Token;
use ecow::EcoString;
use itertools::Itertools;
/// An error produced while lexing Gleam source code, together with the
/// span of source it was found at.
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub struct LexicalError {
pub error: LexicalErrorType,
pub location: SrcSpan,
}
/// The ways a `\u{...}` escape sequence in a string literal can be invalid.
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub enum InvalidUnicodeEscapeError {
MissingOpeningBrace, // Expected '{'
ExpectedHexDigitOrCloseBrace, // Expected hex digit or '}'
InvalidNumberOfHexDigits, // Expected between 1 and 6 hex digits
InvalidCodepoint, // Invalid Unicode codepoint
}
/// The different kinds of errors the lexer can produce.
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub enum LexicalErrorType {
BadStringEscape, // string contains an invalid escape (an unescaped backslash)
InvalidUnicodeEscape(InvalidUnicodeEscapeError), // \u{...} escape sequence is invalid
DigitOutOfRadix, // a digit not valid for the number's radix, e.g. the 2 in 0b012
NumTrailingUnderscore, // 1_000_ is not allowed
RadixIntNoValue, // 0x, 0b, 0o without a value
MissingExponent, // 1.0e, for example, where there is no exponent
UnexpectedStringEnd, // Unterminated string literal
UnrecognizedToken { tok: char }, // a character no token starts with
InvalidTripleEqual, // `===` is not an operator
}
/// A parse error and the span of source code it applies to.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct ParseError {
pub error: ParseErrorType,
pub location: SrcSpan,
}
/// The different kinds of parse errors. The human-readable diagnostic for
/// each variant is produced by `ParseErrorType::details`.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum ParseErrorType {
ExpectedEqual, // expect "="
ExpectedExpr, // after "->" in a case clause
ExpectedName, // any token used when a Name was expected
ExpectedPattern, // after ':' where a pattern is expected
ExpectedType, // after ':' or '->' where a type annotation is expected
ExpectedUpName, // any token used when a UpName was expected
ExpectedValue, // no value after "="
ExpectedDefinition, // after attributes
ExpectedDeprecationMessage, // after "deprecated"
ExpectedFunctionDefinition, // after function-only attributes
ExpectedTargetName, // after "@target("
ExprLparStart, // it seems "(" was used to start an expression
ExtraSeparator, // #(1,,) <- the 2nd comma is an extra separator
IncorrectName, // UpName or DiscardName used when Name was expected
IncorrectUpName, // Name or DiscardName used when UpName was expected
InvalidBitArraySegment, // <<7:hello>> `hello` is an invalid BitArray segment
InvalidBitArrayUnit, // in <<1:unit(x)>> x must be 1 <= x <= 256
InvalidTailPattern, // only name and _name are allowed after ".." in list pattern
InvalidTupleAccess, // only positive int literals for tuple access
LexError {
error: LexicalError,
},
NestedBitArrayPattern, // <<<<1>>, 2>>, <<1>> is not allowed in there
NoLetBinding, // Bindings and rebinds always require let and must always bind to a value.
NoValueAfterEqual, // = <something other than a value>
NotConstType, // :fn(), name, _ are not valid const types
OpNakedRight, // Operator with no value to the right
OpaqueTypeAlias, // Type aliases cannot be opaque
TooManyArgHoles, // a function call can have at most 1 arg hole
DuplicateAttribute, // an attribute was used more than once
UnknownAttribute, // an attribute was used that is not known
UnknownTarget, // an unknown target was used
ListSpreadWithoutElements, // Pointless spread: `[..xs]`
ListSpreadFollowedByElements, // trying to append something after the spread: `[..xs, x]`
ListSpreadWithAnotherSpread {
first_spread_location: SrcSpan,
}, // trying to use multiple spreads: `[..xs, ..ys]`
UnexpectedLabel, // argument labels were provided, but are not supported in this context
UnexpectedEof,
UnexpectedReservedWord, // reserved word used when a name was expected
UnexpectedToken {
token: Token,
expected: Vec<EcoString>,
hint: Option<EcoString>,
},
UnexpectedFunction, // a function was called outside of another function
// A variable was assigned or discarded on the left hand side of a <> pattern
ConcatPatternVariableLeftHandSide,
ListSpreadWithoutTail, // let x = [1, ..]
ExpectedFunctionBody, // let x = fn()
RedundantInternalAttribute, // for a private definition marked as internal
InvalidModuleTypePattern, // for patterns that have a dot like: `name.thing`
ListPatternSpreadFollowedByElements, // When there is a pattern after a spread [..rest, pattern]
ExpectedRecordConstructor {
name: EcoString,
public: bool,
opaque: bool,
field: EcoString,
field_type: Option<Box<TypeAst>>,
},
CallInClauseGuard, // case x { _ if f() -> 1 }
IfExpression,
ConstantRecordConstructorNoArguments, // const x = Record()
TypeDefinitionNoArguments, // pub type Wibble() { ... }
UnknownAttributeRecordVariant, // an attribute was used that is not known for a custom type variant
// a Python-like import was written, such as `import gleam.io`, instead of `import gleam/io`
IncorrectImportModuleSeparator {
module: EcoString,
item: EcoString,
},
/// This can happen when there's an empty block in a case clause guard.
/// For example: `_ if a == {}`
EmptyGuardBlock,
// When the user tries to define a constant inside a function
ConstantInsideFunction,
FunctionDefinitionAngleGenerics, // fn something<T>() { ... }
// let a: List<String> = []
TypeUsageAngleGenerics {
module: Option<EcoString>,
name: EcoString,
arguments: Vec<TypeAst>,
},
// type Something<T> {
TypeDefinitionAngleGenerics {
name: EcoString,
arguments: Vec<EcoString>,
},
}
/// The pieces used to build the rendered diagnostic for a parse error:
/// the main explanatory text, the label placed at the error's span, any
/// extra labels pointing at related spans, and an optional hint.
pub(crate) struct ParseErrorDetails {
pub text: String,
pub label_text: EcoString,
pub extra_labels: Vec<ExtraLabel>,
pub hint: Option<String>,
}
impl ParseErrorType {
pub(crate) fn details(&self) -> ParseErrorDetails {
match self {
ParseErrorType::ExpectedEqual => ParseErrorDetails {
text: "".into(),
hint: None,
label_text: "I was expecting a '=' after this".into(),
extra_labels: vec![],
},
ParseErrorType::ExpectedExpr => ParseErrorDetails {
text: "".into(),
hint: None,
label_text: "I was expecting an expression after this".into(),
extra_labels: vec![],
},
ParseErrorType::ExpectedName => ParseErrorDetails {
text: "".into(),
hint: None,
label_text: "I was expecting a name here".into(),
extra_labels: vec![],
},
ParseErrorType::ExpectedPattern => ParseErrorDetails {
text: "".into(),
hint: None,
label_text: "I was expecting a pattern after this".into(),
extra_labels: vec![],
},
ParseErrorType::ExpectedType => ParseErrorDetails {
text: "See: https://tour.gleam.run/basics/assignments/".into(),
hint: None,
label_text: "I was expecting a type after this".into(),
extra_labels: vec![],
},
ParseErrorType::ExpectedUpName => ParseErrorDetails {
text: "".into(),
hint: None,
label_text: "I was expecting a type name here".into(),
extra_labels: vec![],
},
ParseErrorType::ExpectedValue => ParseErrorDetails {
text: "".into(),
hint: None,
label_text: "I was expecting a value after this".into(),
extra_labels: vec![],
},
ParseErrorType::ExpectedDefinition => ParseErrorDetails {
text: "".into(),
hint: None,
label_text: "I was expecting a definition after this".into(),
extra_labels: vec![],
},
ParseErrorType::ExpectedDeprecationMessage => ParseErrorDetails {
text: "".into(),
hint: None,
label_text: "A deprecation attribute must have a string message.".into(),
extra_labels: vec![],
},
ParseErrorType::ExpectedFunctionDefinition => ParseErrorDetails {
text: "".into(),
hint: None,
label_text: "I was expecting a function definition after this".into(),
extra_labels: vec![],
},
ParseErrorType::ExpectedTargetName => ParseErrorDetails {
text: "Try `erlang`, `javascript`.".into(),
hint: None,
label_text: "I was expecting a target name after this".into(),
extra_labels: vec![],
},
ParseErrorType::ExtraSeparator => ParseErrorDetails {
text: "".into(),
hint: Some("Try removing it?".into()),
label_text: "This is an extra delimiter".into(),
extra_labels: vec![],
},
ParseErrorType::ExprLparStart => ParseErrorDetails {
text: "".into(),
hint: Some(
"To group expressions in Gleam, use \"{\" and \"}\"; \
tuples are created with `#(` and `)`."
.into(),
),
label_text: "This parenthesis cannot be understood here".into(),
extra_labels: vec![],
},
ParseErrorType::IncorrectName => ParseErrorDetails {
text: "".into(),
hint: Some(wrap(
"Variable and module names start with a lowercase letter, \
and can contain a-z, 0-9, or _.",
)),
label_text: "I'm expecting a lowercase name here".into(),
extra_labels: vec![],
},
ParseErrorType::IncorrectUpName => ParseErrorDetails {
text: "".into(),
hint: Some(wrap(
"Type names start with a uppercase letter, and can \
contain a-z, A-Z, or 0-9.",
)),
label_text: "I'm expecting a type name here".into(),
extra_labels: vec![],
},
ParseErrorType::InvalidBitArraySegment => ParseErrorDetails {
text: "See: https://tour.gleam.run/data-types/bit-arrays/".into(),
hint: Some(format!(
"Valid BitArray segment options are:\n{}",
wrap(
"bits, bytes, int, float, utf8, utf16, utf32, utf8_codepoint, \
utf16_codepoint, utf32_codepoint, signed, unsigned, big, little, native, size, unit.",
)
)),
label_text: "This is not a valid BitArray segment option".into(),
extra_labels: vec![],
},
ParseErrorType::InvalidBitArrayUnit => ParseErrorDetails {
text: "See: https://tour.gleam.run/data-types/bit-arrays/".into(),
hint: Some("Unit must be an integer literal >= 1 and <= 256.".into()),
label_text: "This is not a valid BitArray unit value".into(),
extra_labels: vec![],
},
ParseErrorType::InvalidTailPattern => ParseErrorDetails {
text: "".into(),
hint: None,
label_text: "This part of a list pattern can only be a name or a discard".into(),
extra_labels: vec![],
},
ParseErrorType::InvalidTupleAccess => ParseErrorDetails {
text: "".into(),
hint: Some(
"Only non negative integer literals like 0, or 1_000 can be used.".into(),
),
label_text: "This integer is not valid for tuple access".into(),
extra_labels: vec![],
},
ParseErrorType::LexError { error: lex_err } => {
let (label_text, text_lines) = lex_err.to_parse_error_info();
let text = text_lines.join("\n");
ParseErrorDetails {
text,
hint: None,
label_text: label_text.into(),
extra_labels: vec![],
}
}
ParseErrorType::NestedBitArrayPattern => ParseErrorDetails {
text: "".into(),
hint: None,
label_text: "BitArray patterns cannot be nested".into(),
extra_labels: vec![],
},
ParseErrorType::NotConstType => ParseErrorDetails {
text: "See: https://tour.gleam.run/basics/constants/".into(),
hint: None,
label_text: "This type is not allowed in module constants".into(),
extra_labels: vec![],
},
ParseErrorType::NoLetBinding => ParseErrorDetails {
text: "See: https://tour.gleam.run/basics/assignments/".into(),
hint: Some("Use let for binding.".into()),
label_text: "There must be a 'let' to bind variable to value".into(),
extra_labels: vec![],
},
ParseErrorType::NoValueAfterEqual => ParseErrorDetails {
text: "".into(),
hint: None,
label_text: "I was expecting to see a value after this equals sign".into(),
extra_labels: vec![],
},
ParseErrorType::OpaqueTypeAlias => ParseErrorDetails {
text: "See: https://tour.gleam.run/basics/type-aliases/".into(),
hint: None,
label_text: "Type Aliases cannot be opaque".into(),
extra_labels: vec![],
},
ParseErrorType::OpNakedRight => ParseErrorDetails {
text: "".into(),
hint: Some("Remove it or put a value after it.".into()),
label_text: "This operator has no value on its right side".into(),
extra_labels: vec![],
},
ParseErrorType::TooManyArgHoles => ParseErrorDetails {
text: "See: https://tour.gleam.run/functions/functions/".into(),
hint: Some("Function calls can have at most one argument hole.".into()),
label_text: "There is more than 1 argument hole in this function call".into(),
extra_labels: vec![],
},
ParseErrorType::UnexpectedEof => ParseErrorDetails {
text: "".into(),
hint: None,
label_text: "The module ended unexpectedly".into(),
extra_labels: vec![],
},
ParseErrorType::ListSpreadWithoutElements => ParseErrorDetails {
text: "See: https://tour.gleam.run/basics/lists/".into(),
hint: Some("Try prepending some elements [1, 2, ..list].".into()),
label_text: "This spread does nothing".into(),
extra_labels: vec![],
},
ParseErrorType::ListSpreadWithAnotherSpread {
first_spread_location,
} => ParseErrorDetails {
text: [
"Lists are immutable and singly-linked, so to join two or more lists",
"all the elements of the lists would need to be copied into a new list.",
"This would be slow, so there is no built-in syntax for it.",
]
.join("\n"),
hint: None,
label_text: "I wasn't expecting a second list here".into(),
extra_labels: vec![ExtraLabel {
src_info: None,
label: Label {
text: Some("You're using a list here".into()),
span: *first_spread_location,
},
}],
},
ParseErrorType::ListSpreadFollowedByElements => ParseErrorDetails {
text: [
"Lists are immutable and singly-linked, so to append items to them",
"all the elements of a list would need to be copied into a new list.",
"This would be slow, so there is no built-in syntax for it.",
"",
]
.join("\n"),
hint: Some(
"Prepend items to the list and then reverse it once you are done.".into(),
),
label_text: "I wasn't expecting elements after this".into(),
extra_labels: vec![],
},
ParseErrorType::ListPatternSpreadFollowedByElements => ParseErrorDetails {
text: [
"Lists are immutable and singly-linked, so to match on the end",
"of a list would require the whole list to be traversed. This",
"would be slow, so there is no built-in syntax for it. Pattern",
"match on the start of the list instead.",
]
.join("\n"),
hint: None,
label_text: "I wasn't expecting elements after this".into(),
extra_labels: vec![],
},
ParseErrorType::UnexpectedReservedWord => ParseErrorDetails {
text: "".into(),
hint: Some("I was expecting to see a name here.".into()),
label_text: "This is a reserved word".into(),
extra_labels: vec![],
},
ParseErrorType::UnexpectedLabel => ParseErrorDetails {
text: "Please remove the argument label.".into(),
hint: None,
label_text: "Argument labels are not allowed for anonymous functions".into(),
extra_labels: vec![],
},
ParseErrorType::UnexpectedToken {
token,
expected,
hint,
} => {
let found = match token {
Token::Int { .. } => "an Int".to_string(),
Token::Float { .. } => "a Float".to_string(),
Token::String { .. } => "a String".to_string(),
Token::CommentDoc { .. } => "a comment".to_string(),
Token::DiscardName { .. } => "a discard name".to_string(),
Token::Name { .. } | Token::UpName { .. } => "a name".to_string(),
_ if token.is_reserved_word() => format!("the keyword {token}"),
Token::LeftParen
| Token::RightParen
| Token::LeftSquare
| Token::RightSquare
| Token::LeftBrace
| Token::RightBrace
| Token::Plus
| Token::Minus
| Token::Star
| Token::Slash
| Token::Less
| Token::Greater
| Token::LessEqual
| Token::GreaterEqual
| Token::Percent
| Token::PlusDot
| Token::MinusDot
| Token::StarDot
| Token::SlashDot
| Token::LessDot
| Token::GreaterDot
| Token::LessEqualDot
| Token::GreaterEqualDot
| Token::LtGt
| Token::Colon
| Token::Comma
| Token::Hash
| Token::Bang
| Token::Equal
| Token::EqualEqual
| Token::NotEqual
| Token::Vbar
| Token::VbarVbar
| Token::AmperAmper
| Token::LtLt
| Token::GtGt
| Token::Pipe
| Token::Dot
| Token::RArrow
| Token::LArrow
| Token::DotDot
| Token::At
| Token::EndOfFile
| Token::CommentNormal
| Token::CommentModule
| Token::NewLine
| Token::As
| Token::Assert
| Token::Auto
| Token::Case
| Token::Const
| Token::Delegate
| Token::Derive
| Token::Echo
| Token::Else
| Token::Fn
| Token::If
| Token::Implement
| Token::Import
| Token::Let
| Token::Macro
| Token::Opaque
| Token::Panic
| Token::Pub
| Token::Test
| Token::Todo
| Token::Type
| Token::Use => token.to_string(),
};
let messages = std::iter::once(format!("Found {found}, expected one of: "))
.chain(expected.iter().map(|s| format!("- {s}")));
let messages = match hint {
Some(hint_text) => messages
.chain(std::iter::once(format!("Hint: {hint_text}")))
.collect_vec(),
_ => messages.collect(),
};
ParseErrorDetails {
text: messages.join("\n"),
hint: None,
label_text: "I was not expecting this".into(),
extra_labels: vec![],
}
}
ParseErrorType::ConcatPatternVariableLeftHandSide => ParseErrorDetails {
text: [
"We can't tell what size this prefix should be so we don't know",
"how to handle this pattern.",
"",
"If you want to match one character consider using `pop_grapheme`",
"from the stdlib's `gleam/string` module.",
]
.join("\n"),
hint: None,
label_text: "This must be a string literal".into(),
extra_labels: vec![],
},
ParseErrorType::UnexpectedFunction => ParseErrorDetails {
text: "".into(),
hint: None,
label_text: "Functions can only be called within other functions".into(),
extra_labels: vec![],
},
ParseErrorType::ListSpreadWithoutTail => ParseErrorDetails {
text: "If a list expression has a spread then a tail must also be given.".into(),
hint: None,
label_text: "I was expecting a value after this spread".into(),
extra_labels: vec![],
},
ParseErrorType::UnknownAttribute => ParseErrorDetails {
text: "".into(),
hint: Some("Try `deprecated`, `external` or `target` instead.".into()),
label_text: "I don't recognise this attribute".into(),
extra_labels: vec![],
},
ParseErrorType::DuplicateAttribute => ParseErrorDetails {
text: "This attribute has already been given.".into(),
hint: None,
label_text: "Duplicate attribute".into(),
extra_labels: vec![],
},
ParseErrorType::UnknownTarget => ParseErrorDetails {
text: "Try `erlang`, `javascript`.".into(),
hint: None,
label_text: "I don't recognise this target".into(),
extra_labels: vec![],
},
ParseErrorType::ExpectedFunctionBody => ParseErrorDetails {
text: "".into(),
hint: None,
label_text: "This function does not have a body".into(),
extra_labels: vec![],
},
ParseErrorType::RedundantInternalAttribute => ParseErrorDetails {
text: "Only a public definition can be annotated as internal.".into(),
hint: Some("Remove the `@internal` annotation.".into()),
label_text: "Redundant internal attribute".into(),
extra_labels: vec![],
},
ParseErrorType::InvalidModuleTypePattern => ParseErrorDetails {
text: [
"I'm expecting a pattern here",
"Hint: A pattern can be a constructor name, a literal value",
"or a variable to bind a value to, etc.",
"See: https://tour.gleam.run/flow-control/case-expressions/",
]
.join("\n"),
hint: None,
label_text: "Invalid pattern".into(),
extra_labels: vec![],
},
ParseErrorType::ExpectedRecordConstructor {
name,
public,
opaque,
field,
field_type,
} => {
let (accessor, opaque) = match *public {
true if *opaque => ("pub ", "opaque "),
true => ("pub ", ""),
false => ("", ""),
};
let mut annotation = EcoString::new();
match field_type {
Some(t) => t.print(&mut annotation),
None => annotation.push_str("Type"),
};
ParseErrorDetails {
text: [
"Each custom type variant must have a constructor:\n".into(),
format!("{accessor}{opaque}type {name} {{"),
format!(" {name}("),
format!(" {field}: {annotation},"),
" )".into(),
"}".into(),
]
.join("\n"),
hint: None,
label_text: "I was not expecting this".into(),
extra_labels: vec![],
}
}
ParseErrorType::CallInClauseGuard => ParseErrorDetails {
text: "Functions cannot be called in clause guards.".into(),
hint: None,
label_text: "Unsupported expression".into(),
extra_labels: vec![],
},
ParseErrorType::IfExpression => ParseErrorDetails {
text: [
"If you want to write a conditional expression you can use a `case`:",
"",
" case condition {",
" True -> todo",
" False -> todo",
" }",
"",
"See: https://tour.gleam.run/flow-control/case-expressions/",
]
.join("\n"),
hint: None,
label_text: "Gleam doesn't have if expressions".into(),
extra_labels: vec![],
},
ParseErrorType::ConstantRecordConstructorNoArguments => ParseErrorDetails {
text: "A record must be passed arguments when constructed.".into(),
hint: None,
label_text: "I was expecting arguments here".into(),
extra_labels: vec![],
},
ParseErrorType::TypeDefinitionNoArguments => ParseErrorDetails {
text: "A generic type must have at least a generic parameter.".into(),
hint: Some("If a type is not generic you should omit the `()`.".into()),
label_text: "I was expecting generic parameters here".into(),
extra_labels: vec![],
},
ParseErrorType::UnknownAttributeRecordVariant => ParseErrorDetails {
text: "".into(),
hint: Some("Did you mean `@deprecated`?".into()),
label_text: "This attribute cannot be used on a variant.".into(),
extra_labels: vec![],
},
ParseErrorType::IncorrectImportModuleSeparator { module, item } => ParseErrorDetails {
text: [
"Perhaps you meant one of:".into(),
"".into(),
format!(" import {module}/{item}"),
format!(" import {module}.{{item}}"),
]
.join("\n"),
hint: None,
label_text: "I was expecting either `/` or `.{` here.".into(),
extra_labels: vec![],
},
ParseErrorType::EmptyGuardBlock => ParseErrorDetails {
text: "".into(),
hint: None,
label_text: "A clause guard block cannot be empty".into(),
extra_labels: vec![],
},
ParseErrorType::ConstantInsideFunction => ParseErrorDetails {
text: wrap(
"All variables are immutable in Gleam, so constants inside \
functions are not necessary.",
),
hint: Some(
"Either move this into the global scope or use `let` binding instead.".into(),
),
label_text: "Constants are not allowed inside functions".into(),
extra_labels: vec![],
},
ParseErrorType::FunctionDefinitionAngleGenerics => ParseErrorDetails {
text: "\
Generic function type variables do not need to be predeclared like they
would be in some other languages, instead they are written with lowercase
names.
fn example(argument: generic) -> generic
See: https://tour.gleam.run/functions/generic-functions/"
.into(),
hint: None,
label_text: "I was expecting `(` here.".into(),
extra_labels: vec![],
},
ParseErrorType::TypeUsageAngleGenerics {
module,
name,
arguments,
} => {
let type_arguments = arguments
.iter()
.map(|argument| {
let mut argument_string = EcoString::new();
argument.print(&mut argument_string);
argument_string
})
.join(", ");
let replacement_type = match module {
Some(module) => format!("{module}.{name}({type_arguments})"),
None => format!("{name}({type_arguments})"),
};
ParseErrorDetails {
text: format!(
"\
Type parameters use lowercase names and are surrounded by parentheses.
{replacement_type}
See: https://tour.gleam.run/data-types/generic-custom-types/"
),
hint: None,
label_text: "I was expecting `(` here.".into(),
extra_labels: vec![],
}
}
ParseErrorType::TypeDefinitionAngleGenerics { name, arguments } => {
let comma_separated_arguments = arguments.join(", ");
ParseErrorDetails {
text: format!(
"\
Type parameters use lowercase names and are surrounded by parentheses.
type {name}({comma_separated_arguments}) {{
See: https://tour.gleam.run/data-types/generic-custom-types/"
),
hint: None,
label_text: "I was expecting `(` here.".into(),
extra_labels: vec![],
}
}
}
}
}
impl LexicalError {
pub fn to_parse_error_info(&self) -> (&'static str, Vec<String>) {
match &self.error {
LexicalErrorType::BadStringEscape => (
"I don't understand this escape code",
vec![
| rust | Apache-2.0 | f424547f02e621f1c5f28749786e05eda7feb098 | 2026-01-04T15:40:22.554517Z | true |
// ---- compiler-core/src/parse/extra.rs ----
use std::cmp::Ordering;
use ecow::EcoString;
use crate::ast::SrcSpan;
/// Extra syntactic information collected while parsing a module that is not
/// part of the AST itself: the source spans of comments plus layout details.
#[derive(Debug, PartialEq, Eq, Default)]
pub struct ModuleExtra {
    // Spans of `////` module comments (see `Token::CommentModule`).
    pub module_comments: Vec<SrcSpan>,
    // Spans of `///` documentation comments (see `Token::CommentDoc`).
    pub doc_comments: Vec<SrcSpan>,
    // Spans of regular `//` comments (see `Token::CommentNormal`).
    pub comments: Vec<SrcSpan>,
    // Presumably byte offsets of blank lines — confirm against the lexer.
    pub empty_lines: Vec<u32>,
    // Presumably byte offsets of newline characters — confirm against the lexer.
    pub new_lines: Vec<u32>,
    // Presumably byte offsets of trailing commas — confirm against the lexer.
    pub trailing_commas: Vec<u32>,
}
impl ModuleExtra {
    /// Creates an empty `ModuleExtra` with no recorded spans.
    pub fn new() -> Self {
        Self::default()
    }

    /// Detects if a byte index is in a comment context
    pub fn is_within_comment(&self, byte_index: u32) -> bool {
        // Orders a span relative to the byte index so a binary search can
        // locate a span containing it. Relies on the span lists being sorted.
        let position = |span: &SrcSpan| {
            if span.start > byte_index {
                Ordering::Greater
            } else if span.end < byte_index {
                Ordering::Less
            } else {
                Ordering::Equal
            }
        };
        [&self.comments, &self.doc_comments, &self.module_comments]
            .iter()
            .any(|spans| spans.binary_search_by(position).is_ok())
    }

    /// True when at least one regular comment overlaps the `start..end` range.
    pub(crate) fn has_comment_between(&self, start: u32, end: u32) -> bool {
        self.first_comment_between(start, end).is_some()
    }

    /// Finds a regular comment overlapping the `start..end` range.
    ///
    /// NOTE(review): the binary search yields *an* overlapping comment, not
    /// necessarily the first one — confirm callers only rely on existence or
    /// on there being a single candidate in the range.
    pub fn first_comment_between(&self, start: u32, end: u32) -> Option<SrcSpan> {
        let index = self
            .comments
            .binary_search_by(|comment| {
                if comment.end < start {
                    Ordering::Less
                } else if comment.start > end {
                    Ordering::Greater
                } else {
                    Ordering::Equal
                }
            })
            .ok()?;
        self.comments.get(index).copied()
    }
}
/// A comment's position and text, borrowed from the source it was parsed from.
#[derive(Debug, PartialEq, Eq)]
pub struct Comment<'a> {
    // Byte offset where the comment's span starts in the source.
    pub start: u32,
    // The source text covered by the comment's span (whether the leading
    // slashes are included depends on how the span was recorded — confirm
    // against the lexer).
    pub content: &'a str,
}
impl<'a> From<(&SrcSpan, &'a EcoString)> for Comment<'a> {
fn from(value: (&SrcSpan, &'a EcoString)) -> Self {
Self::from((value.0, value.1.as_str()))
}
}
/// Slices the comment's text out of the full source using its span.
///
/// Panics (via `expect`) if the span does not lie on valid char boundaries
/// within `source`.
impl<'a> From<(&SrcSpan, &'a str)> for Comment<'a> {
    fn from((span, source): (&SrcSpan, &'a str)) -> Comment<'a> {
        let range = span.start as usize..span.end as usize;
        Comment {
            start: span.start,
            content: source.get(range).expect("From span to comment"),
        }
    }
}
// ---- compiler-core/src/parse/token.rs ----
use num_bigint::BigInt;
use std::fmt;
use ecow::EcoString;
use crate::parse::LiteralFloatValue;
/// A lexical token produced by the Gleam lexer.
///
/// Value-carrying variants keep the source text (and, for numbers, the parsed
/// value); the remaining variants are fixed punctuation, comment markers, and
/// keywords. `Display` below renders each token for error messages.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum Token {
    Name {
        name: EcoString,
    },
    UpName {
        name: EcoString,
    },
    DiscardName {
        name: EcoString,
    },
    Int {
        value: EcoString,
        int_value: BigInt,
    },
    Float {
        value: EcoString,
        float_value: LiteralFloatValue,
    },
    String {
        value: EcoString,
    },
    CommentDoc {
        content: EcoString,
    },
    // Groupings
    LeftParen,   // (
    RightParen,  // )
    LeftSquare,  // [
    RightSquare, // ]
    LeftBrace,   // {
    RightBrace,  // }
    // Int Operators
    Plus,
    Minus,
    Star,
    Slash,
    Less,
    Greater,
    LessEqual,
    GreaterEqual,
    Percent,
    // Float Operators
    PlusDot,         // '+.'
    MinusDot,        // '-.'
    StarDot,         // '*.'
    SlashDot,        // '/.'
    LessDot,         // '<.'
    GreaterDot,      // '>.'
    LessEqualDot,    // '<=.'
    GreaterEqualDot, // '>=.'
    // String Operators
    LtGt, // '<>'
    // Other Punctuation
    Colon,
    Comma,
    Hash, // '#'
    Bang, // '!'
    Equal,
    EqualEqual, // '=='
    NotEqual,   // '!='
    Vbar,       // '|'
    VbarVbar,   // '||'
    AmperAmper, // '&&'
    LtLt,       // '<<'
    GtGt,       // '>>'
    Pipe,       // '|>'
    Dot,        // '.'
    RArrow,     // '->'
    LArrow,     // '<-'
    DotDot,     // '..'
    At,         // '@'
    EndOfFile,
    // Extra
    CommentNormal,
    CommentModule,
    NewLine,
    // Keywords (alphabetically):
    As,
    Assert,
    Auto,
    Case,
    Const,
    Delegate,
    Derive,
    Echo,
    Else,
    Fn,
    If,
    Implement,
    Import,
    Let,
    Macro,
    Opaque,
    Panic,
    Pub,
    Test,
    Todo,
    Type,
    Use,
}
impl Token {
    /// Returns the binding precedence of this token when it is used as a
    /// binary operator inside a case-clause guard, or `None` for tokens that
    /// are not guard operators. Higher numbers bind tighter.
    ///
    /// Both arms deliberately list every variant (no `_` catch-all) so that
    /// adding a new token forces this function to be revisited.
    pub fn guard_precedence(&self) -> Option<u8> {
        match self {
            Self::VbarVbar => Some(1),
            Self::AmperAmper => Some(2),
            Self::EqualEqual | Self::NotEqual => Some(3),
            Self::Less
            | Self::LessEqual
            | Self::LessDot
            | Self::LessEqualDot
            | Self::GreaterEqual
            | Self::Greater
            | Self::GreaterEqualDot
            | Self::GreaterDot => Some(4),
            Self::Plus | Self::PlusDot | Self::Minus | Self::MinusDot => Some(5),
            Self::Star | Self::StarDot | Self::Slash | Self::SlashDot | Self::Percent => Some(6),
            Self::Name { .. }
            | Self::UpName { .. }
            | Self::DiscardName { .. }
            | Self::Int { .. }
            | Self::Float { .. }
            | Self::String { .. }
            | Self::CommentDoc { .. }
            | Self::LeftParen
            | Self::RightParen
            | Self::LeftSquare
            | Self::RightSquare
            | Self::LeftBrace
            | Self::RightBrace
            | Self::LtGt
            | Self::Colon
            | Self::Comma
            | Self::Hash
            | Self::Bang
            | Self::Equal
            | Self::Vbar
            | Self::LtLt
            | Self::GtGt
            | Self::Pipe
            | Self::Dot
            | Self::RArrow
            | Self::LArrow
            | Self::DotDot
            | Self::At
            | Self::EndOfFile
            | Self::CommentNormal
            | Self::CommentModule
            | Self::NewLine
            | Self::As
            | Self::Assert
            | Self::Auto
            | Self::Case
            | Self::Const
            | Self::Delegate
            | Self::Derive
            | Self::Echo
            | Self::Else
            | Self::Fn
            | Self::If
            | Self::Implement
            | Self::Import
            | Self::Let
            | Self::Macro
            | Self::Opaque
            | Self::Panic
            | Self::Pub
            | Self::Test
            | Self::Todo
            | Self::Type
            | Self::Use => None,
        }
    }

    /// Whether this token is a reserved word: either a current keyword or a
    /// word reserved for possible future use (e.g. `auto`, `delegate`,
    /// `macro`), which cannot be used as an identifier.
    pub fn is_reserved_word(&self) -> bool {
        match self {
            Token::As
            | Token::Assert
            | Token::Case
            | Token::Const
            | Token::Fn
            | Token::If
            | Token::Import
            | Token::Let
            | Token::Opaque
            | Token::Pub
            | Token::Todo
            | Token::Type
            | Token::Use
            | Token::Auto
            | Token::Delegate
            | Token::Derive
            | Token::Echo
            | Token::Else
            | Token::Implement
            | Token::Macro
            | Token::Panic
            | Token::Test => true,
            Token::Name { .. }
            | Token::UpName { .. }
            | Token::DiscardName { .. }
            | Token::Int { .. }
            | Token::Float { .. }
            | Token::String { .. }
            | Token::CommentDoc { .. }
            | Token::LeftParen
            | Token::RightParen
            | Token::LeftSquare
            | Token::RightSquare
            | Token::LeftBrace
            | Token::RightBrace
            | Token::Plus
            | Token::Minus
            | Token::Star
            | Token::Slash
            | Token::Less
            | Token::Greater
            | Token::LessEqual
            | Token::GreaterEqual
            | Token::Percent
            | Token::PlusDot
            | Token::MinusDot
            | Token::StarDot
            | Token::SlashDot
            | Token::LessDot
            | Token::GreaterDot
            | Token::LessEqualDot
            | Token::GreaterEqualDot
            | Token::LtGt
            | Token::Colon
            | Token::Comma
            | Token::Hash
            | Token::Bang
            | Token::Equal
            | Token::EqualEqual
            | Token::NotEqual
            | Token::Vbar
            | Token::VbarVbar
            | Token::AmperAmper
            | Token::LtLt
            | Token::GtGt
            | Token::Pipe
            | Token::Dot
            | Token::RArrow
            | Token::LArrow
            | Token::DotDot
            | Token::At
            | Token::EndOfFile
            | Token::CommentNormal
            | Token::CommentModule
            | Token::NewLine => false,
        }
    }
}
/// Renders the token wrapped in backticks for use in diagnostics.
///
/// Value-carrying tokens print their source text; fixed tokens print their
/// literal spelling; `NewLine`/`EndOfFile` print symbolic names.
impl fmt::Display for Token {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let s = match self {
            Token::Name { name } | Token::UpName { name } | Token::DiscardName { name } => {
                name.as_str()
            }
            Token::Int {
                value,
                int_value: _,
            }
            | Token::Float {
                value,
                float_value: _,
            }
            | Token::String { value } => value.as_str(),
            Token::AmperAmper => "&&",
            Token::As => "as",
            Token::Assert => "assert",
            Token::At => "@",
            Token::Auto => "auto",
            Token::Bang => "!",
            Token::Case => "case",
            Token::Colon => ":",
            Token::Comma => ",",
            Token::CommentDoc { .. } => "///",
            Token::CommentModule => "////",
            Token::CommentNormal => "//",
            Token::Const => "const",
            Token::Delegate => "delegate",
            Token::Derive => "derive",
            Token::Dot => ".",
            Token::DotDot => "..",
            Token::Echo => "echo",
            Token::Else => "else",
            Token::NewLine => "NEWLINE",
            Token::EndOfFile => "EOF",
            Token::Equal => "=",
            Token::EqualEqual => "==",
            Token::Fn => "fn",
            Token::Greater => ">",
            Token::GreaterDot => ">.",
            Token::GreaterEqual => ">=",
            Token::GreaterEqualDot => ">=.",
            Token::GtGt => ">>",
            Token::Hash => "#",
            Token::If => "if",
            Token::Implement => "implement",
            Token::Import => "import",
            Token::LArrow => "<-",
            Token::LeftBrace => "{",
            Token::LeftParen => "(",
            Token::LeftSquare => "[",
            Token::Less => "<",
            Token::LessDot => "<.",
            Token::LessEqual => "<=",
            Token::LessEqualDot => "<=.",
            Token::Let => "let",
            Token::LtGt => "<>",
            Token::LtLt => "<<",
            Token::Macro => "macro",
            Token::Minus => "-",
            Token::MinusDot => "-.",
            Token::NotEqual => "!=",
            Token::Opaque => "opaque",
            Token::Panic => "panic",
            Token::Percent => "%",
            Token::Pipe => "|>",
            Token::Plus => "+",
            Token::PlusDot => "+.",
            Token::Pub => "pub",
            Token::RArrow => "->",
            Token::RightBrace => "}",
            Token::RightParen => ")",
            Token::RightSquare => "]",
            Token::Slash => "/",
            Token::SlashDot => "/.",
            Token::Star => "*",
            Token::StarDot => "*.",
            Token::Test => "test",
            Token::Todo => "todo",
            Token::Type => "type",
            Token::Use => "use",
            Token::Vbar => "|",
            Token::VbarVbar => "||",
        };
        write!(f, "`{s}`")
    }
}
// ---- compiler-core/src/metadata/tests.rs ----
use hexpm::version::Version;
use rand::Rng;
use type_::{AccessorsMap, FieldMap, RecordAccessor};
use super::*;
use crate::{
analyse::Inferred,
ast::{
BitArrayOption, BitArraySegment, CallArg, Constant, Publicity, SrcSpan, TypedConstant,
TypedConstantBitArraySegmentOption,
},
build::Origin,
line_numbers::LineNumbers,
parse::LiteralFloatValue,
reference::{Reference, ReferenceKind},
type_::{
self, Deprecation, ModuleInterface, Opaque, References, Type, TypeAliasConstructor,
TypeConstructor, TypeValueConstructor, TypeValueConstructorField, TypeVariantConstructors,
ValueConstructor, ValueConstructorVariant,
expression::{Implementations, Purity},
prelude,
},
uid::UniqueIdGenerator,
};
use std::{collections::HashMap, io::BufReader, sync::Arc};
use pretty_assertions::assert_eq;
/// Encodes a module interface to its binary metadata form and decodes it back,
/// panicking on any encode/decode failure. Tests compare the result against
/// the original to verify the serialisation is lossless.
fn roundtrip(input: &ModuleInterface) -> ModuleInterface {
    let encoded = ModuleEncoder::new(input).encode().unwrap();
    let reader = BufReader::new(encoded.as_slice());
    ModuleDecoder::new(UniqueIdGenerator::new())
        .read(reader)
        .unwrap()
}
/// Builds a minimal module interface containing a single public module
/// constant named `one` holding the given literal.
fn constant_module(constant: TypedConstant) -> ModuleInterface {
    // The single exported value, built separately for readability.
    let one = ValueConstructor {
        publicity: Publicity::Public,
        deprecation: Deprecation::NotDeprecated,
        type_: type_::int(),
        variant: ValueConstructorVariant::ModuleConstant {
            documentation: Some("Some documentation".into()),
            literal: constant,
            location: SrcSpan::default(),
            module: "one/two".into(),
            implementations: Implementations {
                gleam: true,
                uses_erlang_externals: false,
                uses_javascript_externals: false,
                can_run_on_erlang: true,
                can_run_on_javascript: true,
            },
            name: "one".into(),
        },
    };
    ModuleInterface {
        warnings: vec![],
        is_internal: true,
        package: "some_package".into(),
        origin: Origin::Src,
        name: "a".into(),
        types: HashMap::new(),
        types_value_constructors: HashMap::new(),
        accessors: HashMap::new(),
        values: HashMap::from([("one".into(), one)]),
        line_numbers: LineNumbers::new(""),
        src_path: "some_path".into(),
        minimum_required_version: Version::new(0, 1, 0),
        type_aliases: HashMap::new(),
        documentation: Vec::new(),
        contains_echo: false,
        references: References::default(),
        inline_functions: HashMap::new(),
    }
}
/// Builds a module whose single constant is a bit array with one integer
/// segment carrying the given segment option.
fn bit_array_segment_option_module(option: TypedConstantBitArraySegmentOption) -> ModuleInterface {
    let value = Box::new(Constant::Int {
        location: Default::default(),
        value: "1".into(),
        int_value: 1.into(),
    });
    let segment = BitArraySegment {
        location: Default::default(),
        value,
        options: vec![option],
        type_: type_::int(),
    };
    constant_module(Constant::BitArray {
        location: Default::default(),
        segments: vec![segment],
    })
}
// A module with every collection empty must survive the encode/decode
// round trip unchanged.
#[test]
fn empty_module() {
    let module = ModuleInterface {
        warnings: vec![],
        is_internal: true,
        package: "some_package".into(),
        origin: Origin::Src,
        name: "one/two".into(),
        types: HashMap::new(),
        types_value_constructors: HashMap::new(),
        values: HashMap::new(),
        accessors: HashMap::new(),
        line_numbers: LineNumbers::new(""),
        src_path: "some_path".into(),
        minimum_required_version: Version::new(0, 1, 0),
        type_aliases: HashMap::new(),
        documentation: Vec::new(),
        contains_echo: false,
        references: References::default(),
        inline_functions: HashMap::new(),
    };
    assert_eq!(roundtrip(&module), module);
}
// Line number information built from multi-line source must round-trip.
#[test]
fn with_line_numbers() {
    let module = ModuleInterface {
        warnings: vec![],
        is_internal: false,
        package: "some_package".into(),
        origin: Origin::Src,
        name: "one/two".into(),
        types: HashMap::new(),
        types_value_constructors: HashMap::new(),
        values: HashMap::new(),
        accessors: HashMap::new(),
        line_numbers: LineNumbers::new(
            "const a = 1
const b = 2
const c = 3",
        ),
        src_path: "some_path".into(),
        minimum_required_version: Version::new(0, 1, 0),
        type_aliases: HashMap::new(),
        documentation: Vec::new(),
        contains_echo: false,
        references: References::default(),
        inline_functions: HashMap::new(),
    };
    assert_eq!(roundtrip(&module), module);
}
// Private publicity on a type constructor must be preserved by the round trip.
#[test]
fn module_with_private_type() {
    let module = ModuleInterface {
        warnings: vec![],
        is_internal: false,
        package: "some_package".into(),
        origin: Origin::Src,
        name: "a/b".into(),
        types: [(
            "ListIntType".into(),
            TypeConstructor {
                type_: type_::list(type_::int()),
                publicity: Publicity::Private,
                origin: Default::default(),
                module: "the/module".into(),
                parameters: vec![],
                deprecation: Deprecation::NotDeprecated,
                documentation: None,
            },
        )]
        .into(),
        types_value_constructors: HashMap::new(),
        values: HashMap::new(),
        accessors: HashMap::new(),
        line_numbers: LineNumbers::new(""),
        src_path: "some_path".into(),
        minimum_required_version: Version::new(0, 1, 0),
        type_aliases: HashMap::new(),
        documentation: Vec::new(),
        contains_echo: false,
        references: References::default(),
        inline_functions: HashMap::new(),
    };
    assert_eq!(roundtrip(&module), module);
}
// A public named (applied) type constructor must round-trip.
#[test]
fn module_with_app_type() {
    let module = ModuleInterface {
        warnings: vec![],
        is_internal: false,
        package: "some_package".into(),
        origin: Origin::Src,
        name: "a/b".into(),
        types: [(
            "ListIntType".into(),
            TypeConstructor {
                type_: type_::list(type_::int()),
                publicity: Publicity::Public,
                origin: Default::default(),
                module: "the/module".into(),
                parameters: vec![],
                deprecation: Deprecation::NotDeprecated,
                documentation: None,
            },
        )]
        .into(),
        types_value_constructors: HashMap::new(),
        values: HashMap::new(),
        accessors: HashMap::new(),
        line_numbers: LineNumbers::new(""),
        src_path: "some_path".into(),
        minimum_required_version: Version::new(0, 1, 0),
        type_aliases: HashMap::new(),
        documentation: Vec::new(),
        contains_echo: false,
        references: References::default(),
        inline_functions: HashMap::new(),
    };
    assert_eq!(roundtrip(&module), module);
}
// A function type (argument list plus return type) must round-trip.
#[test]
fn module_with_fn_type() {
    let module = ModuleInterface {
        warnings: vec![],
        is_internal: false,
        package: "some_package".into(),
        origin: Origin::Src,
        name: "a/b".into(),
        types: [(
            "FnType".into(),
            TypeConstructor {
                type_: type_::fn_(vec![type_::nil(), type_::float()], type_::int()),
                publicity: Publicity::Public,
                origin: Default::default(),
                module: "the/module".into(),
                parameters: vec![],
                deprecation: Deprecation::NotDeprecated,
                documentation: None,
            },
        )]
        .into(),
        types_value_constructors: HashMap::new(),
        values: HashMap::new(),
        accessors: HashMap::new(),
        line_numbers: LineNumbers::new(""),
        src_path: "some_path".into(),
        minimum_required_version: Version::new(0, 1, 0),
        type_aliases: HashMap::new(),
        documentation: Vec::new(),
        contains_echo: false,
        references: References::default(),
        inline_functions: HashMap::new(),
    };
    assert_eq!(roundtrip(&module), module);
}
// A tuple type with several element types must round-trip.
#[test]
fn module_with_tuple_type() {
    let module = ModuleInterface {
        warnings: vec![],
        is_internal: false,
        package: "some_package".into(),
        origin: Origin::Src,
        name: "a/b".into(),
        types: [(
            "TupleType".into(),
            TypeConstructor {
                type_: type_::tuple(vec![type_::nil(), type_::float(), type_::int()]),
                publicity: Publicity::Public,
                origin: Default::default(),
                module: "the/module".into(),
                parameters: vec![],
                deprecation: Deprecation::NotDeprecated,
                documentation: None,
            },
        )]
        .into(),
        types_value_constructors: HashMap::new(),
        values: HashMap::new(),
        accessors: HashMap::new(),
        line_numbers: LineNumbers::new(""),
        src_path: "some_path".into(),
        minimum_required_version: Version::new(0, 1, 0),
        type_aliases: HashMap::new(),
        documentation: Vec::new(),
        contains_echo: false,
        references: References::default(),
        inline_functions: HashMap::new(),
    };
    assert_eq!(roundtrip(&module), module);
}
// Generic type variable ids are renumbered starting from 0 when decoding:
// a module built with ids 7 and 8 must decode equal to one built with 0 and 1.
#[test]
fn module_with_generic_type() {
    let t0 = type_::generic_var(0);
    let t1 = type_::generic_var(1);
    let t7 = type_::generic_var(7);
    let t8 = type_::generic_var(8);

    // Builds the same module shape for any pair of generic variables.
    fn make(t1: Arc<Type>, t2: Arc<Type>) -> ModuleInterface {
        ModuleInterface {
            warnings: vec![],
            is_internal: false,
            package: "some_package".into(),
            origin: Origin::Src,
            name: "a/b".into(),
            types: [(
                "TupleType".into(),
                TypeConstructor {
                    type_: type_::tuple(vec![t1.clone(), t1.clone(), t2.clone()]),
                    publicity: Publicity::Public,
                    origin: Default::default(),
                    module: "the/module".into(),
                    parameters: vec![t1, t2],
                    deprecation: Deprecation::NotDeprecated,
                    documentation: None,
                },
            )]
            .into(),
            types_value_constructors: HashMap::new(),
            values: HashMap::new(),
            accessors: HashMap::new(),
            line_numbers: LineNumbers::new(""),
            src_path: "some_path".into(),
            minimum_required_version: Version::new(0, 1, 0),
            type_aliases: HashMap::new(),
            documentation: Vec::new(),
            contains_echo: false,
            references: References::default(),
            inline_functions: HashMap::new(),
        }
    }

    assert_eq!(roundtrip(&make(t7, t8)), make(t0, t1));
}
// Type links are collapsed to their target during the round trip: a link to
// Int must decode as plain Int.
#[test]
fn module_with_type_links() {
    let linked_type = type_::link(type_::int());
    let type_ = type_::int();

    // Builds the same module shape for either the linked or the plain type.
    fn make(type_: Arc<Type>) -> ModuleInterface {
        ModuleInterface {
            warnings: vec![],
            is_internal: false,
            package: "some_package".into(),
            origin: Origin::Src,
            name: "a".into(),
            types: [(
                "SomeType".into(),
                TypeConstructor {
                    type_,
                    publicity: Publicity::Public,
                    origin: Default::default(),
                    module: "a".into(),
                    parameters: vec![],
                    deprecation: Deprecation::NotDeprecated,
                    documentation: None,
                },
            )]
            .into(),
            types_value_constructors: HashMap::new(),
            values: HashMap::new(),
            accessors: HashMap::new(),
            line_numbers: LineNumbers::new(""),
            src_path: "some_path".into(),
            minimum_required_version: Version::new(0, 1, 0),
            type_aliases: HashMap::new(),
            documentation: Vec::new(),
            contains_echo: false,
            references: References::default(),
            inline_functions: HashMap::new(),
        }
    }

    assert_eq!(roundtrip(&make(linked_type)), make(type_));
}
// Documentation attached to a type constructor must round-trip (links are
// still collapsed, as in `module_with_type_links`).
#[test]
fn module_with_type_constructor_documentation() {
    let linked_type = type_::link(type_::int());
    let type_ = type_::int();

    // Builds the same module shape for either the linked or the plain type.
    fn make(type_: Arc<Type>) -> ModuleInterface {
        ModuleInterface {
            warnings: vec![],
            is_internal: false,
            package: "some_package".into(),
            origin: Origin::Src,
            name: "a".into(),
            types: [(
                "SomeType".into(),
                TypeConstructor {
                    type_,
                    publicity: Publicity::Public,
                    origin: Default::default(),
                    module: "a".into(),
                    parameters: vec![],
                    deprecation: Deprecation::NotDeprecated,
                    documentation: Some("type documentation".into()),
                },
            )]
            .into(),
            types_value_constructors: HashMap::new(),
            values: HashMap::new(),
            accessors: HashMap::new(),
            line_numbers: LineNumbers::new(""),
            src_path: "some_path".into(),
            minimum_required_version: Version::new(0, 1, 0),
            type_aliases: HashMap::new(),
            documentation: Vec::new(),
            contains_echo: false,
            references: References::default(),
            inline_functions: HashMap::new(),
        }
    }

    assert_eq!(roundtrip(&make(linked_type)), make(type_));
}
// A non-default source span recorded as a type constructor's origin must
// round-trip exactly.
#[test]
fn module_with_type_constructor_origin() {
    let linked_type = type_::link(type_::int());
    let type_ = type_::int();

    // Builds the same module shape for either the linked or the plain type.
    fn make(type_: Arc<Type>) -> ModuleInterface {
        ModuleInterface {
            warnings: vec![],
            is_internal: false,
            package: "some_package".into(),
            origin: Origin::Src,
            name: "a".into(),
            types: [(
                "SomeType".into(),
                TypeConstructor {
                    type_,
                    publicity: Publicity::Public,
                    origin: SrcSpan {
                        start: 535,
                        end: 543,
                    },
                    module: "a".into(),
                    parameters: vec![],
                    deprecation: Deprecation::NotDeprecated,
                    documentation: None,
                },
            )]
            .into(),
            types_value_constructors: HashMap::new(),
            values: HashMap::new(),
            accessors: HashMap::new(),
            line_numbers: LineNumbers::new(""),
            src_path: "some_path".into(),
            minimum_required_version: Version::new(0, 1, 0),
            type_aliases: HashMap::new(),
            documentation: Vec::new(),
            contains_echo: false,
            references: References::default(),
            inline_functions: HashMap::new(),
        }
    }

    assert_eq!(roundtrip(&make(linked_type)), make(type_));
}
// The type-to-value-constructors mapping (parameter ids, variants and their
// documentation, opacity) must round-trip.
#[test]
fn module_type_to_constructors_mapping() {
    let module = ModuleInterface {
        warnings: vec![],
        is_internal: false,
        package: "some_package".into(),
        origin: Origin::Src,
        name: "a".into(),
        types: HashMap::new(),
        types_value_constructors: [(
            "SomeType".into(),
            TypeVariantConstructors {
                type_parameters_ids: vec![0, 1, 2],
                variants: vec![TypeValueConstructor {
                    name: "One".into(),
                    parameters: vec![],
                    documentation: Some("Some documentation".into()),
                }],
                opaque: Opaque::NotOpaque,
            },
        )]
        .into(),
        accessors: HashMap::new(),
        values: HashMap::new(),
        line_numbers: LineNumbers::new(""),
        src_path: "some_path".into(),
        minimum_required_version: Version::new(0, 1, 0),
        type_aliases: HashMap::new(),
        documentation: Vec::new(),
        contains_echo: false,
        references: References::default(),
        inline_functions: HashMap::new(),
    };
    assert_eq!(roundtrip(&module), module);
}
// A public module function value (documentation, arity, location,
// implementations, purity) must round-trip.
#[test]
fn module_fn_value() {
    let module = ModuleInterface {
        warnings: vec![],
        is_internal: false,
        package: "some_package".into(),
        origin: Origin::Src,
        name: "a".into(),
        types: HashMap::new(),
        types_value_constructors: HashMap::new(),
        accessors: HashMap::new(),
        values: [(
            "one".into(),
            ValueConstructor {
                publicity: Publicity::Public,
                deprecation: Deprecation::NotDeprecated,
                type_: type_::int(),
                variant: ValueConstructorVariant::ModuleFn {
                    documentation: Some("wobble!".into()),
                    name: "one".into(),
                    field_map: None,
                    module: "a".into(),
                    arity: 5,
                    location: SrcSpan {
                        start: 535,
                        end: 1100,
                    },
                    external_erlang: None,
                    external_javascript: None,
                    implementations: Implementations {
                        gleam: true,
                        uses_erlang_externals: false,
                        uses_javascript_externals: false,
                        can_run_on_erlang: true,
                        can_run_on_javascript: true,
                    },
                    purity: Purity::Pure,
                },
            },
        )]
        .into(),
        line_numbers: LineNumbers::new(""),
        src_path: "some_path".into(),
        minimum_required_version: Version::new(0, 1, 0),
        type_aliases: HashMap::new(),
        documentation: Vec::new(),
        contains_echo: false,
        references: References::default(),
        inline_functions: HashMap::new(),
    };
    assert_eq!(roundtrip(&module), module);
}
// A deprecation notice (including its message) on a module function must
// round-trip.
#[test]
fn deprecated_module_fn_value() {
    let module = ModuleInterface {
        warnings: vec![],
        is_internal: false,
        package: "some_package".into(),
        origin: Origin::Src,
        name: "a".into(),
        types: HashMap::new(),
        types_value_constructors: HashMap::new(),
        accessors: HashMap::new(),
        values: [(
            "one".into(),
            ValueConstructor {
                publicity: Publicity::Public,
                deprecation: Deprecation::Deprecated {
                    message: "wibble wobble".into(),
                },
                type_: type_::int(),
                variant: ValueConstructorVariant::ModuleFn {
                    documentation: Some("wobble!".into()),
                    name: "one".into(),
                    field_map: None,
                    module: "a".into(),
                    arity: 5,
                    location: SrcSpan {
                        start: 535,
                        end: 1100,
                    },
                    external_erlang: None,
                    external_javascript: None,
                    implementations: Implementations {
                        gleam: true,
                        uses_erlang_externals: false,
                        uses_javascript_externals: false,
                        can_run_on_erlang: true,
                        can_run_on_javascript: true,
                    },
                    purity: Purity::Pure,
                },
            },
        )]
        .into(),
        line_numbers: LineNumbers::new(""),
        src_path: "some_path".into(),
        minimum_required_version: Version::new(0, 1, 0),
        type_aliases: HashMap::new(),
        documentation: Vec::new(),
        contains_echo: false,
        references: References::default(),
        inline_functions: HashMap::new(),
    };
    assert_eq!(roundtrip(&module), module);
}
// Private publicity on a module function value must round-trip.
#[test]
fn private_module_fn_value() {
    let module = ModuleInterface {
        warnings: vec![],
        is_internal: false,
        package: "some_package".into(),
        origin: Origin::Src,
        name: "a".into(),
        types: HashMap::new(),
        types_value_constructors: HashMap::new(),
        accessors: HashMap::new(),
        values: [(
            "one".into(),
            ValueConstructor {
                publicity: Publicity::Private,
                deprecation: Deprecation::NotDeprecated,
                type_: type_::int(),
                variant: ValueConstructorVariant::ModuleFn {
                    documentation: Some("wobble!".into()),
                    name: "one".into(),
                    field_map: None,
                    module: "a".into(),
                    arity: 5,
                    location: SrcSpan {
                        start: 535,
                        end: 1100,
                    },
                    external_erlang: None,
                    external_javascript: None,
                    implementations: Implementations {
                        gleam: true,
                        uses_erlang_externals: false,
                        uses_javascript_externals: false,
                        can_run_on_erlang: true,
                        can_run_on_javascript: true,
                    },
                    purity: Purity::Pure,
                },
            },
        )]
        .into(),
        line_numbers: LineNumbers::new(""),
        src_path: "some_path".into(),
        minimum_required_version: Version::new(0, 1, 0),
        type_aliases: HashMap::new(),
        documentation: Vec::new(),
        contains_echo: false,
        references: References::default(),
        inline_functions: HashMap::new(),
    };
    assert_eq!(roundtrip(&module), module);
}
// Regression test: nested module name ("a/b/c") plus TrustedPure purity must
// round-trip.
// https://github.com/gleam-lang/gleam/commit/c8f3bd0ddbf61c27ea35f37297058ecca7515f6c
#[test]
fn module_fn_value_regression() {
    let module = ModuleInterface {
        warnings: vec![],
        is_internal: false,
        package: "some_package".into(),
        origin: Origin::Src,
        name: "a/b/c".into(),
        types: HashMap::new(),
        types_value_constructors: HashMap::new(),
        accessors: HashMap::new(),
        values: [(
            "one".into(),
            ValueConstructor {
                publicity: Publicity::Public,
                deprecation: Deprecation::NotDeprecated,
                type_: type_::int(),
                variant: ValueConstructorVariant::ModuleFn {
                    documentation: Some("wabble!".into()),
                    name: "one".into(),
                    field_map: None,
                    module: "a".into(),
                    arity: 5,
                    location: SrcSpan {
                        start: 52,
                        end: 1100,
                    },
                    external_erlang: None,
                    external_javascript: None,
                    implementations: Implementations {
                        gleam: true,
                        uses_erlang_externals: false,
                        uses_javascript_externals: false,
                        can_run_on_erlang: true,
                        can_run_on_javascript: true,
                    },
                    purity: Purity::TrustedPure,
                },
            },
        )]
        .into(),
        line_numbers: LineNumbers::new(""),
        src_path: "some_path".into(),
        minimum_required_version: Version::new(0, 1, 0),
        type_aliases: HashMap::new(),
        documentation: Vec::new(),
        contains_echo: false,
        references: References::default(),
        inline_functions: HashMap::new(),
    };
    assert_eq!(roundtrip(&module), module);
}
// A module function value carrying a `FieldMap` (labelled arguments) must
// roundtrip, including the label-to-position mapping.
#[test]
fn module_fn_value_with_field_map() {
    let module = ModuleInterface {
        warnings: vec![],
        is_internal: false,
        package: "some_package".into(),
        origin: Origin::Src,
        name: "a".into(),
        types: HashMap::new(),
        types_value_constructors: HashMap::new(),
        accessors: HashMap::new(),
        values: [(
            "one".into(),
            ValueConstructor {
                publicity: Publicity::Public,
                deprecation: Deprecation::NotDeprecated,
                type_: type_::int(),
                variant: ValueConstructorVariant::ModuleFn {
                    documentation: Some("wubble!".into()),
                    name: "one".into(),
                    field_map: Some(FieldMap {
                        arity: 20,
                        fields: [("ok".into(), 5), ("ko".into(), 7)].into(),
                    }),
                    external_erlang: None,
                    external_javascript: None,
                    module: "a".into(),
                    arity: 5,
                    location: SrcSpan { start: 2, end: 11 },
                    implementations: Implementations {
                        gleam: true,
                        uses_erlang_externals: false,
                        uses_javascript_externals: false,
                        can_run_on_erlang: true,
                        can_run_on_javascript: true,
                    },
                    purity: Purity::Pure,
                },
            },
        )]
        .into(),
        line_numbers: LineNumbers::new(""),
        src_path: "some_path".into(),
        minimum_required_version: Version::new(0, 1, 0),
        type_aliases: HashMap::new(),
        documentation: Vec::new(),
        contains_echo: false,
        references: References::default(),
        inline_functions: HashMap::new(),
    };
    assert_eq!(roundtrip(&module), module);
}
// A record constructor value must roundtrip. The numeric fields (arity,
// variant counts, source span, variant index) are randomised each run to
// exercise the integer encoding across arbitrary values.
#[test]
fn record_value() {
    let mut random = rand::rng();
    let module = ModuleInterface {
        warnings: vec![],
        is_internal: false,
        package: "some_package".into(),
        origin: Origin::Src,
        name: "a".into(),
        types: HashMap::new(),
        types_value_constructors: HashMap::new(),
        accessors: HashMap::new(),
        values: [(
            "one".into(),
            ValueConstructor {
                publicity: Publicity::Public,
                deprecation: Deprecation::NotDeprecated,
                type_: type_::int(),
                variant: ValueConstructorVariant::Record {
                    documentation: Some("webble!".into()),
                    name: "one".into(),
                    module: "themodule".into(),
                    field_map: None,
                    arity: random.random(),
                    variants_count: random.random(),
                    location: SrcSpan {
                        start: random.random(),
                        end: random.random(),
                    },
                    variant_index: random.random(),
                },
            },
        )]
        .into(),
        line_numbers: LineNumbers::new(""),
        src_path: "some_path".into(),
        minimum_required_version: Version::new(0, 1, 0),
        type_aliases: HashMap::new(),
        documentation: Vec::new(),
        contains_echo: false,
        references: References::default(),
        inline_functions: HashMap::new(),
    };
    assert_eq!(roundtrip(&module), module);
}
// A record constructor value with a `FieldMap` must roundtrip; as in
// `record_value`, all numeric fields are randomised to exercise the
// integer encoding.
#[test]
fn record_value_with_field_map() {
    let mut random = rand::rng();
    let module = ModuleInterface {
        warnings: vec![],
        is_internal: false,
        package: "some_package".into(),
        origin: Origin::Src,
        name: "a".into(),
        types: HashMap::new(),
        types_value_constructors: HashMap::new(),
        accessors: HashMap::new(),
        values: [(
            "one".into(),
            ValueConstructor {
                publicity: Publicity::Public,
                deprecation: Deprecation::NotDeprecated,
                type_: type_::int(),
                variant: ValueConstructorVariant::Record {
                    documentation: Some("wybble!".into()),
                    module: "themodule".into(),
                    name: "one".into(),
                    field_map: Some(FieldMap {
                        arity: random.random(),
                        fields: [
                            ("ok".into(), random.random()),
                            ("ko".into(), random.random()),
                        ]
                        .into(),
                    }),
                    arity: random.random(),
                    variants_count: random.random(),
                    variant_index: random.random(),
                    location: SrcSpan {
                        start: random.random(),
                        end: random.random(),
                    },
                },
            },
        )]
        .into(),
        line_numbers: LineNumbers::new(""),
        src_path: "some_path".into(),
        minimum_required_version: Version::new(0, 1, 0),
        type_aliases: HashMap::new(),
        documentation: Vec::new(),
        contains_echo: false,
        references: References::default(),
        inline_functions: HashMap::new(),
    };
    assert_eq!(roundtrip(&module), module);
}
// Public record accessor maps (shared, variant-specific, and positional
// accessors) must roundtrip through the metadata format.
#[test]
fn accessors() {
    // NOTE(review): both entries below use the key "a", so converting this
    // array into a HashMap keeps only the later entry (index 5). Possibly
    // the keys were meant to differ — confirm intent.
    let accessors1 = [
        (
            "a".into(),
            RecordAccessor {
                index: 6,
                label: "siiixxx".into(),
                type_: type_::nil(),
                documentation: Some("Here is some documentation".into()),
            },
        ),
        (
            "a".into(),
            RecordAccessor {
                index: 5,
                label: "fiveee".into(),
                type_: type_::float(),
                documentation: None,
            },
        ),
    ];
    let accessors2 = [(
        "a".into(),
        RecordAccessor {
            index: 1,
            label: "ok".into(),
            type_: type_::float(),
            documentation: Some("Documentation for the ok field".into()),
        },
    )];
    let module = ModuleInterface {
        warnings: vec![],
        is_internal: false,
        package: "some_package".into(),
        origin: Origin::Src,
        name: "a".into(),
        types: HashMap::new(),
        types_value_constructors: HashMap::new(),
        values: HashMap::new(),
        accessors: [
            (
                "one".into(),
                AccessorsMap {
                    publicity: Publicity::Public,
                    type_: type_::int(),
                    shared_accessors: accessors1.clone().into(),
                    variant_specific_accessors: vec![accessors1.into()],
                    variant_positional_accessors: vec![vec![type_::int(), type_::float()]],
                },
            ),
            (
                "two".into(),
                AccessorsMap {
                    publicity: Publicity::Public,
                    type_: type_::int(),
                    shared_accessors: accessors2.clone().into(),
                    variant_specific_accessors: vec![accessors2.into()],
                    variant_positional_accessors: vec![vec![]],
                },
            ),
        ]
        .into(),
        line_numbers: LineNumbers::new(""),
        src_path: "some_path".into(),
        minimum_required_version: Version::new(0, 1, 0),
        type_aliases: HashMap::new(),
        documentation: Vec::new(),
        contains_echo: false,
        references: References::default(),
        inline_functions: HashMap::new(),
    };
    assert_eq!(roundtrip(&module), module);
}
#[test]
fn private_accessors() {
let accessors1 = [
(
"a".into(),
RecordAccessor {
index: 6,
label: "siiixxx".into(),
type_: type_::nil(),
documentation: None,
},
),
(
"a".into(),
RecordAccessor {
index: 5,
label: "fiveee".into(),
type_: type_::float(),
documentation: None,
},
),
];
let accessors2 = [(
"a".into(),
RecordAccessor {
index: 1,
label: "ok".into(),
type_: type_::float(),
documentation: None,
},
)];
let module = ModuleInterface {
warnings: vec![],
| rust | Apache-2.0 | f424547f02e621f1c5f28749786e05eda7feb098 | 2026-01-04T15:40:22.554517Z | true |
gleam-lang/gleam | https://github.com/gleam-lang/gleam/blob/f424547f02e621f1c5f28749786e05eda7feb098/compiler-core/src/metadata/module_decoder.rs | compiler-core/src/metadata/module_decoder.rs | #![allow(clippy::unnecessary_wraps)] // Needed for macro
use capnp::{text, text_list};
use ecow::EcoString;
use itertools::Itertools;
use crate::{
Result,
analyse::Inferred,
ast::{
BitArrayOption, BitArraySegment, CallArg, Constant, Publicity, SrcSpan, TypedConstant,
TypedConstantBitArraySegment, TypedConstantBitArraySegmentOption,
},
build::Origin,
line_numbers::{Character, LineNumbers},
parse::LiteralFloatValue,
reference::{Reference, ReferenceKind, ReferenceMap},
schema_capnp::{self as schema, *},
type_::{
self, AccessorsMap, Deprecation, FieldMap, ModuleInterface, Opaque, RecordAccessor,
References, Type, TypeAliasConstructor, TypeConstructor, TypeValueConstructor,
TypeValueConstructorField, TypeVariantConstructors, ValueConstructor,
ValueConstructorVariant,
expression::{Implementations, Purity},
},
uid::UniqueIdGenerator,
};
use std::{collections::HashMap, collections::HashSet, io::BufRead, sync::Arc};
/// Decode every element of a capnp list `$reader` with `$self.$method`,
/// collecting the results into a `Vec` preallocated to the list length.
/// Propagates the first decoding error with `?`, so it may only be used
/// inside functions returning `Result`.
macro_rules! read_vec {
    ($reader:expr, $self:expr, $method:ident) => {{
        let list = $reader;
        let mut decoded = Vec::with_capacity(list.len() as usize);
        for element in list.into_iter() {
            decoded.push($self.$method(&element)?);
        }
        decoded
    }};
}
/// Decode a capnp list of key/value properties into a `HashMap`: each key is
/// decoded as a string and each value with `$self.$method`. Propagates the
/// first decoding error with `?`, so it may only be used inside functions
/// returning `Result`.
macro_rules! read_hashmap {
    ($reader:expr, $self:expr, $method:ident) => {{
        let properties = $reader;
        let mut map = HashMap::with_capacity(properties.len() as usize);
        for property in properties.into_iter() {
            let key = $self.string(property.get_key()?)?;
            let value = $self.$method(&property.get_value()?.into())?;
            let _ = map.insert(key, value);
        }
        map
    }};
}
/// Decodes a module's Cap'n Proto metadata into a `ModuleInterface`.
#[derive(Debug)]
pub struct ModuleDecoder {
    // Source of fresh type variable ids assigned during decoding.
    ids: UniqueIdGenerator,
    // Maps type variable ids found in the serialised data to the fresh local
    // ids generated for them, so repeated occurrences decode consistently.
    type_var_id_map: HashMap<u64, u64>,
}
impl ModuleDecoder {
    /// Create a decoder that mints fresh type variable ids from `ids`.
    pub fn new(ids: UniqueIdGenerator) -> Self {
        Self {
            ids,
            type_var_id_map: Default::default(),
        }
    }

    /// Read a packed Cap'n Proto message from `reader` and decode it into a
    /// `ModuleInterface`.
    ///
    /// # Errors
    /// Fails if the message cannot be parsed or any contained string is not
    /// valid UTF-8.
    pub fn read(&mut self, reader: impl BufRead) -> Result<ModuleInterface> {
        let message_reader =
            capnp::serialize_packed::read_message(reader, capnp::message::ReaderOptions::new())?;
        let reader = message_reader.get_root::<module::Reader<'_>>()?;
        Ok(ModuleInterface {
            name: self.string(reader.get_name()?)?,
            package: self.string(reader.get_package()?)?,
            is_internal: reader.get_is_internal(),
            // Origin is not part of the serialised data; decoded modules are
            // always treated as `Src`.
            origin: Origin::Src,
            values: read_hashmap!(reader.get_values()?, self, value_constructor),
            types: read_hashmap!(reader.get_types()?, self, type_constructor),
            types_value_constructors: read_hashmap!(
                reader.get_types_constructors()?,
                self,
                type_variants_constructors
            ),
            accessors: read_hashmap!(reader.get_accessors()?, self, accessors_map),
            line_numbers: self.line_numbers(&reader.get_line_numbers()?)?,
            src_path: self.str(reader.get_src_path()?)?.into(),
            // Warnings are not serialised, so a decoded module has none.
            warnings: vec![],
            minimum_required_version: self.version(&reader.get_required_version()?),
            type_aliases: read_hashmap!(reader.get_type_aliases()?, self, type_alias_constructor),
            documentation: self.string_list(reader.get_documentation()?)?,
            contains_echo: reader.get_contains_echo(),
            references: self.references(reader.get_references()?)?,
            // Inline functions are not read from the metadata here.
            inline_functions: HashMap::new(),
        })
    }

    /// Decode a capnp text reader into an owned `EcoString`.
    fn string(&self, reader: text::Reader<'_>) -> Result<EcoString> {
        self.str(reader).map(|str| str.into())
    }

    /// Decode a capnp text list into a vec of strings.
    fn string_list(&self, reader: text_list::Reader<'_>) -> Result<Vec<EcoString>> {
        let mut vec = Vec::with_capacity(reader.len() as usize);
        for reader in reader.into_iter() {
            vec.push(self.string(reader?)?);
        }
        Ok(vec)
    }

    /// Borrow the reader's bytes as a `&str`, failing if they are not valid
    /// UTF-8.
    fn str<'a>(&self, reader: text::Reader<'a>) -> Result<&'a str> {
        reader
            .to_str()
            .map_err(|_| capnp::Error::failed("String contains non-utf8 characters".into()).into())
    }

    /// Decode the module's cross-reference information: imported modules and
    /// the locations where values/types are referenced.
    fn references(&self, reader: references::Reader<'_>) -> Result<References> {
        Ok(References {
            imported_modules: self.string_set(reader.get_imported_modules()?)?,
            value_references: self.reference_map(reader.get_value_references()?)?,
            type_references: self.reference_map(reader.get_type_references()?)?,
        })
    }

    /// Decode a capnp text list into a set of strings.
    fn string_set(&self, reader: text_list::Reader<'_>) -> Result<HashSet<EcoString>> {
        let mut set = HashSet::with_capacity(reader.len() as usize);
        for reader in reader.into_iter() {
            let _ = set.insert(self.string(reader?)?);
        }
        Ok(set)
    }

    /// Decode a list of `(module, name) -> [Reference]` entries into a
    /// `ReferenceMap`.
    fn reference_map(
        &self,
        reader: capnp::struct_list::Reader<'_, reference_map::Owned>,
    ) -> Result<ReferenceMap> {
        let mut map = HashMap::with_capacity(reader.len() as usize);
        for prop in reader.into_iter() {
            let module = self.string(prop.get_module()?)?;
            let name = self.string(prop.get_name()?)?;
            let references = read_vec!(prop.get_references()?, self, reference);
            let _ = map.insert((module, name), references);
        }
        Ok(map)
    }

    /// Decode a single reference (location + kind).
    fn reference(&self, reader: &reference::Reader<'_>) -> Result<Reference> {
        Ok(Reference {
            location: self.src_span(&reader.get_location()?)?,
            kind: self.reference_kind(&reader.get_kind()?)?,
        })
    }

    /// Decode the union tag describing how a name was referenced.
    fn reference_kind(&self, reader: &reference_kind::Reader<'_>) -> Result<ReferenceKind> {
        use reference_kind::Which;
        Ok(match reader.which()? {
            Which::Qualified(_) => ReferenceKind::Qualified,
            Which::Unqualified(_) => ReferenceKind::Unqualified,
            Which::Import(_) => ReferenceKind::Import,
            Which::Definition(_) => ReferenceKind::Definition,
            Which::Alias(_) => ReferenceKind::Alias,
        })
    }

    /// Decode a type constructor definition.
    fn type_constructor(
        &mut self,
        reader: &type_constructor::Reader<'_>,
    ) -> Result<TypeConstructor> {
        let type_ = self.type_(&reader.get_type()?)?;
        let deprecation = reader.get_deprecated()?;
        // An empty deprecation message on the wire means "not deprecated".
        let deprecation = if deprecation.is_empty() {
            Deprecation::NotDeprecated
        } else {
            Deprecation::Deprecated {
                message: self.string(deprecation)?,
            }
        };
        Ok(TypeConstructor {
            publicity: self.publicity(reader.get_publicity()?)?,
            origin: self.src_span(&reader.get_origin()?)?,
            module: self.string(reader.get_module()?)?,
            parameters: read_vec!(reader.get_parameters()?, self, type_),
            type_,
            deprecation,
            documentation: self.optional_string(self.str(reader.get_documentation()?)?),
        })
    }

    /// Decode a type alias definition.
    fn type_alias_constructor(
        &mut self,
        reader: &type_alias_constructor::Reader<'_>,
    ) -> Result<TypeAliasConstructor> {
        let type_ = self.type_(&reader.get_type()?)?;
        let deprecation = reader.get_deprecation()?;
        // As for type constructors, an empty message means "not deprecated".
        let deprecation = if deprecation.is_empty() {
            Deprecation::NotDeprecated
        } else {
            Deprecation::Deprecated {
                message: self.string(deprecation)?,
            }
        };
        let parameters = read_vec!(&reader.get_parameters()?, self, type_);
        Ok(TypeAliasConstructor {
            publicity: self.publicity(reader.get_publicity()?)?,
            origin: self.src_span(&reader.get_origin()?)?,
            module: self.string(reader.get_module()?)?,
            type_,
            deprecation,
            documentation: self.optional_string(self.str(reader.get_documentation()?)?),
            arity: reader.get_arity() as usize,
            parameters,
        })
    }

    /// Decode a type, dispatching on its union tag (named app, fn, tuple, or
    /// variable).
    fn type_(&mut self, reader: &schema::type_::Reader<'_>) -> Result<Arc<Type>> {
        use schema::type_::Which;
        match reader.which()? {
            Which::App(reader) => self.type_app(&reader),
            Which::Fn(reader) => self.type_fn(&reader),
            Which::Tuple(reader) => self.type_tuple(&reader),
            Which::Var(reader) => self.type_var(&reader),
        }
    }

    /// Decode a named type application, e.g. `gleam/option.Option(Int)`.
    fn type_app(&mut self, reader: &schema::type_::app::Reader<'_>) -> Result<Arc<Type>> {
        let package = self.string(reader.get_package()?)?;
        let module = self.string(reader.get_module()?)?;
        let name = self.string(reader.get_name()?)?;
        let arguments = read_vec!(&reader.get_parameters()?, self, type_);
        let inferred_variant = self.inferred_variant(&reader.get_inferred_variant()?)?;
        let publicity = self.publicity(reader.get_publicity()?)?;
        Ok(Arc::new(Type::Named {
            publicity,
            package,
            module,
            name,
            arguments,
            inferred_variant,
        }))
    }

    /// Decode a function type (argument types and return type).
    fn type_fn(&mut self, reader: &schema::type_::fn_::Reader<'_>) -> Result<Arc<Type>> {
        let return_ = self.type_(&reader.get_return()?)?;
        let arguments = read_vec!(&reader.get_arguments()?, self, type_);
        Ok(Arc::new(Type::Fn { arguments, return_ }))
    }

    /// Decode a tuple type from its element types.
    fn type_tuple(&mut self, reader: &schema::type_::tuple::Reader<'_>) -> Result<Arc<Type>> {
        let elements = read_vec!(&reader.get_elements()?, self, type_);
        Ok(Arc::new(Type::Tuple { elements }))
    }

    /// Decode a type variable, remapping its serialised id to a fresh local
    /// generic variable id.
    fn type_var(&mut self, reader: &schema::type_::var::Reader<'_>) -> Result<Arc<Type>> {
        let serialized_id = reader.get_id();
        let id = self.get_or_insert_type_var_id(serialized_id);
        Ok(type_::generic_var(id))
    }

    /// Map a serialised type variable id to a local one, generating and
    /// caching a fresh id on first sight so repeated occurrences of the same
    /// serialised id decode to the same variable.
    fn get_or_insert_type_var_id(&mut self, id: u64) -> u64 {
        match self.type_var_id_map.get(&id) {
            Some(&id) => id,
            None => {
                let new_id = self.ids.next();
                let _ = self.type_var_id_map.insert(id, new_id);
                new_id
            }
        }
    }

    /// Decode the value constructors of a custom type, its type parameter
    /// ids, and whether the type is opaque.
    fn type_variants_constructors(
        &mut self,
        reader: &types_variant_constructors::Reader<'_>,
    ) -> Result<TypeVariantConstructors> {
        let variants = reader
            .get_variants()?
            .iter()
            .map(|r| self.type_value_constructor(&r))
            .try_collect()?;
        let type_parameters_ids = read_vec!(
            reader.get_type_parameters_ids()?,
            self,
            type_variant_constructor_type_parameter_id
        );
        let opaque = if reader.get_opaque() {
            Opaque::Opaque
        } else {
            Opaque::NotOpaque
        };
        Ok(TypeVariantConstructors {
            variants,
            type_parameters_ids,
            opaque,
        })
    }

    /// Remap one serialised type parameter id (stored as u16 on the wire)
    /// into the local id space.
    fn type_variant_constructor_type_parameter_id(&mut self, i: &u16) -> Result<u64> {
        Ok(self.get_or_insert_type_var_id(*i as u64))
    }

    /// Decode a single variant of a custom type (name, fields, docs).
    fn type_value_constructor(
        &mut self,
        reader: &type_value_constructor::Reader<'_>,
    ) -> Result<TypeValueConstructor> {
        Ok(TypeValueConstructor {
            name: self.string(reader.get_name()?)?,
            parameters: read_vec!(
                reader.get_parameters()?,
                self,
                type_value_constructor_parameter
            ),
            documentation: self.optional_string(self.str(reader.get_documentation()?)?),
        })
    }

    /// Decode one field of a custom type variant (type, optional label,
    /// optional docs).
    fn type_value_constructor_parameter(
        &mut self,
        reader: &type_value_constructor_parameter::Reader<'_>,
    ) -> Result<TypeValueConstructorField> {
        Ok(TypeValueConstructorField {
            type_: self.type_(&reader.get_type()?)?,
            label: self.optional_string(self.str(reader.get_label()?)?),
            documentation: self.optional_string(self.str(reader.get_documentation()?)?),
        })
    }

    /// Decode whether the variant of a custom type value was inferred;
    /// `None` when unknown.
    fn inferred_variant(&mut self, reader: &inferred_variant::Reader<'_>) -> Result<Option<u16>> {
        use schema::inferred_variant::Which;
        match reader.which()? {
            Which::Unknown(_) => Ok(None),
            Which::Inferred(variant) => Ok(Some(variant)),
        }
    }

    /// Decode a module value (constant, function, or record constructor)
    /// together with its type, publicity, and deprecation.
    fn value_constructor(
        &mut self,
        reader: &value_constructor::Reader<'_>,
    ) -> Result<ValueConstructor> {
        let type_ = self.type_(&reader.get_type()?)?;
        let variant = self.value_constructor_variant(&reader.get_variant()?)?;
        let publicity = self.publicity(reader.get_publicity()?)?;
        let deprecation = reader.get_deprecated()?;
        // Empty message on the wire means "not deprecated".
        let deprecation = if deprecation.is_empty() {
            Deprecation::NotDeprecated
        } else {
            Deprecation::Deprecated {
                message: self.string(deprecation)?,
            }
        };
        Ok(ValueConstructor {
            deprecation,
            publicity,
            type_,
            variant,
        })
    }

    /// Decode the publicity union: public, private, or internal (with an
    /// optional `@internal` attribute location).
    fn publicity(&self, reader: publicity::Reader<'_>) -> Result<Publicity> {
        match reader.which()? {
            publicity::Which::Public(()) => Ok(Publicity::Public),
            publicity::Which::Private(()) => Ok(Publicity::Private),
            publicity::Which::Internal(reader) => match reader?.which()? {
                option::Which::None(()) => Ok(Publicity::Internal {
                    attribute_location: None,
                }),
                option::Which::Some(reader) => Ok(Publicity::Internal {
                    attribute_location: Some(self.src_span(&reader?)?),
                }),
            },
        }
    }

    /// Decode a constant expression, dispatching on its union tag.
    fn constant(&mut self, reader: &constant::Reader<'_>) -> Result<TypedConstant> {
        use constant::Which;
        match reader.which()? {
            Which::Int(reader) => Ok(self.constant_int(self.str(reader?)?)),
            Which::Float(reader) => Ok(self.constant_float(self.str(reader?)?)),
            Which::String(reader) => Ok(self.constant_string(self.str(reader?)?)),
            Which::Tuple(reader) => self.constant_tuple(&reader),
            Which::List(reader) => self.constant_list(&reader),
            Which::Record(reader) => self.constant_record(&reader),
            Which::BitArray(reader) => self.constant_bit_array(&reader?),
            Which::Var(reader) => self.constant_var(&reader),
            Which::StringConcatenation(reader) => self.constant_string_concatenation(&reader),
        }
    }

    /// Build an int constant from its source text. Constant locations are not
    /// serialised, so a default span is used.
    fn constant_int(&self, value: &str) -> TypedConstant {
        Constant::Int {
            location: Default::default(),
            value: value.into(),
            // The text was produced by the compiler, so it must reparse.
            int_value: crate::parse::parse_int_value(value).expect("int value to parse as bigint"),
        }
    }

    /// Build a float constant from its source text.
    fn constant_float(&self, value: &str) -> TypedConstant {
        Constant::Float {
            location: Default::default(),
            value: value.into(),
            // The text was produced by the compiler, so it must reparse.
            float_value: LiteralFloatValue::parse(value)
                .expect("float value to parse as non-NaN f64"),
        }
    }

    /// Build a string constant from its contents.
    fn constant_string(&self, value: &str) -> TypedConstant {
        Constant::String {
            location: Default::default(),
            value: value.into(),
        }
    }

    /// Decode a tuple constant (elements and overall type).
    fn constant_tuple(&mut self, reader: &constant::tuple::Reader<'_>) -> Result<TypedConstant> {
        let type_ = self.type_(&reader.get_type()?)?;
        Ok(Constant::Tuple {
            location: Default::default(),
            elements: read_vec!(reader.get_elements()?, self, constant),
            type_,
        })
    }

    /// Decode a list constant (elements and overall type).
    fn constant_list(&mut self, reader: &constant::list::Reader<'_>) -> Result<TypedConstant> {
        let type_ = self.type_(&reader.get_type()?)?;
        Ok(Constant::List {
            location: Default::default(),
            elements: read_vec!(reader.get_elements()?, self, constant),
            type_,
        })
    }

    /// Decode a record constant. Only the tag, arguments, and type are
    /// serialised; module/name/field-map information is defaulted.
    fn constant_record(&mut self, reader: &constant::record::Reader<'_>) -> Result<TypedConstant> {
        let type_ = self.type_(&reader.get_type()?)?;
        let tag = self.string(reader.get_tag()?)?;
        let arguments = read_vec!(reader.get_args()?, self, constant_call_arg);
        Ok(Constant::Record {
            location: Default::default(),
            module: Default::default(),
            name: Default::default(),
            arguments,
            tag,
            type_,
            field_map: Inferred::Unknown,
            record_constructor: None,
        })
    }

    /// Decode one positional argument of a record constant. Labels and
    /// locations are not serialised, so they are defaulted.
    fn constant_call_arg(
        &mut self,
        reader: &constant::Reader<'_>,
    ) -> Result<CallArg<TypedConstant>> {
        Ok(CallArg {
            implicit: None,
            label: Default::default(),
            location: Default::default(),
            value: self.constant(reader)?,
        })
    }

    /// Decode a bit array constant from its list of segments.
    fn constant_bit_array(
        &mut self,
        reader: &capnp::struct_list::Reader<'_, bit_array_segment::Owned>,
    ) -> Result<TypedConstant> {
        Ok(Constant::BitArray {
            location: Default::default(),
            segments: read_vec!(reader, self, bit_array_segment),
        })
    }

    /// Decode a constant that refers to another value by name, including the
    /// referenced value's own constructor information.
    fn constant_var(&mut self, reader: &constant::var::Reader<'_>) -> Result<TypedConstant> {
        let type_ = self.type_(&reader.get_type()?)?;
        // An empty module string on the wire means the reference is
        // unqualified.
        let module = self.optional_string(self.str(reader.get_module()?)?);
        let name = reader.get_name()?;
        let constructor = self.value_constructor(&reader.get_constructor()?)?;
        Ok(Constant::Var {
            location: Default::default(),
            module: module.map(|module| (module, Default::default())),
            name: self.string(name)?,
            constructor: Some(Box::from(constructor)),
            type_,
        })
    }

    /// Decode a `<>` string concatenation constant from its two operands.
    fn constant_string_concatenation(
        &mut self,
        reader: &constant::string_concatenation::Reader<'_>,
    ) -> Result<TypedConstant> {
        Ok(Constant::StringConcatenation {
            location: Default::default(),
            left: Box::new(self.constant(&reader.get_left()?)?),
            right: Box::new(self.constant(&reader.get_right()?)?),
        })
    }

    /// Decode one segment of a bit array constant (type, value, options).
    fn bit_array_segment(
        &mut self,
        reader: &bit_array_segment::Reader<'_>,
    ) -> Result<TypedConstantBitArraySegment> {
        Ok(BitArraySegment {
            location: Default::default(),
            type_: self.type_(&reader.get_type()?)?,
            value: Box::new(self.constant(&reader.get_value()?)?),
            options: read_vec!(reader.get_options()?, self, bit_array_segment_option),
        })
    }

    /// Decode a bit array segment option. Locations are not serialised, so
    /// every option carries a default span; only `size` and `unit` carry
    /// extra payload.
    fn bit_array_segment_option(
        &mut self,
        reader: &bit_array_segment_option::Reader<'_>,
    ) -> Result<TypedConstantBitArraySegmentOption> {
        use bit_array_segment_option::Which;
        Ok(match reader.which()? {
            Which::Bytes(_) => BitArrayOption::Bytes {
                location: Default::default(),
            },
            Which::Integer(_) => BitArrayOption::Int {
                location: Default::default(),
            },
            Which::Float(_) => BitArrayOption::Float {
                location: Default::default(),
            },
            Which::Bits(_) => BitArrayOption::Bits {
                location: Default::default(),
            },
            Which::Utf8(_) => BitArrayOption::Utf8 {
                location: Default::default(),
            },
            Which::Utf16(_) => BitArrayOption::Utf16 {
                location: Default::default(),
            },
            Which::Utf32(_) => BitArrayOption::Utf32 {
                location: Default::default(),
            },
            Which::Utf8Codepoint(_) => BitArrayOption::Utf8Codepoint {
                location: Default::default(),
            },
            Which::Utf16Codepoint(_) => BitArrayOption::Utf16Codepoint {
                location: Default::default(),
            },
            Which::Utf32Codepoint(_) => BitArrayOption::Utf32Codepoint {
                location: Default::default(),
            },
            Which::Signed(_) => BitArrayOption::Signed {
                location: Default::default(),
            },
            Which::Unsigned(_) => BitArrayOption::Unsigned {
                location: Default::default(),
            },
            Which::Big(_) => BitArrayOption::Big {
                location: Default::default(),
            },
            Which::Little(_) => BitArrayOption::Little {
                location: Default::default(),
            },
            Which::Native(_) => BitArrayOption::Native {
                location: Default::default(),
            },
            Which::Size(reader) => BitArrayOption::Size {
                location: Default::default(),
                short_form: reader.get_short_form(),
                value: Box::new(self.constant(&reader.get_value()?)?),
            },
            Which::Unit(reader) => BitArrayOption::Unit {
                location: Default::default(),
                value: reader.get_value(),
            },
        })
    }

    /// Dispatch on the value constructor variant union: module constant,
    /// module function, or record constructor.
    fn value_constructor_variant(
        &mut self,
        reader: &value_constructor_variant::Reader<'_>,
    ) -> Result<ValueConstructorVariant> {
        use value_constructor_variant::Which;
        match reader.which()? {
            Which::ModuleConstant(reader) => self.module_constant_variant(&reader),
            Which::ModuleFn(reader) => self.module_fn_variant(&reader),
            Which::Record(reader) => self.record(&reader),
        }
    }

    /// Decode a module constant value (its literal, origin, and docs).
    fn module_constant_variant(
        &mut self,
        reader: &value_constructor_variant::module_constant::Reader<'_>,
    ) -> Result<ValueConstructorVariant> {
        Ok(ValueConstructorVariant::ModuleConstant {
            documentation: self.optional_string(self.str(reader.get_documentation()?)?),
            location: self.src_span(&reader.get_location()?)?,
            literal: self.constant(&reader.get_literal()?)?,
            module: self.string(reader.get_module()?)?,
            name: self.string(reader.get_name()?)?,
            implementations: self.implementations(reader.get_implementations()?),
        })
    }

    /// Treat an empty string as an absent optional value (the wire format
    /// uses "" to encode `None` for optional strings).
    fn optional_string(&self, str: &str) -> Option<EcoString> {
        if str.is_empty() {
            None
        } else {
            Some(str.into())
        }
    }

    /// Decode a source span (byte start/end offsets).
    fn src_span(&self, reader: &src_span::Reader<'_>) -> Result<SrcSpan> {
        Ok(SrcSpan {
            start: reader.get_start(),
            end: reader.get_end(),
        })
    }

    /// Decode a module function value: name, arity, field map, externals,
    /// target implementations, and purity.
    fn module_fn_variant(
        &self,
        reader: &value_constructor_variant::module_fn::Reader<'_>,
    ) -> Result<ValueConstructorVariant> {
        let purity = match reader.get_purity()?.which()? {
            purity::Which::Pure(()) => Purity::Pure,
            purity::Which::TrustedPure(()) => Purity::TrustedPure,
            purity::Which::Impure(()) => Purity::Impure,
            purity::Which::Unknown(()) => Purity::Unknown,
        };
        Ok(ValueConstructorVariant::ModuleFn {
            name: self.string(reader.get_name()?)?,
            module: self.string(reader.get_module()?)?,
            arity: reader.get_arity() as usize,
            field_map: self.field_map(&reader.get_field_map()?)?,
            location: self.src_span(&reader.get_location()?)?,
            documentation: self.optional_string(self.str(reader.get_documentation()?)?),
            implementations: self.implementations(reader.get_implementations()?),
            external_erlang: self.optional_external(reader.get_external_erlang()?)?,
            external_javascript: self.optional_external(reader.get_external_javascript()?)?,
            purity,
        })
    }

    /// Decode the flags describing which targets a value is implemented for.
    fn implementations(&self, reader: implementations::Reader<'_>) -> Implementations {
        Implementations {
            gleam: reader.get_gleam(),
            uses_erlang_externals: reader.get_uses_erlang_externals(),
            uses_javascript_externals: reader.get_uses_javascript_externals(),
            can_run_on_erlang: reader.get_can_run_on_erlang(),
            can_run_on_javascript: reader.get_can_run_on_javascript(),
        }
    }

    /// Decode a record constructor value (arity, variant count/index, field
    /// map, and docs).
    fn record(
        &self,
        reader: &value_constructor_variant::record::Reader<'_>,
    ) -> Result<ValueConstructorVariant> {
        Ok(ValueConstructorVariant::Record {
            name: self.string(reader.get_name()?)?,
            module: self.string(reader.get_module()?)?,
            arity: reader.get_arity(),
            variants_count: reader.get_constructors_count(),
            field_map: self.field_map(&reader.get_field_map()?)?,
            location: self.src_span(&reader.get_location()?)?,
            documentation: self.optional_string(self.str(reader.get_documentation()?)?),
            variant_index: reader.get_constructor_index(),
        })
    }

    /// Decode an optional field map (labelled argument positions).
    fn field_map(&self, reader: &option::Reader<'_, field_map::Owned>) -> Result<Option<FieldMap>> {
        use option::Which;
        Ok(match reader.which()? {
            Which::None(_) => None,
            Which::Some(reader) => Some({
                let reader = reader?;
                FieldMap {
                    arity: reader.get_arity(),
                    fields: read_hashmap!(&reader.get_fields()?, self, u32),
                }
            }),
        })
    }

    /// Unwrap a boxed u32; adapter so `read_hashmap!` can decode field map
    /// values.
    fn u32(&self, i: &boxed_u_int32::Reader<'_>) -> Result<u32> {
        Ok(i.get_value())
    }

    /// Decode a record's accessor map: accessors shared by all variants,
    /// per-variant accessors, and per-variant positional field types.
    fn accessors_map(&mut self, reader: &accessors_map::Reader<'_>) -> Result<AccessorsMap> {
        Ok(AccessorsMap {
            publicity: self.publicity(reader.get_publicity()?)?,
            type_: self.type_(&reader.get_type()?)?,
            shared_accessors: read_hashmap!(&reader.get_shared_accessors()?, self, record_accessor),
            variant_specific_accessors: read_vec!(
                &reader.get_variant_specific_accessors()?,
                self,
                variant_specific_accessors
            ),
            variant_positional_accessors: read_vec!(
                &reader.get_positional_accessors()?,
                self,
                positional_accessors
            ),
        })
    }

    /// Decode the accessors belonging to one specific variant.
    fn variant_specific_accessors(
        &mut self,
        reader: &variant_specific_accessors::Reader<'_>,
    ) -> Result<HashMap<EcoString, RecordAccessor>> {
        Ok(read_hashmap!(
            &reader.get_accessors()?,
            self,
            record_accessor
        ))
    }

    /// Decode the positional (unlabelled) field types of one variant.
    fn positional_accessors(
        &mut self,
        reader: &positional_accessors::Reader<'_>,
    ) -> Result<Vec<Arc<Type>>> {
        Ok(read_vec!(&reader.get_accessors()?, self, type_))
    }

    /// Decode one record field accessor (index, label, type, docs).
    fn record_accessor(&mut self, reader: &record_accessor::Reader<'_>) -> Result<RecordAccessor> {
        Ok(RecordAccessor {
            index: reader.get_index() as u64,
            label: self.string(reader.get_label()?)?,
            type_: self.type_(&reader.get_type()?)?,
            documentation: self.optional_string(self.str(reader.get_documentation()?)?),
        })
    }

    /// Identity adapter so `read_vec!` can decode the line start offsets.
    fn line_starts(&mut self, i: &u32) -> Result<u32> {
        Ok(*i)
    }

    /// Decode the module's line number information (total length, line start
    /// offsets, and wide-character mapping).
    fn line_numbers(&mut self, reader: &line_numbers::Reader<'_>) -> Result<LineNumbers> {
        Ok(LineNumbers {
            length: reader.get_length(),
            line_starts: read_vec!(reader.get_line_starts()?, self, line_starts),
            mapping: self.mapping(reader.get_mapping()?),
        })
    }

    /// Decode the byte-index -> character-length mapping used for UTF-8/16
    /// position conversions.
    fn mapping(
        &self,
        reader: capnp::struct_list::Reader<'_, character::Owned>,
    ) -> HashMap<usize, Character> {
        let mut map = HashMap::with_capacity(reader.len() as usize);
        for character in reader.into_iter() {
            let byte_index = character.get_byte_index() as usize;
            let length_utf8 = character.get_length_utf8();
            let length_utf16 = character.get_length_utf16();
            _ = map.insert(
                byte_index,
                Character {
                    length_utf16,
                    length_utf8,
                },
            )
        }
        map
    }

    /// Decode a semver version from its major/minor/patch components.
    fn version(&self, reader: &version::Reader<'_>) -> hexpm::version::Version {
        hexpm::version::Version::new(reader.get_major(), reader.get_minor(), reader.get_patch())
    }

    /// Decode an optional external implementation reference as a
    /// `(module, function)` pair.
    fn optional_external(
        &self,
        reader: option::Reader<'_, external::Owned>,
    ) -> Result<Option<(EcoString, EcoString)>> {
        match reader.which()? {
            option::Which::None(()) => Ok(None),
            option::Which::Some(reader) => {
                let reader = reader?;
                let module = self.string(reader.get_module()?)?;
                let function = self.string(reader.get_function()?)?;
                Ok(Some((module, function)))
            }
        }
    }
}
| rust | Apache-2.0 | f424547f02e621f1c5f28749786e05eda7feb098 | 2026-01-04T15:40:22.554517Z | false |
gleam-lang/gleam | https://github.com/gleam-lang/gleam/blob/f424547f02e621f1c5f28749786e05eda7feb098/compiler-core/src/metadata/module_encoder.rs | compiler-core/src/metadata/module_encoder.rs | use ecow::EcoString;
use crate::{
ast::{
Constant, Publicity, SrcSpan, TypedConstant, TypedConstantBitArraySegment,
TypedConstantBitArraySegmentOption,
},
reference::{Reference, ReferenceKind, ReferenceMap},
schema_capnp::{self as schema, *},
type_::{
self, AccessorsMap, Deprecation, FieldMap, Opaque, RecordAccessor, Type,
TypeAliasConstructor, TypeConstructor, TypeValueConstructor, TypeVar,
TypeVariantConstructors, ValueConstructor, ValueConstructorVariant,
expression::{Implementations, Purity},
},
};
use std::{collections::HashMap, ops::Deref, sync::Arc};
/// Serialises a `type_::ModuleInterface` into the Cap'n Proto metadata
/// format (the counterpart of `ModuleDecoder`).
#[derive(Debug)]
pub struct ModuleEncoder<'a> {
    // The module interface being encoded.
    data: &'a type_::ModuleInterface,
    // Next fresh id to hand out when renumbering type variables — presumably
    // used when encoding type variables, mirroring the decoder's remapping;
    // usage is outside this view, confirm in `build_type`.
    next_type_var_id: u64,
    // Maps in-memory type variable ids to the compacted ids written out —
    // same caveat as above.
    type_var_id_map: HashMap<u64, u64>,
}
impl<'a> ModuleEncoder<'a> {
pub fn new(data: &'a type_::ModuleInterface) -> Self {
Self {
data,
next_type_var_id: 0,
type_var_id_map: HashMap::new(),
}
}
pub fn encode(mut self) -> crate::Result<Vec<u8>> {
let span = tracing::info_span!("metadata");
let _enter = span.enter();
let mut buffer = Vec::new();
let mut message = capnp::message::Builder::new_default();
let mut module = message.init_root::<module::Builder<'_>>();
module.set_name(&self.data.name);
module.set_package(&self.data.package);
module.set_src_path(self.data.src_path.as_str());
module.set_is_internal(self.data.is_internal);
module.set_contains_echo(self.data.contains_echo);
self.set_module_types(&mut module);
self.set_module_values(&mut module);
self.set_module_accessors(&mut module);
self.set_module_types_constructors(&mut module);
self.set_line_numbers(&mut module);
self.set_version(&mut module);
self.set_module_documentation(&mut module);
self.set_module_type_aliases(&mut module);
self.set_module_references(&mut module);
capnp::serialize_packed::write_message(&mut buffer, &message).expect("capnp encode");
Ok(buffer)
}
/// Writes the module's line number data: total source length, the byte
/// offset of each line start, and the per-character utf8/utf16 width
/// mapping used for position conversions.
fn set_line_numbers(&mut self, module: &mut module::Builder<'_>) {
    let mut line_numbers = module.reborrow().init_line_numbers();
    line_numbers.set_length(self.data.line_numbers.length);
    let mut line_starts = line_numbers
        .reborrow()
        .init_line_starts(self.data.line_numbers.line_starts.len() as u32);
    for (i, l) in self.data.line_numbers.line_starts.iter().enumerate() {
        line_starts.reborrow().set(i as u32, *l);
    }
    let mut mapping = line_numbers.init_mapping(self.data.line_numbers.mapping.len() as u32);
    for (i, (byte_index, character)) in self.data.line_numbers.mapping.iter().enumerate() {
        let mut builder = mapping.reborrow().get(i as u32);
        builder.set_byte_index(*byte_index as u64);
        builder.set_length_utf8(character.length_utf8);
        builder.set_length_utf16(character.length_utf16);
    }
}

/// Writes the module-level documentation, one list entry per part.
fn set_module_documentation(&mut self, module: &mut module::Builder<'_>) {
    let mut documentation = module
        .reborrow()
        .init_documentation(self.data.documentation.len() as u32);
    for (i, documentation_part) in self.data.documentation.iter().enumerate() {
        documentation.set(i as u32, documentation_part.as_str());
    }
}
/// Writes the record accessors of the module as a key/value list of
/// (type name, accessors map) pairs.
fn set_module_accessors(&mut self, module: &mut module::Builder<'_>) {
    let mut builder = module
        .reborrow()
        .init_accessors(self.data.accessors.len() as u32);
    for (i, (key, map)) in self.data.accessors.iter().enumerate() {
        let mut property = builder.reborrow().get(i as u32);
        property.set_key(key);
        self.build_accessors_map(property.init_value(), map);
    }
}

/// Writes one type's accessors: its type and publicity, the accessors
/// shared by all variants, the per-variant accessors, and the positional
/// (by-index) accessors of each variant.
fn build_accessors_map(
    &mut self,
    mut builder: accessors_map::Builder<'_>,
    accessors: &AccessorsMap,
) {
    self.build_type(builder.reborrow().init_type(), &accessors.type_);
    self.build_publicity(builder.reborrow().init_publicity(), accessors.publicity);
    let mut accessors_builder = builder
        .reborrow()
        .init_shared_accessors(accessors.shared_accessors.len() as u32);
    for (i, (name, accessor)) in accessors.shared_accessors.iter().enumerate() {
        let mut property = accessors_builder.reborrow().get(i as u32);
        property.set_key(name);
        self.build_record_accessor(property.init_value(), accessor)
    }
    let mut variant_accessors = builder
        .reborrow()
        .init_variant_specific_accessors(accessors.variant_specific_accessors.len() as u32);
    for (i, map) in accessors.variant_specific_accessors.iter().enumerate() {
        self.build_variant_accessors(variant_accessors.reborrow().get(i as u32), map);
    }
    let mut positional_accessors =
        builder.init_positional_accessors(accessors.variant_positional_accessors.len() as u32);
    for (i, fields) in accessors.variant_positional_accessors.iter().enumerate() {
        self.build_positional_accessors(positional_accessors.reborrow().get(i as u32), fields);
    }
}

/// Writes the labelled accessors of a single variant as a key/value list.
fn build_variant_accessors(
    &mut self,
    builder: variant_specific_accessors::Builder<'_>,
    accessors: &HashMap<EcoString, RecordAccessor>,
) {
    let mut builder = builder.init_accessors(accessors.len() as u32);
    for (i, (name, accessor)) in accessors.iter().enumerate() {
        let mut property = builder.reborrow().get(i as u32);
        property.set_key(name);
        self.build_record_accessor(property.init_value(), accessor)
    }
}

/// Writes the field types of a variant in positional order.
fn build_positional_accessors(
    &mut self,
    builder: positional_accessors::Builder<'_>,
    accessors: &[Arc<Type>],
) {
    let mut builder = builder.init_accessors(accessors.len() as u32);
    for (i, type_) in accessors.iter().enumerate() {
        self.build_type(builder.reborrow().get(i as u32), type_);
    }
}

/// Writes one record accessor: field type, label, index and documentation.
fn build_record_accessor(
    &mut self,
    mut builder: record_accessor::Builder<'_>,
    accessor: &RecordAccessor,
) {
    self.build_type(builder.reborrow().init_type(), &accessor.type_);
    builder.reborrow().set_label(&accessor.label);
    builder.set_index(accessor.index as u16);
    // Missing documentation is stored as the empty string.
    builder.set_documentation(accessor.documentation.as_deref().unwrap_or_default());
}
/// Writes the module's type constructors as a (name, constructor) list.
fn set_module_types(&mut self, module: &mut module::Builder<'_>) {
    let mut types = module.reborrow().init_types(self.data.types.len() as u32);
    for (i, (name, type_)) in self.data.types.iter().enumerate() {
        let mut property = types.reborrow().get(i as u32);
        property.set_key(name);
        self.build_type_constructor(property.init_value(), type_)
    }
}

/// Writes the module's type aliases as a (name, alias constructor) list.
fn set_module_type_aliases(&mut self, module: &mut module::Builder<'_>) {
    let mut types = module
        .reborrow()
        .init_type_aliases(self.data.type_aliases.len() as u32);
    for (i, (name, alias)) in self.data.type_aliases.iter().enumerate() {
        let mut property = types.reborrow().get(i as u32);
        property.set_key(name);
        self.build_type_alias_constructor(property.init_value(), alias)
    }
}

/// Writes, for each custom type, the set of its value constructors
/// (variants) as a (type name, variants) list.
fn set_module_types_constructors(&mut self, module: &mut module::Builder<'_>) {
    let mut types_constructors = module
        .reborrow()
        .init_types_constructors(self.data.types_value_constructors.len() as u32);
    for (i, (name, data)) in self.data.types_value_constructors.iter().enumerate() {
        let mut property = types_constructors.reborrow().get(i as u32);
        property.set_key(name);
        self.build_type_variant_constructors(property.init_value(), data)
    }
}

/// Writes the variants of one custom type: its opacity, the (remapped)
/// ids of its type parameters, and each variant.
fn build_type_variant_constructors(
    &mut self,
    mut builder: types_variant_constructors::Builder<'_>,
    data: &TypeVariantConstructors,
) {
    match data.opaque {
        Opaque::Opaque => builder.set_opaque(true),
        Opaque::NotOpaque => builder.set_opaque(false),
    }
    {
        let mut builder = builder
            .reborrow()
            .init_type_parameters_ids(data.type_parameters_ids.len() as u32);
        for (i, id) in data.type_parameters_ids.iter().enumerate() {
            // Remap to the compact serialised id space. Note the id is
            // narrowed to u16 in the schema.
            let id = self.get_or_insert_type_var_id(*id);
            builder.set(i as u32, id as u16);
        }
    }
    let mut builder = builder.init_variants(data.variants.len() as u32);
    for (i, constructor) in data.variants.iter().enumerate() {
        self.build_type_value_constructor(builder.reborrow().get(i as u32), constructor);
    }
}
/// Writes the module's values as a (name, value constructor) list.
fn set_module_values(&mut self, module: &mut module::Builder<'_>) {
    let mut values = module.reborrow().init_values(self.data.values.len() as u32);
    for (i, (name, value)) in self.data.values.iter().enumerate() {
        let mut property = values.reborrow().get(i as u32);
        property.set_key(name);
        self.build_value_constructor(property.init_value(), value)
    }
}

/// Writes the module's reference information: the names of imported
/// modules and the locations where values and types are referenced.
fn set_module_references(&mut self, module: &mut module::Builder<'_>) {
    let references = &self.data.references;
    let mut builder = module.reborrow().init_references();
    let mut imported_modules = builder
        .reborrow()
        .init_imported_modules(references.imported_modules.len() as u32);
    for (i, module) in references.imported_modules.iter().enumerate() {
        imported_modules.set(i as u32, module);
    }
    let value_references = builder
        .reborrow()
        .init_value_references(references.value_references.len() as u32);
    self.build_reference_map(value_references, &references.value_references);
    let type_references = builder
        .reborrow()
        .init_type_references(references.type_references.len() as u32);
    self.build_reference_map(type_references, &references.type_references);
}

/// Writes one reference map into a pre-sized capnp list: each entry is a
/// (module, name) key plus the list of references to that item.
fn build_reference_map(
    &mut self,
    mut builder: capnp::struct_list::Builder<'_, reference_map::Owned>,
    map: &ReferenceMap,
) {
    for (i, ((module, name), references)) in map.iter().enumerate() {
        let mut builder = builder.reborrow().get(i as u32);
        builder.set_module(module);
        builder.set_name(name);
        let mut references_builder =
            builder.reborrow().init_references(references.len() as u32);
        for (i, reference) in references.iter().enumerate() {
            let builder = references_builder.reborrow().get(i as u32);
            self.build_reference(builder, reference);
        }
    }
}

/// Writes a single reference: its kind and its source location.
fn build_reference(&mut self, mut builder: reference::Builder<'_>, reference: &Reference) {
    let mut kind = builder.reborrow().init_kind();
    match reference.kind {
        ReferenceKind::Qualified => kind.set_qualified(()),
        ReferenceKind::Unqualified => kind.set_unqualified(()),
        ReferenceKind::Import => kind.set_import(()),
        ReferenceKind::Definition => kind.set_definition(()),
        ReferenceKind::Alias => kind.set_alias(()),
    }
    self.build_src_span(builder.init_location(), reference.location);
}

/// Writes the minimum Gleam version this module requires.
fn set_version(&mut self, module: &mut module::Builder<'_>) {
    let mut version = module.reborrow().init_required_version();
    version.set_major(self.data.minimum_required_version.major);
    version.set_minor(self.data.minimum_required_version.minor);
    version.set_patch(self.data.minimum_required_version.patch);
}
/// Writes a type constructor: module, deprecation, publicity, type,
/// parameters, origin location and documentation.
fn build_type_constructor(
    &mut self,
    mut builder: type_constructor::Builder<'_>,
    constructor: &TypeConstructor,
) {
    builder.set_module(&constructor.module);
    // The empty string is the sentinel for "not deprecated".
    builder.set_deprecated(match &constructor.deprecation {
        Deprecation::NotDeprecated => "",
        Deprecation::Deprecated { message } => message,
    });
    self.build_publicity(builder.reborrow().init_publicity(), constructor.publicity);
    let type_builder = builder.reborrow().init_type();
    self.build_type(type_builder, &constructor.type_);
    self.build_types(
        builder
            .reborrow()
            .init_parameters(constructor.parameters.len() as u32),
        &constructor.parameters,
    );
    self.build_src_span(builder.reborrow().init_origin(), constructor.origin);
    builder.set_documentation(
        constructor
            .documentation
            .as_ref()
            .map(EcoString::as_str)
            .unwrap_or_default(),
    );
}

/// Writes a type alias constructor. Mirrors `build_type_constructor` but
/// additionally records the alias' arity and its parameter types.
fn build_type_alias_constructor(
    &mut self,
    mut builder: type_alias_constructor::Builder<'_>,
    constructor: &TypeAliasConstructor,
) {
    builder.set_module(&constructor.module);
    // The empty string is the sentinel for "not deprecated".
    builder.set_deprecation(match &constructor.deprecation {
        Deprecation::NotDeprecated => "",
        Deprecation::Deprecated { message } => message,
    });
    self.build_publicity(builder.reborrow().init_publicity(), constructor.publicity);
    let type_builder = builder.reborrow().init_type();
    self.build_type(type_builder, &constructor.type_);
    self.build_src_span(builder.reborrow().init_origin(), constructor.origin);
    builder.set_documentation(constructor.documentation.as_deref().unwrap_or_default());
    builder.set_arity(constructor.arity as u32);
    let mut parameters_builder = builder.init_parameters(constructor.parameters.len() as u32);
    for (index, parameter) in constructor.parameters.iter().enumerate() {
        self.build_type(parameters_builder.reborrow().get(index as u32), parameter);
    }
}

/// Writes one variant of a custom type: name, documentation and fields.
fn build_type_value_constructor(
    &mut self,
    mut builder: type_value_constructor::Builder<'_>,
    constructor: &TypeValueConstructor,
) {
    builder.set_name(&constructor.name);
    builder.set_documentation(constructor.documentation.as_deref().unwrap_or_default());
    let mut builder = builder.init_parameters(constructor.parameters.len() as u32);
    for (i, parameter) in constructor.parameters.iter().enumerate() {
        self.build_type_value_constructor_parameter(
            builder.reborrow().get(i as u32),
            parameter,
        );
    }
}

/// Writes one field of a variant: its type, optional label and docs.
fn build_type_value_constructor_parameter(
    &mut self,
    mut builder: type_value_constructor_parameter::Builder<'_>,
    parameter: &type_::TypeValueConstructorField,
) {
    self.build_type(builder.reborrow().init_type(), parameter.type_.as_ref());
    builder.set_label(parameter.label.as_deref().unwrap_or_default());
    builder.set_documentation(parameter.documentation.as_deref().unwrap_or_default());
}

/// Writes a value constructor: deprecation, publicity, type and variant.
fn build_value_constructor(
    &mut self,
    mut builder: value_constructor::Builder<'_>,
    constructor: &ValueConstructor,
) {
    // The empty string is the sentinel for "not deprecated".
    builder.set_deprecated(match &constructor.deprecation {
        Deprecation::NotDeprecated => "",
        Deprecation::Deprecated { message } => message,
    });
    self.build_publicity(builder.reborrow().init_publicity(), constructor.publicity);
    self.build_type(builder.reborrow().init_type(), &constructor.type_);
    self.build_value_constructor_variant(builder.init_variant(), &constructor.variant);
}

/// Writes a publicity value. Internal publicity optionally records the
/// location of the `@internal` attribute.
fn build_publicity(&mut self, mut builder: publicity::Builder<'_>, publicity: Publicity) {
    match publicity {
        Publicity::Public => builder.set_public(()),
        Publicity::Private => builder.set_private(()),
        Publicity::Internal {
            attribute_location: None,
        } => {
            let mut builder = builder.init_internal();
            builder.set_none(());
        }
        Publicity::Internal {
            attribute_location: Some(location),
        } => {
            let builder = builder.init_internal();
            let builder = builder.init_some();
            self.build_src_span(builder, location);
        }
    }
}

/// Writes a source span's start and end byte offsets.
fn build_src_span(&mut self, mut builder: src_span::Builder<'_>, span: SrcSpan) {
    builder.set_start(span.start);
    builder.set_end(span.end);
}
/// Writes the variant of a value constructor: a module constant, a
/// record constructor, or a module function.
///
/// # Panics
///
/// Panics on `LocalVariable`, which never appears in a module interface.
fn build_value_constructor_variant(
    &mut self,
    builder: value_constructor_variant::Builder<'_>,
    constructor: &ValueConstructorVariant,
) {
    match constructor {
        ValueConstructorVariant::LocalVariable { .. } => {
            panic!("Unexpected local variable value constructor in module interface",)
        }
        ValueConstructorVariant::ModuleConstant {
            literal,
            location,
            module,
            documentation: doc,
            implementations,
            name,
        } => {
            let mut builder = builder.init_module_constant();
            builder.set_documentation(doc.as_ref().map(EcoString::as_str).unwrap_or_default());
            self.build_src_span(builder.reborrow().init_location(), *location);
            self.build_constant(builder.reborrow().init_literal(), literal);
            builder.reborrow().set_module(module);
            builder.reborrow().set_name(name);
            self.build_implementations(builder.init_implementations(), *implementations)
        }
        ValueConstructorVariant::Record {
            name,
            field_map,
            arity,
            location,
            module,
            variants_count: constructors_count,
            variant_index: constructor_index,
            documentation: doc,
        } => {
            let mut builder = builder.init_record();
            builder.set_name(name);
            builder.set_module(module);
            builder.set_arity(*arity);
            builder.set_documentation(doc.as_ref().map(EcoString::as_str).unwrap_or_default());
            builder.set_constructors_count(*constructors_count);
            builder.set_constructor_index(*constructor_index);
            self.build_optional_field_map(builder.reborrow().init_field_map(), field_map);
            self.build_src_span(builder.init_location(), *location);
        }
        ValueConstructorVariant::ModuleFn {
            arity,
            field_map,
            module,
            name,
            location,
            documentation: doc,
            implementations,
            external_erlang,
            external_javascript,
            purity,
        } => {
            let mut builder = builder.init_module_fn();
            builder.set_name(name);
            builder.set_module(module);
            // Note the arity is narrowed to u16 in the schema.
            builder.set_arity(*arity as u16);
            builder.set_documentation(doc.as_ref().map(EcoString::as_str).unwrap_or_default());
            let mut purity_builder = builder.reborrow().init_purity();
            match purity {
                Purity::Pure => purity_builder.set_pure(()),
                Purity::TrustedPure => purity_builder.set_trusted_pure(()),
                Purity::Impure => purity_builder.set_impure(()),
                Purity::Unknown => purity_builder.set_unknown(()),
            }
            self.build_external(builder.reborrow().init_external_erlang(), external_erlang);
            self.build_external(
                builder.reborrow().init_external_javascript(),
                external_javascript,
            );
            self.build_optional_field_map(builder.reborrow().init_field_map(), field_map);
            self.build_src_span(builder.reborrow().init_location(), *location);
            self.build_implementations(builder.init_implementations(), *implementations);
        }
    }
}

/// Writes an optional field map as a capnp option.
fn build_optional_field_map(
    &mut self,
    mut builder: option::Builder<'_, field_map::Owned>,
    field_map: &Option<FieldMap>,
) {
    match field_map {
        Some(field_map) => self.build_field_map(builder.init_some(), field_map),
        None => builder.set_none(()),
    };
}

/// Writes a field map: arity plus each (label, argument position) pair.
fn build_field_map(&mut self, mut builder: field_map::Builder<'_>, field_map: &FieldMap) {
    builder.set_arity(field_map.arity);
    let mut builder = builder.init_fields(field_map.fields.len() as u32);
    for (i, (name, &position)) in field_map.fields.iter().enumerate() {
        let mut field = builder.reborrow().get(i as u32);
        field.set_key(name);
        field.init_value().set_value(position);
    }
}
/// Writes a typed constant expression.
///
/// # Panics
///
/// Panics on `RecordUpdate` and `Invalid` constants, which are not
/// expected to survive analysis and reach code generation.
fn build_constant(&mut self, mut builder: constant::Builder<'_>, constant: &TypedConstant) {
    match constant {
        // Numeric and string literals are stored as their source text.
        Constant::Int { value, .. } => builder.set_int(value),
        Constant::Float { value, .. } => builder.set_float(value),
        Constant::String { value, .. } => builder.set_string(value),
        Constant::Tuple {
            elements, type_, ..
        } => {
            let mut builder = builder.init_tuple();
            self.build_constants(
                builder.reborrow().init_elements(elements.len() as u32),
                elements,
            );
            self.build_type(builder.init_type(), type_);
        }
        Constant::List {
            elements, type_, ..
        } => {
            let mut builder = builder.init_list();
            self.build_constants(
                builder.reborrow().init_elements(elements.len() as u32),
                elements,
            );
            self.build_type(builder.init_type(), type_);
        }
        Constant::BitArray { segments, .. } => {
            let mut builder = builder.init_bit_array(segments.len() as u32);
            for (i, segment) in segments.iter().enumerate() {
                self.build_bit_array_segment(builder.reborrow().get(i as u32), segment);
            }
        }
        Constant::Record {
            arguments,
            tag,
            type_,
            ..
        } => {
            let mut builder = builder.init_record();
            {
                let mut builder = builder.reborrow().init_args(arguments.len() as u32);
                for (i, argument) in arguments.iter().enumerate() {
                    self.build_constant(builder.reborrow().get(i as u32), &argument.value);
                }
            }
            builder.reborrow().set_tag(tag);
            self.build_type(builder.reborrow().init_type(), type_);
        }
        Constant::Var {
            module,
            name,
            type_,
            constructor,
            ..
        } => {
            let mut builder = builder.init_var();
            // An unqualified reference is stored with an empty module name.
            match module {
                Some((name, _)) => builder.set_module(name),
                None => builder.set_module(""),
            };
            builder.set_name(name);
            self.build_type(builder.reborrow().init_type(), type_);
            self.build_value_constructor(
                builder.reborrow().init_constructor(),
                constructor
                    .as_ref()
                    .expect("This is guaranteed to hold a value."),
            );
        }
        Constant::StringConcatenation { right, left, .. } => {
            let mut builder = builder.init_string_concatenation();
            self.build_constant(builder.reborrow().init_right(), right);
            self.build_constant(builder.reborrow().init_left(), left);
        }
        Constant::RecordUpdate { .. } => {
            panic!("record updates should not reach code generation")
        }
        Constant::Invalid { .. } => {
            panic!("invalid constants should not reach code generation")
        }
    }
}

/// Writes a slice of constants into a pre-sized capnp list.
fn build_constants(
    &mut self,
    mut builder: capnp::struct_list::Builder<'_, constant::Owned>,
    constant: &[TypedConstant],
) {
    for (i, constant) in constant.iter().enumerate() {
        self.build_constant(builder.reborrow().get(i as u32), constant);
    }
}

/// Writes one bit array segment: its value, options and type.
fn build_bit_array_segment(
    &mut self,
    mut builder: bit_array_segment::Builder<'_>,
    segment: &TypedConstantBitArraySegment,
) {
    self.build_constant(builder.reborrow().init_value(), &segment.value);
    {
        let mut builder = builder
            .reborrow()
            .init_options(segment.options.len() as u32);
        for (i, option) in segment.options.iter().enumerate() {
            self.build_bit_array_segment_option(builder.reborrow().get(i as u32), option);
        }
    }
    self.build_type(builder.init_type(), &segment.type_);
}

/// Writes one bit array segment option. Most options are unit markers;
/// `Size` and `Unit` carry a payload.
fn build_bit_array_segment_option(
    &mut self,
    mut builder: bit_array_segment_option::Builder<'_>,
    option: &TypedConstantBitArraySegmentOption,
) {
    use crate::ast::TypedConstantBitArraySegmentOption as Opt;
    match option {
        Opt::Bytes { .. } => builder.set_bytes(()),
        Opt::Int { .. } => builder.set_integer(()),
        Opt::Float { .. } => builder.set_float(()),
        Opt::Bits { .. } => builder.set_bits(()),
        Opt::Utf8 { .. } => builder.set_utf8(()),
        Opt::Utf16 { .. } => builder.set_utf16(()),
        Opt::Utf32 { .. } => builder.set_utf32(()),
        Opt::Utf8Codepoint { .. } => builder.set_utf8_codepoint(()),
        Opt::Utf16Codepoint { .. } => builder.set_utf16_codepoint(()),
        Opt::Utf32Codepoint { .. } => builder.set_utf32_codepoint(()),
        Opt::Signed { .. } => builder.set_signed(()),
        Opt::Unsigned { .. } => builder.set_unsigned(()),
        Opt::Big { .. } => builder.set_big(()),
        Opt::Little { .. } => builder.set_little(()),
        Opt::Native { .. } => builder.set_native(()),
        Opt::Size {
            value, short_form, ..
        } => {
            let mut builder = builder.init_size();
            self.build_constant(builder.reborrow().init_value(), value);
            builder.set_short_form(*short_form);
        }
        Opt::Unit { value, .. } => {
            let mut builder = builder.init_unit();
            builder.set_value(*value);
        }
    }
}
/// Writes a type: a function, a named (applied) type, a tuple, or a
/// type variable. Linked variables are followed transparently so only
/// unbound/generic variables are serialised as variables.
fn build_type(&mut self, builder: schema::type_::Builder<'_>, type_: &Type) {
    match type_ {
        Type::Fn { arguments, return_ } => {
            let mut fun = builder.init_fn();
            self.build_types(
                fun.reborrow().init_arguments(arguments.len() as u32),
                arguments,
            );
            self.build_type(fun.init_return(), return_)
        }
        Type::Named {
            name,
            arguments,
            module,
            package,
            inferred_variant,
            publicity,
        } => {
            let mut app = builder.init_app();
            app.set_name(name);
            app.set_module(module);
            app.set_package(package);
            let mut variant_builder = app.reborrow().init_inferred_variant();
            match inferred_variant {
                Some(variant) => variant_builder.set_inferred(*variant),
                None => variant_builder.set_unknown(()),
            }
            self.build_types(
                app.reborrow().init_parameters(arguments.len() as u32),
                arguments,
            );
            self.build_publicity(app.init_publicity(), *publicity);
        }
        Type::Tuple { elements } => self.build_types(
            builder.init_tuple().init_elements(elements.len() as u32),
            elements,
        ),
        Type::Var { type_ } => match type_.borrow().deref() {
            // Follow links to the underlying type rather than storing them.
            TypeVar::Link { type_ } => self.build_type(builder, type_),
            TypeVar::Unbound { id, .. } | TypeVar::Generic { id } => {
                self.build_type_var(builder.init_var(), *id)
            }
        },
    }
}

/// Writes a slice of types into a pre-sized capnp list.
fn build_types(
    &mut self,
    mut builder: capnp::struct_list::Builder<'_, schema::type_::Owned>,
    types: &[Arc<Type>],
) {
    for (i, type_) in types.iter().enumerate() {
        self.build_type(builder.reborrow().get(i as u32), type_);
    }
}

/// Writes a type variable using its compacted serialised id.
fn build_type_var(&mut self, mut builder: schema::type_::var::Builder<'_>, id: u64) {
    let serialised_id = self.get_or_insert_type_var_id(id);
    builder.set_id(serialised_id);
}
/// Returns the serialised id for an in-memory type variable id,
/// allocating the next sequential id the first time a given id is seen.
/// This keeps the ids written to the metadata small and dense regardless
/// of the ids generated during analysis.
fn get_or_insert_type_var_id(&mut self, id: u64) -> u64 {
    // Borrow the counter separately so the closure can use it while the
    // entry API holds a mutable borrow of the map (disjoint fields).
    let next_type_var_id = &mut self.next_type_var_id;
    // The entry API searches the map once, replacing the previous
    // get-then-insert pattern which performed two lookups per miss.
    *self.type_var_id_map.entry(id).or_insert_with(|| {
        let new_id = *next_type_var_id;
        *next_type_var_id += 1;
        new_id
    })
}
/// Writes which targets a function is implemented for and whether it
/// relies on external (Erlang/JavaScript) code.
fn build_implementations(
    &self,
    mut builder: implementations::Builder<'_>,
    implementations: Implementations,
) {
    builder.set_gleam(implementations.gleam);
    builder.set_uses_erlang_externals(implementations.uses_erlang_externals);
    builder.set_uses_javascript_externals(implementations.uses_javascript_externals);
    builder.set_can_run_on_erlang(implementations.can_run_on_erlang);
    builder.set_can_run_on_javascript(implementations.can_run_on_javascript);
}

/// Writes an optional `@external` target as a capnp option holding the
/// external module and function names.
fn build_external(
    &self,
    mut builder: option::Builder<'_, external::Owned>,
    external: &Option<(EcoString, EcoString)>,
) {
    match external {
        None => builder.set_none(()),
        Some((module, function)) => {
            let mut builder = builder.init_some();
            builder.set_module(module);
            builder.set_function(function);
        }
    }
}
}
| rust | Apache-2.0 | f424547f02e621f1c5f28749786e05eda7feb098 | 2026-01-04T15:40:22.554517Z | false |
gleam-lang/gleam | https://github.com/gleam-lang/gleam/blob/f424547f02e621f1c5f28749786e05eda7feb098/compiler-core/src/error/tests.rs | compiler-core/src/error/tests.rs | use super::*;
use insta::assert_snapshot;
#[test]
fn test_shell_program_not_found_error() {
    // Every (os, distro) combination a diagnostic is rendered for. The
    // non-linux systems only ever use the "other" distro, while linux is
    // exercised once per known distro — exactly the combinations the
    // previous nested if/else produced, in the same order.
    let systems = [("macos", "other"), ("linux", "ubuntu"), ("linux", "other")];
    let programs = ["erlc", "rebar3", "deno", "elixir", "node", "bun", "git"];
    for program in programs {
        for (os, distro) in systems {
            let diagnostics = Error::ShellProgramNotFound {
                program: program.to_string(),
                os: parse_os(os, distro),
            }
            .to_diagnostics();
            // Snapshot names match the originals: non-linux names always
            // ended in "_other", which is exactly their distro here.
            assert_snapshot!(
                format!("shell_program_not_found_{program}_{os}_{distro}"),
                diagnostics[0].text
            );
        }
    }
}
| rust | Apache-2.0 | f424547f02e621f1c5f28749786e05eda7feb098 | 2026-01-04T15:40:22.554517Z | false |
gleam-lang/gleam | https://github.com/gleam-lang/gleam/blob/f424547f02e621f1c5f28749786e05eda7feb098/compiler-core/src/pretty/tests.rs | compiler-core/src/pretty/tests.rs | use super::Document::*;
use super::Mode::*;
use super::*;
use im::vector;
use pretty_assertions::assert_eq;
#[test]
/// Exercises the `fits` lookahead used by the pretty printer to decide
/// whether a document fits within the remaining line width, covering
/// each document variant in both `Broken` and `Unbroken` mode.
fn fits_test() {
    // Negative limits never fit
    assert!(!fits(-1, 0, vector![]));
    // If no more documents it always fits
    assert!(fits(0, 0, vector![]));
    // ForceBreak never fits
    let doc = ForceBroken(Box::new(nil()));
    assert!(!fits(100, 0, vector![(0, Unbroken, &doc)]));
    let doc = ForceBroken(Box::new(nil()));
    assert!(!fits(100, 0, vector![(0, Broken, &doc)]));
    // Break in Broken fits always
    assert!(fits(
        1,
        0,
        vector![(
            0,
            Broken,
            &Break {
                broken: "12",
                unbroken: "",
                kind: BreakKind::Strict,
            }
        )]
    ));
    // Break in Unbroken mode fits if `unbroken` fits
    assert!(fits(
        3,
        0,
        vector![(
            0,
            Unbroken,
            &Break {
                broken: "",
                unbroken: "123",
                kind: BreakKind::Strict,
            }
        )]
    ));
    assert!(!fits(
        2,
        0,
        vector![(
            0,
            Unbroken,
            &Break {
                broken: "",
                unbroken: "123",
                kind: BreakKind::Strict,
            }
        )]
    ));
    // Line always fits
    assert!(fits(0, 0, vector![(0, Broken, &Line(100))]));
    assert!(fits(0, 0, vector![(0, Unbroken, &Line(100))]));
    // String fits if smaller than limit
    let doc = Document::str("Hello");
    assert!(fits(5, 0, vector![(0, Broken, &doc)]));
    let doc = Document::str("Hello");
    assert!(fits(5, 0, vector![(0, Unbroken, &doc)]));
    let doc = Document::str("Hello");
    assert!(!fits(4, 0, vector![(0, Broken, &doc)]));
    let doc = Document::str("Hello");
    assert!(!fits(4, 0, vector![(0, Unbroken, &doc)]));
    // Cons fits if combined smaller than limit
    let doc = Document::str("1").append(Document::str("2"));
    assert!(fits(2, 0, vector![(0, Broken, &doc)]));
    let doc = Document::str("1").append(Document::str("2"));
    assert!(fits(2, 0, vector![(0, Unbroken, &doc,)]));
    let doc = Document::str("1").append(Document::str("2"));
    assert!(!fits(1, 0, vector![(0, Broken, &doc)]));
    let doc = Document::str("1").append(Document::str("2"));
    assert!(!fits(1, 0, vector![(0, Unbroken, &doc)]));
    // Nest fits if combined smaller than limit
    let doc = Nest(
        1,
        NestMode::Increase,
        NestCondition::Always,
        Box::new(Document::str("12")),
    );
    assert!(fits(2, 0, vector![(0, Broken, &doc)]));
    assert!(fits(2, 0, vector![(0, Unbroken, &doc)]));
    assert!(!fits(1, 0, vector![(0, Broken, &doc)]));
    assert!(!fits(1, 0, vector![(0, Unbroken, &doc)]));
    // Nest fits if combined smaller than limit
    let doc = Nest(
        0,
        NestMode::Increase,
        NestCondition::Always,
        Box::new(Document::str("12")),
    );
    assert!(fits(2, 0, vector![(0, Broken, &doc)]));
    assert!(fits(2, 0, vector![(0, Unbroken, &doc)]));
    assert!(!fits(1, 0, vector![(0, Broken, &doc)]));
    assert!(!fits(1, 0, vector![(0, Unbroken, &doc)]));
    // Zero width strings never count towards the line width
    let doc = ZeroWidthString {
        string: "this is a very long string that doesn't count towards line width".into(),
    };
    assert!(fits(10, 0, vector![(0, Unbroken, &doc)]));
    assert!(fits(10, 9, vector![(0, Unbroken, &doc)]));
    let string_doc = "hello!".to_doc();
    assert!(fits(
        10,
        0,
        vector![(0, Unbroken, &string_doc), (0, Unbroken, &doc)]
    ));
}
#[test]
/// End-to-end rendering tests: documents are pretty printed at various
/// line widths and the exact output is asserted.
fn format_test() {
    let doc = Document::str("Hi");
    assert_eq!("Hi", doc.to_pretty_string(10));
    let doc = Document::str("Hi").append(Document::str(", world!"));
    assert_eq!("Hi, world!", doc.clone().to_pretty_string(10));
    // A break renders its `unbroken` text when the group fits...
    let doc = &Break {
        broken: "broken",
        unbroken: "unbroken",
        kind: BreakKind::Strict,
    }
    .group();
    assert_eq!("unbroken", doc.clone().to_pretty_string(10));
    // ...and its `broken` text plus a newline when it does not.
    let doc = &Break {
        broken: "broken",
        unbroken: "unbroken",
        kind: BreakKind::Strict,
    }
    .group();
    assert_eq!("broken\n", doc.clone().to_pretty_string(5));
    // Nesting indents the content following a line break.
    let doc = Nest(
        2,
        NestMode::Increase,
        NestCondition::Always,
        Box::new(Document::str("1").append(Line(1).append(Document::str("2")))),
    );
    assert_eq!("1\n 2", doc.to_pretty_string(1));
    // ForceBroken forces a strict break even when there is room...
    let doc = Group(Box::new(ForceBroken(Box::new(Break {
        broken: "broken",
        unbroken: "unbroken",
        kind: BreakKind::Strict,
    }))));
    assert_eq!("broken\n".to_string(), doc.to_pretty_string(100));
    // ...but flex breaks are not affected by forcing.
    let doc = ForceBroken(Box::new(Break {
        broken: "broken",
        unbroken: "unbroken",
        kind: BreakKind::Flex,
    }));
    assert_eq!("unbroken".to_string(), doc.to_pretty_string(100));
    // Zero width strings render but do not trigger surrounding breaks.
    let doc = Vec(vec![
        Break {
            broken: "broken",
            unbroken: "unbroken",
            kind: BreakKind::Strict,
        },
        zero_width_string("<This will not cause a line break>".into()),
        Break {
            broken: "broken",
            unbroken: "unbroken",
            kind: BreakKind::Strict,
        },
    ]);
    assert_eq!(
        "unbroken<This will not cause a line break>unbroken",
        doc.to_pretty_string(20)
    );
}
#[test]
/// Tests the interaction of `force_break` with `next_break_fits`:
/// enabling next-break-fits suppresses the forcing, and disabling it
/// again restores the broken layout.
fn forcing_test() {
    let docs = join(
        [
            "hello".to_doc(),
            "a".to_doc(),
            "b".to_doc(),
            "c".to_doc(),
            "d".to_doc(),
        ],
        break_("", " "),
    );
    // Forced: every break is taken even though the line would fit.
    assert_eq!(
        "hello\na\nb\nc\nd",
        docs.clone().force_break().group().to_pretty_string(80)
    );
    // next_break_fits(Enabled) overrides the forcing.
    assert_eq!(
        "hello a b c d",
        docs.clone()
            .force_break()
            .next_break_fits(NextBreakFitsMode::Enabled)
            .group()
            .to_pretty_string(80)
    );
    // An inner Disabled wins over the outer Enabled.
    assert_eq!(
        "hello\na\nb\nc\nd",
        docs.clone()
            .force_break()
            .next_break_fits(NextBreakFitsMode::Enabled)
            .next_break_fits(NextBreakFitsMode::Disabled)
            .group()
            .to_pretty_string(80)
    );
}
#[test]
/// Tests `nest_if_broken`: indentation is only applied when the
/// enclosing group actually breaks, demonstrated on a function-call
/// document rendered at several widths.
fn nest_if_broken_test() {
    assert_eq!(
        "hello\n  world",
        concat(["hello".to_doc(), break_("", " "), "world".to_doc()])
            .nest_if_broken(2)
            .group()
            .to_pretty_string(10)
    );
    // A list document: `[a, b]` flat, or broken across lines with a
    // trailing comma.
    let list_doc = concat([
        concat([
            break_("[", "["),
            "a,".to_doc(),
            break_("", " "),
            "b".to_doc(),
        ])
        .nest(2),
        break_(",", ""),
        "]".to_doc(),
    ])
    .group();
    // Call arguments only get indented if the argument list breaks.
    let arguments_doc = concat([
        break_("", ""),
        "one".to_doc(),
        ",".to_doc(),
        break_("", " "),
        list_doc.group().next_break_fits(NextBreakFitsMode::Enabled),
    ])
    .nest_if_broken(2)
    .group();
    let function_call_doc = concat([
        "some_function_call(".to_doc(),
        arguments_doc,
        break_("", ""),
        ")".to_doc(),
    ])
    .group();
    // Everything breaks at width 2.
    assert_eq!(
        "some_function_call(\n  one,\n  [\n    a,\n    b,\n  ]\n)",
        function_call_doc.clone().to_pretty_string(2)
    );
    // The list fits once the arguments are broken.
    assert_eq!(
        "some_function_call(\n  one,\n  [a, b]\n)",
        function_call_doc.clone().to_pretty_string(20)
    );
    // next_break_fits lets the call stay flat while only the list breaks.
    assert_eq!(
        "some_function_call(one, [\n  a,\n  b,\n])",
        function_call_doc.clone().to_pretty_string(25)
    );
    // Everything fits at width 80.
    assert_eq!(
        "some_function_call(one, [a, b])",
        function_call_doc.clone().to_pretty_string(80)
    );
}
#[test]
/// Tests that each side of a `left = right` document breaks
/// independently depending on the available width.
// NOTE(review): the leading indentation inside the multi-line expected
// strings below appears to have been stripped by whitespace mangling
// (the list is built with `nest(2)`) — verify the expected output
// against the pretty printer before relying on these literals.
fn let_left_side_fits_test() {
    let elements = break_("", "").append("1").nest(2).append(break_("", ""));
    let list = "[".to_doc().append(elements).append("]").group();
    let doc = list.clone().append(" = ").append(list);
    // Only the right-hand side is too wide at width 7.
    assert_eq!(
        "[1] = [
1
]",
        doc.clone().to_pretty_string(7)
    );
    // Both sides break at width 2.
    assert_eq!(
        "[
1
] = [
1
]",
        doc.clone().to_pretty_string(2)
    );
    // Everything fits at width 16.
    assert_eq!("[1] = [1]", doc.clone().to_pretty_string(16));
}
#[test]
/// Tests `Document::is_empty` for every document variant: a document is
/// empty when it renders to nothing in all modes.
fn empty_documents() {
    // nil
    assert!(nil().is_empty());
    // lines
    assert!(lines(0).is_empty());
    assert!(!line().is_empty());
    // force break
    assert!(nil().force_break().is_empty());
    assert!(!"ok".to_doc().force_break().is_empty());
    // strings
    assert!("".to_doc().is_empty());
    assert!(!"wibble".to_doc().is_empty());
    // Whitespace-only strings still produce output, so they are not empty.
    assert!(!" ".to_doc().is_empty());
    assert!(!"\n".to_doc().is_empty());
    // containers
    assert!("".to_doc().nest(2).is_empty());
    assert!(!"wibble".to_doc().nest(2).is_empty());
    assert!("".to_doc().group().is_empty());
    assert!(!"wibble".to_doc().group().is_empty());
    assert!(break_("", "").is_empty());
    assert!(!break_("wibble", "wibble").is_empty());
    assert!(!break_("wibble\nwobble", "wibble wobble").is_empty());
    assert!("".to_doc().append("".to_doc()).is_empty());
    assert!(!"wibble".to_doc().append("".to_doc()).is_empty());
    assert!(!"".to_doc().append("wibble".to_doc()).is_empty());
    assert!(!zero_width_string("wibble".into()).is_empty());
}
#[test]
/// Tests that `set_nesting(0)` resets the indentation level, overriding
/// the `nest(2)` applied around it.
fn set_nesting() {
    let doc = Vec(vec!["wibble".to_doc(), break_("", " "), "wobble".to_doc()]).group();
    assert_eq!(
        "wibble\nwobble",
        doc.set_nesting(0).nest(2).to_pretty_string(1)
    );
}
| rust | Apache-2.0 | f424547f02e621f1c5f28749786e05eda7feb098 | 2026-01-04T15:40:22.554517Z | false |
gleam-lang/gleam | https://github.com/gleam-lang/gleam/blob/f424547f02e621f1c5f28749786e05eda7feb098/compiler-core/src/build/package_compiler.rs | compiler-core/src/build/package_compiler.rs | use crate::analyse::{ModuleAnalyzerConstructor, TargetSupport};
use crate::build::package_loader::CacheFiles;
use crate::inline;
use crate::io::files_with_extension;
use crate::line_numbers::{self, LineNumbers};
use crate::type_::PRELUDE_MODULE_NAME;
use crate::{
Error, Result, Warning,
ast::{SrcSpan, TypedModule, UntypedModule},
build::{
Mode, Module, Origin, Outcome, Package, SourceFingerprint, Target,
elixir_libraries::ElixirLibraries,
native_file_copier::NativeFileCopier,
package_loader::{CodegenRequired, PackageLoader, StaleTracker},
},
codegen::{Erlang, ErlangApp, JavaScript, TypeScriptDeclarations},
config::PackageConfig,
dep_tree, error,
io::{BeamCompiler, CommandExecutor, FileSystemReader, FileSystemWriter, Stdio},
metadata::ModuleEncoder,
parse::extra::ModuleExtra,
paths, type_,
uid::UniqueIdGenerator,
warning::{TypeWarningEmitter, WarningEmitter},
};
use askama::Template;
use ecow::EcoString;
use std::collections::HashSet;
use std::{collections::HashMap, fmt::write, time::SystemTime};
use vec1::Vec1;
use camino::{Utf8Path, Utf8PathBuf};
use super::{ErlangAppCodegenConfiguration, TargetCodegenConfiguration, Telemetry};
/// The result of compiling a single package.
pub struct Compiled {
    /// The modules which were just compiled
    pub modules: Vec<Module>,
    /// The names of all cached modules, which are not present in the `modules` field.
    pub cached_module_names: Vec<EcoString>,
}
/// Compiles a single Gleam package: loads and analyses its modules and
/// runs code generation for the configured target. Most flag fields have
/// defaults set by [`PackageCompiler::new`] and are adjusted by callers
/// before `compile` is invoked.
#[derive(Debug)]
pub struct PackageCompiler<'a, IO> {
    /// Filesystem / process IO implementation used for all reads and writes.
    pub io: IO,
    // Output, precompiled-library and package-root directories.
    pub out: &'a Utf8Path,
    pub lib: &'a Utf8Path,
    pub root: &'a Utf8Path,
    pub mode: Mode,
    pub target: &'a TargetCodegenConfiguration,
    pub config: &'a PackageConfig,
    /// Generator for ids unique across the compilation.
    pub ids: UniqueIdGenerator,
    /// Whether to write module metadata caches. Defaults to `true`.
    pub write_metadata: bool,
    /// Whether to run code generation. Defaults to `true`.
    pub perform_codegen: bool,
    /// If set to false the compiler won't load and analyse any of the package's
    /// modules and always succeed compilation returning no compile modules.
    ///
    /// Code generation is still carried out so that a root package will have an
    /// entry point nonetheless.
    ///
    pub compile_modules: bool,
    pub write_entrypoint: bool,
    pub copy_native_files: bool,
    pub compile_beam_bytecode: bool,
    /// How subprocess stdio is handled. Defaults to `Stdio::Inherit`.
    pub subprocess_stdio: Stdio,
    pub target_support: TargetSupport,
    pub cached_warnings: CachedWarnings,
    pub check_module_conflicts: CheckModuleConflicts,
}
impl<'a, IO> PackageCompiler<'a, IO>
where
IO: FileSystemReader + FileSystemWriter + CommandExecutor + BeamCompiler + Clone,
{
pub fn new(
config: &'a PackageConfig,
mode: Mode,
root: &'a Utf8Path,
out: &'a Utf8Path,
lib: &'a Utf8Path,
target: &'a TargetCodegenConfiguration,
ids: UniqueIdGenerator,
io: IO,
) -> Self {
Self {
io,
ids,
out,
lib,
root,
mode,
config,
target,
write_metadata: true,
perform_codegen: true,
compile_modules: true,
write_entrypoint: false,
copy_native_files: true,
compile_beam_bytecode: true,
subprocess_stdio: Stdio::Inherit,
target_support: TargetSupport::NotEnforced,
cached_warnings: CachedWarnings::Ignore,
check_module_conflicts: CheckModuleConflicts::DoNotCheck,
}
}
/// Compile the package.
/// Returns a list of modules that were compiled. Any modules that were read
/// from the cache will not be returned.
// TODO: return the cached modules.
pub fn compile(
mut self,
warnings: &WarningEmitter,
existing_modules: &mut im::HashMap<EcoString, type_::ModuleInterface>,
already_defined_modules: &mut im::HashMap<EcoString, Utf8PathBuf>,
stale_modules: &mut StaleTracker,
incomplete_modules: &mut HashSet<EcoString>,
telemetry: &dyn Telemetry,
) -> Outcome<Compiled, Error> {
let span = tracing::info_span!("compile", package = %self.config.name.as_str());
let _enter = span.enter();
// Ensure that the package is compatible with this version of Gleam
if let Err(e) = self.config.check_gleam_compatibility() {
return e.into();
}
let artefact_directory = self.out.join(paths::ARTEFACT_DIRECTORY_NAME);
let codegen_required = if self.perform_codegen {
CodegenRequired::Yes
} else {
CodegenRequired::No
};
let loader = PackageLoader::new(
self.io.clone(),
self.ids.clone(),
self.mode,
self.root,
self.cached_warnings,
warnings,
codegen_required,
&artefact_directory,
self.target.target(),
&self.config.name,
stale_modules,
already_defined_modules,
incomplete_modules,
);
let loaded = if self.compile_modules {
match loader.run() {
Ok(loaded) => loaded,
Err(error) => return error.into(),
}
} else {
Loaded::empty()
};
let mut cached_module_names = Vec::new();
// Load the cached modules that have previously been compiled
for module in loaded.cached.into_iter() {
// Emit any cached warnings.
// Note that `self.cached_warnings` is set to `Ignore` (such as for
// dependency packages) then this field will not be populated.
if let Err(e) = self.emit_warnings(warnings, &module) {
return e.into();
}
cached_module_names.push(module.name.clone());
// Register the cached module so its type information etc can be
// used for compiling futher modules.
_ = existing_modules.insert(module.name.clone(), module);
}
if !loaded.to_compile.is_empty() {
// Print that work is being done
if self.perform_codegen {
telemetry.compiling_package(&self.config.name);
} else {
telemetry.checking_package(&self.config.name)
}
}
// Type check the modules that are new or have changed
tracing::info!(count=%loaded.to_compile.len(), "analysing_modules");
let outcome = analyse(
&self.config,
self.target.target(),
self.mode,
&self.ids,
loaded.to_compile,
existing_modules,
warnings,
self.target_support,
incomplete_modules,
);
let modules = match outcome {
Outcome::Ok(modules) => modules,
Outcome::PartialFailure(modules, error) => {
return Outcome::PartialFailure(
Compiled {
modules,
cached_module_names,
},
error,
);
}
Outcome::TotalFailure(error) => return Outcome::TotalFailure(error),
};
tracing::debug!("performing_code_generation");
// Inlining is currently disabled. See
// https://github.com/gleam-lang/gleam/pull/5010 for information.
// let modules = if self.perform_codegen {
// modules
// .into_iter()
// .map(|mut module| {
// module.ast = inline::module(module.ast, &existing_modules);
// module
// })
// .collect()
// } else {
// modules
// };
if let Err(error) = self.perform_codegen(&modules) {
return error.into();
}
if let Err(error) = self.encode_and_write_metadata(&modules) {
return error.into();
}
Outcome::Ok(Compiled {
modules,
cached_module_names,
})
}
fn compile_erlang_to_beam(
&mut self,
modules: &HashSet<Utf8PathBuf>,
) -> Result<Vec<EcoString>, Error> {
if modules.is_empty() {
tracing::debug!("no_erlang_to_compile");
return Ok(Vec::new());
}
tracing::debug!("compiling_erlang");
self.io
.compile_beam(self.out, self.lib, modules, self.subprocess_stdio)
.map(|modules| modules.iter().map(|str| EcoString::from(str)).collect())
}
fn copy_project_native_files(
&mut self,
destination_dir: &Utf8Path,
to_compile_modules: &mut HashSet<Utf8PathBuf>,
) -> Result<(), Error> {
tracing::debug!("copying_native_source_files");
// TODO: unit test
let priv_source = self.root.join("priv");
let priv_build = self.out.join("priv");
if self.io.is_directory(&priv_source) && !self.io.is_directory(&priv_build) {
tracing::debug!("linking_priv_to_build");
self.io.symlink_dir(&priv_source, &priv_build)?;
}
let copier = NativeFileCopier::new(
self.io.clone(),
self.root.clone(),
destination_dir,
self.check_module_conflicts,
);
let copied = copier.run()?;
to_compile_modules.extend(copied.to_compile.into_iter());
// If there are any Elixir files then we need to locate Elixir
// installed on this system for use in compilation.
if copied.any_elixir {
ElixirLibraries::make_available(
&self.io,
&self.lib.to_path_buf(),
self.subprocess_stdio,
)?;
}
Ok(())
}
fn encode_and_write_metadata(&mut self, modules: &[Module]) -> Result<()> {
if !self.write_metadata {
tracing::debug!("package_metadata_writing_disabled");
return Ok(());
}
if modules.is_empty() {
return Ok(());
}
let artefact_dir = self.out.join(paths::ARTEFACT_DIRECTORY_NAME);
tracing::debug!("writing_module_caches");
for module in modules {
let cache_files = CacheFiles::new(&artefact_dir, &module.name);
// Write cache file
let bytes = ModuleEncoder::new(&module.ast.type_info).encode()?;
self.io.write_bytes(&cache_files.cache_path, &bytes)?;
// Write cache metadata
let info = CacheMetadata {
mtime: module.mtime,
codegen_performed: self.perform_codegen,
dependencies: module.dependencies.clone(),
fingerprint: SourceFingerprint::new(&module.code),
line_numbers: module.ast.type_info.line_numbers.clone(),
};
self.io
.write_bytes(&cache_files.meta_path, &info.to_binary())?;
let cache_inline = bincode::serde::encode_to_vec(
&module.ast.type_info.inline_functions,
bincode::config::legacy(),
)
.expect("Failed to serialise inline functions");
self.io.write_bytes(&cache_files.inline_path, &cache_inline);
// Write warnings.
// Dependency packages don't get warnings persisted as the
// programmer doesn't want to be told every time about warnings they
// cannot fix directly.
if self.cached_warnings.should_use() {
let warnings = &module.ast.type_info.warnings;
let data = bincode::serde::encode_to_vec(warnings, bincode::config::legacy())
.expect("Serialise warnings");
self.io.write_bytes(&cache_files.warnings_path, &data)?;
}
}
Ok(())
}
fn perform_codegen(&mut self, modules: &[Module]) -> Result<()> {
if !self.perform_codegen {
tracing::debug!("skipping_codegen");
return Ok(());
}
match self.target {
TargetCodegenConfiguration::JavaScript {
emit_typescript_definitions,
prelude_location,
} => self.perform_javascript_codegen(
modules,
*emit_typescript_definitions,
prelude_location,
),
TargetCodegenConfiguration::Erlang { app_file } => {
self.perform_erlang_codegen(modules, app_file.as_ref())
}
}
}
fn perform_erlang_codegen(
&mut self,
modules: &[Module],
app_file_config: Option<&ErlangAppCodegenConfiguration>,
) -> Result<(), Error> {
let mut written = HashSet::new();
let build_dir = self.out.join(paths::ARTEFACT_DIRECTORY_NAME);
let include_dir = self.out.join("include");
let io = self.io.clone();
io.mkdir(&build_dir)?;
if self.copy_native_files {
self.copy_project_native_files(&build_dir, &mut written)?;
} else {
tracing::debug!("skipping_native_file_copying");
}
if self.compile_beam_bytecode && self.write_entrypoint {
self.render_erlang_entrypoint_module(&build_dir, &mut written)?;
} else {
tracing::debug!("skipping_entrypoint_generation");
}
// NOTE: This must come after `copy_project_native_files` to ensure that
// we overwrite any precompiled Erlang that was included in the Hex
// package. Otherwise we will build the potentially outdated precompiled
// version and not the newly compiled version.
Erlang::new(&build_dir, &include_dir).render(io.clone(), modules, self.root)?;
let native_modules: Vec<EcoString> = if self.compile_beam_bytecode {
written.extend(modules.iter().map(Module::compiled_erlang_path));
self.compile_erlang_to_beam(&written)?
} else {
tracing::debug!("skipping_erlang_bytecode_compilation");
Vec::new()
};
if let Some(config) = app_file_config {
ErlangApp::new(&self.out.join("ebin"), config).render(
io,
&self.config,
modules,
native_modules,
)?;
}
Ok(())
}
fn perform_javascript_codegen(
&mut self,
modules: &[Module],
typescript: bool,
prelude_location: &Utf8Path,
) -> Result<(), Error> {
let mut written = HashSet::new();
let typescript = if typescript {
TypeScriptDeclarations::Emit
} else {
TypeScriptDeclarations::None
};
JavaScript::new(&self.out, typescript, prelude_location, &self.root).render(
&self.io,
modules,
self.stdlib_package(),
)?;
if self.copy_native_files {
self.copy_project_native_files(&self.out, &mut written)?;
} else {
tracing::debug!("skipping_native_file_copying");
}
Ok(())
}
fn render_erlang_entrypoint_module(
&mut self,
out: &Utf8Path,
modules_to_compile: &mut HashSet<Utf8PathBuf>,
) -> Result<(), Error> {
let name = format!("{name}@@main.erl", name = self.config.name);
let path = out.join(&name);
// If the entrypoint module has already been created then we don't need
// to write and compile it again.
if self.io.is_file(&path) {
tracing::debug!("erlang_entrypoint_already_exists");
return Ok(());
}
let template = ErlangEntrypointModule {
application: &self.config.name,
};
let module = template.render().expect("Erlang entrypoint rendering");
self.io.write(&path, &module)?;
let _ = modules_to_compile.insert(name.into());
tracing::debug!("erlang_entrypoint_written");
Ok(())
}
fn emit_warnings(
&self,
warnings: &WarningEmitter,
module: &type_::ModuleInterface,
) -> Result<()> {
for warning in &module.warnings {
let src = self.io.read(&module.src_path)?;
warnings.emit(Warning::Type {
path: module.src_path.clone(),
src: src.into(),
warning: warning.clone(),
});
}
Ok(())
}
fn stdlib_package(&self) -> StdlibPackage {
if self.config.dependencies.contains_key("gleam_stdlib")
|| self.config.dev_dependencies.contains_key("gleam_stdlib")
{
StdlibPackage::Present
} else {
StdlibPackage::Missing
}
}
}
/// Whether the package depends on `gleam_stdlib`; this is passed on to the
/// JavaScript code generator.
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
pub enum StdlibPackage {
    Present,
    Missing,
}
fn analyse(
package_config: &PackageConfig,
target: Target,
mode: Mode,
ids: &UniqueIdGenerator,
mut parsed_modules: Vec<UncompiledModule>,
module_types: &mut im::HashMap<EcoString, type_::ModuleInterface>,
warnings: &WarningEmitter,
target_support: TargetSupport,
incomplete_modules: &mut HashSet<EcoString>,
) -> Outcome<Vec<Module>, Error> {
let mut modules = Vec::with_capacity(parsed_modules.len() + 1);
let direct_dependencies = package_config.dependencies_for(mode).expect("Package deps");
let dev_dependencies = package_config.dev_dependencies.keys().cloned().collect();
// Insert the prelude
// DUPE: preludeinsertion
// TODO: Currently we do this here and also in the tests. It would be better
// to have one place where we create all this required state for use in each
// place.
let _ = module_types.insert(PRELUDE_MODULE_NAME.into(), type_::build_prelude(ids));
for UncompiledModule {
name,
code,
ast,
path,
mtime,
origin,
package,
dependencies,
extra,
} in parsed_modules
{
tracing::debug!(module = ?name, "Type checking");
let line_numbers = LineNumbers::new(&code);
let analysis = crate::analyse::ModuleAnalyzerConstructor {
target,
ids,
origin,
importable_modules: module_types,
warnings: &TypeWarningEmitter::new(path.clone(), code.clone(), warnings.clone()),
direct_dependencies: &direct_dependencies,
dev_dependencies: &dev_dependencies,
target_support,
package_config,
}
.infer_module(ast, line_numbers, path.clone());
match analysis {
Outcome::Ok(ast) => {
// Module has compiled successfully. Make sure it isn't marked as incomplete.
let _ = incomplete_modules.remove(&name.clone());
let mut module = Module {
dependencies,
origin,
extra,
mtime,
name,
code,
ast,
input_path: path,
};
module.attach_doc_and_module_comments();
// Register the types from this module so they can be imported into
// other modules.
let _ = module_types.insert(module.name.clone(), module.ast.type_info.clone());
// Check for empty modules and emit warning
// Only emit the empty module warning if the module has no definitions at all.
// Modules with only private definitions already emit their own warnings.
if module_types
.get(&module.name)
.map(|interface| interface.values.is_empty() && interface.types.is_empty())
.unwrap_or(false)
{
warnings.emit(crate::warning::Warning::EmptyModule {
path: module.input_path.clone(),
name: module.name.clone(),
});
}
// Register the successfully type checked module data so that it can be
// used for code generation and in the language server.
modules.push(module);
}
Outcome::PartialFailure(ast, errors) => {
let error = Error::Type {
names: Box::new(ast.names.clone()),
path: path.clone(),
src: code.clone(),
errors,
};
// Mark as incomplete so that this module isn't reloaded from cache.
let _ = incomplete_modules.insert(name.clone());
// Register the partially type checked module data so that it can be
// used in the language server.
modules.push(Module {
dependencies,
origin,
extra,
mtime,
name,
code,
ast,
input_path: path,
});
// WARNING: This cannot be used for code generation as the code has errors.
return Outcome::PartialFailure(modules, error);
}
Outcome::TotalFailure(errors) => {
return Outcome::TotalFailure(Error::Type {
names: Default::default(),
path: path.clone(),
src: code.clone(),
errors,
});
}
};
}
Outcome::Ok(modules)
}
/// A module input to the package loader: either freshly parsed source that
/// still needs compiling, or a module restored from the on-disc cache.
#[derive(Debug)]
pub(crate) enum Input {
    New(UncompiledModule),
    Cached(CachedModule),
}
impl Input {
    /// The name of the module, regardless of whether it is cached or new.
    pub fn name(&self) -> &EcoString {
        match self {
            Input::New(module) => &module.name,
            Input::Cached(module) => &module.name,
        }
    }

    /// The path to the module's source file on disc.
    pub fn source_path(&self) -> &Utf8Path {
        match self {
            Input::New(module) => &module.path,
            Input::Cached(module) => &module.source_path,
        }
    }

    /// The names of the modules this module depends upon.
    pub fn dependencies(&self) -> Vec<EcoString> {
        let dependencies = match self {
            Input::New(module) => &module.dependencies,
            Input::Cached(module) => &module.dependencies,
        };
        dependencies.iter().map(|(name, _)| name.clone()).collect()
    }

    /// Returns `true` if the input is [`New`].
    ///
    /// [`New`]: Input::New
    #[must_use]
    pub(crate) fn is_new(&self) -> bool {
        match self {
            Self::New(..) => true,
            Self::Cached(..) => false,
        }
    }

    /// Returns `true` if the input is [`Cached`].
    ///
    /// [`Cached`]: Input::Cached
    #[must_use]
    pub(crate) fn is_cached(&self) -> bool {
        match self {
            Self::Cached(..) => true,
            Self::New(..) => false,
        }
    }
}
/// Information about a module that was restored from the build cache rather
/// than recompiled from source.
#[derive(Debug)]
pub(crate) struct CachedModule {
    pub name: EcoString,
    pub origin: Origin,
    pub dependencies: Vec<(EcoString, SrcSpan)>,
    pub source_path: Utf8PathBuf,
    pub line_numbers: LineNumbers,
}
/// Metadata written alongside each module's cache entry, used to decide
/// whether the cached compilation is still valid for the current source.
#[derive(Debug, serde::Serialize, serde::Deserialize)]
pub(crate) struct CacheMetadata {
    /// Modification time of the source file when the cache was written.
    pub mtime: SystemTime,
    /// Whether code generation was performed when the cache was written.
    pub codegen_performed: bool,
    pub dependencies: Vec<(EcoString, SrcSpan)>,
    /// A fingerprint of the module's source code.
    pub fingerprint: SourceFingerprint,
    pub line_numbers: LineNumbers,
}
impl CacheMetadata {
    /// Serialise this metadata to bytes for writing to the cache on disc.
    pub fn to_binary(&self) -> Vec<u8> {
        bincode::serde::encode_to_vec(self, bincode::config::legacy())
            .expect("Serializing cache info")
    }

    /// Deserialise metadata previously produced by [`CacheMetadata::to_binary`],
    /// stringifying any decode error.
    pub fn from_binary(bytes: &[u8]) -> Result<Self, String> {
        bincode::serde::decode_from_slice(bytes, bincode::config::legacy())
            .map(|(data, _)| data)
            .map_err(|error| error.to_string())
    }
}
/// The result of loading a package's modules: those that must be (re)compiled
/// and those whose previous compilation can be reused from the cache.
#[derive(Debug, Default, PartialEq, Eq)]
pub(crate) struct Loaded {
    pub to_compile: Vec<UncompiledModule>,
    pub cached: Vec<type_::ModuleInterface>,
}
impl Loaded {
    /// A `Loaded` with nothing to compile and nothing cached.
    fn empty() -> Self {
        // The derived `Default` produces exactly the empty vectors we want.
        Self::default()
    }
}
/// A parsed but not yet type checked module.
#[derive(Debug, PartialEq, Eq)]
pub(crate) struct UncompiledModule {
    pub path: Utf8PathBuf,
    pub name: EcoString,
    pub code: EcoString,
    /// Modification time of the source file.
    pub mtime: SystemTime,
    pub origin: Origin,
    /// The name of the package this module belongs to.
    pub package: EcoString,
    pub dependencies: Vec<(EcoString, SrcSpan)>,
    pub ast: UntypedModule,
    pub extra: ModuleExtra,
}
/// Askama template for the `<package>@@main.erl` entrypoint module rendered
/// for the Erlang target.
#[derive(Template)]
#[template(path = "gleam@@main.erl", escape = "none")]
struct ErlangEntrypointModule<'a> {
    application: &'a str,
}
/// Whether warnings should be persisted to, and re-emitted from, the on-disc
/// build cache.
#[derive(Debug, Clone, Copy)]
pub enum CachedWarnings {
    Use,
    Ignore,
}
impl CachedWarnings {
    /// `true` when cached warnings should be written and re-emitted.
    pub(crate) fn should_use(&self) -> bool {
        matches!(self, Self::Use)
    }
}
/// Whether the native file copier should check for clashes between native
/// files and Gleam modules.
#[derive(Debug, Clone, Copy)]
pub enum CheckModuleConflicts {
    Check,
    DoNotCheck,
}
impl CheckModuleConflicts {
    /// `true` when module conflicts should be checked for.
    pub(crate) fn should_check(&self) -> bool {
        matches!(self, Self::Check)
    }
}
| rust | Apache-2.0 | f424547f02e621f1c5f28749786e05eda7feb098 | 2026-01-04T15:40:22.554517Z | false |
gleam-lang/gleam | https://github.com/gleam-lang/gleam/blob/f424547f02e621f1c5f28749786e05eda7feb098/compiler-core/src/build/project_compiler.rs | compiler-core/src/build/project_compiler.rs | use crate::{
Error, Result, Warning,
analyse::TargetSupport,
build::{
Mode, Module, Origin, Package, Target,
package_compiler::{self, PackageCompiler},
package_loader::StaleTracker,
project_compiler,
telemetry::Telemetry,
},
codegen::{self, ErlangApp},
config::PackageConfig,
dep_tree,
error::{FileIoAction, FileKind, ShellCommandFailureReason},
io::{BeamCompiler, Command, CommandExecutor, FileSystemReader, FileSystemWriter, Stdio},
manifest::{ManifestPackage, ManifestPackageSource},
metadata,
paths::{self, ProjectPaths},
type_::{self, ModuleFunction},
uid::UniqueIdGenerator,
version::COMPILER_VERSION,
warning::{self, WarningEmitter, WarningEmitterIO},
};
use ecow::EcoString;
use hexpm::version::Version;
use itertools::Itertools;
use pubgrub::Range;
use std::{
cmp,
collections::{HashMap, HashSet},
fmt::Write,
io::BufReader,
rc::Rc,
sync::Arc,
time::Instant,
};
use super::{
Codegen, Compile, ErlangAppCodegenConfiguration, Outcome,
elixir_libraries::ElixirLibraries,
package_compiler::{CachedWarnings, CheckModuleConflicts, Compiled},
};
use camino::{Utf8Path, Utf8PathBuf};
// On Windows we have to call rebar3 via a little wrapper script.
//
#[cfg(not(target_os = "windows"))]
const REBAR_EXECUTABLE: &str = "rebar3";
#[cfg(target_os = "windows")]
const REBAR_EXECUTABLE: &str = "rebar3.cmd";
// The same applies to Elixir: on Windows it is invoked via its batch file.
#[cfg(not(target_os = "windows"))]
const ELIXIR_EXECUTABLE: &str = "elixir";
#[cfg(target_os = "windows")]
const ELIXIR_EXECUTABLE: &str = "elixir.bat";
/// Options controlling a whole-project compilation run.
#[derive(Debug)]
pub struct Options {
    pub mode: Mode,
    /// The target to compile for. `None` means the target configured in the
    /// project's `gleam.toml` is used.
    pub target: Option<Target>,
    pub compile: Compile,
    pub codegen: Codegen,
    /// When true, any warning emitted for the root package fails the build.
    pub warnings_as_errors: bool,
    pub root_target_support: TargetSupport,
    pub no_print_progress: bool,
}
/// The result of compiling an entire project.
#[derive(Debug)]
pub struct Built {
    pub root_package: Package,
    /// Type information for every compiled module, keyed by module name.
    pub module_interfaces: im::HashMap<EcoString, type_::ModuleInterface>,
    compiled_dependency_modules: Vec<Module>,
}
impl Built {
    /// Look up the `main` function of the given module for the given target.
    ///
    /// Returns an error if no module with that name exists in the project, or
    /// if the module's own `get_main_function` lookup fails.
    pub fn get_main_function(
        &self,
        module: &EcoString,
        target: Target,
    ) -> Result<ModuleFunction, Error> {
        match self.module_interfaces.get(module) {
            Some(module_data) => module_data.get_main_function(target),
            None => Err(Error::ModuleDoesNotExist {
                module: module.clone(),
                suggestion: None,
            }),
        }
    }

    /// The highest compiler version required by any module in the project,
    /// defaulting to `0.1.0` when there are no modules.
    pub fn minimum_required_version(&self) -> Version {
        self.module_interfaces
            .values()
            .map(|interface| &interface.minimum_required_version)
            .reduce(cmp::max)
            .cloned()
            .unwrap_or_else(|| Version::new(0, 1, 0))
    }
}
/// Compiles an entire Gleam project: every dependency package followed by the
/// root package.
#[derive(Debug)]
pub struct ProjectCompiler<IO> {
    // The gleam.toml config for the root package of the project
    pub config: PackageConfig,
    /// Manifest information for each dependency package, keyed by name.
    pub packages: HashMap<String, ManifestPackage>,
    /// Type information for every module compiled so far in this run.
    importable_modules: im::HashMap<EcoString, type_::ModuleInterface>,
    // NOTE(review): appears to map each module name to the source file that
    // defines it — confirm against PackageLoader's usage.
    pub(crate) defined_modules: im::HashMap<EcoString, Utf8PathBuf>,
    stale_modules: StaleTracker,
    /// The set of modules that have had partial compilation done since the last
    /// successful compilation.
    incomplete_modules: HashSet<EcoString>,
    warnings: WarningEmitter,
    telemetry: &'static dyn Telemetry,
    options: Options,
    paths: ProjectPaths,
    ids: UniqueIdGenerator,
    pub io: IO,
    /// We may want to silence subprocess stdout if we are running in LSP mode.
    /// The language server talks over stdio so printing would break that.
    pub subprocess_stdio: Stdio,
}
// TODO: test that tests cannot be imported into src
// TODO: test that dep cycles are not allowed between packages
impl<IO> ProjectCompiler<IO>
where
IO: CommandExecutor + FileSystemWriter + FileSystemReader + BeamCompiler + Clone,
{
/// Create a new project compiler for the given root package config and its
/// manifest (dependency) packages.
pub fn new(
    config: PackageConfig,
    options: Options,
    packages: Vec<ManifestPackage>,
    telemetry: &'static dyn Telemetry,
    warning_emitter: Rc<dyn WarningEmitterIO>,
    paths: ProjectPaths,
    io: IO,
) -> Self {
    // Index the manifest packages by name for lookup during compilation.
    let packages = packages
        .into_iter()
        .map(|p| (p.name.to_string(), p))
        .collect();
    Self {
        importable_modules: im::HashMap::new(),
        defined_modules: im::HashMap::new(),
        stale_modules: StaleTracker::default(),
        incomplete_modules: HashSet::new(),
        ids: UniqueIdGenerator::new(),
        warnings: WarningEmitter::new(warning_emitter),
        subprocess_stdio: Stdio::Inherit,
        telemetry,
        packages,
        options,
        config,
        paths,
        io,
    }
}
/// The mode the project is being compiled in.
pub fn mode(&self) -> Mode {
    self.options.mode
}
/// The target being compiled for: the explicitly requested target if any,
/// otherwise the one from the project's config.
pub fn target(&self) -> Target {
    self.options.target.unwrap_or(self.config.target)
}
/// Clear per-run state so that a previous compilation cannot leak into a
/// new one.
pub fn reset_state_for_new_compile_run(&mut self) {
    // We make sure the stale module tracker is empty before we start, to
    // avoid mistakenly thinking a module is stale due to outdated state
    // from a previous build. A ProjectCompiler instance is re-used by the
    // LSP engine so state could be reused if we don't reset it.
    self.stale_modules.empty();
}
/// Compiles all packages in the project and returns the compiled
/// information from the root package
pub fn compile(mut self) -> Result<Built> {
    self.reset_state_for_new_compile_run();
    // Each package may specify a Gleam version that it supports, so we
    // verify that this version is appropriate.
    self.check_gleam_version()?;
    // The JavaScript target requires a prelude module to be written.
    self.write_prelude()?;
    // Dependencies are compiled first.
    let compiled_dependency_modules = self.compile_dependencies()?;
    // We reset the warning count as we don't want to fail the build if a
    // dependency has warnings, only if the root package does.
    self.warnings.reset_count();
    let root_package = self.compile_root_package().into_result()?;
    // TODO: test
    // Fail the build if the root package produced warnings and the user
    // opted in to treating warnings as errors.
    if self.options.warnings_as_errors && self.warnings.count() > 0 {
        return Err(Error::ForbiddenWarnings {
            count: self.warnings.count(),
        });
    }
    Ok(Built {
        root_package,
        module_interfaces: self.importable_modules,
        compiled_dependency_modules,
    })
}
/// Compile the root package of the project, pairing the compiled modules
/// with the root package's configuration.
pub fn compile_root_package(&mut self) -> Outcome<Package, Error> {
    let config = self.config.clone();
    self.compile_gleam_package(&config, true, self.paths.root().to_path_buf())
        .map(
            |Compiled {
                 modules,
                 cached_module_names,
             }| Package {
                config,
                modules,
                cached_module_names,
            },
        )
}
/// Checks that version file found in the build directory matches the
/// current version of gleam. If not, we will clear the build directory
/// before continuing. This will ensure that upgrading gleam will not leave
/// one with confusing or hard to debug states.
pub fn check_gleam_version(&self) -> Result<(), Error> {
    let build_path = self
        .paths
        .build_directory_for_target(self.mode(), self.target());
    let version_path = self.paths.build_gleam_version(self.mode(), self.target());
    if self.io.is_file(&version_path) {
        let version = self.io.read(&version_path)?;
        if version == COMPILER_VERSION {
            return Ok(());
        }
    }
    // Either the file is missing or the versions do not match. Time to rebuild
    tracing::info!("removing_build_state_from_different_gleam_version");
    self.io.delete_directory(&build_path)?;
    // Recreate build directory with new updated version file
    self.io.mkdir(&build_path)?;
    self.io
        .write(&version_path, COMPILER_VERSION)
        .map_err(|e| Error::FileIo {
            action: FileIoAction::WriteTo,
            kind: FileKind::File,
            path: version_path,
            err: Some(e.to_string()),
        })
}
/// Compile every dependency package in dependency order, returning the
/// modules that were (re)compiled in the process.
pub fn compile_dependencies(&mut self) -> Result<Vec<Module>, Error> {
    assert!(
        self.stale_modules.is_empty(),
        "The project compiler stale tracker was not emptied from the previous compilation"
    );
    let mut modules = Vec::new();
    for package_name in order_packages(&self.packages)? {
        let newly_compiled = self.load_cache_or_compile_package(&package_name)?;
        modules.extend(newly_compiled);
    }
    Ok(modules)
}
/// Write the JavaScript (and optionally TypeScript) prelude files into the
/// build directory if they are not already present.
fn write_prelude(&self) -> Result<()> {
    // Only the JavaScript target has a prelude to write.
    if !self.target().is_javascript() {
        return Ok(());
    }
    let build = self
        .paths
        .build_directory_for_target(self.mode(), self.target());
    // Write the JavaScript prelude
    let path = build.join("prelude.mjs");
    if !self.io.is_file(&path) {
        self.io.write(&path, crate::javascript::PRELUDE)?;
    }
    // Write the TypeScript prelude, if asked for
    if self.config.javascript.typescript_declarations {
        let path = build.join("prelude.d.mts");
        if !self.io.is_file(&path) {
            self.io.write(&path, crate::javascript::PRELUDE_TS_DEF)?;
        }
    }
    Ok(())
}
/// Compile a single dependency package with whichever of its declared build
/// tools we support, deleting its build directory on failure so a broken
/// partial build is not reused next time.
fn load_cache_or_compile_package(&mut self, name: &str) -> Result<Vec<Module>, Error> {
    // TODO: We could remove this clone if we split out the compilation of
    // packages into their own classes and then only mutate self after we no
    // longer need to have the package borrowed from self.packages.
    let package = self.packages.get(name).expect("Missing package").clone();
    let result = match usable_build_tools(&package)?.as_slice() {
        &[BuildTool::Gleam] => self.compile_gleam_dep_package(&package),
        &[BuildTool::Rebar3] => self.compile_rebar3_dep_package(&package).map(|_| vec![]),
        &[BuildTool::Mix] => self.compile_mix_dep_package(&package).map(|_| vec![]),
        // Packages declaring both mix and rebar3 try mix first and fall
        // back to rebar3.
        &[BuildTool::Mix, BuildTool::Rebar3] => self
            .compile_mix_dep_package(&package)
            .or_else(|_| self.compile_rebar3_dep_package(&package))
            .map(|_| vec![]),
        _ => {
            return Err(Error::UnsupportedBuildTool {
                package: package.name.to_string(),
                build_tools: package.build_tools.clone(),
            });
        }
    };
    // TODO: test. This one is not covered by the integration tests.
    if result.is_err() {
        tracing::debug!(package=%name, "removing_failed_build");
        let path = self.paths.build_directory_for_package(
            self.mode(),
            self.target(),
            package.application_name(),
        );
        self.io.delete_directory(&path)?;
    }
    result
}
// TODO: extract and unit test
/// Compile an Erlang dependency package using rebar3.
///
/// Does nothing if the package has already been built, if codegen is
/// disabled, or if the target is not Erlang.
fn compile_rebar3_dep_package(&mut self, package: &ManifestPackage) -> Result<(), Error> {
    let application_name = package.application_name();
    let package_name = &package.name;
    let mode = self.mode();
    let target = self.target();
    let package_build = self
        .paths
        .build_directory_for_package(mode, target, application_name);
    // TODO: test
    if self.io.is_directory(&package_build) {
        tracing::debug!(%package_name, "using_precompiled_rebar3_package");
        return Ok(());
    }
    // TODO: test
    if !self.options.codegen.should_codegen(false) {
        tracing::debug!(%package_name, "skipping_rebar3_build_as_codegen_disabled");
        return Ok(());
    }
    // TODO: test
    if target != Target::Erlang {
        tracing::debug!(%package_name, "skipping_rebar3_build_for_non_erlang_target");
        return Ok(());
    }
    // Print that work is being done
    self.telemetry.compiling_package(package_name);
    // `package_source` was previously named `package`, shadowing the
    // parameter of the same name; the unused `build_packages`, `ebins` and
    // `rebar3_path` locals have also been removed.
    let package_source = self.paths.build_packages_package(package_name);
    tracing::debug!("copying_package_to_build");
    self.io.mkdir(&package_build)?;
    self.io.copy_dir(&package_source, &package_build)?;
    let env = vec![
        ("ERL_LIBS".to_string(), "../*/ebin".to_string()),
        (
            "REBAR_BARE_COMPILER_OUTPUT_DIR".to_string(),
            package_build.to_string(),
        ),
        ("REBAR_PROFILE".to_string(), "prod".to_string()),
        ("REBAR_SKIP_PROJECT_PLUGINS".to_string(), "true".to_string()),
        ("TERM".to_string(), "dumb".to_string()),
    ];
    let args = vec![
        "bare".into(),
        "compile".into(),
        "--paths".into(),
        "../*/ebin".into(),
    ];
    let status = self.io.exec(Command {
        program: REBAR_EXECUTABLE.into(),
        args,
        env,
        cwd: Some(package_build),
        stdio: self.subprocess_stdio,
    })?;
    if status == 0 {
        Ok(())
    } else {
        Err(Error::ShellCommand {
            program: "rebar3".into(),
            reason: ShellCommandFailureReason::Unknown,
        })
    }
}
/// Compile an Elixir dependency package using mix.
///
/// Does nothing if the package has already been built, if codegen is
/// disabled, or if the target is not Erlang. On success the mix build
/// output is symlinked into the Gleam build directory.
fn compile_mix_dep_package(&mut self, package: &ManifestPackage) -> Result<(), Error> {
    let application_name = package.application_name();
    let package_name = &package.name;
    let mode = self.mode();
    let target = self.target();
    let mix_target = "prod";
    let dest = self
        .paths
        .build_directory_for_package(mode, target, application_name);
    // TODO: test
    if self.io.is_directory(&dest) {
        tracing::debug!(%package_name, "using_precompiled_mix_package");
        return Ok(());
    }
    // TODO: test
    if !self.options.codegen.should_codegen(false) {
        tracing::debug!(%package_name, "skipping_mix_build_as_codegen_disabled");
        return Ok(());
    }
    // TODO: test
    if target != Target::Erlang {
        tracing::debug!(%package_name, "skipping_mix_build_for_non_erlang_target");
        return Ok(());
    }
    // Print that work is being done
    self.telemetry.compiling_package(package_name);
    let build_dir = self.paths.build_directory_for_target(mode, target);
    let project_dir = self.paths.build_packages_package(package_name);
    let mix_build_dir = project_dir.join("_build").join(mix_target);
    let mix_build_lib_dir = mix_build_dir.join("lib");
    let up = paths::unnest(&project_dir);
    // Paths passed to mix must be expressed relative to the project dir.
    let mix_path = |path: &Utf8Path| up.join(path).to_string();
    let ebins = self.paths.build_packages_ebins_glob(mode, target);
    // Elixir core libs must be loaded
    ElixirLibraries::make_available(&self.io, &build_dir, self.subprocess_stdio)?;
    // Prevent Mix.Compilers.ApplicationTracer warnings
    // mix would make this if it didn't exist, but we make it anyway as
    // we need to link the compiled dependencies into there
    self.io.mkdir(&mix_build_lib_dir)?;
    let deps = &package.requirements;
    for dep in deps {
        // TODO: unit test
        let dep_source = build_dir.join(dep.as_str());
        let dep_dest = mix_build_lib_dir.join(dep.as_str());
        if self.io.is_directory(&dep_source) && !self.io.is_directory(&dep_dest) {
            tracing::debug!("linking_{}_to_build", dep);
            self.io.symlink_dir(&dep_source, &dep_dest)?;
        }
    }
    let env = vec![
        ("MIX_BUILD_PATH".to_string(), mix_path(&mix_build_dir)),
        ("MIX_ENV".to_string(), mix_target.to_string()),
        ("MIX_QUIET".to_string(), "1".to_string()),
        ("TERM".to_string(), "dumb".to_string()),
    ];
    let args = vec![
        "-pa".to_string(),
        mix_path(&ebins),
        "-S".to_string(),
        "mix".to_string(),
        "compile".to_string(),
        "--no-deps-check".to_string(),
        "--no-load-deps".to_string(),
        "--no-protocol-consolidation".to_string(),
    ];
    let status = self.io.exec(Command {
        program: ELIXIR_EXECUTABLE.into(),
        args,
        env,
        cwd: Some(project_dir),
        stdio: self.subprocess_stdio,
    })?;
    if status == 0 {
        // TODO: unit test
        let source = mix_build_dir.join("lib").join(application_name.as_str());
        if self.io.is_directory(&source) && !self.io.is_directory(&dest) {
            tracing::debug!("linking_{}_to_build", application_name);
            self.io.symlink_dir(&source, &dest)?;
        }
        Ok(())
    } else {
        Err(Error::ShellCommand {
            program: "mix".into(),
            reason: ShellCommandFailureReason::Unknown,
        })
    }
}
/// Compile a Gleam dependency package from wherever its source lives:
/// a local path (relative paths are resolved against the project root) or
/// the project's build directory for Hex and Git packages.
fn compile_gleam_dep_package(
    &mut self,
    package: &ManifestPackage,
) -> Result<Vec<Module>, Error> {
    // TODO: Test
    let package_root = match &package.source {
        // If the path is relative it is relative to the root of the
        // project, not to the current working directory. The language server
        // could have the working directory and the project root in different
        // places.
        ManifestPackageSource::Local { path } if path.is_relative() => {
            self.io.canonicalise(&self.paths.root().join(path))?
        }
        // If the path is absolute we can use it as-is.
        ManifestPackageSource::Local { path } => path.clone(),
        // Hex and Git packages are downloaded into the project's build
        // directory.
        ManifestPackageSource::Git { .. } | ManifestPackageSource::Hex { .. } => {
            self.paths.build_packages_package(&package.name)
        }
    };
    let config_path = package_root.join("gleam.toml");
    let config = PackageConfig::read(config_path, &self.io)?;
    self.compile_gleam_package(&config, false, package_root)
        .into_result()
        .map(|compiled| compiled.modules)
}
/// Compile a single Gleam package (the root package or a dependency) for the
/// current target, returning the compiled artefacts.
///
/// `is_root` enables root-only behaviours: entrypoint generation, warning
/// caching, module conflict checks, and target support enforcement.
fn compile_gleam_package(
    &mut self,
    config: &PackageConfig,
    is_root: bool,
    root_path: Utf8PathBuf,
) -> Outcome<Compiled, Error> {
    let out_path =
        self.paths
            .build_directory_for_package(self.mode(), self.target(), &config.name);
    let lib_path = self
        .paths
        .build_directory_for_target(self.mode(), self.target());
    // Dependencies are always compiled in production mode; only the root
    // package uses the mode requested for this compilation.
    let mode = if is_root { self.mode() } else { Mode::Prod };
    let target = match self.target() {
        Target::Erlang => {
            // Collect any `otp_app` renames so the generated .app file refers
            // to packages by their actual OTP application names.
            let package_name_overrides = self
                .packages
                .values()
                .flat_map(|p| {
                    let overriden = p.otp_app.as_ref()?;
                    Some((p.name.clone(), overriden.clone()))
                })
                .collect();
            super::TargetCodegenConfiguration::Erlang {
                app_file: Some(ErlangAppCodegenConfiguration {
                    include_dev_deps: is_root && self.mode().includes_dev_dependencies(),
                    package_name_overrides,
                }),
            }
        }
        Target::JavaScript => super::TargetCodegenConfiguration::JavaScript {
            emit_typescript_definitions: self.config.javascript.typescript_declarations,
            // This path is relative to each package output directory
            prelude_location: Utf8PathBuf::from("../prelude.mjs"),
        },
    };
    let mut compiler = PackageCompiler::new(
        config,
        mode,
        &root_path,
        &out_path,
        &lib_path,
        &target,
        self.ids.clone(),
        self.io.clone(),
    );
    compiler.write_metadata = true;
    compiler.write_entrypoint = is_root;
    compiler.perform_codegen = self.options.codegen.should_codegen(is_root);
    compiler.compile_beam_bytecode = self.options.codegen.should_codegen(is_root);
    compiler.compile_modules = !(self.options.compile == Compile::DepsOnly && is_root);
    compiler.subprocess_stdio = self.subprocess_stdio;
    compiler.target_support = if is_root {
        // When compiling the root package it is context specific as to whether we need to
        // enforce that all functions have an implementation for the current target.
        // Typically we do, but if we are using `gleam run -m $module` to run a module that
        // belongs to a dependency we don't need to enforce this as we don't want to fail
        // compilation. It's impossible for a dependency module to call functions from the root
        // package, so it's OK if they could not be compiled.
        self.options.root_target_support
    } else {
        // When compiling dependencies we don't enforce that all functions have an
        // implementation for the current target. It is OK if they have APIs that are
        // inaccessible so long as they are not used by the root package.
        TargetSupport::NotEnforced
    };
    if is_root {
        compiler.cached_warnings = CachedWarnings::Use;
        // We only check for conflicting Gleam files if this is the root
        // package, since Hex packages are bundled with the Gleam source files
        // and compiled Erlang files next to each other.
        compiler.check_module_conflicts = CheckModuleConflicts::Check;
    } else {
        compiler.cached_warnings = CachedWarnings::Ignore;
        compiler.check_module_conflicts = CheckModuleConflicts::DoNotCheck;
    };
    // Compile project to Erlang or JavaScript source code
    compiler.compile(
        &mut self.warnings,
        &mut self.importable_modules,
        &mut self.defined_modules,
        &mut self.stale_modules,
        &mut self.incomplete_modules,
        self.telemetry,
    )
}
}
impl<IO> ProjectCompiler<IO> {
    /// The interfaces of every module importable so far, accumulated across
    /// all packages that have been compiled by this compiler.
    pub fn get_importable_modules(&self) -> &im::HashMap<EcoString, type_::ModuleInterface> {
        &self.importable_modules
    }
}
/// Determine a topological compile order for the given packages, erroring if
/// their requirements form a cycle.
fn order_packages(packages: &HashMap<String, ManifestPackage>) -> Result<Vec<EcoString>, Error> {
    // Sort by name first so the package order is deterministic, preventing
    // different compilations of the same project compiling in different
    // orders. This could impact any bugged outcomes, though not any where the
    // compiler is working correctly, so it's mostly to aid debugging.
    let dependencies = packages
        .values()
        .sorted_by(|left, right| left.name.cmp(&right.name))
        .map(|package| {
            let name = EcoString::from(package.name.as_str());
            let requirements = package
                .requirements
                .iter()
                .map(|requirement| EcoString::from(requirement.as_ref()))
                .collect();
            (name, requirements)
        })
        .collect();
    dep_tree::toposort_deps(dependencies).map_err(convert_deps_tree_error)
}
/// Translate a dependency-tree error into a top-level compiler `Error`.
fn convert_deps_tree_error(e: dep_tree::Error) -> Error {
    match e {
        // A cycle between packages means there is no valid compile order.
        dep_tree::Error::Cycle(packages) => Error::PackageCycle { packages },
    }
}
/// The tool used to build a dependency package.
#[derive(Debug, PartialEq, Clone, Copy)]
pub(crate) enum BuildTool {
    /// A Gleam package, built by this compiler.
    Gleam,
    /// An Erlang package built with rebar3.
    Rebar3,
    /// An Elixir (or mixed) package built with Mix.
    Mix,
}
/// Determine the build tool we should use to build this package
pub(crate) fn usable_build_tools(package: &ManifestPackage) -> Result<Vec<BuildTool>, Error> {
let mut rebar3_present = false;
let mut mix_present = false;
for tool in &package.build_tools {
match tool.as_str() {
"gleam" => return Ok(vec![BuildTool::Gleam]),
"rebar" => rebar3_present = true,
"rebar3" => rebar3_present = true,
"mix" => mix_present = true,
_ => (),
}
}
if mix_present && rebar3_present {
return Ok(vec![BuildTool::Mix, BuildTool::Rebar3]);
} else if mix_present {
return Ok(vec![BuildTool::Mix]);
} else if rebar3_present {
return Ok(vec![BuildTool::Rebar3]);
}
Err(Error::UnsupportedBuildTool {
package: package.name.to_string(),
build_tools: package.build_tools.clone(),
})
}
| rust | Apache-2.0 | f424547f02e621f1c5f28749786e05eda7feb098 | 2026-01-04T15:40:22.554517Z | false |
gleam-lang/gleam | https://github.com/gleam-lang/gleam/blob/f424547f02e621f1c5f28749786e05eda7feb098/compiler-core/src/build/module_loader.rs | compiler-core/src/build/module_loader.rs | #[cfg(test)]
mod tests;
use std::{collections::HashSet, time::SystemTime};
use camino::{Utf8Path, Utf8PathBuf};
use ecow::EcoString;
use serde::{Deserialize, Serialize};
use super::{
Mode, Origin, SourceFingerprint, Target,
package_compiler::{CacheMetadata, CachedModule, Input, UncompiledModule},
package_loader::{CodegenRequired, GleamFile},
};
use crate::{
Error, Result,
error::{FileIoAction, FileKind},
io::{CommandExecutor, FileSystemReader, FileSystemWriter},
warning::{TypeWarningEmitter, WarningEmitter},
};
/// Loads the modules of a single package, either from source or from the
/// per-module caches in the artefact directory.
#[derive(Debug)]
pub(crate) struct ModuleLoader<'a, IO> {
    pub io: IO,
    pub warnings: &'a WarningEmitter,
    pub mode: Mode,
    pub target: Target,
    // Whether this run must produce compiled target code. If so, caches that
    // were written without codegen cannot be used.
    pub codegen: CodegenRequired,
    pub package_name: &'a EcoString,
    // Directory holding the per-module cache and metadata files.
    pub artefact_directory: &'a Utf8Path,
    // Which source tree the modules being loaded come from (src/test/dev).
    pub origin: Origin,
    /// The set of modules that have had partial compilation done since the last
    /// successful compilation.
    pub incomplete_modules: &'a HashSet<EcoString>,
}
impl<'a, IO> ModuleLoader<'a, IO>
where
    IO: FileSystemReader + FileSystemWriter + CommandExecutor + Clone,
{
    /// Load a module from the given path.
    ///
    /// If the module has been compiled before and the source file has not been
    /// changed since then, load the precompiled data instead.
    ///
    /// Whether the module has changed or not is determined by comparing the
    /// modification time of the source file with the value recorded in the
    /// `.timestamp` file in the artefact directory.
    pub fn load(&self, file: GleamFile) -> Result<Input> {
        let name = file.module_name.clone();
        let source_mtime = self.io.modification_time(&file.path)?;

        let read_source = |name| self.read_source(file.path.clone(), name, source_mtime);

        // No cache metadata means the module has never been successfully
        // compiled, so it must be loaded from source.
        let meta = match self.read_cache_metadata(&file)? {
            Some(meta) => meta,
            None => return read_source(name).map(Input::New),
        };

        // The cache currently does not contain enough data to perform codegen,
        // so if codegen is required in this compiler run then we must check
        // that codegen has already been performed before using a cache.
        if self.codegen.is_required() && !meta.codegen_performed {
            tracing::debug!(?name, "codegen_required_cache_insufficient");
            return read_source(name).map(Input::New);
        }

        // If the timestamp of the source is newer than the cache entry and
        // the hash of the source differs from the one in the cache entry,
        // then we need to recompile.
        if meta.mtime < source_mtime {
            let source_module = read_source(name.clone())?;
            if meta.fingerprint != SourceFingerprint::new(&source_module.code) {
                tracing::debug!(?name, "cache_stale");
                return Ok(Input::New(source_module));
            } else if self.mode == Mode::Lsp && self.incomplete_modules.contains(&name) {
                // Since the lsp can have valid but incorrect intermediate code states between
                // successful compilations, we need to invalidate the cache even if the fingerprint matches
                tracing::debug!(?name, "cache_stale for lsp");
                return Ok(Input::New(source_module));
            }
        }

        Ok(Input::Cached(self.cached(file, meta)))
    }

    /// Read the cache metadata file from the artefact directory for the given
    /// source file. If the file does not exist, return `None`.
    fn read_cache_metadata(&self, source_file: &GleamFile) -> Result<Option<CacheMetadata>> {
        let meta_path = source_file.cache_files(&self.artefact_directory).meta_path;

        if !self.io.is_file(&meta_path) {
            return Ok(None);
        }

        let binary = self.io.read_bytes(&meta_path)?;
        // A metadata file that fails to decode is surfaced as an error rather
        // than being treated as a cache miss.
        let cache_metadata = CacheMetadata::from_binary(&binary).map_err(|e| -> Error {
            Error::FileIo {
                action: FileIoAction::Parse,
                kind: FileKind::File,
                path: meta_path,
                err: Some(e),
            }
        })?;
        Ok(Some(cache_metadata))
    }

    /// Read and parse this module's source code from disc.
    fn read_source(
        &self,
        path: Utf8PathBuf,
        name: EcoString,
        mtime: SystemTime,
    ) -> Result<UncompiledModule, Error> {
        read_source(
            self.io.clone(),
            self.target,
            self.origin,
            path,
            name,
            self.package_name.clone(),
            mtime,
            self.warnings.clone(),
        )
    }

    /// Build the record describing a module that can be used from cache
    /// without recompilation.
    fn cached(&self, file: GleamFile, meta: CacheMetadata) -> CachedModule {
        CachedModule {
            dependencies: meta.dependencies,
            source_path: file.path,
            origin: self.origin,
            name: file.module_name,
            line_numbers: meta.line_numbers,
        }
    }
}
pub(crate) fn read_source<IO>(
io: IO,
target: Target,
origin: Origin,
path: Utf8PathBuf,
name: EcoString,
package_name: EcoString,
mtime: SystemTime,
emitter: WarningEmitter,
) -> Result<UncompiledModule>
where
IO: FileSystemReader + FileSystemWriter + CommandExecutor + Clone,
{
let code: EcoString = io.read(&path)?.into();
let parsed = crate::parse::parse_module(path.clone(), &code, &emitter).map_err(|error| {
Error::Parse {
path: path.clone(),
src: code.clone(),
error: Box::new(error),
}
})?;
let mut ast = parsed.module;
let extra = parsed.extra;
let dependencies = ast.dependencies(target);
ast.name = name.clone();
let module = UncompiledModule {
package: package_name,
dependencies,
origin,
extra,
mtime,
path,
name,
code,
ast,
};
Ok(module)
}
| rust | Apache-2.0 | f424547f02e621f1c5f28749786e05eda7feb098 | 2026-01-04T15:40:22.554517Z | false |
gleam-lang/gleam | https://github.com/gleam-lang/gleam/blob/f424547f02e621f1c5f28749786e05eda7feb098/compiler-core/src/build/native_file_copier.rs | compiler-core/src/build/native_file_copier.rs | #[cfg(test)]
mod tests;
use std::collections::{HashMap, HashSet};
use camino::{Utf8Path, Utf8PathBuf};
use ecow::{EcoString, eco_format};
use crate::{
Error, Result,
io::{DirWalker, FileSystemReader, FileSystemWriter},
paths::ProjectPaths,
};
use super::package_compiler::CheckModuleConflicts;
/// The result of copying a package's native files into the build directory.
#[derive(Debug, Clone, PartialEq, Eq)]
pub(crate) struct CopiedNativeFiles {
    // True if any Elixir (`.ex`) files were copied, in which case the Elixir
    // compiler will be needed.
    pub any_elixir: bool,
    // Relative paths of the Erlang and Elixir files that need compiling.
    pub to_compile: Vec<Utf8PathBuf>,
}
/// Copies native source files (JavaScript, Erlang, Elixir, etc.) from a
/// package's source directories into its build output directory, checking
/// for duplicate and conflicting module names along the way.
pub(crate) struct NativeFileCopier<'a, IO> {
    io: IO,
    paths: ProjectPaths,
    destination_dir: &'a Utf8Path,
    // Native files copied so far, used to detect duplicates.
    seen_native_files: HashSet<Utf8PathBuf>,
    // Module names claimed so far, used to detect clashes between native
    // files and Gleam modules.
    seen_modules: HashMap<EcoString, Utf8PathBuf>,
    // Erlang/Elixir files that will need compiling after copying.
    to_compile: Vec<Utf8PathBuf>,
    elixir_files_copied: bool,
    check_module_conflicts: CheckModuleConflicts,
}
impl<'a, IO> NativeFileCopier<'a, IO>
where
IO: FileSystemReader + FileSystemWriter + Clone,
{
/// Create a copier that copies native files from the package rooted at
/// `root` into the `out` build directory.
pub(crate) fn new(
    io: IO,
    root: &'a Utf8Path,
    out: &'a Utf8Path,
    check_module_conflicts: CheckModuleConflicts,
) -> Self {
    Self {
        io,
        paths: ProjectPaths::new(root.into()),
        destination_dir: out,
        to_compile: Vec::new(),
        seen_native_files: HashSet::new(),
        seen_modules: HashMap::new(),
        elixir_files_copied: false,
        check_module_conflicts,
    }
}
/// Copy native files from the given directory to the build directory.
///
/// Errors if any duplicate files are found.
///
/// Returns a list of files that need to be compiled (Elixir and Erlang).
///
pub fn run(mut self) -> Result<CopiedNativeFiles> {
    // `destination_dir` is already a `&Utf8Path`, so it is passed directly
    // rather than re-borrowed (avoids a clippy `needless_borrow` `&&` here).
    self.io.mkdir(self.destination_dir)?;

    // `src` always exists; `test` and `dev` are optional.
    let src = self.paths.src_directory();
    self.copy_files(&src)?;

    let test = self.paths.test_directory();
    if self.io.is_directory(&test) {
        self.copy_files(&test)?;
    }

    let dev = self.paths.dev_directory();
    if self.io.is_directory(&dev) {
        self.copy_files(&dev)?;
    }

    // Sort for deterministic output
    self.to_compile.sort_unstable();

    Ok(CopiedNativeFiles {
        to_compile: self.to_compile,
        any_elixir: self.elixir_files_copied,
    })
}
/// Walk `src_root` recursively, copying every file found into the build
/// directory.
fn copy_files(&mut self, src_root: &Utf8Path) -> Result<()> {
    let mut dir_walker = DirWalker::new(src_root.to_path_buf());
    while let Some(path) = dir_walker.next_file(&self.io)? {
        // `src_root` is already a reference; passing it directly avoids the
        // needless `&&Utf8Path` re-borrow.
        self.copy(path, src_root)?;
    }
    Ok(())
}
/// Copy a single file into the build directory, performing duplicate and
/// module-conflict checks and recording Erlang/Elixir sources that will
/// need compiling.
fn copy(&mut self, file: Utf8PathBuf, src_root: &Utf8Path) -> Result<()> {
    let extension = file.extension().unwrap_or_default();

    let relative_path = file
        .strip_prefix(src_root)
        .expect("copy_native_files strip prefix")
        .to_path_buf();

    // No need to run duplicate native file checks for .gleam files, but we
    // still need to check for conflicting `.gleam` and `.mjs` files, so we
    // add a special case for `.gleam`.
    if extension == "gleam" {
        self.check_for_conflicting_javascript_modules(&relative_path)?;
        self.check_for_conflicting_erlang_modules(&relative_path)?;
        return Ok(());
    }

    // Skip unknown file formats that are not supported native files
    if !crate::io::is_native_file_extension(extension) {
        return Ok(());
    }

    let destination = self.destination_dir.join(&relative_path);

    // Check that this native file was not already copied
    self.check_for_duplicate(&relative_path)?;

    // Check for JavaScript modules conflicting between each other within
    // the same relative path. We need to do this as '.gleam' files can
    // also cause a conflict, despite not being native files, as they are
    // compiled to `.mjs`.
    self.check_for_conflicting_javascript_modules(&relative_path)?;

    // Check for Erlang modules conflicting between each other anywhere in
    // the tree.
    self.check_for_conflicting_erlang_modules(&relative_path)?;

    // If the source file's mtime is older than the destination file's mtime
    // then it has not changed and as such does not need to be copied.
    //
    // This makes no practical difference for JavaScript etc files, but for
    // Erlang and Elixir files it mean we can skip compiling them.
    if self.io.is_file(&destination)
        && self.io.modification_time(&file)? <= self.io.modification_time(&destination)?
    {
        // Message de-duplicated: was "skipping_unchanged_native_file_unchanged".
        tracing::debug!(?file, "skipping_unchanged_native_file");
        return Ok(());
    }

    tracing::debug!(?file, "copying_native_file");

    // Ensure destination exists (subdir might not exist yet in the output)
    if let Some(parent) = destination.parent() {
        self.io.mkdir(parent)?;
    }
    self.io.copy(&file, &destination)?;
    self.elixir_files_copied = self.elixir_files_copied || extension == "ex";

    // BEAM native modules need to be compiled. `relative_path` is not used
    // again, so it is moved rather than cloned; `Vec::push` returns `()` so
    // there is nothing to bind with `_ =`.
    if matches!(extension, "erl" | "ex") {
        self.to_compile.push(relative_path);
    }

    Ok(())
}
fn check_for_duplicate(&mut self, relative_path: &Utf8PathBuf) -> Result<(), Error> {
if !self.seen_native_files.insert(relative_path.clone()) {
return Err(Error::DuplicateSourceFile {
file: relative_path.to_string(),
});
}
Ok(())
}
/// Gleam files are compiled to `.mjs` files, which must not conflict with
/// an FFI `.mjs` file with the same name, so we check for this case here.
fn check_for_conflicting_javascript_modules(
    &mut self,
    relative_path: &Utf8PathBuf,
) -> Result<(), Error> {
    // Work out which `.mjs` path this file will occupy; files with any other
    // extension cannot conflict and are ignored.
    let mjs_path = match relative_path.extension() {
        Some("gleam") => eco_format!("{}", relative_path.with_extension("mjs")),
        Some("mjs") => eco_format!("{}", relative_path),
        _ => return Ok(()),
    };

    // Insert the full relative `.mjs` path in `seen_modules` as there is
    // no conflict if two `.mjs` files have the same name but are in
    // different subpaths, unlike Erlang files.
    let existing = self
        .seen_modules
        .insert(mjs_path.clone(), relative_path.clone());

    // If there was no already existing one then there's no problem.
    let Some(existing) = existing else {
        return Ok(());
    };

    // A clash involving a `.gleam` file gets a dedicated error naming both
    // the Gleam module and the native file.
    let existing_is_gleam = existing.extension() == Some("gleam");
    if existing_is_gleam || relative_path.extension() == Some("gleam") {
        let (gleam_file, native_file) = if existing_is_gleam {
            (&existing, relative_path)
        } else {
            (relative_path, &existing)
        };
        return Err(Error::ClashingGleamModuleAndNativeFileName {
            module: eco_format!("{}", gleam_file.with_extension("")),
            gleam_file: gleam_file.clone(),
            native_file: native_file.clone(),
        });
    }

    // The only way for two `.mjs` files to clash is by having
    // the exact same path.
    assert_eq!(&existing, relative_path);
    return Err(Error::DuplicateSourceFile {
        file: existing.to_string(),
    });
}
/// Erlang module files cannot have the same name regardless of their
/// relative positions within the project. Ensure we raise an error if the
/// user attempts to create `.erl` files with the same name.
fn check_for_conflicting_erlang_modules(
    &mut self,
    relative_path: &Utf8PathBuf,
) -> Result<(), Error> {
    // Work out the Erlang module name this file will produce. Gleam files
    // only take part when conflict checking is enabled for this package.
    let erlang_module_name = match relative_path.extension() {
        Some("erl") => {
            eco_format!("{}", relative_path.file_name().expect("path has file name"))
        }
        Some("gleam") if self.check_module_conflicts.should_check() => relative_path
            .with_extension("erl")
            .as_str()
            .replace("/", "@")
            .into(),
        _ => return Ok(()),
    };

    // Insert just the `.erl` module filename in `seen_modules` instead of
    // its full relative path, because `.erl` files with the same name
    // cause a conflict when targetting Erlang regardless of subpath.
    if let Some(existing) = self
        .seen_modules
        .insert(erlang_module_name, relative_path.clone())
    {
        // A clash involving a `.gleam` file gets a dedicated error; two
        // native files get a duplicate-Erlang-module error.
        let existing_is_gleam = existing.extension() == Some("gleam");
        if existing_is_gleam || relative_path.extension() == Some("gleam") {
            let (gleam_file, native_file) = if existing_is_gleam {
                (&existing, relative_path)
            } else {
                (relative_path, &existing)
            };
            return Err(Error::ClashingGleamModuleAndNativeFileName {
                module: eco_format!("{}", gleam_file.with_extension("")),
                gleam_file: gleam_file.clone(),
                native_file: native_file.clone(),
            });
        }

        return Err(Error::DuplicateNativeErlangModule {
            module: eco_format!("{}", relative_path.file_stem().expect("path has file stem")),
            first: existing,
            second: relative_path.clone(),
        });
    }
    Ok(())
}
}
| rust | Apache-2.0 | f424547f02e621f1c5f28749786e05eda7feb098 | 2026-01-04T15:40:22.554517Z | false |
gleam-lang/gleam | https://github.com/gleam-lang/gleam/blob/f424547f02e621f1c5f28749786e05eda7feb098/compiler-core/src/build/tests.rs | compiler-core/src/build/tests.rs | use crate::{Error, manifest::ManifestPackage};
use super::project_compiler::{BuildTool, usable_build_tools};
// An unrecognised build tool yields an `UnsupportedBuildTool` error.
#[test]
fn usable_build_tool_unknown() {
    assert_eq!(
        usable_build_tools(&ManifestPackage::default().with_build_tools(&["unknown"])),
        Err(Error::UnsupportedBuildTool {
            package: "".into(),
            build_tools: vec!["unknown".into()],
        })
    )
}
// A package declaring no build tools at all cannot be built.
#[test]
fn usable_build_tool_none() {
    assert_eq!(
        usable_build_tools(&ManifestPackage::default()),
        Err(Error::UnsupportedBuildTool {
            package: "".into(),
            build_tools: vec![],
        })
    )
}
// Only Mix declared: build with Mix alone.
#[test]
fn usable_build_tool_only_mix() {
    assert_eq!(
        usable_build_tools(&ManifestPackage::default().with_build_tools(&["mix"])),
        Ok(vec![BuildTool::Mix])
    )
}
// Only rebar3 declared: build with rebar3 alone.
#[test]
fn usable_build_tool_only_rebar3() {
    assert_eq!(
        usable_build_tools(&ManifestPackage::default().with_build_tools(&["rebar3"])),
        Ok(vec![BuildTool::Rebar3])
    )
}
// A Gleam package is always built with the Gleam compiler only.
#[test]
fn usable_build_tool_only_gleam() {
    assert_eq!(
        usable_build_tools(&ManifestPackage::default().with_build_tools(&["gleam"])),
        Ok(vec![BuildTool::Gleam])
    )
}
// When both Mix and rebar3 are declared, Mix is tried first.
#[test]
fn usable_build_tool_mix_then_rebar3() {
    assert_eq!(
        usable_build_tools(&ManifestPackage::default().with_build_tools(&["mix", "rebar3"])),
        Ok(vec![BuildTool::Mix, BuildTool::Rebar3])
    )
}
| rust | Apache-2.0 | f424547f02e621f1c5f28749786e05eda7feb098 | 2026-01-04T15:40:22.554517Z | false |
gleam-lang/gleam | https://github.com/gleam-lang/gleam/blob/f424547f02e621f1c5f28749786e05eda7feb098/compiler-core/src/build/telemetry.rs | compiler-core/src/build/telemetry.rs | use std::{
fmt::Debug,
time::{Duration, Instant},
};
use crate::{
Warning,
manifest::{PackageChanges, Resolved},
};
/// Hooks through which the build reports progress events (dependency
/// resolution, downloads, compilation) to the user interface.
///
/// Implementations decide how — or whether — each event is surfaced.
pub trait Telemetry: Debug {
    fn waiting_for_build_directory_lock(&self);
    fn running(&self, name: &str);
    fn resolving_package_versions(&self);
    fn resolved_package_versions(&self, changes: &PackageChanges);
    fn downloading_package(&self, name: &str);
    fn packages_downloaded(&self, start: Instant, count: usize);
    fn compiled_package(&self, duration: Duration);
    fn compiling_package(&self, name: &str);
    fn checked_package(&self, duration: Duration);
    fn checking_package(&self, name: &str);
}
/// A `Telemetry` implementation that discards every event.
#[derive(Debug, Clone, Copy)]
pub struct NullTelemetry;
// Every event is ignored. All parameters are `_`-prefixed — the original
// `running(&self, name: &str)` left `name` unprefixed, triggering the rustc
// `unused_variables` warning, unlike its sibling methods.
impl Telemetry for NullTelemetry {
    fn waiting_for_build_directory_lock(&self) {}
    fn running(&self, _name: &str) {}
    fn resolving_package_versions(&self) {}
    fn downloading_package(&self, _name: &str) {}
    fn compiled_package(&self, _duration: Duration) {}
    fn compiling_package(&self, _name: &str) {}
    fn checked_package(&self, _duration: Duration) {}
    fn checking_package(&self, _name: &str) {}
    fn packages_downloaded(&self, _start: Instant, _count: usize) {}
    fn resolved_package_versions(&self, _changes: &PackageChanges) {}
}
| rust | Apache-2.0 | f424547f02e621f1c5f28749786e05eda7feb098 | 2026-01-04T15:40:22.554517Z | false |
gleam-lang/gleam | https://github.com/gleam-lang/gleam/blob/f424547f02e621f1c5f28749786e05eda7feb098/compiler-core/src/build/package_loader.rs | compiler-core/src/build/package_loader.rs | #[cfg(test)]
mod tests;
use std::{
collections::{HashMap, HashSet},
time::{Duration, SystemTime},
};
use camino::{Utf8Path, Utf8PathBuf};
// TODO: emit warnings for cached modules even if they are not compiled again.
use ecow::EcoString;
use itertools::Itertools;
use vec1::Vec1;
use crate::{
Error, Result,
ast::SrcSpan,
build::{Module, Origin, module_loader::ModuleLoader},
config::PackageConfig,
dep_tree,
error::{FileIoAction, FileKind, ImportCycleLocationDetails},
io::{self, CommandExecutor, FileSystemReader, FileSystemWriter, files_with_extension},
metadata,
paths::ProjectPaths,
type_,
uid::UniqueIdGenerator,
warning::WarningEmitter,
};
use super::{
Mode, Target,
module_loader::read_source,
package_compiler::{
CacheMetadata, CachedModule, CachedWarnings, Input, Loaded, UncompiledModule,
},
};
/// Whether this compiler run must produce compiled target code, or only needs
/// type checking and metadata.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum CodegenRequired {
    Yes,
    No,
}

impl CodegenRequired {
    /// Returns `true` if the codegen required is [`Yes`].
    ///
    /// [`Yes`]: CodegenRequired::Yes
    #[must_use]
    pub fn is_required(&self) -> bool {
        match self {
            Self::Yes => true,
            Self::No => false,
        }
    }
}
/// Loads all the modules of a single package, using the per-module caches in
/// the artefact directory where possible and reading source otherwise.
#[derive(Debug)]
pub struct PackageLoader<'a, IO> {
    io: IO,
    ids: UniqueIdGenerator,
    mode: Mode,
    paths: ProjectPaths,
    warnings: &'a WarningEmitter,
    // Whether compiled target code must be produced this run; caches written
    // without codegen cannot be used when it is required.
    codegen: CodegenRequired,
    artefact_directory: &'a Utf8Path,
    package_name: &'a EcoString,
    target: Target,
    // Modules known to have changed; modules depending on them are recompiled.
    stale_modules: &'a mut StaleTracker,
    // Module names already defined, shared with `Inputs` to detect clashes
    // when inserting — TODO confirm against `Inputs::insert`.
    already_defined_modules: &'a mut im::HashMap<EcoString, Utf8PathBuf>,
    // Modules with partial compilation since the last successful compile.
    incomplete_modules: &'a HashSet<EcoString>,
    cached_warnings: CachedWarnings,
}
impl<'a, IO> PackageLoader<'a, IO>
where
IO: FileSystemWriter + FileSystemReader + CommandExecutor + Clone,
{
/// Construct a loader for one package. Mutable references (stale modules,
/// already defined modules) are borrowed from the surrounding compilation
/// so their state carries across packages.
pub(crate) fn new(
    io: IO,
    ids: UniqueIdGenerator,
    mode: Mode,
    root: &'a Utf8Path,
    cached_warnings: CachedWarnings,
    warnings: &'a WarningEmitter,
    codegen: CodegenRequired,
    artefact_directory: &'a Utf8Path,
    target: Target,
    package_name: &'a EcoString,
    stale_modules: &'a mut StaleTracker,
    already_defined_modules: &'a mut im::HashMap<EcoString, Utf8PathBuf>,
    incomplete_modules: &'a HashSet<EcoString>,
) -> Self {
    Self {
        io,
        ids,
        mode,
        paths: ProjectPaths::new(root.into()),
        warnings,
        codegen,
        target,
        package_name,
        cached_warnings,
        artefact_directory,
        stale_modules,
        already_defined_modules,
        incomplete_modules,
    }
}
pub(crate) fn run(mut self) -> Result<Loaded> {
// First read the source files. This will use the `ModuleLoader`, which
// will check the mtimes and hashes of sources and caches to determine
// which should be loaded.
let mut inputs = self.read_sources_and_caches()?;
// Check for any removed modules, by looking at cache files that don't exist in inputs.
// Delete the cache files for removed modules and mark them as stale
// to trigger refreshing dependent modules.
for module in CacheFiles::modules_with_meta_files(&self.io, &self.artefact_directory) {
if (!inputs.contains_key(&module)) {
tracing::debug!(%module, "module_removed");
CacheFiles::new(&self.artefact_directory, &module).delete(&self.io)?;
self.stale_modules.add(module);
}
}
// Determine order in which modules are to be processed
let mut dep_location_map = HashMap::new();
let deps = inputs
.values()
.map(|m| {
let name = m.name().clone();
let _ = dep_location_map.insert(name.clone(), m);
(name, m.dependencies())
})
// Making sure that the module order is deterministic, to prevent different
// compilations of the same project compiling in different orders. This could impact
// any bugged outcomes, though not any where the compiler is working correctly, so it's
// mostly to aid debugging.
.sorted_by(|(a, _), (b, _)| a.cmp(b))
.collect();
let sequence = dep_tree::toposort_deps(deps)
.map_err(|e| self.convert_deps_tree_error(e, dep_location_map))?;
// Now that we have loaded sources and caches we check to see if any of
// the caches need to be invalidated because their dependencies have
// changed.
let mut loaded = Loaded::default();
for name in sequence {
let input = inputs
.remove(&name)
.expect("Getting parsed module for name");
match input {
// A new uncached module is to be compiled
Input::New(module) => {
tracing::debug!(module = %module.name, "new_module_to_be_compiled");
self.stale_modules.add(module.name.clone());
loaded.to_compile.push(module);
}
// A cached module with dependencies that are stale must be
// recompiled as the changes in the dependencies may have affect
// the output, making the cache invalid.
Input::Cached(info) if self.stale_modules.includes_any(&info.dependencies) => {
tracing::debug!(module = %info.name, "stale_module_to_be_compiled");
self.stale_modules.add(info.name.clone());
let module = self.load_stale_module(info)?;
loaded.to_compile.push(module);
}
// A cached module with no stale dependencies can be used as-is
// and does not need to be recompiled.
Input::Cached(info) => {
tracing::debug!(module = %info.name, "module_to_load_from_cache");
let module = self.load_cached_module(info)?;
loaded.cached.push(module);
}
}
}
Ok(loaded)
}
/// Load a previously-compiled module's interface from the artefact cache.
///
/// The main metadata is decoded first, followed by the optional
/// inline-functions and cached-warnings sidecar files if they exist.
fn load_cached_module(&self, info: CachedModule) -> Result<type_::ModuleInterface, Error> {
    let cache_files = CacheFiles::new(&self.artefact_directory, &info.name);
    let bytes = self.io.read_bytes(&cache_files.cache_path)?;
    let mut module = metadata::ModuleDecoder::new(self.ids.clone()).read(bytes.as_slice())?;

    if self.io.exists(&cache_files.inline_path) {
        let bytes = self.io.read_bytes(&cache_files.inline_path)?;
        // A sidecar file that fails to decode is a hard parse error rather
        // than a silent cache miss.
        module.inline_functions =
            match bincode::serde::decode_from_slice(&bytes, bincode::config::legacy()) {
                Ok((data, _)) => data,
                Err(e) => {
                    return Err(Error::FileIo {
                        kind: FileKind::File,
                        action: FileIoAction::Parse,
                        path: cache_files.inline_path,
                        err: Some(e.to_string()),
                    });
                }
            };
    }

    // Load warnings
    if self.cached_warnings.should_use() {
        let path = cache_files.warnings_path;
        if self.io.exists(&path) {
            let bytes = self.io.read_bytes(&path)?;
            module.warnings =
                match bincode::serde::decode_from_slice(&bytes, bincode::config::legacy()) {
                    Ok((data, _)) => data,
                    Err(e) => {
                        return Err(Error::FileIo {
                            kind: FileKind::File,
                            action: FileIoAction::Parse,
                            path,
                            err: Some(e.to_string()),
                        });
                    }
                };
        }
    }

    Ok(module)
}
/// Read the package's source trees, producing for each module either fresh
/// source to compile or a usable cache entry.
fn read_sources_and_caches(&self) -> Result<HashMap<EcoString, Input>> {
    let span = tracing::info_span!("load");
    let _enter = span.enter();

    let mut inputs = Inputs::new(self.already_defined_modules);

    let src = self.paths.src_directory();
    let mut loader = ModuleLoader {
        io: self.io.clone(),
        warnings: self.warnings,
        mode: self.mode,
        target: self.target,
        codegen: self.codegen,
        package_name: self.package_name,
        artefact_directory: self.artefact_directory,
        origin: Origin::Src,
        incomplete_modules: self.incomplete_modules,
    };

    // Src
    for file in GleamFile::iterate_files_in_directory(&self.io, &src) {
        match file {
            Ok(file) => {
                let input = loader.load(file)?;
                inputs.insert(input)?;
            }
            // Problems with individual files are surfaced as warnings
            // rather than aborting the whole load.
            Err(warning) => self.warnings.emit(warning),
        }
    }

    // Test and dev
    if self.mode.includes_dev_code() {
        let test = self.paths.test_directory();
        // The same loader is reused; only the origin changes per tree.
        loader.origin = Origin::Test;

        for file in GleamFile::iterate_files_in_directory(&self.io, &test) {
            match file {
                Ok(file) => {
                    let input = loader.load(file)?;
                    inputs.insert(input)?;
                }
                Err(warning) => self.warnings.emit(warning),
            }
        }

        let dev = self.paths.dev_directory();
        loader.origin = Origin::Dev;

        for file in GleamFile::iterate_files_in_directory(&self.io, &dev) {
            match file {
                Ok(file) => {
                    let input = loader.load(file)?;
                    inputs.insert(input)?;
                }
                Err(warning) => self.warnings.emit(warning),
            }
        }
    }

    // If we are compiling for Erlang then modules all live in a single
    // namespace. If we were to name a module the same as a module that
    // is included in the standard Erlang distribution then this new
    // Gleam module would overwrite the existing Erlang one, likely
    // resulting in cryptic errors.
    // This would most commonly happen for modules like "user" and
    // "code". Emit an error so this never happens.
    if self.target.is_erlang() {
        for input in inputs.collection.values() {
            ensure_gleam_module_does_not_overwrite_standard_erlang_module(&input)?;
        }
    }

    Ok(inputs.collection)
}
/// Re-read from source a cached module whose dependencies are stale, so
/// that it can be recompiled.
fn load_stale_module(&self, cached: CachedModule) -> Result<UncompiledModule> {
    let mtime = self.io.modification_time(&cached.source_path)?;
    // We need to delete any existing cache files for this module.
    // While we figured it out this time because the module has stale dependencies,
    // next time the dependencies might no longer be stale, but we still need to be able to tell
    // that this module needs to be recompiled until it successfully compiles at least once.
    // This can happen if the stale dependency includes breaking changes.
    CacheFiles::new(&self.artefact_directory, &cached.name).delete(&self.io)?;
    read_source(
        self.io.clone(),
        self.target,
        cached.origin,
        cached.source_path,
        cached.name,
        self.package_name.clone(),
        mtime,
        self.warnings.clone(),
    )
}
/// Turn an import cycle detected during toposorting into a user-facing
/// error, locating in each involved module the import statement that forms
/// part of the cycle.
fn convert_deps_tree_error(
    &self,
    e: dep_tree::Error,
    dep_location_map: HashMap<EcoString, &Input>,
) -> Error {
    match e {
        dep_tree::Error::Cycle(modules) => {
            let modules = modules
                .iter()
                .enumerate()
                .map(|(i, module)| {
                    // cycles are in order of reference so get next in list or loop back to first
                    let index_of_imported = if i == 0 { modules.len() - 1 } else { i - 1 };
                    let imported_module = modules
                        .get(index_of_imported)
                        .expect("importing module must exist");
                    let input = dep_location_map.get(module).expect("dependency must exist");
                    let location = match input {
                        // Fresh modules hold their source and dependency
                        // spans in memory already.
                        Input::New(module) => {
                            let (_, location) = module
                                .dependencies
                                .iter()
                                .find(|d| &d.0 == imported_module)
                                .expect("import must exist for there to be a cycle");
                            ImportCycleLocationDetails {
                                location: *location,
                                path: module.path.clone(),
                                src: module.code.clone(),
                            }
                        }
                        // Cached modules don't carry their source code, so
                        // it is read back from disc for the error report.
                        Input::Cached(cached_module) => {
                            let (_, location) = cached_module
                                .dependencies
                                .iter()
                                .find(|d| &d.0 == imported_module)
                                .expect("import must exist for there to be a cycle");
                            let src = self
                                .io
                                .read(&cached_module.source_path)
                                .expect("failed to read source")
                                .into();
                            ImportCycleLocationDetails {
                                location: *location,
                                path: cached_module.source_path.clone(),
                                src,
                            }
                        }
                    };
                    (module.clone(), location)
                })
                .collect_vec();
            Error::ImportCycle {
                modules: Vec1::try_from(modules)
                    .expect("at least 1 module must exist in cycle"),
            }
        }
    }
}
}
fn ensure_gleam_module_does_not_overwrite_standard_erlang_module(input: &Input) -> Result<()> {
// We only need to check uncached modules as it's not possible for these
// to have compiled successfully.
let Input::New(input) = input else {
return Ok(());
};
// These names were got with this Erlang
//
// ```erl
// file:write_file("names.txt", lists:join("\n",lists:map(fun(T) -> erlang:element(1, T) end, code:all_available()))).
// ```
//
match input.name.as_str() {
"alarm_handler"
| "application"
| "application_controller"
| "application_master"
| "application_starter"
| "appmon_info"
| "argparse"
| "array"
| "asn1_db"
| "asn1ct"
| "asn1ct_check"
| "asn1ct_constructed_ber_bin_v2"
| "asn1ct_constructed_per"
| "asn1ct_eval_ext"
| "asn1ct_func"
| "asn1ct_gen"
| "asn1ct_gen_ber_bin_v2"
| "asn1ct_gen_check"
| "asn1ct_gen_jer"
| "asn1ct_gen_per"
| "asn1ct_imm"
| "asn1ct_name"
| "asn1ct_parser2"
| "asn1ct_pretty_format"
| "asn1ct_rtt"
| "asn1ct_table"
| "asn1ct_tok"
| "asn1ct_value"
| "asn1rt_nif"
| "atomics"
| "auth"
| "base64"
| "beam_a"
| "beam_asm"
| "beam_block"
| "beam_bounds"
| "beam_call_types"
| "beam_clean"
| "beam_dict"
| "beam_digraph"
| "beam_disasm"
| "beam_flatten"
| "beam_jump"
| "beam_kernel_to_ssa"
| "beam_lib"
| "beam_listing"
| "beam_opcodes"
| "beam_ssa"
| "beam_ssa_alias"
| "beam_ssa_bc_size"
| "beam_ssa_bool"
| "beam_ssa_bsm"
| "beam_ssa_check"
| "beam_ssa_codegen"
| "beam_ssa_dead"
| "beam_ssa_lint"
| "beam_ssa_opt"
| "beam_ssa_pp"
| "beam_ssa_pre_codegen"
| "beam_ssa_private_append"
| "beam_ssa_recv"
| "beam_ssa_share"
| "beam_ssa_throw"
| "beam_ssa_type"
| "beam_trim"
| "beam_types"
| "beam_utils"
| "beam_validator"
| "beam_z"
| "binary"
| "c"
| "calendar"
| "cdv_atom_cb"
| "cdv_bin_cb"
| "cdv_detail_wx"
| "cdv_dist_cb"
| "cdv_ets_cb"
| "cdv_fun_cb"
| "cdv_gen_cb"
| "cdv_html_wx"
| "cdv_info_wx"
| "cdv_int_tab_cb"
| "cdv_mem_cb"
| "cdv_mod_cb"
| "cdv_multi_wx"
| "cdv_persistent_cb"
| "cdv_port_cb"
| "cdv_proc_cb"
| "cdv_sched_cb"
| "cdv_table_wx"
| "cdv_term_cb"
| "cdv_timer_cb"
| "cdv_virtual_list_wx"
| "cdv_wx"
| "cerl"
| "cerl_clauses"
| "cerl_inline"
| "cerl_prettypr"
| "cerl_trees"
| "code"
| "code_server"
| "compile"
| "core_lib"
| "core_lint"
| "core_parse"
| "core_pp"
| "core_scan"
| "counters"
| "cover"
| "cprof"
| "cpu_sup"
| "crashdump_viewer"
| "crypto"
| "crypto_ec_curves"
| "ct"
| "ct_config"
| "ct_config_plain"
| "ct_config_xml"
| "ct_conn_log_h"
| "ct_cover"
| "ct_default_gl"
| "ct_event"
| "ct_framework"
| "ct_ftp"
| "ct_gen_conn"
| "ct_groups"
| "ct_hooks"
| "ct_hooks_lock"
| "ct_logs"
| "ct_make"
| "ct_master"
| "ct_master_event"
| "ct_master_logs"
| "ct_master_status"
| "ct_netconfc"
| "ct_property_test"
| "ct_release_test"
| "ct_repeat"
| "ct_rpc"
| "ct_run"
| "ct_slave"
| "ct_snmp"
| "ct_ssh"
| "ct_suite"
| "ct_telnet"
| "ct_telnet_client"
| "ct_testspec"
| "ct_util"
| "cth_conn_log"
| "cth_log_redirect"
| "cth_surefire"
| "dbg"
| "dbg_debugged"
| "dbg_icmd"
| "dbg_idb"
| "dbg_ieval"
| "dbg_iload"
| "dbg_iserver"
| "dbg_istk"
| "dbg_wx_break"
| "dbg_wx_break_win"
| "dbg_wx_code"
| "dbg_wx_filedialog_win"
| "dbg_wx_interpret"
| "dbg_wx_mon"
| "dbg_wx_mon_win"
| "dbg_wx_settings"
| "dbg_wx_src_view"
| "dbg_wx_trace"
| "dbg_wx_trace_win"
| "dbg_wx_view"
| "dbg_wx_win"
| "dbg_wx_winman"
| "debugger"
| "dets"
| "dets_server"
| "dets_sup"
| "dets_utils"
| "dets_v9"
| "dialyzer"
| "dialyzer_analysis_callgraph"
| "dialyzer_behaviours"
| "dialyzer_callgraph"
| "dialyzer_cl"
| "dialyzer_cl_parse"
| "dialyzer_clean_core"
| "dialyzer_codeserver"
| "dialyzer_contracts"
| "dialyzer_coordinator"
| "dialyzer_cplt"
| "dialyzer_dataflow"
| "dialyzer_dep"
| "dialyzer_dot"
| "dialyzer_explanation"
| "dialyzer_gui_wx"
| "dialyzer_incremental"
| "dialyzer_iplt"
| "dialyzer_options"
| "dialyzer_plt"
| "dialyzer_succ_typings"
| "dialyzer_timing"
| "dialyzer_typegraph"
| "dialyzer_typesig"
| "dialyzer_utils"
| "dialyzer_worker"
| "diameter"
| "diameter_app"
| "diameter_callback"
| "diameter_capx"
| "diameter_codec"
| "diameter_codegen"
| "diameter_config"
| "diameter_config_sup"
| "diameter_dbg"
| "diameter_dict_parser"
| "diameter_dict_scanner"
| "diameter_dict_util"
| "diameter_dist"
| "diameter_etcp"
| "diameter_etcp_sup"
| "diameter_exprecs"
| "diameter_gen"
| "diameter_gen_acct_rfc6733"
| "diameter_gen_base_accounting"
| "diameter_gen_base_rfc3588"
| "diameter_gen_base_rfc6733"
| "diameter_gen_doic_rfc7683"
| "diameter_gen_relay"
| "diameter_info"
| "diameter_lib"
| "diameter_make"
| "diameter_misc_sup"
| "diameter_peer"
| "diameter_peer_fsm"
| "diameter_peer_fsm_sup"
| "diameter_reg"
| "diameter_sctp"
| "diameter_sctp_sup"
| "diameter_service"
| "diameter_service_sup"
| "diameter_session"
| "diameter_stats"
| "diameter_sup"
| "diameter_sync"
| "diameter_tcp"
| "diameter_tcp_sup"
| "diameter_traffic"
| "diameter_transport"
| "diameter_transport_sup"
| "diameter_types"
| "diameter_watchdog"
| "diameter_watchdog_sup"
| "dict"
| "digraph"
| "digraph_utils"
| "disk_log"
| "disk_log_1"
| "disk_log_server"
| "disk_log_sup"
| "disksup"
| "dist_ac"
| "dist_util"
| "docgen_edoc_xml_cb"
| "docgen_otp_specs"
| "docgen_xmerl_xml_cb"
| "docgen_xml_to_chunk"
| "dtls_connection"
| "dtls_connection_sup"
| "dtls_gen_connection"
| "dtls_handshake"
| "dtls_listener_sup"
| "dtls_packet_demux"
| "dtls_record"
| "dtls_server_session_cache_sup"
| "dtls_server_sup"
| "dtls_socket"
| "dtls_sup"
| "dtls_v1"
| "dyntrace"
| "edlin"
| "edlin_context"
| "edlin_expand"
| "edlin_key"
| "edlin_type_suggestion"
| "edoc"
| "edoc_cli"
| "edoc_data"
| "edoc_doclet"
| "edoc_doclet_chunks"
| "edoc_extract"
| "edoc_layout"
| "edoc_layout_chunks"
| "edoc_lib"
| "edoc_macros"
| "edoc_parser"
| "edoc_refs"
| "edoc_report"
| "edoc_run"
| "edoc_scanner"
| "edoc_specs"
| "edoc_tags"
| "edoc_types"
| "edoc_wiki"
| "eldap"
| "epp"
| "epp_dodger"
| "eprof"
| "erl2html2"
| "erl_abstract_code"
| "erl_anno"
| "erl_bif_types"
| "erl_bifs"
| "erl_bits"
| "erl_boot_server"
| "erl_comment_scan"
| "erl_compile"
| "erl_compile_server"
| "erl_ddll"
| "erl_distribution"
| "erl_epmd"
| "erl_error"
| "erl_erts_errors"
| "erl_eval"
| "erl_expand_records"
| "erl_features"
| "erl_init"
| "erl_internal"
| "erl_kernel_errors"
| "erl_lint"
| "erl_parse"
| "erl_posix_msg"
| "erl_pp"
| "erl_prettypr"
| "erl_prim_loader"
| "erl_recomment"
| "erl_reply"
| "erl_scan"
| "erl_signal_handler"
| "erl_stdlib_errors"
| "erl_syntax"
| "erl_syntax_lib"
| "erl_tar"
| "erl_tracer"
| "erl_types"
| "erlang"
| "erlsrv"
| "erpc"
| "error_handler"
| "error_logger"
| "error_logger_file_h"
| "error_logger_tty_h"
| "erts_alloc_config"
| "erts_code_purger"
| "erts_debug"
| "erts_dirty_process_signal_handler"
| "erts_internal"
| "erts_literal_area_collector"
| "escript"
| "et"
| "et_collector"
| "et_selector"
| "et_viewer"
| "et_wx_contents_viewer"
| "et_wx_viewer"
| "etop"
| "etop_tr"
| "etop_txt"
| "ets"
| "eunit"
| "eunit_autoexport"
| "eunit_data"
| "eunit_lib"
| "eunit_listener"
| "eunit_proc"
| "eunit_serial"
| "eunit_server"
| "eunit_striptests"
| "eunit_surefire"
| "eunit_test"
| "eunit_tests"
| "eunit_tty"
| "eval_bits"
| "file"
| "file_io_server"
| "file_server"
| "file_sorter"
| "filelib"
| "filename"
| "format_lib_supp"
| "fprof"
| "ftp"
| "ftp_app"
| "ftp_internal"
| "ftp_progress"
| "ftp_response"
| "ftp_sup"
| "gb_sets"
| "gb_trees"
| "gen"
| "gen_event"
| "gen_fsm"
| "gen_sctp"
| "gen_server"
| "gen_statem"
| "gen_tcp"
| "gen_tcp_socket"
| "gen_udp"
| "gen_udp_socket"
| "gl"
| "global"
| "global_group"
| "global_search"
| "glu"
| "group"
| "group_history"
| "heart"
| "http_chunk"
| "http_request"
| "http_response"
| "http_transport"
| "http_uri"
| "http_util"
| "httpc"
| "httpc_cookie"
| "httpc_handler"
| "httpc_handler_sup"
| "httpc_manager"
| "httpc_profile_sup"
| "httpc_request"
| "httpc_response"
| "httpc_sup"
| "httpd"
| "httpd_acceptor"
| "httpd_acceptor_sup"
| "httpd_cgi"
| "httpd_conf"
| "httpd_connection_sup"
| "httpd_custom"
| "httpd_custom_api"
| "httpd_esi"
| "httpd_example"
| "httpd_file"
| "httpd_instance_sup"
| "httpd_log"
| "httpd_logger"
| "httpd_manager"
| "httpd_misc_sup"
| "httpd_request"
| "httpd_request_handler"
| "httpd_response"
| "httpd_script_env"
| "httpd_socket"
| "httpd_sup"
| "httpd_util"
| "i"
| "inet"
| "inet6_sctp"
| "inet6_tcp"
| "inet6_tcp_dist"
| "inet6_tls_dist"
| "inet6_udp"
| "inet_config"
| "inet_db"
| "inet_dns"
| "inet_epmd_dist"
| "inet_epmd_socket"
| "inet_gethost_native"
| "inet_hosts"
| "inet_parse"
| "inet_res"
| "inet_sctp"
| "inet_tcp"
| "inet_tcp_dist"
| "inet_tls_dist"
| "inet_udp"
| "inets"
| "inets_app"
| "inets_lib"
| "inets_service"
| "inets_sup"
| "inets_trace"
| "init"
| "instrument"
| "int"
| "io"
| "io_lib"
| "io_lib_format"
| "io_lib_fread"
| "io_lib_pretty"
| "json"
| "kernel"
| "kernel_config"
| "kernel_refc"
| "lcnt"
| "leex"
| "lists"
| "local_tcp"
| "local_udp"
| "log_mf_h"
| "logger"
| "logger_backend"
| "logger_config"
| "logger_disk_log_h"
| "logger_filters"
| "logger_formatter"
| "logger_h_common"
| "logger_handler_watcher"
| "logger_olp"
| "logger_proxy"
| "logger_server"
| "logger_simple_h"
| "logger_std_h"
| "logger_sup"
| "make"
| "maps"
| "math"
| "megaco"
| "megaco_ber_encoder"
| "megaco_ber_media_gateway_control_v1"
| "megaco_ber_media_gateway_control_v2"
| "megaco_ber_media_gateway_control_v3"
| "megaco_binary_encoder"
| "megaco_binary_encoder_lib"
| "megaco_binary_name_resolver_v1"
| "megaco_binary_name_resolver_v2"
| "megaco_binary_name_resolver_v3"
| "megaco_binary_term_id"
| "megaco_binary_term_id_gen"
| "megaco_binary_transformer_v1"
| "megaco_binary_transformer_v2"
| "megaco_binary_transformer_v3"
| "megaco_compact_text_encoder"
| "megaco_compact_text_encoder_v1"
| "megaco_compact_text_encoder_v2"
| "megaco_compact_text_encoder_v3"
| "megaco_config"
| "megaco_config_misc"
| "megaco_digit_map"
| "megaco_edist_compress"
| "megaco_encoder"
| "megaco_erl_dist_encoder"
| "megaco_erl_dist_encoder_mc"
| "megaco_filter"
| "megaco_flex_scanner"
| "megaco_flex_scanner_handler"
| "megaco_messenger"
| "megaco_messenger_misc"
| "megaco_misc_sup"
| "megaco_monitor"
| "megaco_per_encoder"
| "megaco_per_media_gateway_control_v1"
| "megaco_per_media_gateway_control_v2"
| "megaco_per_media_gateway_control_v3"
| "megaco_pretty_text_encoder"
| "megaco_pretty_text_encoder_v1"
| "megaco_pretty_text_encoder_v2"
| "megaco_pretty_text_encoder_v3"
| "megaco_sdp"
| "megaco_stats"
| "megaco_sup"
| "megaco_tcp"
| "megaco_tcp_accept"
| "megaco_tcp_accept_sup"
| "megaco_tcp_connection"
| "megaco_tcp_connection_sup"
| "megaco_tcp_sup"
| "megaco_text_mini_decoder"
| "megaco_text_mini_parser"
| "megaco_text_parser_v1"
| "megaco_text_parser_v2"
| "megaco_text_parser_v3"
| "megaco_text_scanner"
| "megaco_timer"
| "megaco_trans_sender"
| "megaco_trans_sup"
| "megaco_transport"
| "megaco_udp"
| "megaco_udp_server"
| "megaco_udp_sup"
| "megaco_user"
| "megaco_user_default"
| "memsup"
| "merl"
| "merl_transform"
| "misc_supp"
| "mnesia"
| "mnesia_app"
| "mnesia_backend_type"
| "mnesia_backup"
| "mnesia_bup"
| "mnesia_checkpoint"
| "mnesia_checkpoint_sup"
| "mnesia_controller"
| "mnesia_dumper"
| "mnesia_event"
| "mnesia_ext_sup"
| "mnesia_frag"
| "mnesia_frag_hash"
| "mnesia_index"
| "mnesia_kernel_sup"
| "mnesia_late_loader"
| "mnesia_lib"
| "mnesia_loader"
| "mnesia_locker"
| "mnesia_log"
| "mnesia_monitor"
| "mnesia_recover"
| "mnesia_registry"
| "mnesia_rpc"
| "mnesia_schema"
| "mnesia_snmp_hook"
| "mnesia_sp"
| "mnesia_subscr"
| "mnesia_sup"
| "mnesia_text"
| "mnesia_tm"
| "mod_actions"
| "mod_alias"
| "mod_auth"
| "mod_auth_dets"
| "mod_auth_mnesia"
| "mod_auth_plain"
| "mod_auth_server"
| "mod_cgi"
| "mod_dir"
| "mod_disk_log"
| "mod_esi"
| "mod_get"
| "mod_head"
| "mod_log"
| "mod_range"
| "mod_responsecontrol"
| "mod_security"
| "mod_security_server"
| "mod_trace"
| "ms_transform"
| "msacc"
| "net"
| "net_adm"
| "net_kernel"
| "nteventlog"
| "observer"
| "observer_alloc_wx"
| "observer_app_wx"
| "observer_backend"
| "observer_html_lib"
| "observer_lib"
| "observer_perf_wx"
| "observer_port_wx"
| "observer_pro_wx"
| "observer_procinfo"
| "observer_sock_wx"
| "observer_sys_wx"
| "observer_trace_wx"
| "observer_traceoptions_wx"
| "observer_tv_table"
| "observer_tv_wx"
| "observer_wx"
| "orddict"
| "ordsets"
| "os"
| "os_mon"
| "os_mon_mib"
| "os_mon_sysinfo"
| "os_sup"
| "otp_internal"
| "peer"
| "persistent_term"
| "pg"
| "pg2"
| "pool"
| "prettypr"
| "prim_buffer"
| "prim_eval"
| "prim_file"
| "prim_inet"
| "prim_net"
| "prim_socket"
| "prim_tty"
| "prim_zip"
| "proc_lib"
| "proplists"
| "pubkey_cert"
| "pubkey_cert_records"
| "pubkey_crl"
| rust | Apache-2.0 | f424547f02e621f1c5f28749786e05eda7feb098 | 2026-01-04T15:40:22.554517Z | true |
gleam-lang/gleam | https://github.com/gleam-lang/gleam/blob/f424547f02e621f1c5f28749786e05eda7feb098/compiler-core/src/build/elixir_libraries.rs | compiler-core/src/build/elixir_libraries.rs | use crate::{
Error,
error::ShellCommandFailureReason,
io::{Command, CommandExecutor, FileSystemReader, FileSystemWriter, Stdio},
};
use camino::Utf8PathBuf;
// Windows cannot execute the plain `elixir` launcher directly; the Elixir
// installer provides a `.bat` shim, so pick the executable name per platform.
#[cfg(not(target_os = "windows"))]
const ELIXIR_EXECUTABLE: &str = "elixir";
#[cfg(target_os = "windows")]
const ELIXIR_EXECUTABLE: &str = "elixir.bat";
// These Elixir core libs will be loaded with the current project
const ELIXIR_LIBS: [&str; 4] = ["eex", "elixir", "logger", "mix"];
/// Makes the Elixir core libraries available in a project's build directory
/// by symlinking them in, so compiled Elixir dependencies can find them.
pub struct ElixirLibraries<'a, IO> {
    // Filesystem reader/writer and subprocess executor for all side effects.
    io: &'a IO,
    // Build directory the libraries are linked into (see `run`).
    build_dir: &'a Utf8PathBuf,
    // How the spawned `elixir` subprocess's output is handled.
    subprocess_stdio: Stdio,
}
impl<'a, IO> ElixirLibraries<'a, IO> {
    /// Bundle the inputs together; no work happens until `run` is called.
    fn new(io: &'a IO, build_dir: &'a Utf8PathBuf, subprocess_stdio: Stdio) -> Self {
        Self {
            io,
            build_dir,
            subprocess_stdio,
        }
    }
}
impl<'a, IO> ElixirLibraries<'a, IO>
where
    IO: CommandExecutor + FileSystemReader + FileSystemWriter + Clone,
{
    /// Ensure the Elixir core libraries (eex, elixir, logger, mix) are
    /// symlinked into `build_dir`, invoking the `elixir` executable to
    /// discover their paths the first time.
    ///
    /// On failure the paths cache is deleted so the next attempt starts
    /// from scratch.
    pub fn make_available(
        io: &'a IO,
        build_dir: &'a Utf8PathBuf,
        subprocess_stdio: Stdio,
    ) -> Result<(), Error> {
        let it = Self::new(io, build_dir, subprocess_stdio);
        let result = it.run();
        if result.is_err() {
            // Remove the (possibly partial) paths cache so it is rebuilt
            // on the next run rather than trusted.
            it.cleanup();
        }
        result
    }

    /// Delete the paths cache file. Only called after `run` has failed.
    fn cleanup(&self) {
        self.io
            .delete_file(&self.paths_cache_path())
            .expect("deleting paths cache in cleanup");
    }

    /// Name of the pathfinder cache file within the build directory.
    fn paths_cache_filename(&self) -> &'static str {
        "gleam_elixir_paths"
    }

    /// Full path of the pathfinder cache file.
    fn paths_cache_path(&self) -> Utf8PathBuf {
        self.build_dir.join(self.paths_cache_filename())
    }

    /// Write the pathfinder cache (spawning `elixir` if it is missing) and
    /// symlink each Elixir core library into the build directory.
    fn run(&self) -> Result<(), Error> {
        // The pathfinder is a file in build/{target}/erlang
        // It contains the full path for each Elixir core lib we need, new-line delimited
        // The pathfinder saves us from repeatedly loading Elixir to get this info
        let mut update_links = false;
        let cache = self.paths_cache_path();
        if !self.io.is_file(&cache) {
            // The pathfinder must be written
            // Any existing core lib links will get updated
            update_links = true;
            // TODO: test
            let env = vec![("TERM".to_string(), "dumb".to_string())];
            // Prepare the libs for Erlang's code:lib_dir function
            let elixir_atoms: Vec<String> =
                ELIXIR_LIBS.iter().map(|lib| format!(":{}", lib)).collect();
            // Use Elixir to find its core lib paths and write the pathfinder file
            let args = vec![
                "--eval".to_string(),
                format!(
                    ":ok = File.write(~s({}), [{}] |> Stream.map(fn(lib) -> lib |> :code.lib_dir |> Path.expand end) |> Enum.join(~s(\\n)))",
                    self.paths_cache_filename(),
                    elixir_atoms.join(", "),
                ),
            ];
            tracing::debug!("writing_elixir_paths_to_build");
            let status = self.io.exec(Command {
                program: ELIXIR_EXECUTABLE.into(),
                args,
                env,
                cwd: Some(self.build_dir.clone()),
                stdio: self.subprocess_stdio,
            })?;
            if status != 0 {
                return Err(Error::ShellCommand {
                    // Report the executable that was actually run
                    // (`elixir.bat` on Windows) rather than a hard-coded name.
                    program: ELIXIR_EXECUTABLE.into(),
                    reason: ShellCommandFailureReason::Unknown,
                });
            }
        }
        // Each pathfinder line is a system path for an Elixir core library
        let read_pathfinder = self.io.read(&cache)?;
        for lib_path in read_pathfinder.split('\n') {
            let source = Utf8PathBuf::from(lib_path);
            // `unwrap_or_else` avoids building the panic message on the
            // success path, unlike the previous `expect(&format!(..))`.
            let name = source
                .as_path()
                .file_name()
                .unwrap_or_else(|| panic!("Unexpanded path in {}", self.paths_cache_filename()));
            let dest = self.build_dir.join(name);
            let ebin = dest.join("ebin");
            if !update_links || self.io.is_directory(&ebin) {
                // Either links don't need updating
                // Or this library is already linked
                continue;
            }
            // TODO: unit test
            if self.io.is_directory(&dest) {
                // Delete the existing link
                self.io.delete_directory(&dest)?;
            }
            tracing::debug!("linking_{}_to_build", name);
            self.io.symlink_dir(&source, &dest)?;
        }
        Ok(())
    }
}
| rust | Apache-2.0 | f424547f02e621f1c5f28749786e05eda7feb098 | 2026-01-04T15:40:22.554517Z | false |
gleam-lang/gleam | https://github.com/gleam-lang/gleam/blob/f424547f02e621f1c5f28749786e05eda7feb098/compiler-core/src/build/module_loader/tests.rs | compiler-core/src/build/module_loader/tests.rs | use super::*;
use crate::{
build::SourceFingerprint,
io::{FileSystemWriter, memory::InMemoryFileSystem},
line_numbers::LineNumbers,
};
use std::time::Duration;
#[test]
fn no_cache_present() {
    // With no cache metadata on disk the module must be loaded as new.
    let name = "package".into();
    let artefact = Utf8Path::new("/artefact");
    let fs = InMemoryFileSystem::new();
    let warnings = WarningEmitter::null();
    let incomplete_modules = HashSet::new();
    let loader = make_loader(&warnings, &name, &fs, artefact, &incomplete_modules);
    // `Utf8Path::new` already returns a reference; the extra `&` was a
    // needless double borrow.
    fs.write(Utf8Path::new("/src/main.gleam"), "const x = 1")
        .unwrap();
    let file = GleamFile::new("/src".into(), "/src/main.gleam".into());
    let result = loader.load(file).unwrap();
    assert!(result.is_new());
}
#[test]
fn cache_present_and_fresh() {
    // A cache newer than its source is trusted and used as-is.
    let package = "package".into();
    let artefact_dir = Utf8Path::new("/artefact");
    let filesystem = InMemoryFileSystem::new();
    let emitter = WarningEmitter::null();
    let incomplete = HashSet::new();
    // Source mtime (0s) predates the cache mtime (1s).
    write_src(&filesystem, TEST_SOURCE_1, "/src/main.gleam", 0);
    write_cache(&filesystem, TEST_SOURCE_1, "/artefact/main.cache_meta", 1, false);
    let loader = make_loader(&emitter, &package, &filesystem, artefact_dir, &incomplete);
    let module = GleamFile::new("/src".into(), "/src/main.gleam".into());
    assert!(loader.load(module).unwrap().is_cached());
}
#[test]
fn cache_present_and_stale() {
    // A cache older than a changed source is discarded and the module reloaded.
    let package = "package".into();
    let artefact_dir = Utf8Path::new("/artefact");
    let filesystem = InMemoryFileSystem::new();
    let emitter = WarningEmitter::null();
    let incomplete = HashSet::new();
    // Source mtime (2s) is newer than the cache mtime (1s) and the content
    // differs from what the cache was built from.
    write_src(&filesystem, TEST_SOURCE_2, "/src/main.gleam", 2);
    write_cache(&filesystem, TEST_SOURCE_1, "/artefact/main.cache_meta", 1, false);
    let loader = make_loader(&emitter, &package, &filesystem, artefact_dir, &incomplete);
    let module = GleamFile::new("/src".into(), "/src/main.gleam".into());
    assert!(loader.load(module).unwrap().is_new());
}
#[test]
fn cache_present_and_stale_but_source_is_the_same() {
    // A stale mtime alone is not enough to invalidate: if the source content
    // still matches the cached fingerprint, the cache is used.
    let package = "package".into();
    let artefact_dir = Utf8Path::new("/artefact");
    let filesystem = InMemoryFileSystem::new();
    let emitter = WarningEmitter::null();
    let incomplete = HashSet::new();
    // Source mtime (2s) is newer than the cache mtime (1s), content unchanged.
    write_src(&filesystem, TEST_SOURCE_1, "/src/main.gleam", 2);
    write_cache(&filesystem, TEST_SOURCE_1, "/artefact/main.cache_meta", 1, false);
    let loader = make_loader(&emitter, &package, &filesystem, artefact_dir, &incomplete);
    let module = GleamFile::new("/src".into(), "/src/main.gleam".into());
    assert!(loader.load(module).unwrap().is_cached());
}
#[test]
fn cache_present_and_stale_source_is_the_same_lsp_mode() {
    // Same as the non-LSP case: matching fingerprints mean the cache is used.
    let package = "package".into();
    let artefact_dir = Utf8Path::new("/artefact");
    let filesystem = InMemoryFileSystem::new();
    let emitter = WarningEmitter::null();
    let incomplete = HashSet::new();
    // Source mtime (2s) is newer than the cache mtime (1s), content unchanged.
    write_src(&filesystem, TEST_SOURCE_1, "/src/main.gleam", 2);
    write_cache(&filesystem, TEST_SOURCE_1, "/artefact/main.cache_meta", 1, false);
    let mut loader = make_loader(&emitter, &package, &filesystem, artefact_dir, &incomplete);
    loader.mode = Mode::Lsp;
    let module = GleamFile::new("/src".into(), "/src/main.gleam".into());
    assert!(loader.load(module).unwrap().is_cached());
}
#[test]
fn cache_present_and_stale_source_is_the_same_lsp_mode_and_invalidated() {
    // In LSP mode a module flagged as incomplete must be reloaded even when
    // the cached fingerprint still matches the source.
    let package = "package".into();
    let artefact_dir = Utf8Path::new("/artefact");
    let filesystem = InMemoryFileSystem::new();
    let emitter = WarningEmitter::null();
    let mut incomplete = HashSet::new();
    let _ = incomplete.insert("main".into());
    // Source mtime (2s) is newer than the cache mtime (1s), content unchanged.
    write_src(&filesystem, TEST_SOURCE_1, "/src/main.gleam", 2);
    write_cache(&filesystem, TEST_SOURCE_1, "/artefact/main.cache_meta", 1, false);
    let mut loader = make_loader(&emitter, &package, &filesystem, artefact_dir, &incomplete);
    loader.mode = Mode::Lsp;
    let module = GleamFile::new("/src".into(), "/src/main.gleam".into());
    assert!(loader.load(module).unwrap().is_new());
}
#[test]
fn cache_present_without_codegen_when_required() {
    // A fresh cache that never ran codegen cannot satisfy a build that
    // requires codegen, so the module is loaded as new.
    let package = "package".into();
    let artefact_dir = Utf8Path::new("/artefact");
    let filesystem = InMemoryFileSystem::new();
    let emitter = WarningEmitter::null();
    let incomplete = HashSet::new();
    // Cache mtime (1s) is newer than the source mtime (0s).
    write_src(&filesystem, TEST_SOURCE_1, "/src/main.gleam", 0);
    write_cache(&filesystem, TEST_SOURCE_1, "/artefact/main.cache_meta", 1, false);
    let mut loader = make_loader(&emitter, &package, &filesystem, artefact_dir, &incomplete);
    loader.codegen = CodegenRequired::Yes;
    let module = GleamFile::new("/src".into(), "/src/main.gleam".into());
    assert!(loader.load(module).unwrap().is_new());
}
#[test]
fn cache_present_with_codegen_when_required() {
    // A fresh cache whose codegen already ran satisfies a codegen-requiring
    // build, so the cached module is used.
    let package = "package".into();
    let artefact_dir = Utf8Path::new("/artefact");
    let filesystem = InMemoryFileSystem::new();
    let emitter = WarningEmitter::null();
    let incomplete = HashSet::new();
    // Cache mtime (1s) is newer than the source mtime (0s).
    write_src(&filesystem, TEST_SOURCE_1, "/src/main.gleam", 0);
    write_cache(&filesystem, TEST_SOURCE_1, "/artefact/main.cache_meta", 1, true);
    let mut loader = make_loader(&emitter, &package, &filesystem, artefact_dir, &incomplete);
    loader.codegen = CodegenRequired::Yes;
    let module = GleamFile::new("/src".into(), "/src/main.gleam".into());
    assert!(loader.load(module).unwrap().is_cached());
}
#[test]
fn cache_present_without_codegen_when_not_required() {
    // When codegen is not required, a fresh cache without codegen is fine.
    let package = "package".into();
    let artefact_dir = Utf8Path::new("/artefact");
    let filesystem = InMemoryFileSystem::new();
    let emitter = WarningEmitter::null();
    let incomplete = HashSet::new();
    // Cache mtime (1s) is newer than the source mtime (0s).
    write_src(&filesystem, TEST_SOURCE_1, "/src/main.gleam", 0);
    write_cache(&filesystem, TEST_SOURCE_1, "/artefact/main.cache_meta", 1, false);
    let mut loader = make_loader(&emitter, &package, &filesystem, artefact_dir, &incomplete);
    loader.codegen = CodegenRequired::No;
    let module = GleamFile::new("/src".into(), "/src/main.gleam".into());
    assert!(loader.load(module).unwrap().is_cached());
}
// Two distinct module sources used to exercise fingerprint (content) checks.
// `'static` is implied on consts; spelling it out triggers clippy's
// `redundant_static_lifetimes` lint.
const TEST_SOURCE_1: &str = "const x = 1";
const TEST_SOURCE_2: &str = "const x = 2";
/// Write a serialized `CacheMetadata` file for `source` at `path`, stamped
/// with the given mtime (seconds since the epoch) and codegen flag.
fn write_cache(
    fs: &InMemoryFileSystem,
    source: &str,
    path: &str,
    seconds: u64,
    codegen_performed: bool,
) {
    let line_numbers = LineNumbers::new(source);
    let cache_metadata = CacheMetadata {
        mtime: SystemTime::UNIX_EPOCH + Duration::from_secs(seconds),
        codegen_performed,
        dependencies: vec![],
        // Fingerprint of the source the cache was "built" from, used by the
        // loader to detect content changes independent of mtime.
        fingerprint: SourceFingerprint::new(source),
        line_numbers,
    };
    let path = Utf8Path::new(path);
    // `path` is already a `&Utf8Path`; borrowing it again was redundant.
    fs.write_bytes(path, &cache_metadata.to_binary()).unwrap();
}
/// Write `source` at `path` and stamp it with the given mtime
/// (seconds since the epoch).
fn write_src(fs: &InMemoryFileSystem, source: &str, path: &str, seconds: u64) {
    let path = Utf8Path::new(path);
    // `path` is already a `&Utf8Path`; the previous `&path` double borrow
    // only compiled via auto-deref coercion.
    fs.write(path, source).unwrap();
    fs.set_modification_time(path, SystemTime::UNIX_EPOCH + Duration::from_secs(seconds));
}
/// Build a `ModuleLoader` with the default test configuration: dev mode,
/// Erlang target, codegen not required, loading from `src`. Tests mutate
/// `mode`/`codegen` on the returned loader as needed.
fn make_loader<'a>(
    warnings: &'a WarningEmitter,
    package_name: &'a EcoString,
    fs: &InMemoryFileSystem,
    artefact: &'a Utf8Path,
    incomplete_modules: &'a HashSet<EcoString>,
) -> ModuleLoader<'a, InMemoryFileSystem> {
    ModuleLoader {
        warnings,
        io: fs.clone(),
        mode: Mode::Dev,
        target: Target::Erlang,
        codegen: CodegenRequired::No,
        package_name,
        // `artefact` is already a `&Utf8Path`; the previous `&artefact`
        // double borrow only compiled via auto-deref coercion.
        artefact_directory: artefact,
        origin: Origin::Src,
        incomplete_modules,
    }
}
| rust | Apache-2.0 | f424547f02e621f1c5f28749786e05eda7feb098 | 2026-01-04T15:40:22.554517Z | false |
gleam-lang/gleam | https://github.com/gleam-lang/gleam/blob/f424547f02e621f1c5f28749786e05eda7feb098/compiler-core/src/build/native_file_copier/tests.rs | compiler-core/src/build/native_file_copier/tests.rs | use super::NativeFileCopier;
use crate::{
build::{native_file_copier::CopiedNativeFiles, package_compiler::CheckModuleConflicts},
io::{FileSystemWriter, memory::InMemoryFileSystem},
};
use std::{
collections::HashMap,
sync::OnceLock,
time::{Duration, SystemTime, UNIX_EPOCH},
};
use camino::{Utf8Path, Utf8PathBuf};
/// Shared project-root path (`/`) used by all tests in this module.
fn root() -> &'static Utf8PathBuf {
    static ROOT: OnceLock<Utf8PathBuf> = OnceLock::new();
    // `Utf8PathBuf::from` already yields an owned value; the previous
    // `.to_owned()` cloned it for nothing (clippy `redundant_clone`).
    ROOT.get_or_init(|| Utf8PathBuf::from("/"))
}
/// Shared output-directory path (`/out`) used by all tests in this module.
fn root_out() -> &'static Utf8PathBuf {
    static OUT_DIR: OnceLock<Utf8PathBuf> = OnceLock::new();
    OUT_DIR.get_or_init(|| Utf8PathBuf::from("/out"))
}
#[test]
fn javascript_files_are_copied_from_src() {
    // A `.js` file in src/ is copied to the output directory unchanged.
    let fs = InMemoryFileSystem::new();
    // `Utf8Path::new` already returns a reference; the extra `&` was a
    // needless double borrow.
    fs.write(Utf8Path::new("/src/wibble.js"), "1").unwrap();
    let copier = NativeFileCopier::new(fs.clone(), root(), root_out(), CheckModuleConflicts::Check);
    let copied = copier.run().unwrap();
    assert!(!copied.any_elixir);
    assert!(copied.to_compile.is_empty());
    assert_eq!(
        HashMap::from([
            (Utf8PathBuf::from("/src/wibble.js"), "1".into()),
            (Utf8PathBuf::from("/out/wibble.js"), "1".into())
        ]),
        fs.into_contents(),
    );
}
#[test]
fn javascript_files_are_copied_from_test() {
    // A `.js` file in test/ is copied to the output directory unchanged.
    let fs = InMemoryFileSystem::new();
    // `Utf8Path::new` already returns a reference; no extra borrow needed.
    fs.write(Utf8Path::new("/test/wibble.js"), "1").unwrap();
    let copier = NativeFileCopier::new(fs.clone(), root(), root_out(), CheckModuleConflicts::Check);
    let copied = copier.run().unwrap();
    assert!(!copied.any_elixir);
    assert!(copied.to_compile.is_empty());
    assert_eq!(
        HashMap::from([
            (Utf8PathBuf::from("/test/wibble.js"), "1".into()),
            (Utf8PathBuf::from("/out/wibble.js"), "1".into())
        ]),
        fs.into_contents(),
    );
}
#[test]
fn javascript_files_are_copied_from_dev() {
    // A `.js` file in dev/ is copied to the output directory unchanged.
    let fs = InMemoryFileSystem::new();
    // `Utf8Path::new` already returns a reference; no extra borrow needed.
    fs.write(Utf8Path::new("/dev/wibble.js"), "1").unwrap();
    let copier = NativeFileCopier::new(fs.clone(), root(), root_out(), CheckModuleConflicts::Check);
    let copied = copier.run().unwrap();
    assert!(!copied.any_elixir);
    assert!(copied.to_compile.is_empty());
    assert_eq!(
        HashMap::from([
            (Utf8PathBuf::from("/dev/wibble.js"), "1".into()),
            (Utf8PathBuf::from("/out/wibble.js"), "1".into())
        ]),
        fs.into_contents(),
    );
}
#[test]
fn mjavascript_files_are_copied_from_src() {
    // A `.mjs` file in src/ is copied to the output directory unchanged.
    let fs = InMemoryFileSystem::new();
    // `Utf8Path::new` already returns a reference; no extra borrow needed.
    fs.write(Utf8Path::new("/src/wibble.mjs"), "1").unwrap();
    let copier = NativeFileCopier::new(fs.clone(), root(), root_out(), CheckModuleConflicts::Check);
    let copied = copier.run().unwrap();
    assert!(!copied.any_elixir);
    assert!(copied.to_compile.is_empty());
    assert_eq!(
        HashMap::from([
            (Utf8PathBuf::from("/src/wibble.mjs"), "1".into()),
            (Utf8PathBuf::from("/out/wibble.mjs"), "1".into())
        ]),
        fs.into_contents(),
    );
}
#[test]
fn mjavascript_files_are_copied_from_test() {
    // A `.mjs` file in test/ is copied to the output directory unchanged.
    let fs = InMemoryFileSystem::new();
    // `Utf8Path::new` already returns a reference; no extra borrow needed.
    fs.write(Utf8Path::new("/test/wibble.mjs"), "1").unwrap();
    let copier = NativeFileCopier::new(fs.clone(), root(), root_out(), CheckModuleConflicts::Check);
    let copied = copier.run().unwrap();
    assert!(!copied.any_elixir);
    assert!(copied.to_compile.is_empty());
    assert_eq!(
        HashMap::from([
            (Utf8PathBuf::from("/test/wibble.mjs"), "1".into()),
            (Utf8PathBuf::from("/out/wibble.mjs"), "1".into())
        ]),
        fs.into_contents(),
    );
}
#[test]
fn mjavascript_files_are_copied_from_dev() {
    // A `.mjs` file in dev/ is copied to the output directory unchanged.
    let fs = InMemoryFileSystem::new();
    // `Utf8Path::new` already returns a reference; no extra borrow needed.
    fs.write(Utf8Path::new("/dev/wibble.mjs"), "1").unwrap();
    let copier = NativeFileCopier::new(fs.clone(), root(), root_out(), CheckModuleConflicts::Check);
    let copied = copier.run().unwrap();
    assert!(!copied.any_elixir);
    assert!(copied.to_compile.is_empty());
    assert_eq!(
        HashMap::from([
            (Utf8PathBuf::from("/dev/wibble.mjs"), "1".into()),
            (Utf8PathBuf::from("/out/wibble.mjs"), "1".into())
        ]),
        fs.into_contents(),
    );
}
#[test]
fn cjavascript_files_are_copied_from_src() {
    // A `.cjs` file in src/ is copied to the output directory unchanged.
    let fs = InMemoryFileSystem::new();
    // `Utf8Path::new` already returns a reference; no extra borrow needed.
    fs.write(Utf8Path::new("/src/wibble.cjs"), "1").unwrap();
    let copier = NativeFileCopier::new(fs.clone(), root(), root_out(), CheckModuleConflicts::Check);
    let copied = copier.run().unwrap();
    assert!(!copied.any_elixir);
    assert!(copied.to_compile.is_empty());
    assert_eq!(
        HashMap::from([
            (Utf8PathBuf::from("/src/wibble.cjs"), "1".into()),
            (Utf8PathBuf::from("/out/wibble.cjs"), "1".into())
        ]),
        fs.into_contents(),
    );
}
#[test]
fn cjavascript_files_are_copied_from_test() {
    // A `.cjs` file in test/ is copied to the output directory unchanged.
    let fs = InMemoryFileSystem::new();
    // `Utf8Path::new` already returns a reference; no extra borrow needed.
    fs.write(Utf8Path::new("/test/wibble.cjs"), "1").unwrap();
    let copier = NativeFileCopier::new(fs.clone(), root(), root_out(), CheckModuleConflicts::Check);
    let copied = copier.run().unwrap();
    assert!(!copied.any_elixir);
    assert!(copied.to_compile.is_empty());
    assert_eq!(
        HashMap::from([
            (Utf8PathBuf::from("/test/wibble.cjs"), "1".into()),
            (Utf8PathBuf::from("/out/wibble.cjs"), "1".into())
        ]),
        fs.into_contents(),
    );
}
#[test]
fn cjavascript_files_are_copied_from_dev() {
    // A `.cjs` file in dev/ is copied to the output directory unchanged.
    let fs = InMemoryFileSystem::new();
    // `Utf8Path::new` already returns a reference; no extra borrow needed.
    fs.write(Utf8Path::new("/dev/wibble.cjs"), "1").unwrap();
    let copier = NativeFileCopier::new(fs.clone(), root(), root_out(), CheckModuleConflicts::Check);
    let copied = copier.run().unwrap();
    assert!(!copied.any_elixir);
    assert!(copied.to_compile.is_empty());
    assert_eq!(
        HashMap::from([
            (Utf8PathBuf::from("/dev/wibble.cjs"), "1".into()),
            (Utf8PathBuf::from("/out/wibble.cjs"), "1".into())
        ]),
        fs.into_contents(),
    );
}
#[test]
fn typescript_files_are_copied_from_src() {
    // A `.ts` file in src/ is copied to the output directory unchanged.
    let fs = InMemoryFileSystem::new();
    // `Utf8Path::new` already returns a reference; no extra borrow needed.
    fs.write(Utf8Path::new("/src/wibble.ts"), "1").unwrap();
    let copier = NativeFileCopier::new(fs.clone(), root(), root_out(), CheckModuleConflicts::Check);
    let copied = copier.run().unwrap();
    assert!(!copied.any_elixir);
    assert!(copied.to_compile.is_empty());
    assert_eq!(
        HashMap::from([
            (Utf8PathBuf::from("/src/wibble.ts"), "1".into()),
            (Utf8PathBuf::from("/out/wibble.ts"), "1".into())
        ]),
        fs.into_contents(),
    );
}
#[test]
fn typescript_files_are_copied_from_test() {
    // A `.ts` file in test/ is copied to the output directory unchanged.
    let fs = InMemoryFileSystem::new();
    // `Utf8Path::new` already returns a reference; no extra borrow needed.
    fs.write(Utf8Path::new("/test/wibble.ts"), "1").unwrap();
    let copier = NativeFileCopier::new(fs.clone(), root(), root_out(), CheckModuleConflicts::Check);
    let copied = copier.run().unwrap();
    assert!(!copied.any_elixir);
    assert!(copied.to_compile.is_empty());
    assert_eq!(
        HashMap::from([
            (Utf8PathBuf::from("/test/wibble.ts"), "1".into()),
            (Utf8PathBuf::from("/out/wibble.ts"), "1".into())
        ]),
        fs.into_contents(),
    );
}
#[test]
fn typescript_files_are_copied_from_dev() {
    // A `.ts` file in dev/ is copied to the output directory unchanged.
    let fs = InMemoryFileSystem::new();
    // `Utf8Path::new` already returns a reference; no extra borrow needed.
    fs.write(Utf8Path::new("/dev/wibble.ts"), "1").unwrap();
    let copier = NativeFileCopier::new(fs.clone(), root(), root_out(), CheckModuleConflicts::Check);
    let copied = copier.run().unwrap();
    assert!(!copied.any_elixir);
    assert!(copied.to_compile.is_empty());
    assert_eq!(
        HashMap::from([
            (Utf8PathBuf::from("/dev/wibble.ts"), "1".into()),
            (Utf8PathBuf::from("/out/wibble.ts"), "1".into())
        ]),
        fs.into_contents(),
    );
}
#[test]
fn all_javascript_files_are_copied_from_src_subfolders() {
    // Native files in nested src/ subfolders are copied with their relative
    // directory structure preserved.
    let fs = InMemoryFileSystem::new();
    // `Utf8Path::new` already returns a reference; no extra borrow needed.
    fs.write(Utf8Path::new("/src/abc/def/wibble.mjs"), "1")
        .unwrap();
    fs.write(Utf8Path::new("/src/abc/ghi/wibble.js"), "2")
        .unwrap();
    fs.write(Utf8Path::new("/src/abc/jkl/wibble.cjs"), "3")
        .unwrap();
    fs.write(Utf8Path::new("/src/def/wobble.ts"), "4").unwrap();
    let copier = NativeFileCopier::new(fs.clone(), root(), root_out(), CheckModuleConflicts::Check);
    let copied = copier.run().unwrap();
    assert!(!copied.any_elixir);
    assert!(copied.to_compile.is_empty());
    assert_eq!(
        HashMap::from([
            (Utf8PathBuf::from("/src/abc/def/wibble.mjs"), "1".into()),
            (Utf8PathBuf::from("/out/abc/def/wibble.mjs"), "1".into()),
            (Utf8PathBuf::from("/src/abc/ghi/wibble.js"), "2".into()),
            (Utf8PathBuf::from("/out/abc/ghi/wibble.js"), "2".into()),
            (Utf8PathBuf::from("/src/abc/jkl/wibble.cjs"), "3".into()),
            (Utf8PathBuf::from("/out/abc/jkl/wibble.cjs"), "3".into()),
            (Utf8PathBuf::from("/src/def/wobble.ts"), "4".into()),
            (Utf8PathBuf::from("/out/def/wobble.ts"), "4".into())
        ]),
        fs.into_contents(),
    );
}
#[test]
fn all_javascript_files_are_copied_from_test_subfolders() {
    // Native files in nested test/ subfolders are copied with their relative
    // directory structure preserved.
    let fs = InMemoryFileSystem::new();
    // `Utf8Path::new` already returns a reference; no extra borrow needed.
    fs.write(Utf8Path::new("/test/abc/def/wibble.mjs"), "1")
        .unwrap();
    fs.write(Utf8Path::new("/test/abc/ghi/wibble.js"), "2")
        .unwrap();
    fs.write(Utf8Path::new("/test/abc/jkl/wibble.cjs"), "3")
        .unwrap();
    fs.write(Utf8Path::new("/test/def/wobble.ts"), "4")
        .unwrap();
    let copier = NativeFileCopier::new(fs.clone(), root(), root_out(), CheckModuleConflicts::Check);
    let copied = copier.run().unwrap();
    assert!(!copied.any_elixir);
    assert!(copied.to_compile.is_empty());
    assert_eq!(
        HashMap::from([
            (Utf8PathBuf::from("/test/abc/def/wibble.mjs"), "1".into()),
            (Utf8PathBuf::from("/out/abc/def/wibble.mjs"), "1".into()),
            (Utf8PathBuf::from("/test/abc/ghi/wibble.js"), "2".into()),
            (Utf8PathBuf::from("/out/abc/ghi/wibble.js"), "2".into()),
            (Utf8PathBuf::from("/test/abc/jkl/wibble.cjs"), "3".into()),
            (Utf8PathBuf::from("/out/abc/jkl/wibble.cjs"), "3".into()),
            (Utf8PathBuf::from("/test/def/wobble.ts"), "4".into()),
            (Utf8PathBuf::from("/out/def/wobble.ts"), "4".into())
        ]),
        fs.into_contents(),
    );
}
#[test]
fn all_javascript_files_are_copied_from_dev_subfolders() {
    let files = InMemoryFileSystem::new();
    // Seed nested JavaScript/TypeScript sources under /dev.
    for (path, contents) in [
        ("/dev/abc/def/wibble.mjs", "1"),
        ("/dev/abc/ghi/wibble.js", "2"),
        ("/dev/abc/jkl/wibble.cjs", "3"),
        ("/dev/def/wobble.ts", "4"),
    ] {
        files.write(Utf8Path::new(path), contents).unwrap();
    }
    let copier =
        NativeFileCopier::new(files.clone(), root(), root_out(), CheckModuleConflicts::Check);
    let output = copier.run().unwrap();
    assert!(!output.any_elixir);
    assert!(output.to_compile.is_empty());
    // Each file must remain in place and be mirrored into /out.
    let expected = HashMap::from([
        (Utf8PathBuf::from("/dev/abc/def/wibble.mjs"), "1".into()),
        (Utf8PathBuf::from("/out/abc/def/wibble.mjs"), "1".into()),
        (Utf8PathBuf::from("/dev/abc/ghi/wibble.js"), "2".into()),
        (Utf8PathBuf::from("/out/abc/ghi/wibble.js"), "2".into()),
        (Utf8PathBuf::from("/dev/abc/jkl/wibble.cjs"), "3".into()),
        (Utf8PathBuf::from("/out/abc/jkl/wibble.cjs"), "3".into()),
        (Utf8PathBuf::from("/dev/def/wobble.ts"), "4".into()),
        (Utf8PathBuf::from("/out/def/wobble.ts"), "4".into()),
    ]);
    assert_eq!(expected, files.into_contents());
}
#[test]
fn erlang_header_files_are_copied_from_src() {
    let files = InMemoryFileSystem::new();
    files.write(Utf8Path::new("/src/wibble.hrl"), "1").unwrap();
    let copier =
        NativeFileCopier::new(files.clone(), root(), root_out(), CheckModuleConflicts::Check);
    let output = copier.run().unwrap();
    // Headers get copied but never compiled.
    assert!(!output.any_elixir);
    assert!(output.to_compile.is_empty());
    let expected = HashMap::from([
        (Utf8PathBuf::from("/src/wibble.hrl"), "1".into()),
        (Utf8PathBuf::from("/out/wibble.hrl"), "1".into()),
    ]);
    assert_eq!(expected, files.into_contents());
}
#[test]
fn erlang_header_files_are_copied_from_test() {
    let files = InMemoryFileSystem::new();
    files.write(Utf8Path::new("/test/wibble.hrl"), "1").unwrap();
    let copier =
        NativeFileCopier::new(files.clone(), root(), root_out(), CheckModuleConflicts::Check);
    let output = copier.run().unwrap();
    // Headers get copied but never compiled.
    assert!(!output.any_elixir);
    assert!(output.to_compile.is_empty());
    let expected = HashMap::from([
        (Utf8PathBuf::from("/test/wibble.hrl"), "1".into()),
        (Utf8PathBuf::from("/out/wibble.hrl"), "1".into()),
    ]);
    assert_eq!(expected, files.into_contents());
}
#[test]
fn erlang_header_files_are_copied_from_dev() {
    let files = InMemoryFileSystem::new();
    files.write(Utf8Path::new("/dev/wibble.hrl"), "1").unwrap();
    let copier =
        NativeFileCopier::new(files.clone(), root(), root_out(), CheckModuleConflicts::Check);
    let output = copier.run().unwrap();
    // Headers get copied but never compiled.
    assert!(!output.any_elixir);
    assert!(output.to_compile.is_empty());
    let expected = HashMap::from([
        (Utf8PathBuf::from("/dev/wibble.hrl"), "1".into()),
        (Utf8PathBuf::from("/out/wibble.hrl"), "1".into()),
    ]);
    assert_eq!(expected, files.into_contents());
}
#[test]
fn erlang_files_are_copied_from_src() {
    let files = InMemoryFileSystem::new();
    files.write(Utf8Path::new("/src/wibble.erl"), "1").unwrap();
    let copier =
        NativeFileCopier::new(files.clone(), root(), root_out(), CheckModuleConflicts::Check);
    let output = copier.run().unwrap();
    assert!(!output.any_elixir);
    // The copied Erlang module is also scheduled for BEAM compilation.
    assert_eq!(output.to_compile, vec![Utf8PathBuf::from("wibble.erl")]);
    let expected = HashMap::from([
        (Utf8PathBuf::from("/src/wibble.erl"), "1".into()),
        (Utf8PathBuf::from("/out/wibble.erl"), "1".into()),
    ]);
    assert_eq!(expected, files.into_contents());
}
#[test]
fn erlang_files_are_copied_from_test() {
    let files = InMemoryFileSystem::new();
    files.write(Utf8Path::new("/test/wibble.erl"), "1").unwrap();
    let copier =
        NativeFileCopier::new(files.clone(), root(), root_out(), CheckModuleConflicts::Check);
    let output = copier.run().unwrap();
    assert!(!output.any_elixir);
    // The copied Erlang module is also scheduled for BEAM compilation.
    assert_eq!(output.to_compile, vec![Utf8PathBuf::from("wibble.erl")]);
    let expected = HashMap::from([
        (Utf8PathBuf::from("/test/wibble.erl"), "1".into()),
        (Utf8PathBuf::from("/out/wibble.erl"), "1".into()),
    ]);
    assert_eq!(expected, files.into_contents());
}
#[test]
fn erlang_files_are_copied_from_dev() {
    let files = InMemoryFileSystem::new();
    files.write(Utf8Path::new("/dev/wibble.erl"), "1").unwrap();
    let copier =
        NativeFileCopier::new(files.clone(), root(), root_out(), CheckModuleConflicts::Check);
    let output = copier.run().unwrap();
    assert!(!output.any_elixir);
    // The copied Erlang module is also scheduled for BEAM compilation.
    assert_eq!(output.to_compile, vec![Utf8PathBuf::from("wibble.erl")]);
    let expected = HashMap::from([
        (Utf8PathBuf::from("/dev/wibble.erl"), "1".into()),
        (Utf8PathBuf::from("/out/wibble.erl"), "1".into()),
    ]);
    assert_eq!(expected, files.into_contents());
}
#[test]
fn elixir_files_are_copied_from_src() {
    let files = InMemoryFileSystem::new();
    files.write(Utf8Path::new("/src/wibble.ex"), "1").unwrap();
    let copier =
        NativeFileCopier::new(files.clone(), root(), root_out(), CheckModuleConflicts::Check);
    let output = copier.run().unwrap();
    // Elixir sources flip the `any_elixir` flag and need compiling.
    assert!(output.any_elixir);
    assert_eq!(output.to_compile, vec![Utf8PathBuf::from("wibble.ex")]);
    let expected = HashMap::from([
        (Utf8PathBuf::from("/src/wibble.ex"), "1".into()),
        (Utf8PathBuf::from("/out/wibble.ex"), "1".into()),
    ]);
    assert_eq!(expected, files.into_contents());
}
#[test]
fn elixir_files_are_copied_from_test() {
    let files = InMemoryFileSystem::new();
    files.write(Utf8Path::new("/test/wibble.ex"), "1").unwrap();
    let copier =
        NativeFileCopier::new(files.clone(), root(), root_out(), CheckModuleConflicts::Check);
    let output = copier.run().unwrap();
    // Elixir sources flip the `any_elixir` flag and need compiling.
    assert!(output.any_elixir);
    assert_eq!(output.to_compile, vec![Utf8PathBuf::from("wibble.ex")]);
    let expected = HashMap::from([
        (Utf8PathBuf::from("/test/wibble.ex"), "1".into()),
        (Utf8PathBuf::from("/out/wibble.ex"), "1".into()),
    ]);
    assert_eq!(expected, files.into_contents());
}
#[test]
fn elixir_files_are_copied_from_dev() {
    let files = InMemoryFileSystem::new();
    files.write(Utf8Path::new("/dev/wibble.ex"), "1").unwrap();
    let copier =
        NativeFileCopier::new(files.clone(), root(), root_out(), CheckModuleConflicts::Check);
    let output = copier.run().unwrap();
    // Elixir sources flip the `any_elixir` flag and need compiling.
    assert!(output.any_elixir);
    assert_eq!(output.to_compile, vec![Utf8PathBuf::from("wibble.ex")]);
    let expected = HashMap::from([
        (Utf8PathBuf::from("/dev/wibble.ex"), "1".into()),
        (Utf8PathBuf::from("/out/wibble.ex"), "1".into()),
    ]);
    assert_eq!(expected, files.into_contents());
}
#[test]
fn all_erlang_files_are_copied_from_src_subfolders() {
    let files = InMemoryFileSystem::new();
    // Seed nested BEAM-language sources under /src.
    for (path, contents) in [
        ("/src/abc/def/wibble.erl", "1"),
        ("/src/abc/ghi/wibble_header.hrl", "2"),
        ("/src/def/wobble.ex", "3"),
    ] {
        files.write(Utf8Path::new(path), contents).unwrap();
    }
    let copier =
        NativeFileCopier::new(files.clone(), root(), root_out(), CheckModuleConflicts::Check);
    let output = copier.run().unwrap();
    // The .ex file marks the package as containing Elixir; both the .erl and
    // .ex files need compiling, while the header is only copied.
    assert!(output.any_elixir);
    assert_eq!(
        output.to_compile,
        vec![
            Utf8PathBuf::from("abc/def/wibble.erl"),
            Utf8PathBuf::from("def/wobble.ex")
        ]
    );
    let expected = HashMap::from([
        (Utf8PathBuf::from("/src/abc/def/wibble.erl"), "1".into()),
        (Utf8PathBuf::from("/out/abc/def/wibble.erl"), "1".into()),
        (
            Utf8PathBuf::from("/src/abc/ghi/wibble_header.hrl"),
            "2".into(),
        ),
        (
            Utf8PathBuf::from("/out/abc/ghi/wibble_header.hrl"),
            "2".into(),
        ),
        (Utf8PathBuf::from("/src/def/wobble.ex"), "3".into()),
        (Utf8PathBuf::from("/out/def/wobble.ex"), "3".into()),
    ]);
    assert_eq!(expected, files.into_contents());
}
#[test]
fn all_erlang_files_are_copied_from_test_subfolders() {
    let files = InMemoryFileSystem::new();
    // Seed nested BEAM-language sources under /test.
    for (path, contents) in [
        ("/test/abc/def/wibble.erl", "1"),
        ("/test/abc/ghi/wibble_header.hrl", "2"),
        ("/test/def/wobble.ex", "3"),
    ] {
        files.write(Utf8Path::new(path), contents).unwrap();
    }
    let copier =
        NativeFileCopier::new(files.clone(), root(), root_out(), CheckModuleConflicts::Check);
    let output = copier.run().unwrap();
    // The .ex file marks the package as containing Elixir; both the .erl and
    // .ex files need compiling, while the header is only copied.
    assert!(output.any_elixir);
    assert_eq!(
        output.to_compile,
        vec![
            Utf8PathBuf::from("abc/def/wibble.erl"),
            Utf8PathBuf::from("def/wobble.ex")
        ]
    );
    let expected = HashMap::from([
        (Utf8PathBuf::from("/test/abc/def/wibble.erl"), "1".into()),
        (Utf8PathBuf::from("/out/abc/def/wibble.erl"), "1".into()),
        (
            Utf8PathBuf::from("/test/abc/ghi/wibble_header.hrl"),
            "2".into(),
        ),
        (
            Utf8PathBuf::from("/out/abc/ghi/wibble_header.hrl"),
            "2".into(),
        ),
        (Utf8PathBuf::from("/test/def/wobble.ex"), "3".into()),
        (Utf8PathBuf::from("/out/def/wobble.ex"), "3".into()),
    ]);
    assert_eq!(expected, files.into_contents());
}
#[test]
fn all_erlang_files_are_copied_from_dev_subfolders() {
    let files = InMemoryFileSystem::new();
    // Seed nested BEAM-language sources under /dev.
    for (path, contents) in [
        ("/dev/abc/def/wibble.erl", "1"),
        ("/dev/abc/ghi/wibble_header.hrl", "2"),
        ("/dev/def/wobble.ex", "3"),
    ] {
        files.write(Utf8Path::new(path), contents).unwrap();
    }
    let copier =
        NativeFileCopier::new(files.clone(), root(), root_out(), CheckModuleConflicts::Check);
    let output = copier.run().unwrap();
    // The .ex file marks the package as containing Elixir; both the .erl and
    // .ex files need compiling, while the header is only copied.
    assert!(output.any_elixir);
    assert_eq!(
        output.to_compile,
        vec![
            Utf8PathBuf::from("abc/def/wibble.erl"),
            Utf8PathBuf::from("def/wobble.ex")
        ]
    );
    let expected = HashMap::from([
        (Utf8PathBuf::from("/dev/abc/def/wibble.erl"), "1".into()),
        (Utf8PathBuf::from("/out/abc/def/wibble.erl"), "1".into()),
        (
            Utf8PathBuf::from("/dev/abc/ghi/wibble_header.hrl"),
            "2".into(),
        ),
        (
            Utf8PathBuf::from("/out/abc/ghi/wibble_header.hrl"),
            "2".into(),
        ),
        (Utf8PathBuf::from("/dev/def/wobble.ex"), "3".into()),
        (Utf8PathBuf::from("/out/def/wobble.ex"), "3".into()),
    ]);
    assert_eq!(expected, files.into_contents());
}
#[test]
fn other_files_are_ignored() {
    let files = InMemoryFileSystem::new();
    files.write(Utf8Path::new("/src/wibble.cpp"), "1").unwrap();
    let copier =
        NativeFileCopier::new(files.clone(), root(), root_out(), CheckModuleConflicts::Check);
    let output = copier.run().unwrap();
    // An unknown extension is neither copied nor scheduled for compilation.
    assert!(!output.any_elixir);
    assert!(output.to_compile.is_empty());
    let expected = HashMap::from([(Utf8PathBuf::from("/src/wibble.cpp"), "1".into())]);
    assert_eq!(expected, files.into_contents());
}
#[test]
fn files_do_not_get_copied_if_there_already_is_a_new_version() {
    let files = InMemoryFileSystem::new();
    let destination = Utf8Path::new("/out/wibble.mjs");
    let source = Utf8Path::new("/src/wibble.mjs");
    files.write(destination, "in-out").unwrap();
    files.write(source, "in-src").unwrap();
    // The copy already in /out is newer than the source file...
    files.set_modification_time(destination, UNIX_EPOCH + Duration::from_secs(1));
    files.set_modification_time(source, UNIX_EPOCH);
    let copier =
        NativeFileCopier::new(files.clone(), root(), root_out(), CheckModuleConflicts::Check);
    let output = copier.run().unwrap();
    assert!(!output.any_elixir);
    assert!(output.to_compile.is_empty());
    // ...so the destination contents must be left untouched.
    let expected = HashMap::from([
        (Utf8PathBuf::from("/src/wibble.mjs"), "in-src".into()),
        (Utf8PathBuf::from("/out/wibble.mjs"), "in-out".into()),
    ]);
    assert_eq!(expected, files.into_contents());
}
#[test]
fn files_get_copied_if_the_previously_copied_version_is_older() {
    let files = InMemoryFileSystem::new();
    let destination = Utf8Path::new("/out/wibble.mjs");
    let source = Utf8Path::new("/src/wibble.mjs");
    files.write(destination, "in-out").unwrap();
    files.write(source, "in-src").unwrap();
    // The source file is newer than the stale copy in /out...
    files.set_modification_time(destination, UNIX_EPOCH);
    files.set_modification_time(source, UNIX_EPOCH + Duration::from_secs(1));
    let copier =
        NativeFileCopier::new(files.clone(), root(), root_out(), CheckModuleConflicts::Check);
    let output = copier.run().unwrap();
    assert!(!output.any_elixir);
    assert!(output.to_compile.is_empty());
    // ...so the destination must be overwritten with the source contents.
    let expected = HashMap::from([
        (Utf8PathBuf::from("/src/wibble.mjs"), "in-src".into()),
        (Utf8PathBuf::from("/out/wibble.mjs"), "in-src".into()),
    ]);
    assert_eq!(expected, files.into_contents());
}
#[test]
fn duplicate_native_files_result_in_an_error() {
    let files = InMemoryFileSystem::new();
    // Both files would land at /out/wibble.mjs, which is a conflict.
    files.write(Utf8Path::new("/src/wibble.mjs"), "1").unwrap();
    files.write(Utf8Path::new("/test/wibble.mjs"), "1").unwrap();
    let result =
        NativeFileCopier::new(files.clone(), root(), root_out(), CheckModuleConflicts::Check).run();
    assert!(result.is_err());
}
#[test]
fn conflicting_erlang_modules_in_src_result_in_an_error() {
    let files = InMemoryFileSystem::new();
    // Both nested files flatten to the same `wibble.erl` module name.
    files
        .write(Utf8Path::new("/src/a/b/c/wibble.erl"), "1")
        .unwrap();
    files
        .write(Utf8Path::new("/src/e/f/wibble.erl"), "1")
        .unwrap();
    let result =
        NativeFileCopier::new(files.clone(), root(), root_out(), CheckModuleConflicts::Check).run();
    assert!(result.is_err());
}
#[test]
fn conflicting_erlang_modules_in_src_and_test_result_in_an_error() {
    let files = InMemoryFileSystem::new();
    // The same flattened module name appears in both /src and /test.
    files
        .write(Utf8Path::new("/src/a/b/c/wibble.erl"), "1")
        .unwrap();
    files
        .write(Utf8Path::new("/test/e/f/wibble.erl"), "1")
        .unwrap();
    let result =
        NativeFileCopier::new(files.clone(), root(), root_out(), CheckModuleConflicts::Check).run();
    assert!(result.is_err());
}
#[test]
fn conflicting_erlang_modules_in_src_and_dev_result_in_an_error() {
    let files = InMemoryFileSystem::new();
    // The same flattened module name appears in both /src and /dev.
    files
        .write(Utf8Path::new("/src/a/b/c/wibble.erl"), "1")
        .unwrap();
    files
        .write(Utf8Path::new("/dev/e/f/wibble.erl"), "1")
        .unwrap();
    let result =
        NativeFileCopier::new(files.clone(), root(), root_out(), CheckModuleConflicts::Check).run();
    assert!(result.is_err());
}
#[test]
fn conflicting_erlang_modules_in_dev_and_test_result_in_an_error() {
    let files = InMemoryFileSystem::new();
    // The same flattened module name appears in both /dev and /test.
    files
        .write(Utf8Path::new("/dev/a/b/c/wibble.erl"), "1")
        .unwrap();
    files
        .write(Utf8Path::new("/test/e/f/wibble.erl"), "1")
        .unwrap();
    let result =
        NativeFileCopier::new(files.clone(), root(), root_out(), CheckModuleConflicts::Check).run();
    assert!(result.is_err());
}
#[test]
fn conflicting_gleam_and_javascript_modules_result_in_an_error() {
    let files = InMemoryFileSystem::new();
    // A Gleam module and a JS module with the same name would collide.
    files.write(Utf8Path::new("/src/wibble.gleam"), "1").unwrap();
    files.write(Utf8Path::new("/src/wibble.mjs"), "1").unwrap();
    let result =
        NativeFileCopier::new(files.clone(), root(), root_out(), CheckModuleConflicts::Check).run();
    assert!(result.is_err());
}
#[test]
fn differently_nested_gleam_and_javascript_modules_with_same_name_are_ok() {
    let files = InMemoryFileSystem::new();
    // Same leaf name but different directories: no collision for JS.
    files
        .write(Utf8Path::new("/src/a/b/c/wibble.gleam"), "1")
        .unwrap();
    files
        .write(Utf8Path::new("/src/d/e/wibble.mjs"), "1")
        .unwrap();
    let result =
        NativeFileCopier::new(files.clone(), root(), root_out(), CheckModuleConflicts::Check).run();
    assert!(result.is_ok());
}
#[test]
fn conflicting_gleam_and_erlang_modules_result_in_an_error() {
    let files = InMemoryFileSystem::new();
    // A Gleam module and an Erlang module with the same name would collide.
    files.write(Utf8Path::new("/src/wibble.gleam"), "1").unwrap();
    files.write(Utf8Path::new("/src/wibble.erl"), "1").unwrap();
    let result =
        NativeFileCopier::new(files.clone(), root(), root_out(), CheckModuleConflicts::Check).run();
    assert!(result.is_err());
}
#[test]
fn conflicting_nested_gleam_and_erlang_modules_result_in_an_error() {
    let files = InMemoryFileSystem::new();
    // `wibble/wobble.gleam` compiles to the `wibble@wobble` Erlang module,
    // so the hand-written `wibble@wobble.erl` collides with it.
    files
        .write(Utf8Path::new("/src/wibble/wobble.gleam"), "1")
        .unwrap();
    files
        .write(Utf8Path::new("/src/wibble@wobble.erl"), "1")
        .unwrap();
    let result =
        NativeFileCopier::new(files.clone(), root(), root_out(), CheckModuleConflicts::Check).run();
    assert!(result.is_err());
}
#[test]
fn conflicting_nested_gleam_file_does_not_conflict_with_root_erlang_file() {
    let files = InMemoryFileSystem::new();
    // The nested Gleam module is `wibble@wobble`, not `wobble`: no clash.
    files
        .write(Utf8Path::new("/src/wibble/wobble.gleam"), "1")
        .unwrap();
    files.write(Utf8Path::new("/src/wobble.erl"), "1").unwrap();
    let result =
        NativeFileCopier::new(files.clone(), root(), root_out(), CheckModuleConflicts::Check).run();
    assert!(result.is_ok());
}
#[test]
fn conflicting_gleam_and_erlang_modules_produce_no_error_in_dependency() {
    let files = InMemoryFileSystem::new();
    files.write(Utf8Path::new("/src/wibble.gleam"), "1").unwrap();
    files.write(Utf8Path::new("/src/wibble.erl"), "1").unwrap();
    // Conflict checking is disabled for dependency packages, so the same
    // layout that errors above succeeds here.
    let copier = NativeFileCopier::new(
        files.clone(),
        root(),
        root_out(),
        CheckModuleConflicts::DoNotCheck,
    );
    assert!(copier.run().is_ok());
}
use ecow::{EcoString, eco_format};
use hexpm::version::Version;
use super::*;
use crate::{
Warning,
build::SourceFingerprint,
io::{FileSystemWriter, memory::InMemoryFileSystem},
line_numbers,
parse::extra::ModuleExtra,
warning::NullWarningEmitterIO,
};
use std::time::Duration;
/// Flattened result of a `PackageLoader` run, recorded as module names so
/// tests can assert on compile/cached sets without project types.
#[derive(Debug)]
struct LoaderTestOutput {
    // Modules that must be (re)compiled.
    to_compile: Vec<EcoString>,
    // Modules whose caches were fresh enough to be reused.
    cached: Vec<EcoString>,
    // Warnings emitted while loading.
    warnings: Vec<Warning>,
}
// Two distinct module sources used to tell fresh and stale files apart.
// `'static` is implied on const references, so it is omitted
// (clippy::redundant_static_lifetimes).
const TEST_SOURCE_1: &str = "const x = 1";
const TEST_SOURCE_2: &str = "const x = 2";
/// Write `src` to `path` on the in-memory filesystem and stamp its
/// modification time at `seconds` past the Unix epoch.
fn write_src(fs: &InMemoryFileSystem, path: &str, seconds: u64, src: &str) {
    let path = Utf8Path::new(path);
    // `path` is already a `&Utf8Path`; the previous `&path` double-borrow
    // was redundant (clippy::needless_borrow).
    fs.write(path, src).unwrap();
    fs.set_modification_time(path, SystemTime::UNIX_EPOCH + Duration::from_secs(seconds));
}
/// Write both cache artefacts (`.cache_meta` and `.cache`) for module `name`
/// into `/artefact`, with the given mtime (seconds past the epoch), declared
/// dependencies, and the source used to derive the fingerprint/line numbers.
fn write_cache(
    fs: &InMemoryFileSystem,
    name: &str,
    seconds: u64,
    deps: Vec<(EcoString, SrcSpan)>,
    src: &str,
) {
    let line_numbers = line_numbers::LineNumbers::new(src);
    let mtime = SystemTime::UNIX_EPOCH + Duration::from_secs(seconds);
    // Metadata used by the loader to decide whether the cache is fresh.
    let cache_metadata = CacheMetadata {
        mtime,
        codegen_performed: true,
        dependencies: deps,
        fingerprint: SourceFingerprint::new(src),
        line_numbers: line_numbers.clone(),
    };
    // Nested module names use `@` in artefact file names (`nested/one` ->
    // `nested@one.cache_meta`).
    let artefact_name = name.replace("/", "@");
    let path = Utf8Path::new("/artefact").join(format!("{artefact_name}.cache_meta"));
    fs.write_bytes(&path, &cache_metadata.to_binary()).unwrap();
    // A minimal module interface: only the fields the loader inspects are
    // populated; everything else is defaulted.
    let cache = crate::type_::ModuleInterface {
        name: name.into(),
        origin: Origin::Src,
        package: "my_package".into(),
        types: Default::default(),
        types_value_constructors: Default::default(),
        values: Default::default(),
        accessors: Default::default(),
        line_numbers: line_numbers.clone(),
        is_internal: false,
        src_path: Utf8PathBuf::from(format!("/src/{}.gleam", name)),
        warnings: vec![],
        minimum_required_version: Version::new(0, 1, 0),
        type_aliases: Default::default(),
        documentation: Default::default(),
        contains_echo: false,
        references: Default::default(),
        inline_functions: Default::default(),
    };
    let path = Utf8Path::new("/artefact").join(format!("{artefact_name}.cache"));
    fs.write_bytes(
        &path,
        &metadata::ModuleEncoder::new(&cache).encode().unwrap(),
    )
    .unwrap();
}
/// Run a `PackageLoader` over the given in-memory filesystem and flatten the
/// result into module-name lists plus the warnings it emitted.
fn run_loader(fs: InMemoryFileSystem, root: &Utf8Path, artefact: &Utf8Path) -> LoaderTestOutput {
    let mut defined = im::HashMap::new();
    let ids = UniqueIdGenerator::new();
    // Collect warnings into a vector so the test can inspect them.
    let (emitter, warnings) = WarningEmitter::vector();
    let loader = PackageLoader {
        io: fs.clone(),
        ids,
        mode: Mode::Dev,
        paths: ProjectPaths::new(root.into()),
        warnings: &emitter,
        // Require codegen so caches without generated code count as stale.
        codegen: CodegenRequired::Yes,
        artefact_directory: &artefact,
        package_name: &"my_package".into(),
        target: Target::JavaScript,
        stale_modules: &mut StaleTracker::default(),
        already_defined_modules: &mut defined,
        incomplete_modules: &mut HashSet::new(),
        cached_warnings: CachedWarnings::Ignore,
    };
    let loaded = loader.run().unwrap();
    LoaderTestOutput {
        to_compile: loaded.to_compile.into_iter().map(|m| m.name).collect(),
        cached: loaded.cached.into_iter().map(|m| m.name).collect(),
        warnings: warnings.take(),
    }
}
#[test]
fn no_modules() {
    // An empty project yields nothing to compile and nothing cached.
    let fs = InMemoryFileSystem::new();
    let output = run_loader(fs, Utf8Path::new("/"), Utf8Path::new("/artefact"));
    assert!(output.to_compile.is_empty());
    assert!(output.cached.is_empty());
}
#[test]
fn one_src_module() {
    let fs = InMemoryFileSystem::new();
    // A single uncached /src module must be scheduled for compilation.
    write_src(&fs, "/src/main.gleam", 0, "const x = 1");
    let output = run_loader(fs, Utf8Path::new("/"), Utf8Path::new("/artefact"));
    assert_eq!(output.to_compile, vec![EcoString::from("main")]);
    assert!(output.cached.is_empty());
}
#[test]
fn one_test_module() {
    let fs = InMemoryFileSystem::new();
    // A single uncached /test module must be scheduled for compilation.
    write_src(&fs, "/test/main.gleam", 0, "const x = 1");
    let output = run_loader(fs, Utf8Path::new("/"), Utf8Path::new("/artefact"));
    assert_eq!(output.to_compile, vec![EcoString::from("main")]);
    assert!(output.cached.is_empty());
}
#[test]
fn one_dev_module() {
    let fs = InMemoryFileSystem::new();
    // A single uncached /dev module must be scheduled for compilation.
    write_src(&fs, "/dev/main.gleam", 0, "const x = 1");
    let output = run_loader(fs, Utf8Path::new("/"), Utf8Path::new("/artefact"));
    assert_eq!(output.to_compile, vec![EcoString::from("main")]);
    assert!(output.cached.is_empty());
}
#[test]
fn importing() {
    let fs = InMemoryFileSystem::new();
    // three -> two -> one: compile order must follow the dependency chain
    // regardless of the order the files are written in.
    write_src(&fs, "/src/three.gleam", 0, "import two");
    write_src(&fs, "/src/one.gleam", 0, "");
    write_src(&fs, "/src/two.gleam", 0, "import one");
    let output = run_loader(fs, Utf8Path::new("/"), Utf8Path::new("/artefact"));
    let expected = vec![
        EcoString::from("one"),
        EcoString::from("two"),
        EcoString::from("three"),
    ];
    assert_eq!(output.to_compile, expected);
    assert!(output.cached.is_empty());
}
#[test]
fn reading_cache() {
    let fs = InMemoryFileSystem::new();
    // Source and cache share timestamp and fingerprint, so the cached
    // artefact must be reused instead of recompiling.
    write_src(&fs, "/src/one.gleam", 0, TEST_SOURCE_1);
    write_cache(&fs, "one", 0, vec![], TEST_SOURCE_1);
    let output = run_loader(fs, Utf8Path::new("/"), Utf8Path::new("/artefact"));
    assert!(output.to_compile.is_empty());
    assert_eq!(output.cached, vec![EcoString::from("one")]);
}
#[test]
fn module_is_stale_if_cache_older() {
    let fs = InMemoryFileSystem::new();
    // The source (t=1) is newer than its cache (t=0), so it is recompiled.
    write_src(&fs, "/src/one.gleam", 1, TEST_SOURCE_2);
    write_cache(&fs, "one", 0, vec![], TEST_SOURCE_1);
    let output = run_loader(fs, Utf8Path::new("/"), Utf8Path::new("/artefact"));
    assert_eq!(output.to_compile, vec![EcoString::from("one")]);
    assert!(output.cached.is_empty());
}
#[test]
fn module_is_stale_if_deps_are_stale() {
    let fs = InMemoryFileSystem::new();
    let root = Utf8Path::new("/");
    let artefact = Utf8Path::new("/artefact");
    // Cache is stale: source (t=1) is newer than its cache (t=0).
    write_src(&fs, "/src/one.gleam", 1, TEST_SOURCE_2);
    write_cache(&fs, "one", 0, vec![], TEST_SOURCE_1);
    // Cache is fresh but dep is stale: `two`'s cache (t=2) postdates its
    // source (t=1), yet it depends on the stale `one`.
    write_src(&fs, "/src/two.gleam", 1, "import one");
    write_cache(
        &fs,
        "two",
        2,
        vec![(EcoString::from("one"), SrcSpan { start: 0, end: 0 })],
        "import one",
    );
    // Cache is fresh and has no stale dependencies.
    write_src(&fs, "/src/three.gleam", 1, TEST_SOURCE_1);
    write_cache(&fs, "three", 2, vec![], TEST_SOURCE_1);
    let loaded = run_loader(fs, root, artefact);
    // Staleness propagates from `one` to its dependant `two`; `three`
    // remains cached.
    assert_eq!(
        loaded.to_compile,
        vec![EcoString::from("one"), EcoString::from("two")]
    );
    assert_eq!(loaded.cached, vec![EcoString::from("three")]);
}
#[test]
fn module_is_stale_if_deps_removed() {
    let fs = InMemoryFileSystem::new();
    let root = Utf8Path::new("/");
    let artefact = Utf8Path::new("/artefact");
    // Source is removed, cache is present: only the cache for `nested/one`
    // exists, simulating a deleted module.
    write_cache(&fs, "nested/one", 0, vec![], TEST_SOURCE_1);
    // Cache is fresh but dep is removed, so `two` must be recompiled.
    // NOTE(review): the written source says "import one" while the cache
    // records a dependency on "nested/one" and the cache source says
    // "import nested/one" — presumably intentional to model the removed
    // dependency; confirm against the loader's fingerprint handling.
    write_src(&fs, "/src/two.gleam", 1, "import one");
    write_cache(
        &fs,
        "two",
        2,
        vec![(EcoString::from("nested/one"), SrcSpan { start: 0, end: 0 })],
        "import nested/one",
    );
    let loaded = run_loader(fs, root, artefact);
    assert_eq!(loaded.to_compile, vec![EcoString::from("two")]);
}
#[test]
fn module_continues_to_be_stale_if_deps_get_updated() {
    let fs = InMemoryFileSystem::new();
    let root = Utf8Path::new("/");
    let artefact = Utf8Path::new("/artefact");
    // Cache is stale: source (t=1) is newer than its cache (t=0).
    write_src(&fs, "/src/one.gleam", 1, TEST_SOURCE_2);
    write_cache(&fs, "one", 0, vec![], TEST_SOURCE_1);
    // Cache is fresh but dep (`one`) is stale.
    write_src(&fs, "/src/two.gleam", 1, "import one");
    write_cache(
        &fs,
        "two",
        2,
        vec![(EcoString::from("one"), SrcSpan { start: 0, end: 0 })],
        "import one",
    );
    // Cache is fresh with no dependencies.
    write_src(&fs, "/src/three.gleam", 1, TEST_SOURCE_1);
    write_cache(&fs, "three", 2, vec![], TEST_SOURCE_1);
    // First run marks `one` and `two` stale (result unused here).
    let _loaded1 = run_loader(fs.clone(), root, artefact);
    // update the dependency: `one`'s cache is now newer and matches its
    // source, so `one` itself becomes cacheable again...
    write_cache(&fs, "one", 3, vec![], TEST_SOURCE_2);
    let loaded2 = run_loader(fs, root, artefact);
    // ...but `two` must still be recompiled against the updated `one`.
    assert_eq!(loaded2.to_compile, vec![EcoString::from("two")]);
    assert_eq!(
        loaded2.cached,
        vec![EcoString::from("one"), EcoString::from("three")]
    );
}
#[test]
fn invalid_module_name() {
    let fs = InMemoryFileSystem::new();
    // A capitalised file name is not a valid module name: the file is
    // skipped entirely and a warning is emitted.
    write_src(&fs, "/src/One.gleam", 1, TEST_SOURCE_2);
    let output = run_loader(fs, Utf8Path::new("/"), Utf8Path::new("/artefact"));
    assert!(output.to_compile.is_empty());
    assert!(output.cached.is_empty());
    let expected = vec![Warning::InvalidSource {
        path: Utf8PathBuf::from("/src/One.gleam"),
    }];
    assert_eq!(output.warnings, expected);
}
#[test]
fn invalid_nested_module_name() {
    let fs = InMemoryFileSystem::new();
    // A digit-leading path segment makes the module name invalid: the file
    // is skipped entirely and a warning is emitted.
    write_src(&fs, "/src/1/one.gleam", 1, TEST_SOURCE_2);
    let output = run_loader(fs, Utf8Path::new("/"), Utf8Path::new("/artefact"));
    assert!(output.to_compile.is_empty());
    assert!(output.cached.is_empty());
    let expected = vec![Warning::InvalidSource {
        path: Utf8PathBuf::from("/src/1/one.gleam"),
    }];
    assert_eq!(output.warnings, expected);
}
#[test]
fn invalid_module_name_in_test() {
    let fs = InMemoryFileSystem::new();
    // Invalid module names in /test are skipped with a warning too.
    write_src(&fs, "/test/One.gleam", 1, TEST_SOURCE_2);
    let output = run_loader(fs, Utf8Path::new("/"), Utf8Path::new("/artefact"));
    assert!(output.to_compile.is_empty());
    assert!(output.cached.is_empty());
    let expected = vec![Warning::InvalidSource {
        path: Utf8PathBuf::from("/test/One.gleam"),
    }];
    assert_eq!(output.warnings, expected);
}
#[test]
fn invalid_nested_module_name_in_test() {
    let fs = InMemoryFileSystem::new();
    // A digit-leading path segment in /test is skipped with a warning too.
    write_src(&fs, "/test/1/one.gleam", 1, TEST_SOURCE_2);
    let output = run_loader(fs, Utf8Path::new("/"), Utf8Path::new("/artefact"));
    assert!(output.to_compile.is_empty());
    assert!(output.cached.is_empty());
    let expected = vec![Warning::InvalidSource {
        path: Utf8PathBuf::from("/test/1/one.gleam"),
    }];
    assert_eq!(output.warnings, expected);
}
#[test]
fn invalid_module_name_in_dev() {
    let fs = InMemoryFileSystem::new();
    // Invalid module names in /dev are skipped with a warning too.
    write_src(&fs, "/dev/One.gleam", 1, TEST_SOURCE_2);
    let output = run_loader(fs, Utf8Path::new("/"), Utf8Path::new("/artefact"));
    assert!(output.to_compile.is_empty());
    assert!(output.cached.is_empty());
    let expected = vec![Warning::InvalidSource {
        path: Utf8PathBuf::from("/dev/One.gleam"),
    }];
    assert_eq!(output.warnings, expected);
}
#[test]
fn invalid_nested_module_name_in_dev() {
    let fs = InMemoryFileSystem::new();
    // A digit-leading path segment in /dev is skipped with a warning too.
    write_src(&fs, "/dev/1/one.gleam", 1, TEST_SOURCE_2);
    let output = run_loader(fs, Utf8Path::new("/"), Utf8Path::new("/artefact"));
    assert!(output.to_compile.is_empty());
    assert!(output.cached.is_empty());
    let expected = vec![Warning::InvalidSource {
        path: Utf8PathBuf::from("/dev/1/one.gleam"),
    }];
    assert_eq!(output.warnings, expected);
}
#[test]
fn cache_files_are_removed_when_source_removed() {
    let fs = InMemoryFileSystem::new();
    // A cache whose source module no longer exists must be deleted.
    write_cache(&fs, "nested/one", 0, vec![], TEST_SOURCE_1);
    let _ = run_loader(fs.clone(), Utf8Path::new("/"), Utf8Path::new("/artefact"));
    assert!(fs.files().is_empty());
}
use super::*;
use crate::{
ast::{Arg, Function, ModuleConstant, Publicity},
type_::{
Deprecation,
expression::{Implementations, Purity},
},
};
use ecow::EcoString;
// (function name, argument names, body source)
type FuncInput = (&'static str, &'static [&'static str], &'static str);
// (constant name, value source)
type ConstInput = (&'static str, &'static str);
/// Build minimal `Function`/`ModuleConstant` ASTs from the given fixtures,
/// run `into_dependency_order` over them, and flatten the result into
/// levels of definition names.
fn parse_and_order(
    functions: &[FuncInput],
    constants: &[ConstInput],
) -> Result<Vec<Vec<EcoString>>, Error> {
    // Turn each (name, args, body-source) fixture into a Function AST node,
    // parsing the body and leaving every other field defaulted.
    let functions = functions
        .iter()
        .map(|(name, arguments, src)| Function {
            name: Some((SrcSpan::default(), EcoString::from(*name))),
            arguments: arguments
                .iter()
                .map(|name| Arg {
                    names: crate::ast::ArgNames::Named {
                        name: EcoString::from(*name),
                        location: Default::default(),
                    },
                    location: Default::default(),
                    annotation: None,
                    type_: (),
                })
                .collect_vec(),
            body: crate::parse::parse_statement_sequence(src)
                .expect("syntax error")
                .to_vec(),
            location: Default::default(),
            body_start: None,
            return_annotation: None,
            publicity: Publicity::Public,
            deprecation: Deprecation::NotDeprecated,
            end_position: src.len() as u32,
            return_type: (),
            documentation: None,
            external_erlang: None,
            external_javascript: None,
            implementations: Implementations {
                gleam: true,
                uses_erlang_externals: true,
                uses_javascript_externals: false,
                can_run_on_erlang: true,
                can_run_on_javascript: true,
            },
            purity: Purity::Impure,
        })
        .collect_vec();
    // Turn each (name, value-source) fixture into a ModuleConstant node.
    let constants = constants
        .iter()
        .map(|(name, value)| {
            let const_value = crate::parse::parse_const_value(value).expect("syntax error");
            ModuleConstant {
                documentation: None,
                location: Default::default(),
                publicity: Publicity::Public,
                name: EcoString::from(*name),
                name_location: SrcSpan::default(),
                annotation: None,
                value: Box::from(const_value),
                implementations: Implementations {
                    gleam: true,
                    uses_erlang_externals: true,
                    uses_javascript_externals: false,
                    can_run_on_erlang: true,
                    can_run_on_javascript: true,
                },
                type_: (),
                deprecation: Deprecation::NotDeprecated,
            }
        })
        .collect_vec();
    // Flatten each dependency level back to definition names for assertions.
    Ok(into_dependency_order(functions, constants)?
        .into_iter()
        .map(|level| {
            level
                .into_iter()
                .map(|function| match function {
                    CallGraphNode::Function(f) => f.name.map(|(_, name)| name).unwrap(),
                    CallGraphNode::ModuleConstant(c) => c.name,
                })
                .collect_vec()
        })
        .collect())
}
#[test]
fn empty() {
    // No definitions: ordering yields no levels at all.
    let ordered = parse_and_order(&[], &[]).unwrap();
    assert_eq!(ordered, Vec::<Vec<EcoString>>::new());
}
#[test]
fn no_deps() {
    // Independent definitions each form their own level, in source order.
    let funcs: &[FuncInput] = &[
        ("a", &[], "1"),
        ("b", &[], r#""ok""#),
        ("c", &[], r#"1"#),
        ("d", &[], r#"1.0"#),
        ("e", &[], r#"todo"#),
    ];
    let ordered = parse_and_order(funcs, &[]).unwrap();
    assert_eq!(
        ordered,
        vec![vec!["a"], vec!["b"], vec!["c"], vec!["d"], vec!["e"]]
    );
}
#[test]
fn one_dep() {
    // `b` references `c`, so `c` must be ordered before `b`.
    let funcs: &[FuncInput] = &[
        ("a", &[], "1"),
        ("b", &[], r#"c"#),
        ("c", &[], r#"0"#),
    ];
    let ordered = parse_and_order(funcs, &[]).unwrap();
    assert_eq!(ordered, vec![vec!["a"], vec!["c"], vec!["b"]]);
}
#[test]
fn unknown_vars() {
    // Names that are not module functions (Nil, Ok) create no edges.
    let funcs: &[FuncInput] = &[
        ("a", &[], "1"),
        ("b", &[], r#"Nil"#),
        ("c", &[], r#"Ok"#),
    ];
    let ordered = parse_and_order(funcs, &[]).unwrap();
    assert_eq!(ordered, vec![vec!["a"], vec!["b"], vec!["c"]]);
}
#[test]
fn calling_function() {
    // Direct calls order callees before callers: c, then b, then a.
    let funcs: &[FuncInput] = &[
        ("a", &[], r#"b()"#),
        ("b", &[], r#"c(1, 2)"#),
        ("c", &[], "1"),
    ];
    let ordered = parse_and_order(funcs, &[]).unwrap();
    assert_eq!(ordered, vec![vec!["c"], vec!["b"], vec!["a"]]);
}
#[test]
fn ref_in_call_argument() {
    // References inside call arguments count as dependencies too.
    let funcs: &[FuncInput] = &[
        ("a", &[], r#"c(1, b())"#),
        ("b", &[], r#"123"#),
        ("c", &[], "1"),
    ];
    let ordered = parse_and_order(funcs, &[]).unwrap();
    assert_eq!(ordered, vec![vec!["b"], vec!["c"], vec!["a"]]);
}
#[test]
fn sequence() {
    // References inside a block sequence are tracked.
    let funcs: &[FuncInput] = &[
        ("a", &[], r#"c({ 1 2 b })"#),
        ("b", &[], r#"123"#),
        ("c", &[], "1"),
    ];
    let ordered = parse_and_order(funcs, &[]).unwrap();
    assert_eq!(ordered, vec![vec!["b"], vec!["c"], vec!["a"]]);
}
#[test]
fn tuple() {
    // References inside tuple literals are tracked.
    let funcs: &[FuncInput] = &[
        ("a", &[], r#"#(b, c, 1)"#),
        ("b", &[], r#"123"#),
        ("c", &[], "1"),
    ];
    let ordered = parse_and_order(funcs, &[]).unwrap();
    assert_eq!(ordered, vec![vec!["b"], vec!["c"], vec!["a"]]);
}
#[test]
fn pipeline() {
    // Both stages of a pipeline count as dependencies.
    let funcs: &[FuncInput] = &[
        ("a", &[], r#"1 |> b |> c"#),
        ("b", &[], r#"123"#),
        ("c", &[], "1"),
    ];
    let ordered = parse_and_order(funcs, &[]).unwrap();
    assert_eq!(ordered, vec![vec!["b"], vec!["c"], vec!["a"]]);
}
#[test]
fn list() {
    // References inside list literals are tracked (duplicates collapse).
    let funcs: &[FuncInput] = &[
        ("a", &[], r#"[b, b, c, 1]"#),
        ("b", &[], r#"123"#),
        ("c", &[], "1"),
    ];
    let ordered = parse_and_order(funcs, &[]).unwrap();
    assert_eq!(ordered, vec![vec!["b"], vec!["c"], vec!["a"]]);
}
#[test]
fn list_spread() {
    // The spread tail of a list literal is tracked as a dependency.
    let funcs: &[FuncInput] = &[
        ("a", &[], r#"[b, b, ..c]"#),
        ("b", &[], r#"123"#),
        ("c", &[], "1"),
    ];
    let ordered = parse_and_order(funcs, &[]).unwrap();
    assert_eq!(ordered, vec![vec!["b"], vec!["c"], vec!["a"]]);
}
#[test]
fn record_access() {
    // `b` only references itself via `b().wibble`, so source order is kept.
    let funcs: &[FuncInput] = &[
        ("a", &[], "1"),
        ("b", &[], r#"b().wibble"#),
        ("c", &[], r#"123"#),
    ];
    let ordered = parse_and_order(funcs, &[]).unwrap();
    assert_eq!(ordered, vec![vec!["a"], vec!["b"], vec!["c"]]);
}
#[test]
fn binop() {
let functions = [
("a", [].as_slice(), r#"1 + a() + 2 / b() * 4"#),
("b", [].as_slice(), r#"123"#),
("c", [].as_slice(), "1"),
];
assert_eq!(
parse_and_order(functions.as_slice(), [].as_slice()).unwrap(),
vec![vec!["b"], vec!["a"], vec!["c"]]
);
}
#[test]
fn bit_arrays() {
let functions = [
("a", [].as_slice(), r#"<<b, c>>"#),
("b", [].as_slice(), r#"123"#),
("c", [].as_slice(), "1"),
];
assert_eq!(
parse_and_order(functions.as_slice(), [].as_slice()).unwrap(),
vec![vec!["b"], vec!["c"], vec!["a"]]
);
}
#[test]
fn tuple_index() {
let functions = [
("a", [].as_slice(), r#"b.0"#),
("b", [].as_slice(), r#"123"#),
("c", [].as_slice(), "1"),
];
assert_eq!(
parse_and_order(functions.as_slice(), [].as_slice()).unwrap(),
vec![vec!["b"], vec!["a"], vec!["c"]]
);
}
#[test]
fn record_update() {
let functions = [
("a", [].as_slice(), r#"Wibble(..b, wobble: c())"#),
("b", [].as_slice(), r#"123"#),
("c", [].as_slice(), "1"),
];
assert_eq!(
parse_and_order(functions.as_slice(), [].as_slice()).unwrap(),
vec![vec!["b"], vec!["c"], vec!["a"]]
);
}
#[test]
fn negate() {
let functions = [
("a", [].as_slice(), r#"!c()"#),
("b", [].as_slice(), r#"123"#),
("c", [].as_slice(), "1"),
];
assert_eq!(
parse_and_order(functions.as_slice(), [].as_slice()).unwrap(),
vec![vec!["b"], vec!["c"], vec!["a"]]
);
}
#[test]
fn use_() {
let functions = [
("a", [].as_slice(), r#"use x <- c"#),
("b", [].as_slice(), r#"123"#),
("c", [].as_slice(), "1"),
];
assert_eq!(
parse_and_order(functions.as_slice(), [].as_slice()).unwrap(),
vec![vec!["b"], vec!["c"], vec!["a"]]
);
}
#[test]
fn use_shadowing() {
let functions = [
("a", [].as_slice(), r#"123"#),
("b", [].as_slice(), r#"{ use c <- a c }"#),
("c", [].as_slice(), "1"),
];
assert_eq!(
parse_and_order(functions.as_slice(), [].as_slice()).unwrap(),
vec![vec!["a"], vec!["b"], vec!["c"]]
);
}
#[test]
fn fn_argument_shadowing() {
    // The anonymous function's `b` parameter shadows the top-level `b`
    // inside its body, so `a` depends only on `c`.
    //
    // Bind the array directly rather than by reference (`&[...]`) for
    // consistency with every other test in this module; `as_slice` works
    // the same either way.
    let functions = [
        ("a", [].as_slice(), r#"fn(b) { c b }"#),
        ("b", [].as_slice(), r#"123"#),
        ("c", [].as_slice(), "1"),
    ];
    assert_eq!(
        parse_and_order(functions.as_slice(), [].as_slice()).unwrap(),
        vec![vec!["b"], vec!["c"], vec!["a"]]
    );
}
#[test]
fn fn_argument_shadowing_then_not() {
    // The parameter shadow ends with the anonymous function's body: the
    // trailing `b` in the outer block refers to the top-level `b` again.
    let functions = [
        ("a", [].as_slice(), r#"{ fn(b) { c b } b }"#),
        ("b", [].as_slice(), r#"123"#),
        ("c", [].as_slice(), "1"),
    ];
    assert_eq!(
        parse_and_order(functions.as_slice(), [].as_slice()).unwrap(),
        vec![vec!["b"], vec!["c"], vec!["a"]]
    );
}

#[test]
fn let_var() {
    // `let c = b` binds a local `c` that shadows the top-level `c`, so `a`
    // depends only on `b`.
    let functions = [
        ("a", [].as_slice(), r#"{ let c = b c }"#),
        ("b", [].as_slice(), r#"123"#),
        ("c", [].as_slice(), "1"),
    ];
    assert_eq!(
        parse_and_order(functions.as_slice(), [].as_slice()).unwrap(),
        vec![vec!["b"], vec!["a"], vec!["c"]]
    );
}
// Pattern handling: names bound by a pattern shadow same-named top-level
// functions, while expressions evaluated inside patterns (such as bit array
// `size(...)`) still count as references.

#[test]
fn pattern_int() {
    // Literal int patterns introduce no references.
    let functions = [("a", [].as_slice(), r#"{ let 1 = x }"#)];
    assert_eq!(
        parse_and_order(functions.as_slice(), [].as_slice()).unwrap(),
        vec![vec!["a"]]
    );
}

#[test]
fn pattern_float() {
    // Literal float patterns introduce no references.
    let functions = [("a", [].as_slice(), r#"{ let 1.0 = x }"#)];
    assert_eq!(
        parse_and_order(functions.as_slice(), [].as_slice()).unwrap(),
        vec![vec!["a"]]
    );
}

#[test]
fn pattern_string() {
    // Literal string patterns introduce no references.
    let functions = [("a", [].as_slice(), r#"{ let "1.0" = x }"#)];
    assert_eq!(
        parse_and_order(functions.as_slice(), [].as_slice()).unwrap(),
        vec![vec!["a"]]
    );
}

#[test]
fn pattern_underscore() {
    // Discard patterns introduce no references.
    let functions = [("a", [].as_slice(), r#"{ let _ = x }"#)];
    assert_eq!(
        parse_and_order(functions.as_slice(), [].as_slice()).unwrap(),
        vec![vec!["a"]]
    );
}

#[test]
fn pattern_concat() {
    // The `c` bound by the string-concat pattern shadows the function `c`.
    let functions = [
        ("a", [].as_slice(), r#"{ let "a" <> c = b c }"#),
        ("b", [].as_slice(), r#"123"#),
        ("c", [].as_slice(), "1"),
    ];
    assert_eq!(
        parse_and_order(functions.as_slice(), [].as_slice()).unwrap(),
        vec![vec!["b"], vec!["a"], vec!["c"]]
    );
}

#[test]
fn pattern_tuple() {
    // Names bound by a tuple pattern shadow the top-level functions.
    let functions = [
        ("a", [].as_slice(), r#"{ let #(a, c) = b a c }"#),
        ("b", [].as_slice(), r#"123"#),
        ("c", [].as_slice(), "1"),
    ];
    assert_eq!(
        parse_and_order(functions.as_slice(), [].as_slice()).unwrap(),
        vec![vec!["b"], vec!["a"], vec!["c"]]
    );
}

#[test]
fn pattern_list() {
    // Names bound by a list pattern shadow the top-level functions.
    let functions = [
        ("a", [].as_slice(), r#"{ let [a, c] = b a c }"#),
        ("b", [].as_slice(), r#"123"#),
        ("c", [].as_slice(), "1"),
    ];
    assert_eq!(
        parse_and_order(functions.as_slice(), [].as_slice()).unwrap(),
        vec![vec!["b"], vec!["a"], vec!["c"]]
    );
}

#[test]
fn pattern_list_spread() {
    // The spread-tail binding also shadows the top-level function.
    let functions = [
        ("a", [].as_slice(), r#"{ let [a, ..c] = b a c }"#),
        ("b", [].as_slice(), r#"123"#),
        ("c", [].as_slice(), "1"),
    ];
    assert_eq!(
        parse_and_order(functions.as_slice(), [].as_slice()).unwrap(),
        vec![vec!["b"], vec!["a"], vec!["c"]]
    );
}

#[test]
fn pattern_bit_array_segment_size_var_usage() {
    // `size(b)` inside a bit array pattern is an expression, so it counts
    // as a reference to the top-level `b`.
    let functions = [
        (
            "a",
            [].as_slice(),
            r#"{ let <<y:size(b), _:unit(3)>> = c y }"#,
        ),
        ("b", [].as_slice(), r#"123"#),
        ("c", [].as_slice(), "1"),
    ];
    assert_eq!(
        parse_and_order(functions.as_slice(), [].as_slice()).unwrap(),
        vec![vec!["b"], vec!["c"], vec!["a"]]
    );
}

#[test]
fn pattern_assign() {
    // `as b` binds a local `b` that shadows the top-level `b`.
    let functions = [
        ("a", [].as_slice(), r#"{ let 1 as b = c b }"#),
        ("b", [].as_slice(), r#"123"#),
        ("c", [].as_slice(), "1"),
    ];
    assert_eq!(
        parse_and_order(functions.as_slice(), [].as_slice()).unwrap(),
        vec![vec!["b"], vec!["c"], vec!["a"]]
    );
}

#[test]
fn pattern_constructor() {
    // A name bound inside a constructor pattern shadows the top-level `b`.
    let functions = [
        ("a", [].as_slice(), r#"{ let Ok(b) = c b }"#),
        ("b", [].as_slice(), r#"123"#),
        ("c", [].as_slice(), "1"),
    ];
    assert_eq!(
        parse_and_order(functions.as_slice(), [].as_slice()).unwrap(),
        vec![vec!["b"], vec!["c"], vec!["a"]]
    );
}

#[test]
fn scope_reset() {
    // The inner block's `let b` shadow ends with the block: the trailing
    // `b` refers to the top-level function again.
    let functions = [
        ("a", [].as_slice(), r#"{ let x = { let b = 1 b } b }"#),
        ("b", [].as_slice(), r#"123"#),
        ("c", [].as_slice(), "1"),
    ];
    assert_eq!(
        parse_and_order(functions.as_slice(), [].as_slice()).unwrap(),
        vec![vec!["b"], vec!["a"], vec!["c"]]
    );
}
// `case` expressions: subjects, clause bodies and guards are searched for
// references; names bound by clause patterns shadow only within that clause.

#[test]
fn case_subject() {
    let functions = [
        ("a", [].as_slice(), r#"case b { _ -> 1 }"#),
        ("b", [].as_slice(), r#"123"#),
        ("c", [].as_slice(), "1"),
    ];
    assert_eq!(
        parse_and_order(functions.as_slice(), [].as_slice()).unwrap(),
        vec![vec!["b"], vec!["a"], vec!["c"]]
    );
}

#[test]
fn case_subjects() {
    // All subjects of a multi-subject case are searched.
    let functions = [
        ("a", [].as_slice(), r#"case b, c { _, _ -> 1 }"#),
        ("b", [].as_slice(), r#"123"#),
        ("c", [].as_slice(), "1"),
    ];
    assert_eq!(
        parse_and_order(functions.as_slice(), [].as_slice()).unwrap(),
        vec![vec!["b"], vec!["c"], vec!["a"]]
    );
}

#[test]
fn case_pattern_shadow() {
    // The clause pattern binds `b`, shadowing the top-level `b` in its body.
    let functions = [
        ("a", [].as_slice(), r#"case 1 { b -> b }"#),
        ("b", [].as_slice(), r#"123"#),
        ("c", [].as_slice(), "1"),
    ];
    assert_eq!(
        parse_and_order(functions.as_slice(), [].as_slice()).unwrap(),
        vec![vec!["a"], vec!["b"], vec!["c"]]
    );
}

#[test]
fn case_use_in_clause() {
    // A reference in a clause body is found.
    let functions = [
        ("a", [].as_slice(), r#"case 1 { _ -> b }"#),
        ("b", [].as_slice(), r#"123"#),
        ("c", [].as_slice(), "1"),
    ];
    assert_eq!(
        parse_and_order(functions.as_slice(), [].as_slice()).unwrap(),
        vec![vec!["b"], vec!["a"], vec!["c"]]
    );
}

#[test]
fn case_clause_doesnt_shadow_later_clauses() {
    // A binding in one clause does not shadow `b` in later clauses.
    let functions = [
        ("a", [].as_slice(), r#"case 1 { b -> 1 _ -> b }"#),
        ("b", [].as_slice(), r#"123"#),
        ("c", [].as_slice(), "1"),
    ];
    assert_eq!(
        parse_and_order(functions.as_slice(), [].as_slice()).unwrap(),
        vec![vec!["b"], vec!["a"], vec!["c"]]
    );
}

#[test]
fn case_clause_doesnt_shadow_after() {
    // A clause binding does not shadow `b` after the case expression ends.
    let functions = [
        ("a", [].as_slice(), r#"{ case 1 { b -> 1 } b }"#),
        ("b", [].as_slice(), r#"123"#),
        ("c", [].as_slice(), "1"),
    ];
    assert_eq!(
        parse_and_order(functions.as_slice(), [].as_slice()).unwrap(),
        vec![vec!["b"], vec!["a"], vec!["c"]]
    );
}

#[test]
fn guard() {
    // A reference in a clause guard is found.
    let functions = [
        ("a", [].as_slice(), r#"case 1 { _ if b -> 1 }"#),
        ("b", [].as_slice(), r#"123"#),
        ("c", [].as_slice(), "1"),
    ];
    assert_eq!(
        parse_and_order(functions.as_slice(), [].as_slice()).unwrap(),
        vec![vec!["b"], vec!["a"], vec!["c"]]
    );
}

#[test]
fn big_guard() {
    // References are found anywhere inside a compound guard expression.
    let functions = [
        (
            "a",
            [].as_slice(),
            r#"case 1 { _ if 1 == 2 || x != #(Ok(b), 123) -> 1 }"#,
        ),
        ("b", [].as_slice(), r#"123"#),
        ("c", [].as_slice(), "1"),
    ];
    assert_eq!(
        parse_and_order(functions.as_slice(), [].as_slice()).unwrap(),
        vec![vec!["b"], vec!["a"], vec!["c"]]
    );
}
#[test]
fn duplicate_external_function_name() {
    // Two functions with the same name is an error.
    let functions = [("c", [].as_slice(), "1"), ("c", [].as_slice(), "1")];
    _ = parse_and_order(functions.as_slice(), [].as_slice()).unwrap_err();
}

#[test]
fn duplicate_function_name() {
    let functions = [
        ("b", [].as_slice(), r#"123456"#),
        ("b", [].as_slice(), r#"123456"#),
    ];
    _ = parse_and_order(functions.as_slice(), [].as_slice()).unwrap_err();
}

#[test]
fn more_complex_cycle() {
    // a1, a2, a3 are mutually recursive, so they end up in a single group.
    let functions = [
        ("a1", [].as_slice(), r#"{ a2 }"#),
        ("a2", [].as_slice(), r#"{ a3 a1 }"#),
        ("a3", [].as_slice(), r#"{ a1 }"#),
    ];
    assert_eq!(
        parse_and_order(functions.as_slice(), [].as_slice()).unwrap(),
        vec![vec!["a2", "a3", "a1"]]
    );
}

#[test]
fn function_argument_shadowing() {
    // `a`'s own parameter `b` shadows the top-level `b` in its body.
    let functions = [
        ("a", ["b"].as_slice(), r#"b"#),
        ("b", [].as_slice(), r#"Nil"#),
    ];
    assert_eq!(
        parse_and_order(functions.as_slice(), [].as_slice()).unwrap(),
        vec![vec!["a"], vec!["b"]]
    );
}

#[test]
fn constants_and_functions() {
    // Constants participate in the same ordering as functions: the constant
    // `c` references `a`, `b` references the constant `c`, and `d`
    // references `c`.
    let functions = [
        ("a", ["b"].as_slice(), r#"b"#),
        ("b", [].as_slice(), r#"c"#),
    ];
    let constants = [("d", r#"c"#), ("c", r#"a"#)];
    assert_eq!(
        parse_and_order(functions.as_slice(), constants.as_slice()).unwrap(),
        vec![vec!["a"], vec!["c"], vec!["b"], vec!["d"]]
    );
}

// https://github.com/gleam-lang/gleam/issues/2275
#[test]
fn bug_2275() {
    // `two` is self-recursive; it must still be ordered before `one`.
    let functions = [
        ("one", [].as_slice(), r#"two one"#),
        ("two", [].as_slice(), r#"two"#),
    ];
    assert_eq!(
        parse_and_order(functions.as_slice(), [].as_slice()).unwrap(),
        vec![vec!["two"], vec!["one"]]
    );
}

#[test]
fn let_assert_message() {
    // The `as` message of a `let assert` is searched; here a and b are
    // mutually recursive so they share a group.
    let functions = [
        ("a", [].as_slice(), r#"{ let assert True = False as b() }"#),
        ("b", [].as_slice(), r#"a()"#),
    ];
    assert_eq!(
        parse_and_order(functions.as_slice(), [].as_slice()).unwrap(),
        vec![vec!["b", "a"]]
    );
}

#[test]
fn assert_subject() {
    // The subject of an `assert` is searched for references.
    let functions = [
        ("a", [].as_slice(), r#"{ assert b() }"#),
        ("b", [].as_slice(), r#"a()"#),
    ];
    assert_eq!(
        parse_and_order(functions.as_slice(), [].as_slice()).unwrap(),
        vec![vec!["b", "a"]]
    );
}

#[test]
fn assert_message() {
    // The `as` message of an `assert` is searched for references.
    let functions = [
        ("a", [].as_slice(), r#"{ assert False as b() }"#),
        ("b", [].as_slice(), r#"a()"#),
    ];
    assert_eq!(
        parse_and_order(functions.as_slice(), [].as_slice()).unwrap(),
        vec![vec!["b", "a"]]
    );
}
| rust | Apache-2.0 | f424547f02e621f1c5f28749786e05eda7feb098 | 2026-01-04T15:40:22.554517Z | false |
gleam-lang/gleam | https://github.com/gleam-lang/gleam/blob/f424547f02e621f1c5f28749786e05eda7feb098/compiler-core/src/docs/source_links.rs | compiler-core/src/docs/source_links.rs | use crate::{
ast::SrcSpan,
build,
config::{PackageConfig, Repository},
line_numbers::LineNumbers,
paths::ProjectPaths,
};
use camino::{Utf8Component, Utf8Path, Utf8PathBuf};
/// Builds "view source" links for documentation, pointing at a module's
/// file in the package's hosted repository at the release tag.
pub struct SourceLinker {
    // Converts byte offsets in the module's source into line numbers.
    line_numbers: LineNumbers,
    // `(url_prefix, line_range_separator)` used to format a link; `None`
    // when no repository is configured or the `Custom` variant is used,
    // in which case `url` returns an empty string.
    url_pattern: Option<(String, String)>,
}
impl SourceLinker {
    /// Creates a linker for `module`, deriving the URL pattern from the
    /// project's configured repository host and release tag.
    pub fn new(
        paths: &ProjectPaths,
        project_config: &PackageConfig,
        module: &build::Module,
    ) -> Self {
        // Path of the module's source file relative to the project root,
        // e.g. `src/my/module.gleam`.
        let path = paths
            .src_directory()
            .join(module.name.as_str())
            .strip_prefix(paths.root())
            .expect("path is not in root")
            .with_extension("gleam");
        // If the package lives in a subdirectory of the repository (e.g. a
        // monorepo) the configured repository path is prefixed.
        let path_in_repo = match project_config
            .repository
            .as_ref()
            .map(|r| r.path())
            .unwrap_or_default()
        {
            Some(repo_path) => to_url_path(&Utf8PathBuf::from(repo_path).join(path)),
            _ => to_url_path(&path),
        }
        .unwrap_or_default();
        let tag = project_config.tag_for_version(&project_config.version);
        // Per-host `(prefix, separator)` pairs: the prefix ends right before
        // the starting line number, the separator joins a start-end range
        // (each host uses a different line anchor syntax).
        let url_pattern = project_config
            .repository
            .as_ref()
            .map(|r| match r {
                Repository::GitHub { user, repo, .. } => Some((
                    format!("https://github.com/{user}/{repo}/blob/{tag}/{path_in_repo}#L"),
                    "-L".into(),
                )),
                Repository::GitLab { user, repo, .. } => Some((
                    format!("https://gitlab.com/{user}/{repo}/-/blob/{tag}/{path_in_repo}#L"),
                    "-".into(),
                )),
                // NOTE(review): this links to bitbucket.com rather than
                // bitbucket.org — confirm this is intentional.
                Repository::BitBucket { user, repo, .. } => Some((
                    format!("https://bitbucket.com/{user}/{repo}/src/{tag}/{path_in_repo}#lines-"),
                    ":".into(),
                )),
                Repository::Codeberg { user, repo, .. } => Some((
                    format!("https://codeberg.org/{user}/{repo}/src/tag/{tag}/{path_in_repo}#L"),
                    "-".into(),
                )),
                Repository::SourceHut { user, repo, .. } => Some((
                    format!("https://git.sr.ht/~{user}/{repo}/tree/{tag}/item/{path_in_repo}#L"),
                    "-".into(),
                )),
                Repository::Tangled { user, repo, .. } => Some((
                    format!("https://tangled.sh/{user}/{repo}/tree/{tag}/{path_in_repo}#L"),
                    "-".into(),
                )),
                // Gitea and Forgejo are self-hosted, so the host comes from
                // the configuration; trailing slashes are stripped to avoid
                // a double `//` in the URL.
                Repository::Gitea {
                    user, repo, host, ..
                }
                | Repository::Forgejo {
                    user, repo, host, ..
                } => {
                    let string_host = host.to_string();
                    let cleaned_host = string_host.trim_end_matches('/');
                    Some((
                        format!("{cleaned_host}/{user}/{repo}/src/tag/{tag}/{path_in_repo}#L",),
                        "-L".into(),
                    ))
                }
                // No known URL scheme for custom repositories.
                Repository::Custom { .. } => None,
            })
            .unwrap_or_default();
        SourceLinker {
            line_numbers: LineNumbers::new(&module.code),
            url_pattern,
        }
    }

    /// Returns the repository URL for the source span, anchored to its line
    /// (or line range), or an empty string when no supported repository is
    /// configured.
    pub fn url(&self, span: SrcSpan) -> String {
        match &self.url_pattern {
            Some((base, line_sep)) => {
                let start_line = self.line_numbers.line_number(span.start);
                let end_line = self.line_numbers.line_number(span.end);
                // Single-line spans get a single anchor, multi-line spans a
                // host-specific range anchor.
                if start_line == end_line {
                    format!("{base}{start_line}")
                } else {
                    format!("{base}{start_line}{line_sep}{end_line}")
                }
            }
            None => "".into(),
        }
    }
}
/// Renders a path as a `/`-separated URL path. Normal components contribute
/// their text; any other component (root, `.`, `..`, prefix) contributes an
/// empty segment, matching the original separator-per-component behaviour.
fn to_url_path(path: &Utf8Path) -> Option<String> {
    let segments: Vec<&str> = path
        .components()
        .map(|component| match component {
            Utf8Component::Normal(segment) => segment,
            _ => "",
        })
        .collect();
    Some(segments.join("/"))
}
| rust | Apache-2.0 | f424547f02e621f1c5f28749786e05eda7feb098 | 2026-01-04T15:40:22.554517Z | false |
gleam-lang/gleam | https://github.com/gleam-lang/gleam/blob/f424547f02e621f1c5f28749786e05eda7feb098/compiler-core/src/docs/tests.rs | compiler-core/src/docs/tests.rs | use std::{
collections::{HashMap, HashSet},
time::SystemTime,
};
use super::{
Dependency, DependencyKind, DocumentationConfig, SearchData, SearchItem, SearchItemType,
SearchProgrammingLanguage,
printer::{PrintOptions, Printer},
source_links::SourceLinker,
};
use crate::{
build::{
self, Mode, NullTelemetry, Origin, PackageCompiler, StaleTracker,
TargetCodegenConfiguration,
},
config::{DocsPage, PackageConfig, Repository},
docs::{DocContext, search_item_for_module, search_item_for_type, search_item_for_value},
io::{FileSystemWriter, memory::InMemoryFileSystem},
paths::ProjectPaths,
type_,
uid::UniqueIdGenerator,
version::COMPILER_VERSION,
warning::WarningEmitter,
};
use camino::Utf8PathBuf;
use ecow::{EcoString, eco_format};
use hexpm::version::Version;
use http::Uri;
use itertools::Itertools;
use serde_json::to_string as serde_to_string;
/// Options for `compile_with_markdown_pages`.
#[derive(Default)]
struct CompileWithMarkdownPagesOpts {
    // The `DocContext` to render with; `None` (the default) falls back to
    // `DocContext::HexPublish`.
    hex_publish: Option<DocContext>,
}
/// Compiles `modules` in an in-memory filesystem, generates HTML docs along
/// with the given markdown pages, and returns the text of every generated
/// `.html` file (sorted by path) concatenated for snapshot assertions. The
/// compiler version is replaced with a placeholder so snapshots remain
/// stable across releases.
fn compile_with_markdown_pages(
    config: PackageConfig,
    modules: Vec<(&str, &str)>,
    markdown_pages: Vec<(&str, &str)>,
    opts: CompileWithMarkdownPagesOpts,
) -> EcoString {
    let fs = InMemoryFileSystem::new();
    for (name, src) in modules {
        fs.write(&Utf8PathBuf::from(format!("/src/{name}")), src)
            .unwrap();
    }
    // We're saving the pages under a different `InMemoryFileSystem` for these
    // tests so we don't have to juggle with borrows and lifetimes.
    // The package compiler is going to take ownership of `fs` but later
    // `generate_html` also needs a `FileSystemReader` to go and read the
    // markdown pages' content.
    let pages_fs = InMemoryFileSystem::new();
    for (title, src) in markdown_pages.iter() {
        pages_fs
            .write(&Utf8PathBuf::from(format!("{title}.md")), src)
            .unwrap();
    }
    let ids = UniqueIdGenerator::new();
    let mut type_manifests = im::HashMap::new();
    let mut defined_modules = im::HashMap::new();
    let warnings = WarningEmitter::null();
    let target = TargetCodegenConfiguration::Erlang { app_file: None };
    let root = Utf8PathBuf::from("/");
    let build = root.join("build");
    let lib = root.join("lib");
    let paths = ProjectPaths::new(root.clone());
    let mut compiler =
        PackageCompiler::new(&config, Mode::Dev, &root, &build, &lib, &target, ids, fs);
    compiler.write_entrypoint = false;
    compiler.write_metadata = false;
    compiler.compile_beam_bytecode = true;
    let mut modules = compiler
        .compile(
            &warnings,
            &mut type_manifests,
            &mut defined_modules,
            &mut StaleTracker::default(),
            &mut HashSet::new(),
            &NullTelemetry,
        )
        .unwrap()
        .modules;
    for module in &mut modules {
        module.attach_doc_and_module_comments();
    }
    // Each markdown page becomes a docs page rendered to `<title>.html`.
    let docs_pages = markdown_pages
        .into_iter()
        .map(|(title, _)| DocsPage {
            title: (*title).into(),
            path: format!("{title}.html"),
            source: format!("{title}.md").into(),
        })
        .collect_vec();
    super::generate_html(
        &paths,
        DocumentationConfig {
            package_config: &config,
            dependencies: HashMap::new(),
            analysed: &modules,
            docs_pages: &docs_pages,
            rendering_timestamp: SystemTime::UNIX_EPOCH,
            // Default to `HexPublish` when the test didn't request a
            // specific context.
            context: opts.hex_publish.unwrap_or(DocContext::HexPublish),
        },
        pages_fs,
    )
    .into_iter()
    .filter(|file| file.path.extension() == Some("html"))
    .sorted_by(|a, b| a.path.cmp(&b.path))
    .flat_map(|file| {
        // Binary outputs have no text and are skipped.
        Some(format!(
            "//// {}\n\n{}\n\n",
            file.path.as_str(),
            file.content
                .text()?
                .replace(COMPILER_VERSION, "GLEAM_VERSION_HERE")
        ))
    })
    .collect::<String>()
    .chars()
    .collect()
}
/// Compiles `modules` and renders their documentation with no extra
/// markdown pages and default options.
pub fn compile(config: PackageConfig, modules: Vec<(&str, &str)>) -> EcoString {
    let no_pages = Vec::new();
    let default_opts = CompileWithMarkdownPagesOpts::default();
    compile_with_markdown_pages(config, modules, no_pages, default_opts)
}
/// Type-checks `module_src` (with the given dependency `modules`), renders
/// its documentation with `Printer`, and returns a plain-text report of the
/// source code, type definitions (with constructors) and value definitions,
/// suitable for snapshot assertions.
fn compile_documentation(
    module_name: &str,
    module_src: &str,
    modules: Vec<(&str, &str, &str)>,
    dependency_kind: DependencyKind,
    options: PrintOptions,
) -> EcoString {
    let module = type_::tests::compile_module(module_name, module_src, None, modules.clone())
        .expect("Module should compile successfully");
    let mut config = PackageConfig::default();
    config.name = "thepackage".into();
    let paths = ProjectPaths::new("/".into());
    // A minimal `build::Module` wrapper so a `SourceLinker` can be built.
    let build_module = build::Module {
        name: "main".into(),
        code: module_src.into(),
        mtime: SystemTime::now(),
        input_path: "/".into(),
        origin: Origin::Src,
        ast: module,
        extra: Default::default(),
        dependencies: Default::default(),
    };
    let source_links = SourceLinker::new(&paths, &config, &build_module);
    let module = build_module.ast;
    // Every extra module's package becomes a dependency at version 1.0.0
    // with the requested kind (hex/git/path).
    let dependencies = modules
        .iter()
        .map(|(package, _, _)| {
            (
                EcoString::from(*package),
                Dependency {
                    version: Version::new(1, 0, 0),
                    kind: dependency_kind,
                },
            )
        })
        .collect();
    let mut printer = Printer::new(
        module.type_info.package.clone(),
        module.name.clone(),
        &module.names,
        &dependencies,
    );
    printer.set_options(options);
    let types = printer.type_definitions(&source_links, &module.definitions);
    let values = printer.value_definitions(&source_links, &module.definitions);
    // Assemble the report: source first, then types, then values. Sections
    // are omitted entirely when empty.
    let mut output = EcoString::new();
    output.push_str("---- SOURCE CODE\n");
    for (_package, name, src) in modules {
        output.push_str(&format!("-- {name}.gleam\n{src}\n\n"));
    }
    output.push_str("-- ");
    output.push_str(module_name);
    output.push_str(".gleam\n");
    output.push_str(module_src);
    if !types.is_empty() {
        output.push_str("\n\n---- TYPES");
    }
    for type_ in types {
        output.push_str("\n\n--- ");
        output.push_str(type_.name);
        if !type_.documentation.is_empty() {
            output.push('\n');
            output.push_str(&type_.documentation);
        }
        output.push_str("\n<pre><code>");
        output.push_str(&type_.definition);
        output.push_str("</code></pre>");
        if !type_.constructors.is_empty() {
            output.push_str("\n\n-- CONSTRUCTORS");
        }
        for constructor in type_.constructors {
            output.push_str("\n\n");
            if !constructor.documentation.is_empty() {
                output.push_str(&constructor.documentation);
                output.push('\n');
            }
            output.push_str("<pre><code>");
            output.push_str(&constructor.definition);
            output.push_str("</code></pre>");
        }
    }
    if !values.is_empty() {
        output.push_str("\n\n---- VALUES");
    }
    for value in values {
        output.push_str("\n\n--- ");
        output.push_str(value.name);
        if !value.documentation.is_empty() {
            output.push('\n');
            output.push_str(&value.documentation);
        }
        output.push_str("\n<pre><code>");
        output.push_str(&value.definition);
        output.push_str("</code></pre>");
    }
    output
}
/// Snapshot-tests the documentation rendered for the given source. Arms
/// optionally accept extra modules (as `(name, src)` pairs for the default
/// "thepackage" package, or `(package, name, src)` triples), the main module
/// name, `PrintOptions`, and a `git:`/`path:` prefix selecting the
/// dependency kind (default is hex).
macro_rules! assert_documentation {
    // Source only: main module, default options.
    ($src:literal $(,)?) => {
        assert_documentation!($src, PrintOptions::all());
    };
    // Source with explicit print options.
    ($src:literal, $options:expr $(,)?) => {
        let output = compile_documentation("main", $src, Vec::new(), DependencyKind::Hex, $options);
        insta::assert_snapshot!(output);
    };
    // Extra same-package modules, default options.
    ($(($name:expr, $module_src:literal)),+, $src:literal $(,)?) => {
        let output = compile_documentation(
            "main",
            $src,
            vec![$(("thepackage", $name, $module_src)),*],
            DependencyKind::Hex,
            PrintOptions::all(),
        );
        insta::assert_snapshot!(output);
    };
    // Extra same-package modules with explicit options.
    ($(($name:expr, $module_src:literal)),+, $src:literal, $options:expr $(,)?) => {
        let output = compile_documentation(
            "main",
            $src,
            vec![$(("thepackage", $name, $module_src)),*],
            DependencyKind::Hex,
            $options,
        );
        insta::assert_snapshot!(output);
    };
    // As above with an explicit main module name.
    ($(($name:expr, $module_src:literal)),+, $main_module:literal, $src:literal, $options:expr $(,)?) => {
        let output = compile_documentation(
            $main_module,
            $src,
            vec![$(("thepackage", $name, $module_src)),*],
            DependencyKind::Hex,
            $options,
        );
        insta::assert_snapshot!(output);
    };
    // Modules from other packages, default options.
    ($(($package:expr, $name:expr, $module_src:literal)),+, $src:literal $(,)?) => {
        let output = compile_documentation(
            "main",
            $src,
            vec![$(($package, $name, $module_src)),*],
            DependencyKind::Hex,
            PrintOptions::all(),
        );
        insta::assert_snapshot!(output);
    };
    // Modules from other packages with explicit options.
    ($(($package:expr, $name:expr, $module_src:literal)),+, $src:literal, $options:expr $(,)?) => {
        let output = compile_documentation(
            "main",
            $src,
            vec![$(($package, $name, $module_src)),*],
            DependencyKind::Hex,
            $options,
        );
        insta::assert_snapshot!(output);
    };
    // Dependencies resolved from git.
    (git: $(($package:expr, $name:expr, $module_src:literal)),+, $src:literal, $options:expr $(,)?) => {
        let output = compile_documentation(
            "main",
            $src,
            vec![$(($package, $name, $module_src)),*],
            DependencyKind::Git,
            $options,
        );
        insta::assert_snapshot!(output);
    };
    // Dependencies resolved from a local path.
    (path: $(($package:expr, $name:expr, $module_src:literal)),+, $src:literal, $options:expr $(,)?) => {
        let output = compile_documentation(
            "main",
            $src,
            vec![$(($package, $name, $module_src)),*],
            DependencyKind::Path,
            $options,
        );
        insta::assert_snapshot!(output);
    };
}
// HTML documentation generation snapshot tests.

#[test]
fn hello_docs() {
    let mut config = PackageConfig::default();
    config.name = EcoString::from("test_project_name");
    let modules = vec![(
        "app.gleam",
        r#"
/// Here is some documentation
pub fn one() {
  1
}
"#,
    )];
    insta::assert_snapshot!(compile(config, modules));
}

#[test]
fn ignored_argument_is_called_arg() {
    let mut config = PackageConfig::default();
    config.name = EcoString::from("test_project_name");
    let modules = vec![("app.gleam", "pub fn one(_) { 1 }")];
    insta::assert_snapshot!(compile(config, modules));
}
// https://github.com/gleam-lang/gleam/issues/2347
#[test]
fn tables() {
    // Markdown tables in doc comments must render.
    let mut config = PackageConfig::default();
    config.name = EcoString::from("test_project_name");
    let modules = vec![(
        "app.gleam",
        r#"
/// | heading 1    | heading 2    |
/// |--------------|--------------|
/// | row 1 cell 1 | row 1 cell 2 |
/// | row 2 cell 1 | row 2 cell 2 |
///
pub fn one() {
  1
}
"#,
    )];
    insta::assert_snapshot!(compile(config, modules));
}

// https://github.com/gleam-lang/gleam/issues/2202
#[test]
fn long_function_wrapping() {
    // Long signatures should wrap sensibly in the rendered docs.
    let mut config = PackageConfig::default();
    config.name = EcoString::from("test_project_name");
    let modules = vec![(
        "app.gleam",
        r#"
pub type Option(t) {
  Some(t)
  None
}

/// Returns the first value if it is `Some`, otherwise evaluates the given
/// function for a fallback value.
///
pub fn lazy_or(first: Option(a), second: fn() -> Option(a)) -> Option(a) {
  case first {
    Some(_) -> first
    None -> second()
  }
}
"#,
    )];
    insta::assert_snapshot!(compile(config, modules));
}

#[test]
fn internal_definitions_are_not_included() {
    // `@internal` items must be excluded from the generated docs.
    let mut config = PackageConfig::default();
    config.name = EcoString::from("test_project_name");
    let modules = vec![(
        "app.gleam",
        r#"
@internal
pub const wibble = 1

@internal
pub type Wibble = Int

@internal
pub type Wobble { Wobble }

@internal
pub fn one() { 1 }
"#,
    )];
    insta::assert_snapshot!(compile(config, modules));
}

// https://github.com/gleam-lang/gleam/issues/2561
#[test]
fn discarded_arguments_are_not_shown() {
    let mut config = PackageConfig::default();
    config.name = EcoString::from("test_project_name");
    let modules = vec![("app.gleam", "pub fn discard(_discarded: a) -> Int { 1 }")];
    insta::assert_snapshot!(compile(config, modules));
}

// https://github.com/gleam-lang/gleam/issues/2631
#[test]
fn docs_of_a_type_constructor_are_not_used_by_the_following_function() {
    let mut config = PackageConfig::default();
    config.name = EcoString::from("test_project_name");
    let modules = vec![(
        "app.gleam",
        r#"
pub type Wibble {
  Wobble(
    /// Documentation!!
    wabble: Int,
  )
}

pub fn main() { todo }
"#,
    )];
    insta::assert_snapshot!(compile(config, modules));
}
#[test]
fn markdown_code_from_standalone_pages_is_not_trimmed() {
    // Indentation inside code fences on standalone pages is significant and
    // must survive rendering.
    let mut config = PackageConfig::default();
    config.name = EcoString::from("test_project_name");
    let pages = vec![(
        "one",
        "
This is an example code snippet that should be indented
```gleam
pub fn indentation_test() {
  todo as \"This line should be indented by two spaces\"
}
```",
    )];
    insta::assert_snapshot!(compile_with_markdown_pages(
        config,
        vec![],
        pages,
        CompileWithMarkdownPagesOpts::default()
    ));
}

#[test]
fn markdown_code_from_function_comment_is_trimmed() {
    // The shared `/// ` prefix must be stripped from doc comment content.
    let mut config = PackageConfig::default();
    config.name = EcoString::from("test_project_name");
    let modules = vec![(
        "app.gleam",
        "
/// Here's an example code snippet:
/// ```
/// wibble
/// |> wobble
/// ```
///
pub fn indentation_test() {
  todo
}
",
    )];
    insta::assert_snapshot!(compile(config, modules));
}

#[test]
fn markdown_code_from_module_comment_is_trimmed() {
    // Likewise for `////` module comments.
    let mut config = PackageConfig::default();
    config.name = EcoString::from("test_project_name");
    let modules = vec![(
        "app.gleam",
        "
//// Here's an example code snippet:
//// ```
//// wibble
//// |> wobble
//// ```
////
",
    )];
    insta::assert_snapshot!(compile(config, modules));
}

// Doc comments attached to a commented-out definition must not leak onto
// the next definition of any kind.

#[test]
fn doc_for_commented_definitions_is_not_included_in_next_constant() {
    let mut config = PackageConfig::default();
    config.name = EcoString::from("test_project_name");
    let modules = vec![(
        "app.gleam",
        "
/// Not included!
// pub fn wibble() {}

/// Included!
pub const wobble = 1
",
    )];
    assert!(!compile(config, modules).contains("Not included!"));
}

#[test]
fn doc_for_commented_definitions_is_not_included_in_next_type() {
    let mut config = PackageConfig::default();
    config.name = EcoString::from("test_project_name");
    let modules = vec![(
        "app.gleam",
        "
/// Not included!
// pub fn wibble() {}

/// Included!
pub type Wibble {
  /// Wobble!
  Wobble
}
",
    )];
    assert!(!compile(config, modules).contains("Not included!"));
}

#[test]
fn doc_for_commented_definitions_is_not_included_in_next_function() {
    let mut config = PackageConfig::default();
    config.name = EcoString::from("test_project_name");
    let modules = vec![(
        "app.gleam",
        "
/// Not included!
// pub fn wibble() {}

/// Included!
pub fn wobble(arg) {}
",
    )];
    assert!(!compile(config, modules).contains("Not included!"));
}

#[test]
fn doc_for_commented_definitions_is_not_included_in_next_type_alias() {
    let mut config = PackageConfig::default();
    config.name = EcoString::from("test_project_name");
    let modules = vec![(
        "app.gleam",
        "
/// Not included!
// pub fn wibble() {}

/// Included!
pub type Wibble = Int
",
    )];
    assert!(!compile(config, modules).contains("Not included!"));
}
#[test]
fn source_link_for_github_repository() {
    // GitHub repositories get `blob/<tag>` source links with `#L` anchors.
    let mut config = PackageConfig::default();
    config.name = EcoString::from("test_project_name");
    config.repository = Some(Repository::GitHub {
        user: "wibble".to_string(),
        repo: "wobble".to_string(),
        path: None,
        tag_prefix: None,
    });
    let modules = vec![("app.gleam", "pub type Wibble = Int")];
    assert!(
        compile(config, modules)
            .contains("https://github.com/wibble/wobble/blob/v0.1.0/src/app.gleam#L1")
    );
}

#[test]
fn source_link_for_github_repository_with_path_and_tag_prefix() {
    // A monorepo `path` and a `tag_prefix` are both reflected in the link.
    let mut config = PackageConfig::default();
    config.name = EcoString::from("test_project_name");
    config.repository = Some(Repository::GitHub {
        user: "wibble".to_string(),
        repo: "wobble".to_string(),
        path: Some("path/to/package".to_string()),
        tag_prefix: Some("subdir-".into()),
    });
    let modules = vec![("app.gleam", "pub type Wibble = Int")];
    assert!(compile(config, modules).contains(
        "https://github.com/wibble/wobble/blob/subdir-v0.1.0/path/to/package/src/app.gleam#L1"
    ));
}

#[test]
fn canonical_link() {
    let mut config = PackageConfig::default();
    config.name = EcoString::from("test_project_name");
    let modules = vec![
        (
            "app.gleam",
            r#"
/// Here is some documentation
pub fn one() {
  1
}
"#,
        ),
        (
            "gleam/otp/actor.gleam",
            r#"
/// Here is some documentation
pub fn one() {
  1
}
"#,
        ),
    ];
    let pages = vec![(
        "LICENSE",
        r#"
# LICENSE
"#,
    )];
    insta::assert_snapshot!(compile_with_markdown_pages(
        config,
        modules,
        pages,
        CompileWithMarkdownPagesOpts::default()
    ));
}

#[test]
fn no_hex_publish() {
    // Rendering with `DocContext::Build` instead of the hex-publish default.
    let mut config = PackageConfig::default();
    config.name = EcoString::from("test_project_name");
    let modules = vec![
        (
            "app.gleam",
            r#"
/// Here is some documentation
pub fn one() {
  1
}
"#,
        ),
        (
            "gleam/otp/actor.gleam",
            r#"
/// Here is some documentation
pub fn one() {
  1
}
"#,
        ),
    ];
    let pages = vec![(
        "LICENSE",
        r#"
# LICENSE
"#,
    )];
    insta::assert_snapshot!(compile_with_markdown_pages(
        config,
        modules,
        pages,
        CompileWithMarkdownPagesOpts {
            hex_publish: Some(DocContext::Build)
        }
    ));
}
/// Builds a representative `SearchData` fixture covering one module, one
/// type and two value items, used by the search-data serialization tests.
fn create_sample_search_data() -> SearchData {
    SearchData {
        items: vec![
            SearchItem {
                type_: SearchItemType::Module,
                parent_title: "gleam/option".to_string(),
                title: "gleam/option".to_string(),
                content: "".to_string(),
                reference: "gleam/option.html".to_string(),
            },
            SearchItem {
                type_: SearchItemType::Type,
                parent_title: "gleam/option".to_string(),
                title: "Option".to_string(),
                content: "`Option` represents a value that may be present or not. `Some` means the value is present, `None` means the value is not.".to_string(),
                reference: "gleam/option.html#Option".to_string(),
            },
            SearchItem {
                type_: SearchItemType::Value,
                parent_title: "gleam/option".to_string(),
                title: "unwrap".to_string(),
                content: "Extracts the value from an `Option`, returning a default value if there is none.".to_string(),
                reference: "gleam/option.html#unwrap".to_string(),
            },
            // Includes embedded newlines and code to exercise JSON escaping.
            SearchItem {
                type_: SearchItemType::Value,
                parent_title: "gleam/dynamic/decode".to_string(),
                title: "bool".to_string(),
                content: "A decoder that decodes `Bool` values.\n\n # Examples\n\n \n let result = decode.run(dynamic.from(True), decode.bool)\n assert result == Ok(True)\n \n".to_string(),
                reference: "gleam/dynamic/decode.html#bool".to_string(),
            },
        ],
        programming_language: SearchProgrammingLanguage::Gleam,
    }
}
#[test]
fn ensure_search_data_matches_exdocs_search_data_model_specification() {
    // The serialized shape must match what the ExDoc search frontend
    // expects: top-level `items`/`proglang`, and camelCase item keys.
    let data = create_sample_search_data();
    let json = serde_to_string(&data).unwrap();
    let parsed: serde_json::Value = serde_json::from_str(&json).unwrap();
    // Ensure output of SearchData matches specification
    assert!(parsed.is_object());
    let obj = parsed.as_object().unwrap();
    assert!(obj.contains_key("items"));
    assert!(obj.contains_key("proglang"));
    // Ensure output of SearchItem matches specification
    let items = obj.get("items").unwrap().as_array().unwrap();
    for item in items {
        let item = item.as_object().unwrap();
        assert!(item.contains_key("type"));
        assert!(item.contains_key("parentTitle"));
        assert!(item.contains_key("title"));
        assert!(item.contains_key("doc"));
        assert!(item.contains_key("ref"));
    }
}

#[test]
fn output_of_search_data_json() {
    // Snapshot of the exact serialized JSON.
    let data = create_sample_search_data();
    let json = serde_to_string(&data).unwrap();
    insta::assert_snapshot!(json);
}
// Print options used by the documentation-printer tests below:
// HTML links without syntax highlighting.
const ONLY_LINKS: PrintOptions = PrintOptions {
    print_highlighting: false,
    print_html: true,
};
// Plain text: no highlighting, no HTML at all.
const NONE: PrintOptions = PrintOptions {
    print_highlighting: false,
    print_html: false,
};
// Snapshot tests for syntax highlighting of rendered definitions.
#[test]
fn highlight_function_definition() {
    assert_documentation!(
        "
pub fn wibble(list: List(Int), generic: a, function: fn(a) -> b) -> #(a, b) { todo }
"
    );
}
#[test]
fn highlight_constant_definition() {
    assert_documentation!(
        "
pub const x = 22
"
    );
}
#[test]
fn highlight_type_alias() {
    assert_documentation!(
        "
pub type Option(a) = Result(a, Nil)
"
    );
}
#[test]
fn highlight_custom_type() {
    assert_documentation!(
        "
pub type Wibble(a, b) {
Wibble(a, i: Int)
Wobble(b: b, c: String)
}
"
    );
}
// Opaque types should render their head only, with no constructors.
#[test]
fn highlight_opaque_custom_type() {
    assert_documentation!(
        "
pub opaque type Wibble(a, b) {
Wibble(a, i: Int)
Wobble(b: b, c: String)
}
"
    );
}
// https://github.com/gleam-lang/gleam/issues/2629
#[test]
fn print_type_variables_in_function_signatures() {
    // Annotated type variable names must survive into the printed signature.
    assert_documentation!(
        "
pub type Dict(key, value)
pub fn insert(dict: Dict(key, value), key: key, value: value) -> Dict(key, value) {
dict
}
",
        NONE
    );
}
// https://github.com/gleam-lang/gleam/issues/828
#[test]
fn print_qualified_names_from_other_modules() {
    // Types imported from another module should print with their qualifier.
    assert_documentation!(
        (
            "gleam/option",
            "
pub type Option(t) {
Some(t)
None
}
"
        ),
        "
import gleam/option.{type Option, Some, None}
pub fn from_option(o: Option(t), e: e) -> Result(t, e) {
case o {
Some(t) -> Ok(t)
None -> Error(e)
}
}
",
        NONE
    );
}
// https://github.com/gleam-lang/gleam/issues/3461
#[test]
fn link_to_type_in_same_module() {
    // A type used on its own page should link with a same-page anchor.
    assert_documentation!(
        "
pub type Dict(a, b)
pub fn new() -> Dict(a, b) { todo }
",
        ONLY_LINKS
    );
}
// https://github.com/gleam-lang/gleam/issues/3461
#[test]
fn link_to_type_in_different_module() {
    assert_documentation!(
        ("gleam/dict", "pub type Dict(a, b)"),
        "
import gleam/dict
pub fn make_dict() -> dict.Dict(a, b) { todo }
",
        ONLY_LINKS
    );
}
// Links from a nested module must backtrack with `..` to reach the target page.
#[test]
fn link_to_type_in_different_module_from_nested_module() {
    assert_documentation!(
        ("gleam/dict", "pub type Dict(a, b)"),
        "gleam/dynamic/decode",
        "
import gleam/dict
pub fn decode_dict() -> dict.Dict(a, b) { todo }
",
        ONLY_LINKS
    );
}
// Shared leading path segments should not be traversed back and forth.
#[test]
fn link_to_type_in_different_module_from_nested_module_with_shared_path() {
    assert_documentation!(
        ("gleam/dynamic", "pub type Dynamic"),
        "gleam/dynamic/decode",
        "
import gleam/dynamic
pub type Dynamic = dynamic.Dynamic
",
        ONLY_LINKS
    );
}
// https://github.com/gleam-lang/gleam/issues/3461
#[test]
fn link_to_type_in_different_package() {
    // Hex dependencies get absolute HexDocs links.
    assert_documentation!(
        ("gleam_stdlib", "gleam/dict", "pub type Dict(a, b)"),
        "
import gleam/dict
pub fn make_dict() -> dict.Dict(a, b) { todo }
",
        ONLY_LINKS
    );
}
// Git dependencies have no reliable docs URL, so no link is produced.
#[test]
fn no_link_to_type_in_git_dependency() {
    assert_documentation!(
        git: ("gleam_stdlib", "gleam/dict", "pub type Dict(a, b)"),
        "
import gleam/dict
pub fn make_dict() -> dict.Dict(a, b) { todo }
",
        ONLY_LINKS
    );
}
// Path dependencies have no reliable docs URL, so no link is produced.
#[test]
fn no_link_to_type_in_path_dependency() {
    assert_documentation!(
        path: ("gleam_stdlib", "gleam/dict", "pub type Dict(a, b)"),
        "
import gleam/dict
pub fn make_dict() -> dict.Dict(a, b) { todo }
",
        ONLY_LINKS
    );
}
// Prelude types (Int, String, ...) have no documentation page to link to.
#[test]
fn no_links_to_prelude_types() {
    assert_documentation!(
        "
pub fn int_to_string(i: Int) -> String { todo }
",
        ONLY_LINKS
    );
}
// Tests for the names generated for unannotated type variables (a, b, c, ...).
#[test]
fn generated_type_variables() {
    assert_documentation!(
        "
pub fn wibble(_a, _b, _c, _d) {
todo
}
",
        NONE
    );
}
// Generated names must not clash with annotated variables in the same definition.
#[test]
fn generated_type_variables_mixed_with_existing_variables() {
    assert_documentation!(
        "
pub fn wibble(_a: b, _b: a, _c, _d) {
todo
}
",
        NONE
    );
}
// Annotated names later in the signature must also be avoided (preregistration).
#[test]
fn generated_type_variables_with_existing_variables_coming_afterwards() {
    assert_documentation!(
        "
pub fn wibble(_a, _b, _c: b, _d: a) {
todo
}
",
        NONE
    );
}
// Variable naming state is reset between definitions.
#[test]
fn generated_type_variables_do_not_take_into_account_other_definitions() {
    assert_documentation!(
        "
pub fn wibble(_a: a, _b: b, _c: c) -> d {
todo
}
pub fn identity(x) { x }
",
        NONE
    );
}
// Tests for how `@internal` types and their public re-exports are printed
// and linked in documentation.
#[test]
fn internal_type_reexport_in_same_module_as_parameter() {
    assert_documentation!(
        "
@internal
pub type Internal
pub type External =
List(Internal)
",
        ONLY_LINKS
    );
}
// Same scenario, but checking highlighting output instead of links.
#[test]
fn internal_type_reexport_in_same_module_as_parameter_colours() {
    assert_documentation!(
        "
@internal
pub type Internal
pub type External =
List(Internal)
",
    );
}
#[test]
fn internal_type_reexport_in_same_module() {
    assert_documentation!(
        "
@internal
pub type Internal
pub type External =
Internal
",
        ONLY_LINKS
    );
}
#[test]
fn internal_type_reexport_in_different_module() {
    assert_documentation!(
        ("other", "@internal pub type Internal"),
        "
import other
pub type External =
other.Internal
",
        ONLY_LINKS
    );
}
// Types from a module under an `internal/` path are treated as internal too.
#[test]
fn public_type_reexport_in_different_internal_module() {
    assert_documentation!(
        ("thepackage/internal/other", "pub type Internal"),
        "
import thepackage/internal/other
pub type External =
other.Internal
",
        ONLY_LINKS
    );
}
// A re-export in another package should be printed using its public alias.
#[test]
fn use_reexport_from_other_package() {
    assert_documentation!(
        ("some_package", "some_package/internal", "pub type Internal"),
        (
            "some_package",
            "some_package/api",
            "
import some_package/internal
pub type External = internal.Internal
"
        ),
        "
import some_package/api
pub fn do_thing(value: api.External) {
value
}
",
        ONLY_LINKS
    );
}
#[test]
fn function_uses_reexport_of_internal_type() {
    assert_documentation!(
        ("thepackage/internal", "pub type Internal"),
        "
import thepackage/internal
pub type External = internal.Internal
pub fn do_thing(value: internal.Internal) -> External {
value
}
",
        ONLY_LINKS
    );
}
#[test]
fn function_uses_reexport_of_internal_type_in_other_module() {
    assert_documentation!(
        ("thepackage/internal", "pub type Internal"),
        (
            "thepackage/something",
            "
import thepackage/internal
pub type External = internal.Internal
"
        ),
        "
import thepackage/something
pub fn do_thing(value: something.External) {
value
}
",
        ONLY_LINKS
    );
}
// Tests for how wide constructors are broken across multiple lines.
#[test]
fn constructor_with_long_types_and_many_fields() {
    assert_documentation!(
        ("option", "pub type Option(a)"),
        "
import option
pub type Uri {
Uri(
scheme: option.Option(String),
userinfo: option.Option(String),
host: option.Option(String),
port: option.Option(Int),
path: String,
query: option.Option(String),
fragment: option.Option(String)
)
}
",
        NONE
    );
}
// Individual field types that exceed the line width must also be split.
#[test]
fn constructor_with_long_types_and_many_fields_that_need_splitting() {
    assert_documentation!(
        ("option", "pub type Option(a)"),
        "
import option
pub type TypeWithAVeryLoooooooooooooooooooongName
pub type Wibble {
Wibble(
wibble: #(TypeWithAVeryLoooooooooooooooooooongName, TypeWithAVeryLoooooooooooooooooooongName),
wobble: option.Option(String),
)
}
",
        NONE
    );
}
// A repository host URL with a trailing slash must not produce a double
// slash in the generated repo URL.
// NOTE(review): the test name says "gitea" but constructs
// `Repository::Forgejo` (Forgejo is a Gitea fork) — confirm whether the
// name is kept deliberately for history.
#[test]
fn gitea_repository_url_has_no_double_slash() {
    let repo = Repository::Forgejo {
        host: "https://code.example.org/".parse::<Uri>().unwrap(),
        user: "person".into(),
        repo: "forgejo_bug".into(),
        path: None,
        tag_prefix: None,
    };
    assert_eq!(repo.url(), "https://code.example.org/person/forgejo_bug");
}
// An empty argument list `()` must stay on one line even when the overall
// signature exceeds the print width.
#[test]
fn long_function_with_no_arguments_parentheses_are_not_split() {
    assert_documentation!(
        "
pub fn aaaaaaaaaaaaaaaaaaaaaaaaaaaa() -> aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa {
todo
}
",
        NONE
    );
}
// Source links for Forgejo hosts use `/src/tag/<tag>/...#L<line>` anchors.
#[test]
fn forgejo_single_line_definition() {
    let mut config = PackageConfig::default();
    let repo = Repository::Forgejo {
        host: "https://code.example.org/".parse::<Uri>().unwrap(),
        user: "wibble".into(),
        repo: "wobble".into(),
        path: None,
        tag_prefix: None,
    };
    config.name = EcoString::from("test_project_name");
    config.repository = Some(repo);
    let modules = vec![("app.gleam", "pub type Wibble = Int")];
    let html = compile(config, modules);
    assert!(
        html.contains("https://code.example.org/wibble/wobble/src/tag/v0.1.0/src/app.gleam#L1")
    );
}
// Definitions spanning several lines link to a `#L<start>-L<end>` range.
#[test]
fn forgejo_multiple_line_definition() {
    let mut config = PackageConfig::default();
    let repo = Repository::Forgejo {
        host: "https://code.example.org/".parse::<Uri>().unwrap(),
        user: "wibble".into(),
        repo: "wobble".into(),
        path: None,
        tag_prefix: None,
    };
    config.name = EcoString::from("test_project_name");
    config.repository = Some(repo);
    let modules = vec![("app.gleam", "pub type Wibble \n\n= Int")];
    let html = compile(config, modules);
    assert!(
        html.contains("https://code.example.org/wibble/wobble/src/tag/v0.1.0/src/app.gleam#L1-L3")
    );
}
fn generate_search_data(module_name: &str, module_src: &str) -> EcoString {
let module = type_::tests::compile_module(module_name, module_src, None, Vec::new())
.expect("Module should compile successfully");
let mut config = PackageConfig::default();
config.name = "thepackage".into();
let paths = ProjectPaths::new("/".into());
let build_module = build::Module {
name: "main".into(),
code: module_src.into(),
mtime: SystemTime::now(),
input_path: "/".into(),
origin: Origin::Src,
ast: module,
extra: Default::default(),
dependencies: Default::default(),
};
let source_links = SourceLinker::new(&paths, &config, &build_module);
let module = &build_module.ast;
dbg!(&module.documentation);
let dependencies = HashMap::new();
let mut printer = Printer::new(
module.type_info.package.clone(),
module.name.clone(),
&module.names,
&dependencies,
);
let mut search_items = Vec::new();
let types = printer.type_definitions(&source_links, &module.definitions);
let values = printer.value_definitions(&source_links, &module.definitions);
search_items.push(search_item_for_module(&build_module));
for type_ in types {
search_items.push(search_item_for_type(module_name, &type_));
}
for value in values {
search_items.push(search_item_for_value(module_name, &value));
}
let mut output = EcoString::new();
output.push_str("------ SOURCE CODE\n");
output.push_str(module_src);
output.push_str("\n------------------------------------\n\n");
for item in search_items {
let SearchItem {
type_,
parent_title,
title,
content,
| rust | Apache-2.0 | f424547f02e621f1c5f28749786e05eda7feb098 | 2026-01-04T15:40:22.554517Z | true |
gleam-lang/gleam | https://github.com/gleam-lang/gleam/blob/f424547f02e621f1c5f28749786e05eda7feb098/compiler-core/src/docs/printer.rs | compiler-core/src/docs/printer.rs | use std::{
collections::{HashMap, HashSet},
ops::Deref,
};
use ecow::{EcoString, eco_format};
use itertools::Itertools;
use crate::{
ast::{
ArgNames, CustomType, Function, Publicity, RecordConstructorArg, SrcSpan, TypeAlias,
TypedArg, TypedDefinitions, TypedModuleConstant, TypedRecordConstructor,
},
docvec,
pretty::{Document, Documentable, break_, join, line, nil, zero_width_string},
type_::{
Deprecation, PRELUDE_MODULE_NAME, PRELUDE_PACKAGE_NAME, Type, TypeVar,
printer::{Names, PrintMode},
},
};
use super::{
Dependency, DependencyKind, DocsValues, TypeConstructor, TypeConstructorArg, TypeDefinition,
markdown_documentation, source_links::SourceLinker, text_documentation,
};
/// Controls which decorations [`Printer`] emits when rendering definitions.
#[derive(Clone, Copy)]
pub struct PrintOptions {
    /// When true, tokens are wrapped in `<span class="hljs-…">` highlight spans.
    pub print_highlighting: bool,
    /// When true, HTML links and `title` attributes are emitted.
    pub print_html: bool,
}
impl PrintOptions {
pub fn all() -> Self {
Self {
print_highlighting: true,
print_html: true,
}
}
}
/// Renders typed Gleam definitions as documentation text, optionally with
/// HTML syntax highlighting and hyperlinks to other documentation pages.
pub struct Printer<'a> {
    /// Whether to emit highlighting spans and/or HTML links.
    options: PrintOptions,
    /// Name information for the module, used for type variable names and
    /// re-export aliases.
    names: &'a Names,
    /// The package being documented.
    package: EcoString,
    /// The module being documented, e.g. `gleam/dynamic/decode`.
    module: EcoString,
    /// Type variables which don't have annotated names and we have generated
    /// names for.
    printed_type_variables: HashMap<u64, EcoString>,
    /// Names of type variables that we have generated or printed in a given
    /// definition. This ensures that we have no duplicate generated names.
    printed_type_variable_names: HashSet<EcoString>,
    /// An incrementing number used to generate the next type variable name.
    /// `0` becomes `a`, `1` becomes `b`, etc.
    next_type_variable_id: u64,
    /// The package's dependencies, used to decide whether a type from another
    /// package can be linked to its HexDocs page.
    dependencies: &'a HashMap<EcoString, Dependency>,
}
impl Printer<'_> {
/// Create a printer for the given module of `package`, with all print
/// options (highlighting and HTML) enabled by default.
pub fn new<'a>(
    package: EcoString,
    module: EcoString,
    names: &'a Names,
    dependencies: &'a HashMap<EcoString, Dependency>,
) -> Printer<'a> {
    Printer {
        options: PrintOptions::all(),
        names,
        package,
        module,
        printed_type_variables: HashMap::new(),
        printed_type_variable_names: HashSet::new(),
        next_type_variable_id: 0,
        dependencies,
    }
}
/// Override the print options, e.g. to disable highlighting or links.
// This is currently only used in the tests, though it might be useful in
// application code in future. If it is needed, simply remove this attribute.
#[cfg(test)]
pub fn set_options(&mut self, options: PrintOptions) {
    self.options = options;
}
/// Build the documentation entries for every public custom type and type
/// alias in `definitions`.
///
/// Each entry carries the highlighted definition for the HTML page, a raw
/// (un-highlighted) definition for search data, rendered markdown and
/// plain-text documentation, any deprecation message, and a source link.
/// Non-public definitions are skipped and the result is sorted.
pub fn type_definitions<'a>(
    &mut self,
    source_links: &SourceLinker,
    definitions: &'a TypedDefinitions,
) -> Vec<TypeDefinition<'a>> {
    let mut type_definitions = vec![];
    for CustomType {
        location,
        name,
        publicity,
        constructors,
        documentation,
        deprecation,
        opaque,
        parameters,
        ..
    } in &definitions.custom_types
    {
        if !publicity.is_public() {
            continue;
        }
        type_definitions.push(TypeDefinition {
            name,
            definition: print(self.custom_type(name, parameters, constructors, *opaque)),
            raw_definition: self
                .raw(|this| this.custom_type(name, parameters, constructors, *opaque)),
            documentation: markdown_documentation(documentation),
            text_documentation: text_documentation(documentation),
            deprecation_message: match deprecation {
                Deprecation::NotDeprecated => "".to_string(),
                Deprecation::Deprecated { message } => message.to_string(),
            },
            // Opaque types do not expose their constructors in documentation.
            constructors: if *opaque {
                Vec::new()
            } else {
                constructors
                    .iter()
                    .map(|constructor| TypeConstructor {
                        definition: print(self.record_constructor(constructor)),
                        raw_definition: self.raw(|this| this.record_constructor(constructor)),
                        documentation: markdown_documentation(&constructor.documentation),
                        text_documentation: text_documentation(&constructor.documentation),
                        // Only labelled arguments that actually have
                        // documentation are listed as constructor arguments.
                        arguments: constructor
                            .arguments
                            .iter()
                            .filter_map(|arg| arg.label.as_ref().map(|(_, label)| (arg, label)))
                            .map(|(argument, label)| TypeConstructorArg {
                                name: label.trim_end().to_string(),
                                doc: markdown_documentation(&argument.doc),
                                text_documentation: text_documentation(&argument.doc),
                            })
                            .filter(|arg| !arg.doc.is_empty())
                            .collect(),
                    })
                    .collect()
            },
            source_url: source_links.url(*location),
            opaque: *opaque,
        })
    }
    for TypeAlias {
        location,
        alias: name,
        parameters,
        type_,
        publicity,
        documentation,
        deprecation,
        ..
    } in &definitions.type_aliases
    {
        if !publicity.is_public() {
            continue;
        }
        type_definitions.push(TypeDefinition {
            name,
            definition: print(self.type_alias(name, type_, parameters).group()),
            raw_definition: self.raw(|this| this.type_alias(name, type_, parameters).group()),
            documentation: markdown_documentation(documentation),
            text_documentation: text_documentation(documentation),
            constructors: vec![],
            source_url: source_links.url(*location),
            deprecation_message: match deprecation {
                Deprecation::NotDeprecated => "".to_string(),
                Deprecation::Deprecated { message } => message.to_string(),
            },
            opaque: false,
        })
    }
    type_definitions.sort();
    type_definitions
}
/// Print a definition without HTML highlighting, such as for search data.
///
/// Temporarily disables all print options, renders `definition`, restores
/// the previous options, and wraps the result in a markdown code fence.
fn raw<'a, F>(&mut self, definition: F) -> String
where
    F: FnOnce(&mut Self) -> Document<'a>,
{
    let options = self.options;
    // Turn off highlighting for this definition
    self.options = PrintOptions {
        print_highlighting: false,
        print_html: false,
    };
    let result = print(definition(self));
    // Restore previous options
    self.options = options;
    format!("```\n{result}\n```")
}
/// Build the documentation entries for every public function and module
/// constant in `definitions`, sorted. The value-level counterpart of
/// `type_definitions`.
pub fn value_definitions<'a>(
    &mut self,
    source_links: &SourceLinker,
    definitions: &'a TypedDefinitions,
) -> Vec<DocsValues<'a>> {
    let mut value_definitions = vec![];
    for Function {
        location,
        name,
        arguments,
        publicity,
        deprecation,
        return_type,
        documentation,
        ..
    } in &definitions.functions
    {
        // Anonymous functions have no name and produce no docs entry.
        let Some((_, name)) = name else { continue };
        if !publicity.is_public() {
            continue;
        }
        // Ensure that any type variables we printed in previous definitions don't
        // affect our printing of this definition. Two type variables in different
        // definitions can have the same name without clashing.
        self.printed_type_variable_names.clear();
        self.next_type_variable_id = 0;
        value_definitions.push(DocsValues {
            name,
            definition: print(self.function_signature(name, arguments, return_type)),
            raw_definition: self
                .raw(|this| this.function_signature(name, arguments, return_type)),
            documentation: markdown_documentation(documentation),
            text_documentation: text_documentation(documentation),
            source_url: source_links.url(*location),
            deprecation_message: match deprecation {
                Deprecation::NotDeprecated => "".to_string(),
                Deprecation::Deprecated { message } => message.to_string(),
            },
        })
    }
    for TypedModuleConstant {
        documentation,
        location,
        publicity,
        name,
        type_,
        deprecation,
        ..
    } in &definitions.constants
    {
        if !publicity.is_public() {
            continue;
        }
        value_definitions.push(DocsValues {
            name,
            definition: print(self.constant(name, type_)),
            raw_definition: self.raw(|this| this.constant(name, type_)),
            documentation: markdown_documentation(documentation),
            text_documentation: text_documentation(documentation),
            source_url: source_links.url(*location),
            deprecation_message: match deprecation {
                Deprecation::NotDeprecated => "".to_string(),
                Deprecation::Deprecated { message } => message.to_string(),
            },
        })
    }
    value_definitions.sort();
    value_definitions
}
/// Render a custom type declaration: the head
/// (`pub [opaque] type Name(a, b)`) followed, for non-opaque types with
/// constructors, by the braced constructor list.
fn custom_type<'a>(
    &mut self,
    name: &'a str,
    parameters: &'a [(SrcSpan, EcoString)],
    constructors: &'a [TypedRecordConstructor],
    opaque: bool,
) -> Document<'a> {
    let arguments = if parameters.is_empty() {
        nil()
    } else {
        Self::wrap_arguments(
            parameters
                .iter()
                .map(|(_, parameter)| self.variable(parameter)),
        )
    };
    let keywords = if opaque {
        "pub opaque type "
    } else {
        "pub type "
    };
    let type_head = docvec![self.keyword(keywords), self.title(name), arguments];
    // Opaque types hide their constructors, so only the head is rendered.
    if constructors.is_empty() || opaque {
        return type_head;
    }
    let constructors = constructors
        .iter()
        .map(|constructor| {
            line()
                .append(self.record_constructor(constructor))
                .nest(INDENT)
        })
        .collect_vec();
    docvec![type_head, " {", constructors, line(), "}"]
}
/// Render a single record constructor with its (optionally labelled)
/// argument types. A constructor without arguments is just its name.
pub fn record_constructor<'a>(
    &mut self,
    constructor: &'a TypedRecordConstructor,
) -> Document<'a> {
    if constructor.arguments.is_empty() {
        return self.title(&constructor.name);
    }
    let arguments = constructor.arguments.iter().map(
        |RecordConstructorArg { label, type_, .. }| match label {
            // Labelled arguments render as `label: Type`.
            Some((_, label)) => self
                .variable(label)
                .append(": ")
                .append(self.type_(type_, PrintMode::Normal)),
            None => self.type_(type_, PrintMode::Normal),
        },
    );
    let arguments = Self::wrap_arguments(arguments);
    docvec![self.title(&constructor.name), arguments].group()
}
/// Render a type alias declaration, with the aliased type (aliases
/// expanded) on the following indented line.
fn type_alias<'a>(
    &mut self,
    name: &'a str,
    type_: &Type,
    parameters: &[(SrcSpan, EcoString)],
) -> Document<'a> {
    let parameters = if parameters.is_empty() {
        nil()
    } else {
        let arguments = parameters
            .iter()
            .map(|(_, parameter)| self.variable(parameter));
        Self::wrap_arguments(arguments)
    };
    docvec![
        self.keyword("pub type "),
        self.title(name),
        parameters,
        " =",
        line()
            .append(self.type_(type_, PrintMode::ExpandAliases))
            .nest(INDENT)
    ]
}
/// Render a `pub const` declaration with its type annotation.
fn constant<'a>(&mut self, name: &'a str, type_: &Type) -> Document<'a> {
    // Record any annotated type variable names so that names generated for
    // unannotated variables cannot clash with them.
    self.register_local_type_variable_names(type_);
    docvec![
        self.keyword("pub const "),
        self.title(name),
        ": ",
        self.type_(type_, PrintMode::Normal)
    ]
}
/// Render a function signature: `pub fn name(arg: Type, ...) -> Return`.
fn function_signature<'a>(
    &mut self,
    name: &'a str,
    arguments: &'a [TypedArg],
    return_type: &Type,
) -> Document<'a> {
    // Preregister every annotated type variable name before printing, so
    // that names generated for unannotated variables cannot clash with an
    // annotated name that appears later in the signature.
    for argument in arguments {
        self.register_local_type_variable_names(&argument.type_);
    }
    self.register_local_type_variable_names(return_type);
    let arguments = if arguments.is_empty() {
        // An empty argument list must never be split across lines.
        "()".to_doc()
    } else {
        Self::wrap_arguments(arguments.iter().map(|argument| {
            let name = self.variable(self.argument_name(argument));
            docvec![name, ": ", self.type_(&argument.type_, PrintMode::Normal)].group()
        }))
    };
    docvec![
        self.keyword("pub fn "),
        self.title(name),
        arguments,
        " -> ",
        self.type_(return_type, PrintMode::Normal)
    ]
    .group()
}
/// The display name of a function argument, including any label.
fn argument_name<'a>(&self, arg: &'a TypedArg) -> Document<'a> {
    match &arg.names {
        ArgNames::Named { name, .. } => name.to_doc(),
        ArgNames::NamedLabelled { label, name, .. } => docvec![label, " ", name],
        // We remove the underscore from discarded function arguments since we don't want to
        // expose this kind of detail: https://github.com/gleam-lang/gleam/issues/2561
        ArgNames::Discard { name, .. } => match name.strip_prefix('_').unwrap_or(name) {
            // A bare `_` has nothing left after stripping; use a placeholder.
            "" => "arg".to_doc(),
            name => name.to_doc(),
        },
        ArgNames::LabelledDiscard { label, name, .. } => {
            docvec![label, " ", name.strip_prefix('_').unwrap_or(name).to_doc()]
        }
    }
}
/// Wrap argument documents in parentheses, breaking them onto separate
/// indented lines (with a trailing comma) when they do not fit on one line.
fn wrap_arguments<'a>(arguments: impl IntoIterator<Item = Document<'a>>) -> Document<'a> {
    break_("(", "(")
        .append(join(arguments, break_(",", ", ")))
        .nest_if_broken(INDENT)
        .append(break_(",", ""))
        .append(")")
}
/// Like `wrap_arguments`, but grouped as a unit; used for type argument
/// lists such as `Dict(a, b)`.
fn type_arguments<'a>(arguments: impl IntoIterator<Item = Document<'a>>) -> Document<'a> {
    break_("", "")
        .append(join(arguments, break_(",", ", ")))
        .nest_if_broken(INDENT)
        .append(break_(",", ""))
        .group()
        .surround("(", ")")
}
/// Render a type: named types (possibly linked/qualified), function types,
/// tuples, and type variables (following links, generating names for
/// unbound/generic variables).
fn type_(&mut self, type_: &Type, print_mode: PrintMode) -> Document<'static> {
    match type_ {
        Type::Named {
            package,
            module,
            name,
            arguments,
            publicity,
            ..
        } => {
            let name = match print_mode {
                // If we are printing a type for a type alias, and the alias
                // is reexporting an internal type, we want to show that it
                // is aliasing that internal type, rather than showing it as
                // aliasing itself.
                PrintMode::ExpandAliases if *package == self.package => {
                    self.named_type_name(publicity, package, module, name)
                }
                // If we are printing a type alias which aliases an internal
                // type from a different package, we still want to print the
                // public name for that type. If we are not printing a type
                // alias at all, we also want to use the public name.
                PrintMode::ExpandAliases | PrintMode::Normal => {
                    // If we are using a reexported internal type, we want to
                    // print its public name, whether it is from this package
                    // or otherwise.
                    if let Some((module, alias)) =
                        self.names.reexport_alias(module.clone(), name.clone())
                    {
                        self.named_type_name(&Publicity::Public, package, module, alias)
                    } else {
                        self.named_type_name(publicity, package, module, name)
                    }
                }
            };
            if arguments.is_empty() {
                name
            } else {
                // Type arguments are always printed in normal mode; alias
                // expansion only applies at the top level.
                name.append(Self::type_arguments(
                    arguments
                        .iter()
                        .map(|argument| self.type_(argument, PrintMode::Normal)),
                ))
            }
        }
        Type::Fn { arguments, return_ } => docvec![
            self.keyword("fn"),
            Self::type_arguments(
                arguments
                    .iter()
                    .map(|argument| self.type_(argument, PrintMode::Normal))
            ),
            " -> ",
            self.type_(return_, PrintMode::Normal)
        ],
        Type::Tuple { elements } => docvec![
            "#",
            Self::type_arguments(
                elements
                    .iter()
                    .map(|element| self.type_(element, PrintMode::Normal))
            ),
        ],
        Type::Var { type_ } => match type_.as_ref().borrow().deref() {
            // Follow links through to the underlying type.
            TypeVar::Link { type_ } => self.type_(type_, PrintMode::Normal),
            TypeVar::Unbound { id } | TypeVar::Generic { id } => {
                let name = self.type_variable(*id);
                self.variable(name)
            }
        },
    }
}
/// The display name for type variable `id`: the annotated name if one
/// exists, the name we previously generated for it, or a fresh letter
/// that does not clash with any name printed so far in this definition.
fn type_variable(&mut self, id: u64) -> EcoString {
    if let Some(name) = self.names.get_type_variable(id) {
        return name.clone();
    }
    if let Some(name) = self.printed_type_variables.get(&id) {
        return name.clone();
    }
    // Keep generating candidate names until one is unused.
    loop {
        let name = self.next_letter();
        if !self.printed_type_variable_names.contains(&name) {
            _ = self.printed_type_variable_names.insert(name.clone());
            _ = self.printed_type_variables.insert(id, name.clone());
            return name;
        }
    }
}
// Copied from the `next_letter` method of the `type_::printer`.
/// Generate the next sequential type variable name and advance the
/// counter: `a`, `b`, …, `z`, `aa`, `ab`, … (bijective base-26).
///
/// Idiom cleanup over the original: compound assignments instead of
/// `rest = rest / n`, the per-iteration digit bound inside the loop
/// instead of a loop-external `let mut n;`, and named constants.
fn next_letter(&mut self) -> EcoString {
    const ALPHABET_LENGTH: u64 = 26;
    const CHAR_OFFSET: u8 = b'a';
    let mut chars = vec![];
    let mut rest = self.next_type_variable_id;
    loop {
        // `n` is always < 26 so the cast to u8 cannot truncate.
        let n = rest % ALPHABET_LENGTH;
        rest /= ALPHABET_LENGTH;
        chars.push((n as u8 + CHAR_OFFSET) as char);
        if rest == 0 {
            break;
        }
        // Offset by one so that after `z` comes `aa` rather than `ba`
        // (bijective numeration).
        rest -= 1;
    }
    self.next_type_variable_id += 1;
    // Digits were produced least-significant first, so reverse them.
    chars.into_iter().rev().collect()
}
/// Render the (possibly qualified, possibly hyperlinked) name of a named
/// type, choosing between: plain text for prelude types, an `@internal`
/// marker for non-public types, a same-page anchor, a relative link within
/// this package, a HexDocs link for Hex dependencies, or a titled span when
/// no reliable link target exists.
fn named_type_name(
    &self,
    publicity: &Publicity,
    package: &str,
    module: &str,
    name: &EcoString,
) -> Document<'static> {
    // There's no documentation page for the prelude
    if package == PRELUDE_PACKAGE_NAME && module == PRELUDE_MODULE_NAME {
        return self.title(name);
    }
    // Internal types don't get linked
    if !publicity.is_public() {
        return docvec![self.comment("@internal ".to_doc()), self.title(name)];
    }
    // Linking to a type within the same page
    if package == self.package && module == self.module {
        return self.link(eco_format!("#{name}"), self.title(name), None);
    }
    // Linking to a module within the package
    if package == self.package {
        // If we are linking to the current package, we might be viewing the
        // documentation locally and so we need to generate a relative link.
        let mut module_path = module.split('/').peekable();
        let mut current_module = self.module.split('/');
        // The documentation page for the final segment of the module is just
        // an html file by itself, so it doesn't form part of the path and doesn't
        // need to be backtracked using `..`.
        let module_name = module_path.next_back().unwrap_or(module);
        _ = current_module.next_back();
        // The two modules might have some shared part of the path, which we
        // don't need to traverse back through. However, if the two modules are
        // something like `gleam/a/wibble/wobble` and `gleam/b/wibble/wobble`,
        // the `wibble` folders are two different folders despite being at the
        // same position with the same name.
        let mut encountered_different_path = false;
        let mut path = Vec::new();
        // Calculate how far backwards in the directory tree we need to walk
        for segment in current_module {
            // If this is still part of the shared path, we can just skip it:
            // no need to go back and forth through the same directory in the
            // path!
            if !encountered_different_path && module_path.peek() == Some(&segment) {
                _ = module_path.next();
            } else {
                encountered_different_path = true;
                path.push("..");
            }
        }
        // Once we have walked backwards, we walk forwards again to the correct
        // page.
        path.extend(module_path);
        path.push(module_name);
        let qualified_name = docvec![
            self.variable(EcoString::from(module_name)),
            ".",
            self.title(name)
        ];
        let title = eco_format!("{module}.{{type {name}}}");
        return self.link(
            eco_format!("{path}.html#{name}", path = path.join("/")),
            qualified_name,
            Some(title),
        );
    }
    // A type from another package: qualify with the final module segment.
    let module_name = module.split('/').next_back().unwrap_or(module);
    let qualified_name = docvec![
        self.variable(EcoString::from(module_name)),
        ".",
        self.title(name)
    ];
    let title = eco_format!("{module}.{{type {name}}}");
    // We can't reliably link to documentation if the type is from a path
    // or git dependency
    match self.dependencies.get(package) {
        Some(Dependency {
            kind: DependencyKind::Hex,
            version,
        }) => self.link(
            eco_format!("https://hexdocs.pm/{package}/{version}/{module}.html#{name}"),
            qualified_name,
            Some(title),
        ),
        Some(_) | None => self.span_with_title(qualified_name, title),
    }
}
/// Walk a type and register all the type variable names which occur within
/// it. This is to ensure that when generating type variable names for
/// unannotated arguments, we don't print any that clash with existing names.
///
/// We preregister all names before actually printing anything, because we
/// could run into code like this:
///
/// ```gleam
/// pub fn wibble(_, _: a) -> b {}
/// ```
///
/// If we did not preregister the type variables in this case, we would end
/// up printing `fn wibble(_: a, _: a) -> b` which is not correct.
///
fn register_local_type_variable_names(&mut self, type_: &Type) {
    match type_ {
        Type::Named { arguments, .. } => {
            for argument in arguments {
                self.register_local_type_variable_names(argument);
            }
        }
        Type::Fn { arguments, return_ } => {
            for argument in arguments {
                self.register_local_type_variable_names(argument);
            }
            self.register_local_type_variable_names(return_);
        }
        Type::Var { type_ } => match type_.borrow().deref() {
            // Follow links through to the underlying type.
            TypeVar::Link { type_ } => self.register_local_type_variable_names(type_),
            TypeVar::Unbound { id } | TypeVar::Generic { id } => {
                // Only annotated variables have a name to register here;
                // unannotated ones get names later, via `type_variable`.
                if let Some(name) = self.names.get_type_variable(*id) {
                    _ = self.printed_type_variable_names.insert(name.clone());
                }
            }
        },
        Type::Tuple { elements } => {
            for element in elements {
                self.register_local_type_variable_names(element);
            }
        }
    }
}
/// Highlight `keyword` as a keyword token.
fn keyword<'a>(&self, keyword: impl Documentable<'a>) -> Document<'a> {
    self.colour_span(keyword, "keyword")
}
/// Highlight `name` as a comment token.
fn comment<'a>(&self, name: impl Documentable<'a>) -> Document<'a> {
    self.colour_span(name, "comment")
}
/// Highlight `name` as a title token (definition and type names).
fn title<'a>(&self, name: impl Documentable<'a>) -> Document<'a> {
    self.colour_span(name, "title")
}
/// Highlight `name` as a variable token.
fn variable<'a>(&self, name: impl Documentable<'a>) -> Document<'a> {
    self.colour_span(name, "variable")
}
/// Wrap `name` in a highlight.js `<span>` of the given class, unless
/// highlighting is disabled. The markup is emitted as zero-width strings
/// so it does not affect the pretty-printer's line-width decisions.
fn colour_span<'a>(
    &self,
    name: impl Documentable<'a>,
    colour_class: &'static str,
) -> Document<'a> {
    if !self.options.print_highlighting {
        return name.to_doc();
    }
    name.to_doc().surround(
        zero_width_string(eco_format!(r#"<span class="hljs-{colour_class}">"#)),
        zero_width_string("</span>".into()),
    )
}
/// Wrap `name` in an HTML `<a>` element pointing at `href`, optionally
/// carrying a `title` attribute. When HTML output is disabled, `name` is
/// returned unwrapped. The tag markup is zero-width so it does not affect
/// the pretty-printer's line-width decisions.
fn link<'a>(
    &self,
    href: EcoString,
    name: impl Documentable<'a>,
    title: Option<EcoString>,
) -> Document<'a> {
    if self.options.print_html {
        let opening_tag = match title {
            Some(title) => eco_format!(r#"<a href="{href}" title="{title}">"#),
            None => eco_format!(r#"<a href="{href}">"#),
        };
        name.to_doc().surround(
            zero_width_string(opening_tag),
            zero_width_string("</a>".into()),
        )
    } else {
        name.to_doc()
    }
}
/// Wrap `name` in a `<span title="…">` element (no link), unless HTML
/// output is disabled. Used for types that cannot be reliably linked.
fn span_with_title<'a>(&self, name: impl Documentable<'a>, title: EcoString) -> Document<'a> {
    if !self.options.print_html {
        return name.to_doc();
    }
    name.to_doc().surround(
        zero_width_string(eco_format!(r#"<span title="{title}">"#)),
        zero_width_string("</span>".into()),
    )
}
}
/// Maximum rendered line width before the pretty-printer breaks lines.
const MAX_COLUMNS: isize = 65;
/// Indentation step used when breaking arguments and constructors.
const INDENT: isize = 2;
/// Render a document to a string at the standard documentation width.
fn print(doc: Document<'_>) -> String {
    doc.to_pretty_string(MAX_COLUMNS)
}
| rust | Apache-2.0 | f424547f02e621f1c5f28749786e05eda7feb098 | 2026-01-04T15:40:22.554517Z | false |
gleam-lang/gleam | https://github.com/gleam-lang/gleam/blob/f424547f02e621f1c5f28749786e05eda7feb098/compiler-core/src/analyse/imports.rs | compiler-core/src/analyse/imports.rs | use ecow::EcoString;
use crate::{
ast::{Publicity, SrcSpan, UnqualifiedImport, UntypedImport},
build::Origin,
reference::{EntityKind, ReferenceKind},
type_::{
Environment, Error, ModuleInterface, Problems, ValueConstructorVariant, Warning,
error::InvalidImportKind,
},
};
use super::Imported;
/// Registers a module's imports into the type environment, recording any
/// errors into `problems`.
#[derive(Debug)]
pub struct Importer<'context, 'problems> {
    /// The origin of the module being analysed (e.g. `Origin::Src`).
    origin: Origin,
    /// The environment that imported modules, types, and values are
    /// registered into.
    environment: Environment<'context>,
    /// Accumulator for errors encountered while registering imports.
    problems: &'problems mut Problems,
}
impl<'context, 'problems> Importer<'context, 'problems> {
/// Construct an importer over the given environment.
pub fn new(
    origin: Origin,
    environment: Environment<'context>,
    problems: &'problems mut Problems,
) -> Self {
    Self {
        origin,
        environment,
        problems,
    }
}
pub fn run<'code>(
origin: Origin,
env: Environment<'context>,
imports: &'code [UntypedImport],
problems: &'problems mut Problems,
) -> Environment<'context> {
let mut importer = Self::new(origin, env, problems);
for import in imports {
importer.register_import(import)
}
importer.environment
}
/// Register a single `import` statement: resolve the module, validate the
/// import, and bring the module and any unqualified types/values into scope.
/// Unknown modules and invalid imports are recorded as errors.
fn register_import(&mut self, import: &UntypedImport) {
    let location = import.location;
    let name = import.module.clone();
    // Find imported module
    let Some(module_info) = self.environment.importable_modules.get(&name) else {
        self.problems.error(Error::UnknownModule {
            location,
            name: name.clone(),
            suggestions: self.environment.suggest_modules(&name, Imported::Module),
        });
        return;
    };
    // An invalid import is reported but registration continues, so later
    // analysis still sees the module's contents.
    if let Err(e) = self.check_for_invalid_imports(module_info, location) {
        self.problems.error(e);
    }
    if let Err(e) = self.register_module(import, module_info) {
        self.problems.error(e);
        return;
    }
    // Insert unqualified imports into scope
    let module_name = &module_info.name;
    for type_ in &import.unqualified_types {
        self.register_unqualified_type(type_, module_name.clone(), module_info);
    }
    for value in &import.unqualified_values {
        self.register_unqualified_value(value, module_name.clone(), module_info);
    }
}
/// Bring one unqualified type import (`import m.{type T [as Alias]}`) into
/// scope, registering its name, reference information, and type
/// constructor. Unknown types are recorded as errors.
fn register_unqualified_type(
    &mut self,
    import: &UnqualifiedImport,
    module_name: EcoString,
    module: &ModuleInterface,
) {
    // An `as` alias takes precedence over the original name.
    let imported_name = import.as_name.as_ref().unwrap_or(&import.name);
    // Register the unqualified import if it is a type constructor
    let Some(type_info) = module.get_public_type(&import.name) else {
        // TODO: refine to a type specific error
        self.problems.error(Error::UnknownModuleType {
            location: import.location,
            name: import.name.clone(),
            module_name: module.name.clone(),
            type_constructors: module.public_type_names(),
            value_with_same_name: module.get_public_value(&import.name).is_some(),
        });
        return;
    };
    // Use the import site as the type's location for diagnostics.
    let type_info = type_info.clone().with_location(import.location);
    self.environment.names.type_in_scope(
        imported_name.clone(),
        type_info.type_.as_ref(),
        &type_info.parameters,
    );
    self.environment.references.register_type(
        imported_name.clone(),
        EntityKind::ImportedType {
            module: module_name,
        },
        import.location,
        Publicity::Private,
    );
    self.environment.references.register_type_reference(
        type_info.module.clone(),
        import.name.clone(),
        imported_name,
        import.imported_name_location,
        ReferenceKind::Import,
    );
    if let Err(e) = self
        .environment
        .insert_type_constructor(imported_name.clone(), type_info)
    {
        self.problems.error(e);
    }
}
/// Bring a single unqualified value import (`import mod.{value}` or
/// `import mod.{value as alias}`) into scope.
///
/// Reports an error and bails out if the module does not export a public
/// value with that name. Otherwise the value is inserted into the local
/// scope, tracked in the reference registries according to its kind, and
/// recorded so a later duplicate import of the same local name errors.
fn register_unqualified_value(
    &mut self,
    import: &UnqualifiedImport,
    module_name: EcoString,
    module: &ModuleInterface,
) {
    let import_name = &import.name;
    let location = import.location;
    // The name the value is known by locally: the alias if one was given.
    let used_name = import.as_name.as_ref().unwrap_or(&import.name);

    // Register the unqualified import if it is a value
    let variant = match module.get_public_value(import_name) {
        Some(value) => {
            let implementations = value.variant.implementations();
            // Check the target support of the imported value
            if self.environment.target_support.is_enforced()
                && !implementations.supports(self.environment.target)
            {
                self.problems.error(Error::UnsupportedExpressionTarget {
                    target: self.environment.target,
                    location,
                })
            }
            self.environment.insert_variable(
                used_name.clone(),
                value.variant.clone(),
                value.type_.clone(),
                value.publicity,
                value.deprecation.clone(),
            );
            &value.variant
        }
        None => {
            self.problems.error(Error::UnknownModuleValue {
                location,
                name: import_name.clone(),
                module_name: module.name.clone(),
                value_constructors: module.public_value_names(),
                // Lets the error hint that a *type* of this name exists.
                type_with_same_name: module.get_public_type(import_name).is_some(),
                context: crate::type_::error::ModuleValueUsageContext::UnqualifiedImport,
            });
            return;
        }
    };

    // How the import is tracked depends on the kind of value imported.
    match variant {
        ValueConstructorVariant::Record { name, module, .. } => {
            self.environment.names.named_constructor_in_scope(
                module.clone(),
                name.clone(),
                used_name.clone(),
            );
            self.environment.references.register_value(
                used_name.clone(),
                EntityKind::ImportedConstructor {
                    module: module_name,
                },
                location,
                Publicity::Private,
            );
            self.environment.references.register_value_reference(
                module.clone(),
                import_name.clone(),
                used_name,
                import.imported_name_location,
                ReferenceKind::Import,
            );
        }
        ValueConstructorVariant::ModuleConstant { module, .. }
        | ValueConstructorVariant::ModuleFn { module, .. } => {
            self.environment.references.register_value(
                used_name.clone(),
                EntityKind::ImportedValue {
                    module: module_name,
                },
                location,
                Publicity::Private,
            );
            self.environment.references.register_value_reference(
                module.clone(),
                import_name.clone(),
                used_name,
                import.imported_name_location,
                ReferenceKind::Import,
            );
        }
        // A module's public values are never local variables.
        ValueConstructorVariant::LocalVariable { .. } => {}
    };

    // Check if value already was imported
    //
    // NOTE(review): this check runs after the value has already been
    // (re)inserted into scope above — confirm that is intentional.
    if let Some(previous) = self.environment.unqualified_imported_names.get(used_name) {
        self.problems.error(Error::DuplicateImport {
            location,
            previous_location: *previous,
            name: import_name.clone(),
        });
        return;
    }

    // Register the name as imported so it can't be imported a
    // second time in future
    let _ = self
        .environment
        .unqualified_imported_names
        .insert(used_name.clone(), location);
}
/// Check for invalid imports, such as `src` importing `test` or `dev`.
fn check_for_invalid_imports(
&mut self,
module_info: &ModuleInterface,
location: SrcSpan,
) -> Result<(), Error> {
if self.origin.is_src()
&& self
.environment
.dev_dependencies
.contains(&module_info.package)
{
return Err(Error::SrcImportingDevDependency {
importing_module: self.environment.current_module.clone(),
imported_module: module_info.name.clone(),
package: module_info.package.clone(),
location,
});
}
let kind = match (self.origin, module_info.origin) {
// `src` cannot import `test` or `dev`
(Origin::Src, Origin::Test) => InvalidImportKind::SrcImportingTest,
(Origin::Src, Origin::Dev) => InvalidImportKind::SrcImportingDev,
// `dev` cannot import `test`
(Origin::Dev, Origin::Test) => InvalidImportKind::DevImportingTest,
_ => return Ok(()),
};
Err(Error::InvalidImport {
location,
importing_module: self.environment.current_module.clone(),
imported_module: module_info.name.clone(),
kind,
})
}
/// Record an `import` statement in the environment so the module can be
/// referred to by its used name, rejecting duplicate names and warning
/// when the same module is imported twice under different names.
fn register_module(
    &mut self,
    import: &UntypedImport,
    import_info: &'context ModuleInterface,
) -> Result<(), Error> {
    // An import without a usable name has nothing to register.
    let used_name = match import.used_name() {
        Some(name) => name,
        None => return Ok(()),
    };

    self.check_not_a_duplicate_import(&used_name, import.location)?;

    // Aliased imports (`import a/b as c`) are tracked separately from
    // plain ones.
    match import.alias_location() {
        Some(alias_location) => self.environment.references.register_aliased_module(
            used_name.clone(),
            import.module.clone(),
            alias_location,
            import.location,
        ),
        None => self.environment.references.register_module(
            used_name.clone(),
            import.module.clone(),
            import.location,
        ),
    }

    // Insert imported module into scope
    let _ = self
        .environment
        .imported_modules
        .insert(used_name.clone(), (import.location, import_info));

    // Register this module as being imported
    //
    // Emit a warning if the module had already been imported.
    // This isn't an error so long as the modules have different local aliases. In Gleam v2
    // this will likely become an error.
    let previous_import = self.environment.names.imported_module(
        import.module.clone(),
        used_name,
        import.location,
    );
    if let Some(first) = previous_import {
        self.problems.warning(Warning::ModuleImportedTwice {
            name: import.module.clone(),
            first,
            second: import.location,
        });
    }

    Ok(())
}
fn check_not_a_duplicate_import(
&self,
used_name: &EcoString,
location: SrcSpan,
) -> Result<(), Error> {
// Check if a module was already imported with this name
if let Some((previous_location, _)) = self.environment.imported_modules.get(used_name) {
return Err(Error::DuplicateImport {
location,
previous_location: *previous_location,
name: used_name.clone(),
});
}
Ok(())
}
}
| rust | Apache-2.0 | f424547f02e621f1c5f28749786e05eda7feb098 | 2026-01-04T15:40:22.554517Z | false |
gleam-lang/gleam | https://github.com/gleam-lang/gleam/blob/f424547f02e621f1c5f28749786e05eda7feb098/compiler-core/src/analyse/tests.rs | compiler-core/src/analyse/tests.rs | use super::*;
#[test]
fn module_name_validation() {
    // Names that are accepted.
    for name in ["dream", "gleam/ok", "ok/gleam"] {
        assert!(validate_module_name(&name.into()).is_ok());
    }
    // Rejected: the reserved `gleam` module name and keyword segments.
    for name in ["gleam", "type", "pub", "ok/type", "ok/pub"] {
        assert!(validate_module_name(&name.into()).is_err());
    }
}
| rust | Apache-2.0 | f424547f02e621f1c5f28749786e05eda7feb098 | 2026-01-04T15:40:22.554517Z | false |
gleam-lang/gleam | https://github.com/gleam-lang/gleam/blob/f424547f02e621f1c5f28749786e05eda7feb098/compiler-core/src/analyse/name.rs | compiler-core/src/analyse/name.rs | use std::sync::OnceLock;
use ecow::{EcoString, eco_format};
use regex::Regex;
use crate::{
ast::{ArgNames, SrcSpan},
strings::{to_snake_case, to_upper_camel_case},
type_::Problems,
};
use super::{Error, Named};
static VALID_NAME_PATTERN: OnceLock<Regex> = OnceLock::new();

/// Is `name` a valid lowercase Gleam name?
///
/// Some of the internally generated variables (such as `_capture` and
/// `_use0`) start with underscores, so a single leading underscore is
/// permitted here.
fn valid_name(name: &EcoString) -> bool {
    let regex = VALID_NAME_PATTERN
        .get_or_init(|| Regex::new("^_?[a-z][a-z0-9_]*$").expect("Regex is correct"));
    regex.is_match(name)
}

static VALID_DISCARD_PATTERN: OnceLock<Regex> = OnceLock::new();

/// Is `name` a valid discard name (`_`, `_foo`, ...)?
fn valid_discard_name(name: &EcoString) -> bool {
    let regex = VALID_DISCARD_PATTERN
        .get_or_init(|| Regex::new("^_[a-z0-9_]*$").expect("Regex is correct"));
    regex.is_match(name)
}

static VALID_UPNAME_PATTERN: OnceLock<Regex> = OnceLock::new();

/// Is `name` a valid UpperCamelCase type/constructor name?
fn valid_upname(name: &EcoString) -> bool {
    let regex = VALID_UPNAME_PATTERN
        .get_or_init(|| Regex::new("^[A-Z][A-Za-z0-9]*$").expect("Regex is correct"));
    regex.is_match(name)
}
/// Check that `name` follows the casing convention required for the given
/// kind of identifier, returning a `BadName` error pointing at `location`
/// if it does not.
pub fn check_name_case(location: SrcSpan, name: &EcoString, kind: Named) -> Result<(), Error> {
    // Types and variants are UpperCamelCase, values are snake_case, and
    // discards are underscore-prefixed.
    let name_is_valid = match kind {
        Named::Type | Named::TypeAlias | Named::CustomTypeVariant => valid_upname(name),
        Named::Variable
        | Named::TypeVariable
        | Named::Argument
        | Named::Label
        | Named::Constant
        | Named::Function => valid_name(name),
        Named::Discard => valid_discard_name(name),
    };

    if name_is_valid {
        Ok(())
    } else {
        Err(Error::BadName {
            location,
            kind,
            name: name.clone(),
        })
    }
}
/// Produce the conventionally-cased version of `name` for the given kind
/// of identifier: UpperCamelCase for types and variants, snake_case for
/// values, and an underscore-prefixed snake_case name for discards.
pub fn correct_name_case(name: &EcoString, kind: Named) -> EcoString {
    match kind {
        Named::Type | Named::TypeAlias | Named::CustomTypeVariant => to_upper_camel_case(name),
        Named::Variable
        | Named::TypeVariable
        | Named::Argument
        | Named::Label
        | Named::Constant
        | Named::Function => to_snake_case(name),
        Named::Discard => eco_format!("_{}", to_snake_case(name)),
    }
}
pub fn check_argument_names(names: &ArgNames, problems: &mut Problems) {
match names {
ArgNames::Discard { name, location } => {
if let Err(error) = check_name_case(*location, name, Named::Discard) {
problems.error(error);
}
}
ArgNames::LabelledDiscard {
label,
label_location,
name,
name_location,
} => {
if let Err(error) = check_name_case(*label_location, label, Named::Label) {
problems.error(error);
}
if let Err(error) = check_name_case(*name_location, name, Named::Discard) {
problems.error(error);
}
}
ArgNames::Named { name, location } => {
if let Err(error) = check_name_case(*location, name, Named::Argument) {
problems.error(error);
}
}
ArgNames::NamedLabelled {
name,
name_location,
label,
label_location,
} => {
if let Err(error) = check_name_case(*label_location, label, Named::Label) {
problems.error(error);
}
if let Err(error) = check_name_case(*name_location, name, Named::Argument) {
problems.error(error);
}
}
}
}
| rust | Apache-2.0 | f424547f02e621f1c5f28749786e05eda7feb098 | 2026-01-04T15:40:22.554517Z | false |
gleam-lang/gleam | https://github.com/gleam-lang/gleam/blob/f424547f02e621f1c5f28749786e05eda7feb098/compiler-core/src/type_/prelude.rs | compiler-core/src/type_/prelude.rs | use hexpm::version::Version;
use strum::{EnumIter, IntoEnumIterator};
use crate::{
ast::{Publicity, SrcSpan},
build::Origin,
line_numbers::LineNumbers,
uid::UniqueIdGenerator,
};
use super::{
ModuleInterface, Opaque, References, Type, TypeConstructor, TypeValueConstructor,
TypeValueConstructorField, TypeVar, TypeVariantConstructors, ValueConstructor,
ValueConstructorVariant,
};
use crate::type_::Deprecation::NotDeprecated;
use std::{cell::RefCell, collections::HashMap, sync::Arc};
// Source-level names of the types defined by the prelude.
const BIT_ARRAY: &str = "BitArray";
const BOOL: &str = "Bool";
const FLOAT: &str = "Float";
const INT: &str = "Int";
pub const LIST: &str = "List";
const NIL: &str = "Nil";
const RESULT: &str = "Result";
const STRING: &str = "String";
const UTF_CODEPOINT: &str = "UtfCodepoint";

/// The prelude belongs to no package, hence the empty name.
pub const PRELUDE_PACKAGE_NAME: &str = "";
pub const PRELUDE_MODULE_NAME: &str = "gleam";

/// Is the given module name the implicitly imported `gleam` prelude?
pub fn is_prelude_module(module: &str) -> bool {
    matches!(module, PRELUDE_MODULE_NAME)
}
/// The set of types defined by the `gleam` prelude module.
///
/// `EnumIter` lets `build_prelude` iterate every variant so no prelude
/// type can be forgotten when constructing the prelude interface.
#[derive(Debug, Clone, Copy, PartialEq, Eq, EnumIter)]
pub enum PreludeType {
    BitArray,
    Bool,
    Float,
    Int,
    List,
    Nil,
    Result,
    String,
    UtfCodepoint,
}
impl PreludeType {
pub fn name(self) -> &'static str {
match self {
PreludeType::BitArray => BIT_ARRAY,
PreludeType::Bool => BOOL,
PreludeType::Float => FLOAT,
PreludeType::Int => INT,
PreludeType::List => LIST,
PreludeType::Nil => NIL,
PreludeType::Result => RESULT,
PreludeType::String => STRING,
PreludeType::UtfCodepoint => UTF_CODEPOINT,
}
}
}
/// The prelude `Int` type.
pub fn int() -> Arc<Type> {
    Arc::new(Type::Named {
        publicity: Publicity::Public,
        name: INT.into(),
        module: PRELUDE_MODULE_NAME.into(),
        package: PRELUDE_PACKAGE_NAME.into(),
        arguments: vec![],
        inferred_variant: None,
    })
}

/// The prelude `Float` type.
pub fn float() -> Arc<Type> {
    Arc::new(Type::Named {
        arguments: vec![],
        publicity: Publicity::Public,
        name: FLOAT.into(),
        module: PRELUDE_MODULE_NAME.into(),
        package: PRELUDE_PACKAGE_NAME.into(),
        inferred_variant: None,
    })
}

/// The prelude `Bool` type with no inferred variant.
pub fn bool() -> Arc<Type> {
    bool_with_variant(None)
}

/// The prelude `Bool` type, optionally narrowed to a known variant.
/// Indices match declaration order in `build_prelude`: `True` is 0,
/// `False` is 1.
pub fn bool_with_variant(variant: Option<bool>) -> Arc<Type> {
    let variant = match variant {
        Some(true) => Some(0),
        Some(false) => Some(1),
        None => None,
    };

    Arc::new(Type::Named {
        arguments: vec![],
        publicity: Publicity::Public,
        name: BOOL.into(),
        module: PRELUDE_MODULE_NAME.into(),
        package: PRELUDE_PACKAGE_NAME.into(),
        inferred_variant: variant,
    })
}

/// The prelude `String` type.
pub fn string() -> Arc<Type> {
    Arc::new(Type::Named {
        arguments: vec![],
        publicity: Publicity::Public,
        name: STRING.into(),
        module: PRELUDE_MODULE_NAME.into(),
        package: PRELUDE_PACKAGE_NAME.into(),
        inferred_variant: None,
    })
}

/// The prelude `Nil` type.
pub fn nil() -> Arc<Type> {
    Arc::new(Type::Named {
        arguments: vec![],
        publicity: Publicity::Public,
        name: NIL.into(),
        module: PRELUDE_MODULE_NAME.into(),
        package: PRELUDE_PACKAGE_NAME.into(),
        inferred_variant: None,
    })
}
/// The prelude `List` type holding elements of type `t`.
pub fn list(t: Arc<Type>) -> Arc<Type> {
    Arc::new(Type::Named {
        publicity: Publicity::Public,
        name: LIST.into(),
        module: PRELUDE_MODULE_NAME.into(),
        package: PRELUDE_PACKAGE_NAME.into(),
        arguments: vec![t],
        inferred_variant: None,
    })
}

/// The prelude `Result(a, e)` type with no inferred variant.
pub fn result(a: Arc<Type>, e: Arc<Type>) -> Arc<Type> {
    result_with_variant(a, e, None)
}

/// The prelude `Result(a, e)` type, optionally narrowed to a known
/// variant. Indices match declaration order in `build_prelude`: `Ok` is 0,
/// `Error` is 1.
fn result_with_variant(a: Arc<Type>, e: Arc<Type>, variant_index: Option<u16>) -> Arc<Type> {
    Arc::new(Type::Named {
        publicity: Publicity::Public,
        name: RESULT.into(),
        module: PRELUDE_MODULE_NAME.into(),
        package: PRELUDE_PACKAGE_NAME.into(),
        arguments: vec![a, e],
        inferred_variant: variant_index,
    })
}

/// A tuple type of the given element types.
pub fn tuple(elements: Vec<Arc<Type>>) -> Arc<Type> {
    Arc::new(Type::Tuple { elements })
}

/// A function type with the given argument types and return type.
pub fn fn_(arguments: Vec<Arc<Type>>, return_: Arc<Type>) -> Arc<Type> {
    Arc::new(Type::Fn { return_, arguments })
}
/// A named type from an arbitrary package/module, with no inferred
/// variant.
pub fn named(
    package: &str,
    module: &str,
    name: &str,
    publicity: Publicity,
    arguments: Vec<Arc<Type>>,
) -> Arc<Type> {
    Arc::new(Type::Named {
        publicity,
        package: package.into(),
        module: module.into(),
        name: name.into(),
        arguments,
        inferred_variant: None,
    })
}

/// The prelude `BitArray` type.
pub fn bit_array() -> Arc<Type> {
    Arc::new(Type::Named {
        arguments: vec![],
        publicity: Publicity::Public,
        name: BIT_ARRAY.into(),
        module: PRELUDE_MODULE_NAME.into(),
        package: PRELUDE_PACKAGE_NAME.into(),
        inferred_variant: None,
    })
}

/// The prelude `UtfCodepoint` type.
pub fn utf_codepoint() -> Arc<Type> {
    Arc::new(Type::Named {
        arguments: vec![],
        publicity: Publicity::Public,
        name: UTF_CODEPOINT.into(),
        module: PRELUDE_MODULE_NAME.into(),
        package: PRELUDE_PACKAGE_NAME.into(),
        inferred_variant: None,
    })
}

/// A generic (rigid) type variable with the given id.
pub fn generic_var(id: u64) -> Arc<Type> {
    Arc::new(Type::Var {
        type_: Arc::new(RefCell::new(TypeVar::Generic { id })),
    })
}

/// An unbound (not-yet-unified) type variable with the given id.
pub fn unbound_var(id: u64) -> Arc<Type> {
    Arc::new(Type::Var {
        type_: Arc::new(RefCell::new(TypeVar::Unbound { id })),
    })
}

/// A type variable already linked to a concrete type (test helper).
#[cfg(test)]
pub fn link(type_: Arc<Type>) -> Arc<Type> {
    Arc::new(Type::Var {
        type_: Arc::new(RefCell::new(TypeVar::Link { type_ })),
    })
}
/// Construct the `ModuleInterface` for the implicit `gleam` prelude
/// module, registering every prelude type and (where applicable) its value
/// constructors.
pub fn build_prelude(ids: &UniqueIdGenerator) -> ModuleInterface {
    // Helper for making a public, non-deprecated value constructor.
    let value = |variant, type_| ValueConstructor {
        publicity: Publicity::Public,
        deprecation: NotDeprecated,
        variant,
        type_,
    };

    let mut prelude = ModuleInterface {
        name: PRELUDE_MODULE_NAME.into(),
        package: "".into(),
        origin: Origin::Src,
        types: HashMap::new(),
        types_value_constructors: HashMap::new(),
        values: HashMap::new(),
        accessors: HashMap::new(),
        is_internal: false,
        warnings: vec![],
        // prelude doesn't have real src
        src_path: "".into(),
        // prelude doesn't have real line numbers
        line_numbers: LineNumbers::new(""),
        minimum_required_version: Version::new(0, 1, 0),
        type_aliases: HashMap::new(),
        documentation: Vec::new(),
        contains_echo: false,
        references: References::default(),
        inline_functions: HashMap::new(),
    };

    // Iterating the enum (rather than hard-coding a list) means a newly
    // added prelude type cannot be forgotten: this match must handle it.
    for t in PreludeType::iter() {
        match t {
            PreludeType::BitArray => {
                let v = TypeConstructor {
                    origin: Default::default(),
                    parameters: vec![],
                    type_: bit_array(),
                    module: PRELUDE_MODULE_NAME.into(),
                    publicity: Publicity::Public,
                    deprecation: NotDeprecated,
                    documentation: None,
                };
                // NOTE(review): `v` is not used again after this insert,
                // so the `.clone()` looks redundant.
                let _ = prelude.types.insert(BIT_ARRAY.into(), v.clone());
            }

            // Bool has two nullary constructors: True (index 0) and
            // False (index 1).
            PreludeType::Bool => {
                let _ = prelude.types_value_constructors.insert(
                    BOOL.into(),
                    TypeVariantConstructors {
                        type_parameters_ids: vec![],
                        variants: vec![
                            TypeValueConstructor {
                                name: "True".into(),
                                parameters: vec![],
                                documentation: None,
                            },
                            TypeValueConstructor {
                                name: "False".into(),
                                parameters: vec![],
                                documentation: None,
                            },
                        ],
                        opaque: Opaque::NotOpaque,
                    },
                );
                let _ = prelude.values.insert(
                    "True".into(),
                    value(
                        ValueConstructorVariant::Record {
                            documentation: None,
                            module: PRELUDE_MODULE_NAME.into(),
                            name: "True".into(),
                            field_map: None,
                            arity: 0,
                            location: SrcSpan::default(),
                            variants_count: 2,
                            variant_index: 0,
                        },
                        bool_with_variant(Some(true)),
                    ),
                );
                let _ = prelude.values.insert(
                    "False".into(),
                    value(
                        ValueConstructorVariant::Record {
                            documentation: None,
                            module: PRELUDE_MODULE_NAME.into(),
                            name: "False".into(),
                            field_map: None,
                            arity: 0,
                            location: SrcSpan::default(),
                            variants_count: 2,
                            variant_index: 1,
                        },
                        bool_with_variant(Some(false)),
                    ),
                );
                let _ = prelude.types.insert(
                    BOOL.into(),
                    TypeConstructor {
                        origin: Default::default(),
                        parameters: vec![],
                        type_: bool(),
                        module: PRELUDE_MODULE_NAME.into(),
                        publicity: Publicity::Public,
                        deprecation: NotDeprecated,
                        documentation: None,
                    },
                );
            }

            PreludeType::Float => {
                let _ = prelude.types.insert(
                    FLOAT.into(),
                    TypeConstructor {
                        origin: Default::default(),
                        parameters: vec![],
                        type_: float(),
                        module: PRELUDE_MODULE_NAME.into(),
                        publicity: Publicity::Public,
                        deprecation: NotDeprecated,
                        documentation: None,
                    },
                );
            }

            PreludeType::Int => {
                let _ = prelude.types.insert(
                    INT.into(),
                    TypeConstructor {
                        parameters: vec![],
                        type_: int(),
                        origin: Default::default(),
                        module: PRELUDE_MODULE_NAME.into(),
                        publicity: Publicity::Public,
                        deprecation: NotDeprecated,
                        documentation: None,
                    },
                );
            }

            // List is generic over its element type.
            PreludeType::List => {
                let list_parameter = generic_var(ids.next());
                let _ = prelude.types.insert(
                    LIST.into(),
                    TypeConstructor {
                        origin: Default::default(),
                        parameters: vec![list_parameter.clone()],
                        type_: list(list_parameter),
                        module: PRELUDE_MODULE_NAME.into(),
                        publicity: Publicity::Public,
                        deprecation: NotDeprecated,
                        documentation: None,
                    },
                );
            }

            // Nil has a single nullary constructor of the same name.
            PreludeType::Nil => {
                let _ = prelude.values.insert(
                    NIL.into(),
                    value(
                        ValueConstructorVariant::Record {
                            documentation: None,
                            module: PRELUDE_MODULE_NAME.into(),
                            name: NIL.into(),
                            arity: 0,
                            field_map: None,
                            location: SrcSpan::default(),
                            variants_count: 1,
                            variant_index: 0,
                        },
                        nil(),
                    ),
                );
                let _ = prelude.types.insert(
                    NIL.into(),
                    TypeConstructor {
                        origin: Default::default(),
                        parameters: vec![],
                        type_: nil(),
                        module: PRELUDE_MODULE_NAME.into(),
                        publicity: Publicity::Public,
                        deprecation: NotDeprecated,
                        documentation: None,
                    },
                );
                let _ = prelude.types_value_constructors.insert(
                    NIL.into(),
                    TypeVariantConstructors {
                        type_parameters_ids: vec![],
                        variants: vec![TypeValueConstructor {
                            name: "Nil".into(),
                            parameters: vec![],
                            documentation: None,
                        }],
                        opaque: Opaque::NotOpaque,
                    },
                );
            }

            // Result is generic over its Ok and Error payloads; each
            // constructor takes one argument. Ok is index 0, Error index 1.
            PreludeType::Result => {
                let result_value_id = ids.next();
                let result_error_id = ids.next();
                let result_value = generic_var(result_value_id);
                let result_error = generic_var(result_error_id);

                let _ = prelude.types.insert(
                    RESULT.into(),
                    TypeConstructor {
                        origin: Default::default(),
                        parameters: vec![result_value.clone(), result_error.clone()],
                        type_: result(result_value.clone(), result_error.clone()),
                        module: PRELUDE_MODULE_NAME.into(),
                        publicity: Publicity::Public,
                        deprecation: NotDeprecated,
                        documentation: None,
                    },
                );

                let _ = prelude.types_value_constructors.insert(
                    RESULT.into(),
                    TypeVariantConstructors {
                        type_parameters_ids: vec![result_value_id, result_error_id],
                        variants: vec![
                            TypeValueConstructor {
                                name: "Ok".into(),
                                parameters: vec![TypeValueConstructorField {
                                    type_: result_value,
                                    label: None,
                                    documentation: None,
                                }],
                                documentation: None,
                            },
                            TypeValueConstructor {
                                name: "Error".into(),
                                parameters: vec![TypeValueConstructorField {
                                    type_: result_error,
                                    label: None,
                                    documentation: None,
                                }],
                                documentation: None,
                            },
                        ],
                        opaque: Opaque::NotOpaque,
                    },
                );

                // Each constructor value gets its own fresh type variables.
                let ok = generic_var(ids.next());
                let error = generic_var(ids.next());
                let _ = prelude.values.insert(
                    "Ok".into(),
                    value(
                        ValueConstructorVariant::Record {
                            documentation: None,
                            module: PRELUDE_MODULE_NAME.into(),
                            name: "Ok".into(),
                            field_map: None,
                            arity: 1,
                            location: SrcSpan::default(),
                            variants_count: 2,
                            variant_index: 0,
                        },
                        fn_(vec![ok.clone()], result_with_variant(ok, error, Some(0))),
                    ),
                );

                let ok = generic_var(ids.next());
                let error = generic_var(ids.next());
                let _ = prelude.values.insert(
                    "Error".into(),
                    value(
                        ValueConstructorVariant::Record {
                            documentation: None,
                            module: PRELUDE_MODULE_NAME.into(),
                            name: "Error".into(),
                            field_map: None,
                            arity: 1,
                            location: SrcSpan::default(),
                            variants_count: 2,
                            variant_index: 1,
                        },
                        fn_(vec![error.clone()], result_with_variant(ok, error, Some(1))),
                    ),
                );
            }

            PreludeType::String => {
                let _ = prelude.types.insert(
                    STRING.into(),
                    TypeConstructor {
                        origin: Default::default(),
                        parameters: vec![],
                        type_: string(),
                        module: PRELUDE_MODULE_NAME.into(),
                        publicity: Publicity::Public,
                        deprecation: NotDeprecated,
                        documentation: None,
                    },
                );
            }

            PreludeType::UtfCodepoint => {
                let _ = prelude.types.insert(
                    UTF_CODEPOINT.into(),
                    TypeConstructor {
                        origin: Default::default(),
                        parameters: vec![],
                        type_: utf_codepoint(),
                        module: PRELUDE_MODULE_NAME.into(),
                        publicity: Publicity::Public,
                        deprecation: NotDeprecated,
                        documentation: None,
                    },
                );
            }
        }
    }

    prelude
}
| rust | Apache-2.0 | f424547f02e621f1c5f28749786e05eda7feb098 | 2026-01-04T15:40:22.554517Z | false |
gleam-lang/gleam | https://github.com/gleam-lang/gleam/blob/f424547f02e621f1c5f28749786e05eda7feb098/compiler-core/src/type_/environment.rs | compiler-core/src/type_/environment.rs | use pubgrub::Range;
use crate::{
analyse::TargetSupport,
ast::{PIPE_VARIABLE, Publicity},
build::Target,
error::edit_distance,
reference::{EntityKind, ReferenceTracker},
uid::UniqueIdGenerator,
};
use super::*;
use std::collections::HashMap;
/// The inputs needed to construct an [`Environment`]; turned into one via
/// [`EnvironmentArguments::build`].
#[derive(Debug)]
pub struct EnvironmentArguments<'a> {
    pub ids: UniqueIdGenerator,
    pub current_package: EcoString,
    pub gleam_version: Option<Range<Version>>,
    pub current_module: EcoString,
    pub target: Target,
    pub importable_modules: &'a im::HashMap<EcoString, ModuleInterface>,
    pub target_support: TargetSupport,
    pub current_origin: Origin,
    pub dev_dependencies: &'a HashSet<EcoString>,
}
impl<'a> EnvironmentArguments<'a> {
    /// Consume the arguments and construct the `Environment`.
    pub fn build(self) -> Environment<'a> {
        Environment::new(self)
    }
}
/// All the mutable state used while type checking a single module:
/// imported modules, the local value scope, the module's own types and
/// values, and the various usage/reference trackers.
#[derive(Debug)]
pub struct Environment<'a> {
    pub current_package: EcoString,
    // Whether the current module lives in src, test or dev.
    pub origin: Origin,

    /// The gleam version range required by the current package as stated in its
    /// gleam.toml
    pub gleam_version: Option<Range<Version>>,

    pub current_module: EcoString,
    pub target: Target,
    // Source of fresh unique ids for type variables.
    pub ids: UniqueIdGenerator,
    // The id most recently handed out by `next_uid`.
    previous_id: u64,

    /// Names of types or values that have been imported an unqualified fashion
    /// from other modules. Used to prevent multiple imports using the same name.
    pub unqualified_imported_names: HashMap<EcoString, SrcSpan>,
    pub unqualified_imported_types: HashMap<EcoString, SrcSpan>,

    pub importable_modules: &'a im::HashMap<EcoString, ModuleInterface>,

    /// Modules that have been imported by the current module, along with the
    /// location of the import statement where they were imported.
    pub imported_modules: HashMap<EcoString, (SrcSpan, &'a ModuleInterface)>,

    /// Values defined in the current function (or the prelude)
    pub scope: im::HashMap<EcoString, ValueConstructor>,

    // The names of all the ignored variables and arguments in scope:
    // `let _var = 10` `pub fn main(_var) { todo }`.
    pub discarded_names: im::HashMap<EcoString, SrcSpan>,

    /// Types defined in the current module (or the prelude)
    pub module_types: HashMap<EcoString, TypeConstructor>,

    /// Mapping from types to constructor names in the current module (or the prelude)
    pub module_types_constructors: HashMap<EcoString, TypeVariantConstructors>,

    pub module_type_aliases: HashMap<EcoString, TypeAliasConstructor>,

    /// Values defined in the current module (or the prelude)
    pub module_values: HashMap<EcoString, ValueConstructor>,

    /// Accessors defined in the current module
    pub accessors: HashMap<EcoString, AccessorsMap>,

    /// local_variable_usages is a stack of scopes. When a local variable is created it is
    /// added to the top scope. When a local variable is used we crawl down the scope
    /// stack for a variable with that name and mark it as used.
    pub local_variable_usages: Vec<HashMap<EcoString, VariableUsage>>,

    /// Used to determine if all functions/constants need to support the current
    /// compilation target.
    pub target_support: TargetSupport,

    pub names: Names,

    /// Whether we ran into an `echo` or not while analysing the current module.
    pub echo_found: bool,

    pub references: ReferenceTracker,

    pub dev_dependencies: &'a HashSet<EcoString>,
}
/// Usage information tracked for a single local variable; the per-scope
/// maps of these are handed to `handle_unused_variables` when a scope is
/// closed.
#[derive(Debug)]
pub struct VariableUsage {
    // How the variable was introduced — assumed to mirror `VariableOrigin`;
    // TODO confirm against its definition.
    origin: VariableOrigin,
    location: SrcSpan,
    usages: usize,
    recursive_usages: usize,
}
impl<'a> Environment<'a> {
    /// Construct an `Environment`, seeding it with the prelude module's
    /// types, type constructors, values and accessors.
    ///
    /// Panics if the prelude module is missing from `importable_modules`.
    pub fn new(
        EnvironmentArguments {
            ids,
            current_package,
            gleam_version,
            current_module,
            target,
            importable_modules,
            target_support,
            current_origin: origin,
            dev_dependencies,
        }: EnvironmentArguments<'a>,
    ) -> Self {
        let prelude = importable_modules
            .get(PRELUDE_MODULE_NAME)
            .expect("Unable to find prelude in importable modules");

        let names = Self::build_names(prelude, importable_modules);

        Self {
            current_package,
            gleam_version,
            // `previous_id` is initialised with a freshly drawn id.
            previous_id: ids.next(),
            ids,
            origin,
            target,
            module_types: prelude.types.clone(),
            module_types_constructors: prelude.types_value_constructors.clone(),
            module_values: HashMap::new(),
            imported_modules: HashMap::new(),
            unqualified_imported_names: HashMap::new(),
            unqualified_imported_types: HashMap::new(),
            accessors: prelude.accessors.clone(),
            scope: prelude.values.clone().into(),
            discarded_names: im::HashMap::new(),
            importable_modules,
            current_module,
            // Start with a single (function-level) usage scope.
            local_variable_usages: vec![HashMap::new()],
            target_support,
            names,
            module_type_aliases: HashMap::new(),
            echo_found: false,
            references: ReferenceTracker::new(),
            dev_dependencies,
        }
    }

    /// Build the initial name registry: the prelude's types and values,
    /// plus public type aliases from other packages that may reexport
    /// internal types.
    fn build_names(
        prelude: &ModuleInterface,
        importable_modules: &im::HashMap<EcoString, ModuleInterface>,
    ) -> Names {
        let mut names = Names::new();

        // Insert prelude types and values into scope
        for name in prelude.values.keys() {
            names.named_constructor_in_scope(
                PRELUDE_MODULE_NAME.into(),
                name.clone(),
                name.clone(),
            );
        }
        for name in prelude.types.keys() {
            names.named_type_in_scope(PRELUDE_MODULE_NAME.into(), name.clone(), name.clone());
        }

        // Find potential type aliases which reexport internal types
        for module in importable_modules.values() {
            // Internal modules are not part of the public API so they are also
            // not considered.
            if module.is_internal {
                continue;
            }

            for (alias_name, alias) in module.type_aliases.iter() {
                // An alias can only be a public reexport if it is public.
                if alias.publicity.is_public() {
                    names.maybe_register_reexport_alias(&module.package, alias_name, alias);
                }
            }
        }

        names
    }
}
/// Snapshot of the scope state captured by `Environment::open_new_scope`
/// and restored by `Environment::close_scope`.
#[derive(Debug)]
pub struct ScopeResetData {
    local_values: im::HashMap<EcoString, ValueConstructor>,
    discarded_names: im::HashMap<EcoString, SrcSpan>,
}
impl Environment<'_> {
/// Run `process_scope` inside a fresh variable scope, restoring the
/// previous scope afterwards whether it succeeded or failed.
pub fn in_new_scope<T, E>(
    &mut self,
    problems: &mut Problems,
    process_scope: impl FnOnce(&mut Self, &mut Problems) -> Result<T, E>,
) -> Result<T, E> {
    // Record initial scope state
    let initial = self.open_new_scope();

    // Process scope
    let result = process_scope(self, problems);
    self.close_scope(initial, result.is_ok(), problems);

    // Return result of typing the scope
    result
}
/// Start a new variable scope, returning a snapshot of the current one so
/// that `close_scope` can restore it later.
pub fn open_new_scope(&mut self) -> ScopeResetData {
    // A fresh usage-tracking scope is pushed for the variables that will
    // be created inside the new scope.
    self.local_variable_usages.push(HashMap::new());
    ScopeResetData {
        local_values: self.scope.clone(),
        discarded_names: self.discarded_names.clone(),
    }
}
/// Restore the scope captured by `open_new_scope`, reporting any unused
/// variables created in the scope being closed.
pub fn close_scope(
    &mut self,
    data: ScopeResetData,
    was_successful: bool,
    problems: &mut Problems,
) {
    let ScopeResetData {
        local_values,
        discarded_names,
    } = data;

    let unused = self
        .local_variable_usages
        .pop()
        .expect("There was no top entity scope.");

    // We only check for unused entities if the scope was successfully
    // processed. If it was not then any seemingly unused entities may have
    // been used beyond the point where the error occurred, so we don't want
    // to incorrectly warn about them.
    if was_successful {
        self.handle_unused_variables(unused, problems);
    }

    self.scope = local_values;
    self.discarded_names = discarded_names;
}

/// Produce a fresh unique id, recording it as the most recent one.
pub fn next_uid(&mut self) -> u64 {
    let id = self.ids.next();
    self.previous_id = id;
    id
}

/// The id most recently returned by `next_uid`.
pub fn previous_uid(&self) -> u64 {
    self.previous_id
}
/// Create a new unbound type that is a specific type, we just don't
/// know which one yet.
///
pub fn new_unbound_var(&mut self) -> Arc<Type> {
    unbound_var(self.next_uid())
}

/// Create a new generic type that can stand in for any type.
///
pub fn new_generic_var(&mut self) -> Arc<Type> {
    generic_var(self.next_uid())
}

/// Insert a variable in the current scope.
///
pub fn insert_local_variable(
    &mut self,
    name: EcoString,
    location: SrcSpan,
    origin: VariableOrigin,
    type_: Arc<Type>,
) {
    // Replaces any existing binding with the same name.
    let _ = self.scope.insert(
        name,
        ValueConstructor::local_variable(location, origin, type_),
    );
}
/// Insert a variable in the current scope.
///
/// Replaces any existing binding with the same name.
pub fn insert_variable(
    &mut self,
    name: EcoString,
    variant: ValueConstructorVariant,
    type_: Arc<Type>,
    publicity: Publicity,
    deprecation: Deprecation,
) {
    let _ = self.scope.insert(
        name,
        ValueConstructor {
            publicity,
            deprecation,
            variant,
            type_,
        },
    );
}

/// Insert (or overwrites) a value into the current module.
///
pub fn insert_module_value(&mut self, name: EcoString, value: ValueConstructor) {
    let _ = self.module_values.insert(name, value);
}

/// Lookup a variable in the current scope.
///
pub fn get_variable(&self, name: &EcoString) -> Option<&ValueConstructor> {
    self.scope.get(name)
}
/// Lookup a module constant in the current scope.
///
/// Returns `None` for names that exist but are not `ModuleConstant`s.
pub fn get_module_const(&mut self, name: &EcoString) -> Option<&ValueConstructor> {
    // Record that this name was looked up.
    self.increment_usage(name);
    self.module_values
        .get(name)
        .filter(|ValueConstructor { variant, .. }| {
            matches!(variant, ValueConstructorVariant::ModuleConstant { .. })
        })
}

/// Map a type in the current scope.
/// Errors if the module already has a type with that name, unless the type is from the
/// prelude.
///
pub fn insert_type_constructor(
    &mut self,
    type_name: EcoString,
    info: TypeConstructor,
) -> Result<(), Error> {
    let name = type_name.clone();
    let location = info.origin;
    match self.module_types.insert(type_name, info) {
        // No previous type with this name.
        None => Ok(()),
        // Shadowing a prelude type is permitted.
        Some(prelude_type) if is_prelude_module(&prelude_type.module) => Ok(()),
        Some(previous) => Err(Error::DuplicateTypeName {
            name,
            location,
            previous_location: previous.origin,
        }),
    }
}
/// Map a type alias in the current scope.
/// Errors if the module already has a type with that name, unless the type is from the
/// prelude.
///
pub fn insert_type_alias(
    &mut self,
    type_name: EcoString,
    info: TypeAliasConstructor,
) -> Result<(), Error> {
    let name = type_name.clone();
    let location = info.origin;
    match self.module_type_aliases.insert(type_name, info) {
        // No previous alias with this name.
        None => Ok(()),
        // Shadowing a prelude type is permitted.
        Some(prelude_type) if is_prelude_module(&prelude_type.module) => Ok(()),
        Some(previous) => Err(Error::DuplicateTypeName {
            name,
            location,
            previous_location: previous.origin,
        }),
    }
}

/// Error if a non-prelude type named `name` is already defined in this
/// module. Unlike `insert_type_constructor` this performs no insertion.
pub fn assert_unique_type_name(
    &mut self,
    name: &EcoString,
    location: SrcSpan,
) -> Result<(), Error> {
    match self.module_types.get(name) {
        None => Ok(()),
        Some(prelude_type) if is_prelude_module(&prelude_type.module) => Ok(()),
        Some(previous) => Err(Error::DuplicateTypeName {
            name: name.clone(),
            location,
            previous_location: previous.origin,
        }),
    }
}

/// Map a type to constructors in the current scope.
///
/// Overwrites any existing entry for the type.
pub fn insert_type_to_constructors(
    &mut self,
    type_name: EcoString,
    constructors: TypeVariantConstructors,
) {
    let _ = self
        .module_types_constructors
        .insert(type_name, constructors);
}
/// Lookup a type in the current scope.
///
/// With no module qualifier the type is looked up among the current
/// module's types; with a qualifier it is looked up in the imported module
/// of that name, recording a reference to that module.
pub fn get_type_constructor(
    &mut self,
    module: &Option<(EcoString, SrcSpan)>,
    name: &EcoString,
) -> Result<&TypeConstructor, UnknownTypeConstructorError> {
    match module {
        None => self
            .module_types
            .get(name)
            .ok_or_else(|| UnknownTypeConstructorError::Type {
                name: name.clone(),
                hint: self.unknown_type_hint(name),
            }),

        Some((module_name, _)) => {
            let (_, module) = self.imported_modules.get(module_name).ok_or_else(|| {
                UnknownTypeConstructorError::Module {
                    name: module_name.clone(),
                    suggestions: self
                        .suggest_modules(module_name, Imported::Type(name.clone())),
                }
            })?;
            self.references
                .register_module_reference(module_name.clone());
            module.get_public_type(name).ok_or_else(|| {
                UnknownTypeConstructorError::ModuleType {
                    name: name.clone(),
                    module_name: module.name.clone(),
                    type_constructors: module.public_type_names(),
                    imported_type_as_value: false,
                }
            })
        }
    }
}

/// The hint to attach to an unknown-type error: either that a *value* of
/// the same name is in scope, or the list of alternative type names.
fn unknown_type_hint(&self, type_name: &EcoString) -> UnknownTypeHint {
    match self.scope.contains_key(type_name) {
        true => UnknownTypeHint::ValueInScopeWithSameName,
        false => UnknownTypeHint::AlternativeTypes(self.module_types.keys().cloned().collect()),
    }
}
/// Lookup constructors for type in the current scope.
///
/// An empty module qualifier or the current module's own name both mean
/// "look in the current module"; otherwise the type's constructors are
/// looked up in the named importable module.
pub fn get_constructors_for_type(
    &self,
    module: &EcoString,
    name: &EcoString,
) -> Result<&TypeVariantConstructors, UnknownTypeConstructorError> {
    let module = if module.is_empty() || *module == self.current_module {
        None
    } else {
        Some(module)
    };
    match module {
        None => self.module_types_constructors.get(name).ok_or_else(|| {
            UnknownTypeConstructorError::Type {
                name: name.clone(),
                hint: self.unknown_type_hint(name),
            }
        }),
        Some(m) => {
            let module = self.importable_modules.get(m).ok_or_else(|| {
                UnknownTypeConstructorError::Module {
                    // Report the unknown *module* name, matching
                    // `get_type_constructor`. (This previously reported
                    // the type name here by mistake.)
                    name: m.clone(),
                    suggestions: self.suggest_modules(m, Imported::Type(name.clone())),
                }
            })?;
            module.types_value_constructors.get(name).ok_or_else(|| {
                UnknownTypeConstructorError::ModuleType {
                    name: name.clone(),
                    module_name: module.name.clone(),
                    type_constructors: module.public_type_names(),
                    imported_type_as_value: false,
                }
            })
        }
    }
}
    /// Lookup a value constructor in the current scope.
    ///
    /// Unqualified names resolve against the local value scope; qualified
    /// names resolve against the named imported module, registering the
    /// module as referenced for unused-import tracking.
    pub fn get_value_constructor(
        &mut self,
        module: Option<&EcoString>,
        name: &EcoString,
    ) -> Result<&ValueConstructor, UnknownValueConstructorError> {
        match module {
            None => self.scope.get(name).ok_or_else(|| {
                // Note in the error whether a *type* with this name exists,
                // which is a common source of confusion.
                let type_with_name_in_scope = self.module_types.keys().any(|type_| type_ == name);
                UnknownValueConstructorError::Variable {
                    name: name.clone(),
                    variables: self.local_value_names(),
                    type_with_name_in_scope,
                }
            }),
            Some(module_name) => {
                // Qualified lookup: the module must have been imported.
                let (_, module) = self.imported_modules.get(module_name).ok_or_else(|| {
                    UnknownValueConstructorError::Module {
                        name: module_name.clone(),
                        suggestions: self
                            .suggest_modules(module_name, Imported::Value(name.clone())),
                    }
                })?;
                // Mark the module as used so it is not reported as unused.
                self.references
                    .register_module_reference(module_name.clone());
                module.get_public_value(name).ok_or_else(|| {
                    UnknownValueConstructorError::ModuleValue {
                        name: name.clone(),
                        module_name: module.name.clone(),
                        value_constructors: module.public_value_names(),
                        imported_value_as_type: false,
                    }
                })
            }
        }
    }
    /// Collect the labels of all record fields across every variant of the
    /// given type. Unknown types simply yield an empty vector.
    pub fn get_type_variants_fields(
        &self,
        module: &EcoString,
        name: &EcoString,
    ) -> Vec<&EcoString> {
        // `Result::iter` yields the constructors only on the `Ok` path, so a
        // failed lookup contributes nothing rather than erroring.
        self.get_constructors_for_type(module, name)
            .iter()
            .flat_map(|c| &c.variants)
            .filter_map(|variant| {
                // Variants without a resolvable constructor or without
                // labelled fields are skipped.
                self.type_value_constructor_to_constructor(module, variant)?
                    .variant
                    .record_field_map()
            })
            .flat_map(|field_map| field_map.fields.keys())
            .collect_vec()
    }
fn type_value_constructor_to_constructor(
&self,
module: &EcoString,
variant: &TypeValueConstructor,
) -> Option<&ValueConstructor> {
if *module == self.current_module {
self.scope.get(&variant.name)
} else {
let (_, module) = self.imported_modules.get(module)?;
module.get_public_value(&variant.name)
}
}
    /// Register the field accessors map for the named type, replacing any
    /// previous registration.
    pub fn insert_accessors(&mut self, type_name: EcoString, accessors: AccessorsMap) {
        // The previously stored map (if any) is deliberately discarded.
        let _ = self.accessors.insert(type_name, accessors);
    }
    /// Instantiate converts generic variables into unbound ones.
    ///
    /// `ids` maps already-seen generic variable ids to the fresh unbound
    /// variables created for them, so repeated occurrences of the same
    /// generic instantiate to the same variable. Generics the `hydrator`
    /// considers rigid are left untouched.
    pub fn instantiate(
        &mut self,
        t: Arc<Type>,
        ids: &mut im::HashMap<u64, Arc<Type>>,
        hydrator: &Hydrator,
    ) -> Arc<Type> {
        match t.deref() {
            Type::Named {
                publicity,
                name,
                package,
                module,
                arguments,
                inferred_variant,
            } => {
                // Recursively instantiate each type argument.
                let arguments = arguments
                    .iter()
                    .map(|type_| self.instantiate(type_.clone(), ids, hydrator))
                    .collect();
                Arc::new(Type::Named {
                    publicity: *publicity,
                    name: name.clone(),
                    package: package.clone(),
                    module: module.clone(),
                    arguments,
                    inferred_variant: *inferred_variant,
                })
            }
            Type::Var { type_ } => {
                match type_.borrow().deref() {
                    // Follow links through to the underlying type.
                    TypeVar::Link { type_ } => {
                        return self.instantiate(type_.clone(), ids, hydrator);
                    }
                    // Already unbound: nothing to instantiate.
                    TypeVar::Unbound { .. } => {
                        return Arc::new(Type::Var {
                            type_: type_.clone(),
                        });
                    }
                    TypeVar::Generic { id } => match ids.get(id) {
                        // This generic was instantiated earlier: reuse it.
                        Some(t) => return t.clone(),
                        None => {
                            if !hydrator.is_rigid(id) {
                                // Check this in the hydrator, i.e. is it a created type
                                let v = self.new_unbound_var();
                                let _ = ids.insert(*id, v.clone());
                                return v;
                            }
                        }
                    },
                }
                // Rigid generics fall through and are returned unchanged.
                Arc::new(Type::Var {
                    type_: type_.clone(),
                })
            }
            Type::Fn {
                arguments, return_, ..
            } => fn_(
                arguments
                    .iter()
                    .map(|type_| self.instantiate(type_.clone(), ids, hydrator))
                    .collect(),
                self.instantiate(return_.clone(), ids, hydrator),
            ),
            Type::Tuple { elements } => tuple(
                elements
                    .iter()
                    .map(|type_| self.instantiate(type_.clone(), ids, hydrator))
                    .collect(),
            ),
        }
    }
    /// Inserts a local variable at the current scope for usage tracking.
    ///
    /// If this shadows a variable in the same (innermost) scope that was
    /// never used, an unused-variable warning is emitted for the shadowed
    /// entry immediately.
    pub fn init_usage(
        &mut self,
        name: EcoString,
        origin: VariableOrigin,
        location: SrcSpan,
        problems: &mut Problems,
    ) {
        // `insert` returns the previous entry for this name; the pattern only
        // matches when that entry exists and has zero recorded usages.
        if let Some(VariableUsage {
            origin,
            location,
            usages: 0,
            recursive_usages,
        }) = self
            .local_variable_usages
            .last_mut()
            .expect("Attempted to access non-existent entity usages scope")
            .insert(
                name.clone(),
                VariableUsage {
                    origin,
                    location,
                    usages: 0,
                    recursive_usages: 0,
                },
            )
        {
            // an entity was overwritten in the top most scope without being used
            let mut unused = HashMap::with_capacity(1);
            let _ = unused.insert(
                name,
                VariableUsage {
                    origin,
                    location,
                    usages: 0,
                    recursive_usages,
                },
            );
            self.handle_unused_variables(unused, problems);
        }
    }
/// Increments an entity's usage in the current or nearest enclosing scope
pub fn increment_usage(&mut self, name: &EcoString) {
if let Some(VariableUsage { usages, .. }) = self
.local_variable_usages
.iter_mut()
.rev()
.find_map(|scope| scope.get_mut(name))
{
*usages += 1;
}
}
/// Marks an argument as being passed recursively to a function call.
pub fn increment_recursive_usage(&mut self, name: &EcoString) {
if let Some(VariableUsage {
recursive_usages, ..
}) = self
.local_variable_usages
.iter_mut()
.rev()
.find_map(|scope| scope.get_mut(name))
{
*recursive_usages += 1;
}
}
    /// Emit warnings for unused definitions, imports, expressions, etc.
    ///
    /// Returns the source byte start positions of all unused definitions.
    ///
    pub fn handle_unused(&mut self, problems: &mut Problems) -> HashSet<u32> {
        let mut unused_positions = HashSet::new();
        // First flush the bottom-most local variable scope.
        let unused = self
            .local_variable_usages
            .pop()
            .expect("Expected a bottom level of entity usages.");
        self.handle_unused_variables(unused, problems);
        // We have to handle unused imported entites a bit differently when
        // emitting warning: when an import list is unused all its items and
        // the import itself are unused:
        //
        // ```
        // import wibble.{unused, also_unused}
        //        ^^^^^^  ^^^^^^  ^^^^^^^^^^^ Everything is unused here
        // ```
        //
        // But instead of emitting three warnings, what we really want is to
        // emit just a single warning encompassing the entire line! So we have
        // to hold on all unused imported entities and emit a warning for those
        // only if the module they come from is not also unused.
        let mut unused_modules = HashSet::new();
        let mut unused_imported_items = vec![];
        for (entity, info) in self.references.unused() {
            let name = entity.name;
            let location = info.origin;
            let warning = match info.kind {
                // Local definitions: warn immediately and record the position.
                EntityKind::Function => {
                    let _ = unused_positions.insert(location.start);
                    Warning::UnusedPrivateFunction { location, name }
                }
                EntityKind::Constant => {
                    let _ = unused_positions.insert(location.start);
                    Warning::UnusedPrivateModuleConstant { location, name }
                }
                EntityKind::Constructor => Warning::UnusedConstructor {
                    location,
                    name,
                    imported: false,
                },
                EntityKind::Type => {
                    let _ = unused_positions.insert(location.start);
                    Warning::UnusedType {
                        name,
                        imported: false,
                        location,
                    }
                }
                EntityKind::ImportedModule { module_name } => {
                    let _ = unused_modules.insert(module_name.clone());
                    Warning::UnusedImportedModule { name, location }
                }
                // Imported items are deferred: their warnings are only
                // emitted below if the module itself is used.
                EntityKind::ImportedType { module } => {
                    unused_imported_items.push((
                        module,
                        Warning::UnusedType {
                            name,
                            imported: true,
                            location,
                        },
                    ));
                    continue;
                }
                EntityKind::ImportedConstructor { module } => {
                    unused_imported_items.push((
                        module,
                        Warning::UnusedConstructor {
                            name,
                            imported: true,
                            location,
                        },
                    ));
                    continue;
                }
                EntityKind::ImportedValue { module } => {
                    unused_imported_items
                        .push((module, Warning::UnusedImportedValue { name, location }));
                    continue;
                }
                EntityKind::ModuleAlias { module } => {
                    unused_imported_items.push((
                        module.clone(),
                        Warning::UnusedImportedModuleAlias {
                            module_name: module.clone(),
                            alias: name,
                            location,
                        },
                    ));
                    continue;
                }
            };
            problems.warning(warning);
        }
        // Emit deferred item warnings only when their module is itself used.
        unused_imported_items
            .into_iter()
            .filter(|(module, _)| !unused_modules.contains(module))
            .for_each(|(_, warning)| problems.warning(warning));
        unused_positions
    }
fn handle_unused_variables(
&mut self,
unused: HashMap<EcoString, VariableUsage>,
problems: &mut Problems,
) {
for VariableUsage {
origin,
location,
usages,
recursive_usages,
} in unused.into_values()
{
if usages == 0 {
problems.warning(Warning::UnusedVariable { location, origin });
}
// If the function parameter is actually used somewhere, but all the
// usages are just passing it along in a recursive call, then it
// counts as being unused too.
else if origin.is_function_parameter() && recursive_usages == usages {
problems.warning(Warning::UnusedRecursiveArgument { location });
}
}
}
pub fn local_value_names(&self) -> Vec<EcoString> {
self.scope
.keys()
.filter(|&t| PIPE_VARIABLE != t)
.cloned()
.collect()
}
    /// Suggest modules to import or use, for an unknown module
    ///
    /// Candidates are importable modules that export the looked-up item
    /// (plus all already-imported modules), filtered and ranked by edit
    /// distance to the unknown name.
    pub fn suggest_modules(&self, module: &str, imported: Imported) -> Vec<ModuleSuggestion> {
        let mut suggestions = self
            .importable_modules
            .iter()
            .filter_map(|(importable, module_info)| {
                // Internal modules of other packages are never suggested.
                if module_info.is_internal && module_info.package != self.current_package {
                    return None;
                }
                match &imported {
                    // Don't suggest importing modules if they are already imported
                    _ if self
                        .imported_modules
                        .contains_key(importable.split('/').next_back().unwrap_or(importable)) =>
                    {
                        None
                    }
                    Imported::Type(name) if module_info.get_public_type(name).is_some() => {
                        Some(ModuleSuggestion::Importable(importable.clone()))
                    }
                    Imported::Value(name) if module_info.get_public_value(name).is_some() => {
                        Some(ModuleSuggestion::Importable(importable.clone()))
                    }
                    Imported::Module | Imported::Type(_) | Imported::Value(_) => None,
                }
            })
            .collect_vec();
        suggestions.extend(
            self.imported_modules
                .keys()
                .map(|module| ModuleSuggestion::Imported(module.clone())),
        );
        // Allow roughly one edit per three characters, with a minimum of one.
        let threshold = std::cmp::max(module.chars().count() / 3, 1);
        // Filter and sort options based on edit distance.
        suggestions
            .into_iter()
            .sorted()
            .filter_map(|suggestion| {
                edit_distance(module, suggestion.last_name_component(), threshold)
                    .map(|distance| (suggestion, distance))
            })
            .sorted_by_key(|&(_, distance)| distance)
            .map(|(suggestion, _)| suggestion)
            .collect()
    }
pub fn type_variant_name(
&self,
type_module: &EcoString,
type_name: &EcoString,
variant_index: u16,
) -> Option<&EcoString> {
let type_constructors = if type_module == &self.current_module {
&self.module_types_constructors
} else {
&self
.importable_modules
.get(type_module)?
.types_value_constructors
};
type_constructors
.get(type_name)
.and_then(|type_constructors| type_constructors.variants.get(variant_index as usize))
.map(|variant| &variant.name)
}
}
#[derive(Debug)]
/// An imported name, for looking up a module which exports it
/// (used by `Environment::suggest_modules` to rank suggestions).
pub enum Imported {
    /// An imported module, with no extra information
    Module,
    /// An imported type
    Type(EcoString),
    /// An imported value
    Value(EcoString),
}
/// Unify two types that should be the same.
/// Any unbound type variables will be linked to the other type as they are the same.
///
/// It two types are found to not be the same an error is returned.
///
pub fn unify(t1: Arc<Type>, t2: Arc<Type>) -> Result<(), UnifyError> {
if t1 == t2 {
return Ok(());
}
// Collapse right hand side type links. Left hand side will be collapsed in the next block.
if let Type::Var { type_ } = t2.deref()
&& let TypeVar::Link { type_ } = type_.borrow().deref()
{
return unify(t1, type_.clone());
}
if let Type::Var { type_ } = t1.deref() {
enum Action {
Unify(Arc<Type>),
CouldNotUnify,
Link,
}
let action = match type_.borrow().deref() {
TypeVar::Link { type_ } => Action::Unify(type_.clone()),
TypeVar::Unbound { id } => {
unify_unbound_type(&t2, *id)?;
Action::Link
}
TypeVar::Generic { id } => {
if let Type::Var { type_ } = t2.deref()
| rust | Apache-2.0 | f424547f02e621f1c5f28749786e05eda7feb098 | 2026-01-04T15:40:22.554517Z | true |
gleam-lang/gleam | https://github.com/gleam-lang/gleam/blob/f424547f02e621f1c5f28749786e05eda7feb098/compiler-core/src/type_/tests.rs | compiler-core/src/type_/tests.rs | use super::*;
use crate::{
analyse::TargetSupport,
ast::{TypedModule, TypedStatement, UntypedExpr, UntypedModule},
build::{Origin, Outcome, Target},
config::{GleamVersion, PackageConfig},
error::Error,
type_::{build_prelude, expression::FunctionDefinition, pretty::Printer},
uid::UniqueIdGenerator,
warning::{TypeWarningEmitter, VectorWarningEmitterIO, WarningEmitter, WarningEmitterIO},
};
use ecow::EcoString;
use itertools::Itertools;
use pubgrub::Range;
use std::rc::Rc;
use vec1::Vec1;
use camino::Utf8PathBuf;
mod accessors;
mod assert;
mod assignments;
mod conditional_compilation;
mod custom_types;
mod dead_code_detection;
mod echo;
mod errors;
mod exhaustiveness;
mod externals;
mod functions;
mod guards;
mod imports;
mod let_assert;
mod pipes;
mod pretty;
mod target_implementations;
mod type_alias;
mod use_;
mod version_inference;
mod warnings;
/// Assert that inferring `$src` as a statement sequence yields the given
/// printed type for its final statement.
#[macro_export]
macro_rules! assert_infer {
    ($src:expr, $type_:expr $(,)?) => {
        let t = $crate::type_::tests::infer($src);
        assert_eq!(($src, t), ($src, $type_.to_string()),);
    };
}
/// Assert that the public values of an inferred module match the expected
/// `(name, type)` pairs, optionally compiling `(name, src)` dependency
/// modules (in the package "thepackage") first.
#[macro_export]
macro_rules! assert_module_infer {
    ($(($name:expr, $module_src:literal)),+, $src:literal, $module:expr $(,)?) => {
        let constructors =
            $crate::type_::tests::infer_module($src, vec![$(("thepackage", $name, $module_src)),*]);
        let expected = $crate::type_::tests::stringify_tuple_strs($module);
        assert_eq!(($src, constructors), ($src, expected));
    };
    ($src:expr, $module:expr $(,)?) => {{
        let constructors = $crate::type_::tests::infer_module($src, vec![]);
        let expected = $crate::type_::tests::stringify_tuple_strs($module);
        assert_eq!(($src, constructors), ($src, expected));
    }};
}
/// Like `assert_module_infer`, but compiling for the JavaScript target.
#[macro_export]
macro_rules! assert_js_module_infer {
    ($src:expr, $module:expr $(,)?) => {{
        let constructors = $crate::type_::tests::infer_module_with_target(
            "test_module",
            $src,
            vec![],
            $crate::build::Target::JavaScript,
        );
        let expected = $crate::type_::tests::stringify_tuple_strs($module);
        assert_eq!(($src, constructors), ($src, expected));
    }};
}
/// Snapshot-test the pretty-printed error produced by type checking `$src`,
/// optionally with dependency modules as `(name, src)` pairs (package
/// "thepackage") or fully-qualified `(package, name, src)` triples.
#[macro_export]
macro_rules! assert_module_error {
    ($src:expr) => {
        let error = $crate::type_::tests::module_error($src, vec![]);
        let output = format!("----- SOURCE CODE\n{}\n\n----- ERROR\n{}", $src, error);
        insta::assert_snapshot!(insta::internals::AutoName, output, $src);
    };
    ($(($name:expr, $module_src:literal)),+, $src:literal $(,)?) => {
        let error = $crate::type_::tests::module_error(
            $src,
            vec![
                $(("thepackage", $name, $module_src)),*
            ],
        );
        let mut output = String::from("----- SOURCE CODE\n");
        for (name, src) in [$(($name, $module_src)),*] {
            output.push_str(&format!("-- {name}.gleam\n{src}\n\n"));
        }
        output.push_str(&format!("-- main.gleam\n{}\n\n----- ERROR\n{error}", $src));
        insta::assert_snapshot!(insta::internals::AutoName, output, $src);
    };
    ($(($package:literal, $name:expr, $module_src:literal)),+, $src:literal $(,)?) => {
        let error = $crate::type_::tests::module_error(
            $src,
            vec![
                $(($package, $name, $module_src)),*
            ],
        );
        let mut output = String::from("----- SOURCE CODE\n");
        for (name, src) in [$(($name, $module_src)),*] {
            output.push_str(&format!("-- {name}.gleam\n{src}\n\n"));
        }
        output.push_str(&format!("-- main.gleam\n{}\n\n----- ERROR\n{error}", $src));
        insta::assert_snapshot!(insta::internals::AutoName, output, $src);
    };
}
/// Snapshot-test the error produced when `$src` is compiled as an *internal*
/// module (`thepackage/internal/themodule`).
#[macro_export]
macro_rules! assert_internal_module_error {
    ($src:expr) => {
        let error = $crate::type_::tests::internal_module_error($src, vec![]);
        let output = format!("----- SOURCE CODE\n{}\n\n----- ERROR\n{}", $src, error);
        insta::assert_snapshot!(insta::internals::AutoName, output, $src);
    };
}
/// Snapshot-test the error produced by type checking `$src` for the
/// JavaScript target.
#[macro_export]
macro_rules! assert_js_module_error {
    ($src:expr) => {
        let error = $crate::type_::tests::module_error_with_target(
            $src,
            vec![],
            $crate::build::Target::JavaScript,
        );
        let output = format!("----- SOURCE CODE\n{}\n\n----- ERROR\n{}", $src, error);
        insta::assert_snapshot!(insta::internals::AutoName, output, $src);
    };
}
/// Snapshot-test the *parse* error produced by `$src` (the source must fail
/// to parse, not merely to type check).
#[macro_export]
macro_rules! assert_module_syntax_error {
    ($src:expr) => {
        let error = $crate::type_::tests::syntax_error($src);
        let output = format!("----- SOURCE CODE\n{}\n\n----- ERROR\n{}", $src, error);
        insta::assert_snapshot!(insta::internals::AutoName, output, $src);
    };
}
/// Assert that inferring `$src` as a statement sequence fails: either
/// comparing against an explicit expected error, or snapshot-testing the
/// pretty-printed error output.
#[macro_export]
macro_rules! assert_error {
    ($src:expr, $error:expr $(,)?) => {
        let result = $crate::type_::tests::compile_statement_sequence($src)
            .expect_err("should infer an error");
        assert_eq!(($src, sort_options($error)), ($src, sort_options(result)),);
    };
    ($src:expr) => {
        let (error, names) = $crate::type_::tests::compile_statement_sequence($src)
            .expect_err("should infer an error");
        let error = $crate::error::Error::Type {
            names: Box::new(names),
            src: $src.into(),
            path: camino::Utf8PathBuf::from("/src/one/two.gleam"),
            errors: error,
        };
        let error_string = error.pretty_string();
        let output = format!(
            "----- SOURCE CODE\n{}\n\n----- ERROR\n{}",
            $src, error_string
        );
        insta::assert_snapshot!(insta::internals::AutoName, output, $src);
    };
}
/// Compile `src` (with the given dependency modules, target and Gleam
/// version constraint) purely to collect the warnings it produces.
fn get_warnings(
    src: &str,
    deps: Vec<DependencyModule<'_>>,
    target: Target,
    gleam_version: Option<Range<Version>>,
) -> Vec<crate::warning::Warning> {
    let warnings = VectorWarningEmitterIO::default();
    // The compilation outcome is irrelevant here; only the warnings captured
    // by the vector emitter matter.
    _ = compile_module_with_opts(
        "test_module",
        src,
        Some(Rc::new(warnings.clone())),
        deps,
        target,
        TargetSupport::NotEnforced,
        gleam_version,
    );
    warnings.take().into_iter().collect_vec()
}
/// Compile `src` and return all emitted warnings pretty-printed into one
/// colourless string.
pub(crate) fn get_printed_warnings(
    src: &str,
    deps: Vec<DependencyModule<'_>>,
    target: Target,
    gleam_version: Option<Range<Version>>,
) -> String {
    let warnings = get_warnings(src, deps, target, gleam_version);
    print_warnings(warnings)
}
/// Render warnings into a single string, with colour codes disabled so the
/// output is stable for snapshots.
fn print_warnings(warnings: Vec<crate::warning::Warning>) -> String {
    let mut buffer = termcolor::Buffer::no_color();
    warnings
        .into_iter()
        .for_each(|warning| warning.pretty(&mut buffer));
    String::from_utf8(buffer.into_inner()).expect("Error printing produced invalid utf8")
}
/// Snapshot-test the warnings produced by compiling `$src` for the Erlang
/// target, asserting that at least one warning is emitted. Dependency
/// modules may be given as `(name, src)` pairs (package "thepackage") or
/// `(package, name, src)` triples.
#[macro_export]
macro_rules! assert_warning {
    ($src:expr) => {
        let warning = $crate::type_::tests::get_printed_warnings($src, vec![], crate::build::Target::Erlang, None);
        assert!(!warning.is_empty());
        let output = format!("----- SOURCE CODE\n{}\n\n----- WARNING\n{}", $src, warning);
        insta::assert_snapshot!(insta::internals::AutoName, output, $src);
    };
    ($(($name:expr, $module_src:literal)),+, $src:literal $(,)?) => {
        let warning = $crate::type_::tests::get_printed_warnings(
            $src,
            vec![
                $(("thepackage", $name, $module_src)),*
            ],
            crate::build::Target::Erlang,
            None
        );
        assert!(!warning.is_empty());
        let mut output = String::from("----- SOURCE CODE\n");
        for (name, src) in [$(($name, $module_src)),*] {
            output.push_str(&format!("-- {name}.gleam\n{src}\n\n"));
        }
        output.push_str(&format!("-- main.gleam\n{}\n\n----- WARNING\n{warning}", $src));
        insta::assert_snapshot!(insta::internals::AutoName, output, $src);
    };
    ($(($package:expr, $name:expr, $module_src:literal)),+, $src:expr) => {
        let warning = $crate::type_::tests::get_printed_warnings(
            $src,
            vec![$(($package, $name, $module_src)),*],
            crate::build::Target::Erlang,
            None
        );
        assert!(!warning.is_empty());
        let mut output = String::from("----- SOURCE CODE\n");
        for (name, src) in [$(($name, $module_src)),*] {
            output.push_str(&format!("-- {name}.gleam\n{src}\n\n"));
        }
        output.push_str(&format!("-- main.gleam\n{}\n\n----- WARNING\n{warning}", $src));
        insta::assert_snapshot!(insta::internals::AutoName, output, $src);
    };
}
/// Snapshot-test the warnings produced by compiling `$src` for the
/// JavaScript target, asserting that at least one warning is emitted.
#[macro_export]
macro_rules! assert_js_warning {
    ($src:expr) => {
        let warning = $crate::type_::tests::get_printed_warnings(
            $src,
            vec![],
            crate::build::Target::JavaScript,
            None,
        );
        assert!(!warning.is_empty());
        let output = format!("----- SOURCE CODE\n{}\n\n----- WARNING\n{}", $src, warning);
        insta::assert_snapshot!(insta::internals::AutoName, output, $src);
    };
}
/// Assert that compiling `$src` for the JavaScript target emits no warnings.
#[macro_export]
macro_rules! assert_js_no_warnings {
    ($src:expr) => {
        let warning = $crate::type_::tests::get_printed_warnings(
            $src,
            vec![],
            crate::build::Target::JavaScript,
            None,
        );
        assert!(warning.is_empty());
    };
}
/// Snapshot-test the warnings produced for the Erlang target when the
/// package declares the given Gleam version constraint.
#[macro_export]
macro_rules! assert_warnings_with_gleam_version {
    ($gleam_version:expr, $src:expr$(,)?) => {
        let warning = $crate::type_::tests::get_printed_warnings(
            $src,
            vec![],
            crate::build::Target::Erlang,
            Some($gleam_version),
        );
        assert!(!warning.is_empty());
        let output = format!("----- SOURCE CODE\n{}\n\n----- WARNING\n{}", $src, warning);
        insta::assert_snapshot!(insta::internals::AutoName, output, $src);
    };
}
/// Snapshot-test the warnings produced for the JavaScript target when the
/// package declares the given Gleam version constraint.
#[macro_export]
macro_rules! assert_js_warnings_with_gleam_version {
    ($gleam_version:expr, $src:expr$(,)?) => {
        let warning = $crate::type_::tests::get_printed_warnings(
            $src,
            vec![],
            crate::build::Target::JavaScript,
            Some($gleam_version),
        );
        assert!(!warning.is_empty());
        let output = format!("----- SOURCE CODE\n{}\n\n----- WARNING\n{}", $src, warning);
        insta::assert_snapshot!(insta::internals::AutoName, output, $src);
    };
}
/// Assert that compiling `$src` for the JavaScript target with the given
/// Gleam version constraint emits no warnings.
#[macro_export]
macro_rules! assert_js_no_warnings_with_gleam_version {
    ($gleam_version:expr, $src:expr$(,)?) => {
        let warning = $crate::type_::tests::get_printed_warnings(
            $src,
            vec![],
            crate::build::Target::JavaScript,
            Some($gleam_version),
        );
        assert!(warning.is_empty());
    };
}
/// Assert that compiling `$src` for the Erlang target emits no warnings,
/// optionally with dependency modules as `(name, src)` pairs (package
/// "thepackage") or `(package, name, src)` triples.
#[macro_export]
macro_rules! assert_no_warnings {
    ($src:expr $(,)?) => {
        let warnings = $crate::type_::tests::get_warnings($src, vec![], crate::build::Target::Erlang, None);
        assert_eq!(warnings, vec![]);
    };
    ($(($name:expr, $module_src:literal)),+, $src:expr $(,)?) => {
        let warnings = $crate::type_::tests::get_warnings(
            $src,
            vec![$(("thepackage", $name, $module_src)),*],
            crate::build::Target::Erlang,
            None,
        );
        assert_eq!(warnings, vec![]);
    };
    ($(($package:expr, $name:expr, $module_src:literal)),+, $src:expr $(,)?) => {
        let warnings = $crate::type_::tests::get_warnings(
            $src,
            vec![$(($package, $name, $module_src)),*],
            crate::build::Target::Erlang,
            None,
        );
        assert_eq!(warnings, vec![]);
    };
}
/// Parse and type check `src` as a sequence of statements in a synthetic
/// module ("themodule" in "thepackage", Erlang target), returning the typed
/// statements or the collected type errors plus the name information needed
/// to render them.
fn compile_statement_sequence(
    src: &str,
) -> Result<Vec1<TypedStatement>, (Vec1<crate::type_::Error>, Names)> {
    let ast = crate::parse::parse_statement_sequence(src).expect("syntax error");
    let mut modules = im::HashMap::new();
    let ids = UniqueIdGenerator::new();
    // DUPE: preludeinsertion
    // TODO: Currently we do this here and also in the tests. It would be better
    // to have one place where we create all this required state for use in each
    // place.
    let _ = modules.insert(PRELUDE_MODULE_NAME.into(), build_prelude(&ids));
    let mut problems = Problems::new();
    let dev_dependencies = HashSet::new();
    let mut environment = EnvironmentArguments {
        ids,
        current_package: "thepackage".into(),
        gleam_version: None,
        current_module: "themodule".into(),
        target: Target::Erlang,
        importable_modules: &modules,
        target_support: TargetSupport::Enforced,
        current_origin: Origin::Src,
        dev_dependencies: &dev_dependencies,
    }
    .build();
    let res = ExprTyper::new(
        &mut environment,
        FunctionDefinition {
            has_body: true,
            has_erlang_external: false,
            has_javascript_external: false,
        },
        &mut problems,
    )
    .infer_statements(ast);
    // Note the inversion: `try_from_vec` fails on an *empty* error list, so
    // `Err` here means no type errors were collected and inference succeeded.
    match Vec1::try_from_vec(problems.take_errors()) {
        Err(_) => Ok(res),
        Ok(errors) => Err((errors, environment.names)),
    }
}
/// Infer `src` as a statement sequence and return the printed type of the
/// final statement. Panics on any parse or type error.
fn infer(src: &str) -> String {
    let statements = compile_statement_sequence(src).expect("should successfully infer");
    let last_type = statements.last().type_();
    Printer::new().pretty_print(last_type.as_ref(), 0)
}
pub fn stringify_tuple_strs(module: Vec<(&str, &str)>) -> Vec<(EcoString, String)> {
module
.into_iter()
.map(|(k, v)| (k.into(), v.into()))
.collect()
}
/// A module loaded as a dependency in the tests.
///
/// Used by the `assert_*` macros and the compile helpers below.
///
/// In order the tuple elements indicate:
/// 1. The package name.
/// 2. The module name.
/// 3. The module source.
type DependencyModule<'a> = (&'a str, &'a str, &'a str);
/// Infer `src` as a module named "test_module" for the Erlang target,
/// returning its public values and their printed types.
pub fn infer_module(src: &str, dep: Vec<DependencyModule<'_>>) -> Vec<(EcoString, String)> {
    infer_module_with_target("test_module", src, dep, Target::Erlang)
}
/// Infer `src` as a module for the given target and return the name and
/// printed type of every publicly importable value, sorted by name.
/// Panics if the module fails to compile.
pub fn infer_module_with_target(
    module_name: &str,
    src: &str,
    dep: Vec<DependencyModule<'_>>,
    target: Target,
) -> Vec<(EcoString, String)> {
    let ast = compile_module_with_opts(
        module_name,
        src,
        None,
        dep,
        target,
        TargetSupport::NotEnforced,
        None,
    )
    .expect("should successfully infer");
    let mut values: Vec<_> = ast
        .type_info
        .values
        .iter()
        .filter(|(_, value)| value.publicity.is_importable())
        .map(|(name, value)| (name.clone(), Printer::new().pretty_print(&value.type_, 0)))
        .collect();
    values.sort();
    values
}
/// Compile `src` as a module for the Erlang target with default options
/// (target support not enforced, no Gleam version constraint).
/// See `compile_module_with_opts` for the full set of options.
pub fn compile_module(
    module_name: &str,
    src: &str,
    warnings: Option<Rc<dyn WarningEmitterIO>>,
    dep: Vec<DependencyModule<'_>>,
) -> Outcome<TypedModule, Vec1<super::Error>> {
    compile_module_with_opts(
        module_name,
        src,
        warnings,
        dep,
        Target::Erlang,
        TargetSupport::NotEnforced,
        None,
    )
}
/// Compile a module with full control over the warning emitter, dependency
/// modules, target, target support enforcement and the declared Gleam
/// version constraint.
///
/// Each dependency module is analysed first and made importable; packages
/// named "non-dependency-package" are importable but excluded from the
/// direct dependency set so tests can exercise transitive-dependency errors.
pub fn compile_module_with_opts(
    module_name: &str,
    src: &str,
    warnings: Option<Rc<dyn WarningEmitterIO>>,
    dep: Vec<DependencyModule<'_>>,
    target: Target,
    target_support: TargetSupport,
    gleam_version: Option<Range<Version>>,
) -> Outcome<TypedModule, Vec1<super::Error>> {
    let ids = UniqueIdGenerator::new();
    let mut modules = im::HashMap::new();
    let emitter =
        WarningEmitter::new(warnings.unwrap_or_else(|| Rc::new(VectorWarningEmitterIO::default())));
    // DUPE: preludeinsertion
    // TODO: Currently we do this here and also in the tests. It would be better
    // to have one place where we create all this required state for use in each
    // place.
    let _ = modules.insert(PRELUDE_MODULE_NAME.into(), build_prelude(&ids));
    // `HashMap::new()` rather than `HashMap::from_iter(vec![])`: the same
    // empty map without the pointless intermediate Vec.
    let mut direct_dependencies = HashMap::new();
    for (package, name, module_src) in dep {
        let parsed =
            crate::parse::parse_module(Utf8PathBuf::from("test/path"), module_src, &emitter)
                .expect("syntax error");
        let mut ast = parsed.module;
        ast.name = name.into();
        let line_numbers = LineNumbers::new(module_src);
        let mut config = PackageConfig::default();
        config.name = package.into();
        // Dependencies are analysed with a null warning emitter: only the
        // warnings of the module under test are of interest.
        let module = crate::analyse::ModuleAnalyzerConstructor::<()> {
            target,
            ids: &ids,
            origin: Origin::Src,
            importable_modules: &modules,
            warnings: &TypeWarningEmitter::null(),
            direct_dependencies: &HashMap::new(),
            dev_dependencies: &HashSet::new(),
            target_support,
            package_config: &config,
        }
        .infer_module(ast, line_numbers, "".into())
        .expect("should successfully infer");
        let _ = modules.insert(name.into(), module.type_info);
        if package != "non-dependency-package" {
            let _ = direct_dependencies.insert(package.into(), ());
        }
    }
    let parsed = crate::parse::parse_module(Utf8PathBuf::from("test/path"), src, &emitter)
        .expect("syntax error");
    let mut ast = parsed.module;
    ast.name = module_name.into();
    let mut config = PackageConfig::default();
    config.name = "thepackage".into();
    // Path form instead of `|v| GleamVersion::from_pubgrub(v)`
    // (clippy::redundant_closure).
    config.gleam_version = gleam_version.map(GleamVersion::from_pubgrub);
    let warnings = TypeWarningEmitter::new("/src/warning/wrn.gleam".into(), src.into(), emitter);
    crate::analyse::ModuleAnalyzerConstructor::<()> {
        target,
        ids: &ids,
        origin: Origin::Src,
        importable_modules: &modules,
        warnings: &warnings,
        direct_dependencies: &direct_dependencies,
        dev_dependencies: &HashSet::from_iter(["dev_dependency".into()]),
        target_support: TargetSupport::Enforced,
        package_config: &config,
    }
    .infer_module(ast, LineNumbers::new(src), "".into())
}
/// Type check `src` (which must fail) for the Erlang target and return the
/// pretty-printed error text.
pub fn module_error(src: &str, deps: Vec<DependencyModule<'_>>) -> String {
    module_error_with_target(src, deps, Target::Erlang)
}
pub fn module_error_with_target(
src: &str,
deps: Vec<DependencyModule<'_>>,
target: Target,
) -> String {
let outcome = compile_module_with_opts(
"themodule",
src,
None,
deps,
target,
TargetSupport::NotEnforced,
None,
);
let (error, names) = match outcome {
Outcome::Ok(_) => panic!("should infer an error"),
Outcome::PartialFailure(ast, errors) => (errors.into(), ast.names),
Outcome::TotalFailure(errors) => (errors.into(), Default::default()),
};
let error = Error::Type {
names: Box::new(names),
src: src.into(),
path: Utf8PathBuf::from("/src/one/two.gleam"),
errors: Vec1::try_from_vec(error).expect("should have at least one error"),
};
error.pretty_string()
}
/// Type check `src` (which must fail) as an *internal* module for the
/// Erlang target and return the pretty-printed error text.
pub fn internal_module_error(src: &str, deps: Vec<DependencyModule<'_>>) -> String {
    internal_module_error_with_target(src, deps, Target::Erlang)
}
pub fn internal_module_error_with_target(
src: &str,
deps: Vec<DependencyModule<'_>>,
target: Target,
) -> String {
let outcome = compile_module_with_opts(
"thepackage/internal/themodule",
src,
None,
deps,
target,
TargetSupport::NotEnforced,
None,
);
let (error, names) = match outcome {
Outcome::Ok(_) => panic!("should infer an error"),
Outcome::PartialFailure(ast, errors) => (errors.into(), ast.names),
Outcome::TotalFailure(errors) => (errors.into(), Default::default()),
};
let error = Error::Type {
names: Box::new(names),
src: src.into(),
path: Utf8PathBuf::from("/src/one/two.gleam"),
errors: Vec1::try_from_vec(error).expect("should have at least one error"),
};
error.pretty_string()
}
pub fn syntax_error(src: &str) -> String {
let error =
crate::parse::parse_module(Utf8PathBuf::from("test/path"), src, &WarningEmitter::null())
.expect_err("should trigger an error when parsing");
let error = Error::Parse {
src: src.into(),
path: Utf8PathBuf::from("/src/one/two.gleam"),
error: Box::new(error),
};
error.pretty_string()
}
#[test]
// Exercises `FieldMap::reorder` over combinations of labelled and
// unlabelled call arguments.
fn field_map_reorder_test() {
    // Helper to build a minimal untyped integer literal argument value.
    let int = |value: &str| UntypedExpr::Int {
        value: value.into(),
        int_value: crate::parse::parse_int_value(value).unwrap(),
        location: SrcSpan { start: 0, end: 0 },
    };
    // One reorder scenario: run `reorder` on `arguments` and compare both
    // the result and the (possibly reordered) argument list.
    struct Case {
        arity: u32,
        fields: HashMap<EcoString, u32>,
        arguments: Vec<CallArg<UntypedExpr>>,
        expected_result: Result<(), crate::type_::Error>,
        expected_arguments: Vec<CallArg<UntypedExpr>>,
    }
    impl Case {
        fn test(self) {
            let mut arguments = self.arguments;
            let fm = FieldMap {
                arity: self.arity,
                fields: self.fields,
            };
            let location = SrcSpan { start: 0, end: 0 };
            assert_eq!(
                self.expected_result,
                fm.reorder(&mut arguments, location, IncorrectArityContext::Function)
            );
            assert_eq!(self.expected_arguments, arguments);
        }
    }
    // No arguments, no fields: trivially succeeds.
    Case {
        arity: 0,
        fields: HashMap::new(),
        arguments: vec![],
        expected_result: Ok(()),
        expected_arguments: vec![],
    }
    .test();
    // All-positional arguments with no labels: order is unchanged.
    Case {
        arity: 3,
        fields: HashMap::new(),
        arguments: vec![
            CallArg {
                implicit: None,
                location: Default::default(),
                label: None,
                value: int("1"),
            },
            CallArg {
                implicit: None,
                location: Default::default(),
                label: None,
                value: int("2"),
            },
            CallArg {
                implicit: None,
                location: Default::default(),
                label: None,
                value: int("3"),
            },
        ],
        expected_result: Ok(()),
        expected_arguments: vec![
            CallArg {
                implicit: None,
                location: Default::default(),
                label: None,
                value: int("1"),
            },
            CallArg {
                implicit: None,
                location: Default::default(),
                label: None,
                value: int("2"),
            },
            CallArg {
                implicit: None,
                location: Default::default(),
                label: None,
                value: int("3"),
            },
        ],
    }
    .test();
    // Labelled argument already in its mapped position: order is unchanged.
    Case {
        arity: 3,
        fields: [("last".into(), 2)].into(),
        arguments: vec![
            CallArg {
                implicit: None,
                location: Default::default(),
                label: None,
                value: int("1"),
            },
            CallArg {
                implicit: None,
                location: Default::default(),
                label: None,
                value: int("2"),
            },
            CallArg {
                implicit: None,
                location: Default::default(),
                label: Some("last".into()),
                value: int("3"),
            },
        ],
        expected_result: Ok(()),
        expected_arguments: vec![
            CallArg {
                implicit: None,
                location: Default::default(),
                label: None,
                value: int("1"),
            },
            CallArg {
                implicit: None,
                location: Default::default(),
                label: None,
                value: int("2"),
            },
            CallArg {
                implicit: None,
                location: Default::default(),
                label: Some("last".into()),
                value: int("3"),
            },
        ],
    }
    .test();
}
#[test]
// Analysing an empty module must produce a `ModuleInterface` containing
// exactly the prelude-provided type constructor information (Bool, Result,
// Nil) and nothing else.
fn infer_module_type_retention_test() {
    // An empty, definition-free untyped module named "ok".
    let module: UntypedModule = crate::ast::Module {
        documentation: vec![],
        name: "ok".into(),
        definitions: vec![],
        type_info: (),
        names: Default::default(),
        unused_definition_positions: Default::default(),
    };
    let direct_dependencies = HashMap::from_iter(vec![]);
    let ids = UniqueIdGenerator::new();
    let mut modules = im::HashMap::new();
    // DUPE: preludeinsertion
    // TODO: Currently we do this here and also in the tests. It would be better
    // to have one place where we create all this required state for use in each
    // place.
    let _ = modules.insert(PRELUDE_MODULE_NAME.into(), build_prelude(&ids));
    let mut config = PackageConfig::default();
    config.name = "thepackage".into();
    let module = crate::analyse::ModuleAnalyzerConstructor::<()> {
        target: Target::Erlang,
        ids: &ids,
        origin: Origin::Src,
        importable_modules: &modules,
        warnings: &TypeWarningEmitter::null(),
        direct_dependencies: &direct_dependencies,
        dev_dependencies: &HashSet::new(),
        target_support: TargetSupport::Enforced,
        package_config: &config,
    }
    .infer_module(module, LineNumbers::new(""), "".into())
    .expect("Should infer OK");
    // The interface should be empty apart from the prelude type variant
    // constructors pulled in during analysis.
    assert_eq!(
        module.type_info,
        ModuleInterface {
            warnings: vec![],
            origin: Origin::Src,
            package: "thepackage".into(),
            name: "ok".into(),
            is_internal: false,
            types: HashMap::new(),
            types_value_constructors: HashMap::from([
                (
                    "Bool".into(),
                    TypeVariantConstructors {
                        type_parameters_ids: vec![],
                        variants: vec![
                            TypeValueConstructor {
                                name: "True".into(),
                                parameters: vec![],
                                documentation: None,
                            },
                            TypeValueConstructor {
                                name: "False".into(),
                                parameters: vec![],
                                documentation: None,
                            }
                        ],
                        opaque: Opaque::NotOpaque,
                    }
                ),
                (
                    "Result".into(),
                    TypeVariantConstructors {
                        type_parameters_ids: vec![1, 2],
                        variants: vec![
                            TypeValueConstructor {
                                name: "Ok".into(),
                                parameters: vec![TypeValueConstructorField {
                                    type_: generic_var(1),
                                    label: None,
                                    documentation: None,
                                }],
                                documentation: None,
                            },
                            TypeValueConstructor {
                                name: "Error".into(),
                                parameters: vec![TypeValueConstructorField {
                                    type_: generic_var(2),
                                    label: None,
                                    documentation: None,
                                }],
                                documentation: None,
                            }
                        ],
                        opaque: Opaque::NotOpaque,
                    }
                ),
                (
                    "Nil".into(),
                    TypeVariantConstructors {
                        type_parameters_ids: vec![],
                        variants: vec![TypeValueConstructor {
                            name: "Nil".into(),
                            parameters: vec![],
                            documentation: None,
                        }],
                        opaque: Opaque::NotOpaque,
                    }
                )
            ]),
            values: HashMap::new(),
            accessors: HashMap::new(),
            line_numbers: LineNumbers::new(""),
            src_path: "".into(),
            minimum_required_version: Version::new(0, 1, 0),
            type_aliases: HashMap::new(),
            documentation: Vec::new(),
            contains_echo: false,
            references: References::default(),
            inline_functions: HashMap::new(),
        }
    );
}
#[test]
fn simple_exprs() {
    // Inference of simple literal expressions and operators.
    // (A duplicated `"\"ok\""` assertion was removed.)
    assert_infer!("True", "Bool");
    assert_infer!("False", "Bool");
    assert_infer!("1", "Int");
    assert_infer!("-2", "Int");
    assert_infer!("1.0", "Float");
    assert_infer!("-8.0", "Float");
    assert_infer!("\"ok\"", "String");
    assert_infer!("[]", "List(a)");
    // Int operators
    assert_infer!("4 % 1", "Int");
    assert_infer!("4 > 1", "Bool");
    assert_infer!("4 >= 1", "Bool");
    assert_infer!("4 <= 1", "Bool");
    assert_infer!("4 < 1", "Bool");
    // Numbers with _'s
    assert_infer!("1000_000", "Int");
    assert_infer!("1_000", "Int");
    assert_infer!("1_000.", "Float");
    assert_infer!("10_000.001", "Float");
    assert_infer!("100_000.", "Float");
    // Nil
    assert_infer!("Nil", "Nil");
    // todo
    assert_infer!("todo", "a");
    assert_infer!("1 == todo", "Bool");
    assert_infer!("todo != 1", "Bool");
    assert_infer!("todo + 1", "Int");
    assert_infer!("todo(\"test\") + 1", "Int");
    // hex, octal, and binary literals
    assert_infer!("0xF", "Int");
    assert_infer!("0o11", "Int");
    assert_infer!("0b1010", "Int");
    // scientific notation
    assert_infer!("6.02e23", "Float");
    assert_infer!("6.02e-23", "Float");
}
#[test]
fn assert() {
    // `let assert` bindings: the inferred type is that of the expression
    // following the binding, with pattern variables available to it.
    assert_infer!("let assert [] = [] 1", "Int");
    assert_infer!("let assert [a] = [1] a", "Int");
    assert_infer!("let assert [a, 2] = [1] a", "Int");
    assert_infer!("let assert [a, .._] = [1] a", "Int");
    assert_infer!("let assert [a, .._,] = [1] a", "Int");
    // Patterns on a function parameter constrain the function's type.
    assert_infer!("fn(x) { let assert [a] = x a }", "fn(List(a)) -> a");
    assert_infer!("fn(x) { let assert [a] = x a + 1 }", "fn(List(Int)) -> Int");
    // Discard patterns.
    assert_infer!("let assert _x = 1 2.0", "Float");
    assert_infer!("let assert _ = 1 2.0", "Float");
    // Tuple patterns.
    assert_infer!("let assert #(tag, x) = #(1.0, 1) x", "Int");
    assert_infer!("fn(x) { let assert #(a, b) = x a }", "fn(#(a, b)) -> a");
    // Annotated pattern.
    assert_infer!("let assert 5: Int = 5 5", "Int");
}
#[test]
fn lists() {
    // List literals unify the types of all their elements.
    assert_infer!("[]", "List(a)");
    assert_infer!("[1]", "List(Int)");
    assert_infer!("[1, 2, 3]", "List(Int)");
    assert_infer!("[[]]", "List(List(a))");
    assert_infer!("[[1.0, 2.0]]", "List(List(Float))");
    assert_infer!("[fn(x) { x }]", "List(fn(a) -> a)");
    assert_infer!("[fn(x) { x + 1 }]", "List(fn(Int) -> Int)");
    // A generic element unifies with a concrete one, in either order.
    assert_infer!("[fn(x) { x }, fn(x) { x + 1 }]", "List(fn(Int) -> Int)");
    assert_infer!("[fn(x) { x + 1 }, fn(x) { x }]", "List(fn(Int) -> Int)");
    assert_infer!("[[], []]", "List(List(a))");
    assert_infer!("[[], [1]]", "List(List(Int))");
    // Spread (`..tail`) syntax.
    assert_infer!("[1, ..[2, ..[]]]", "List(Int)");
    assert_infer!("[fn(x) { x }, ..[]]", "List(fn(a) -> a)");
    assert_infer!("let x = [1, ..[]] [2, ..x]", "List(Int)");
}
#[test]
fn trailing_comma_lists() {
    // Trailing commas (and missing spaces) in list literals are accepted
    // and do not change the inferred type.
    assert_infer!("[1, ..[2, ..[],]]", "List(Int)");
    assert_infer!("[fn(x) { x },..[]]", "List(fn(a) -> a)");
    assert_infer!("let f = fn(x) { x } [f, f]", "List(fn(a) -> a)");
    assert_infer!("[#([], [])]", "List(#(List(a), List(b)))");
}
#[test]
fn tuples() {
    // Tuples keep each element's type positionally, including nesting.
    assert_infer!("#(1)", "#(Int)");
    assert_infer!("#(1, 2.0)", "#(Int, Float)");
    assert_infer!("#(1, 2.0, 3)", "#(Int, Float, Int)");
    assert_infer!("#(1, 2.0, #(1, 1))", "#(Int, Float, #(Int, Int))",);
}
#[test]
fn expr_fn() {
assert_infer!("fn(x) { x }", "fn(a) -> a");
assert_infer!("fn(x) { x }", "fn(a) -> a");
assert_infer!("fn(x, y) { x }", "fn(a, b) -> a");
assert_infer!("fn(x, y) { [] }", "fn(a, b) -> List(c)");
assert_infer!("let x = 1.0 1", "Int");
assert_infer!("let id = fn(x) { x } id(1)", "Int");
assert_infer!("let x = fn() { 1.0 } x()", "Float");
assert_infer!("fn(x) { x }(1)", "Int");
assert_infer!("fn() { 1 }", "fn() -> Int");
assert_infer!("fn() { 1.1 }", "fn() -> Float");
assert_infer!("fn(x) { 1.1 }", "fn(a) -> Float");
assert_infer!("fn(x) { x }", "fn(a) -> a");
assert_infer!("let x = fn(x) { 1.1 } x", "fn(a) -> Float");
assert_infer!("fn(x, y, z) { 1 }", "fn(a, b, c) -> Int");
assert_infer!("fn(x) { let y = x y }", "fn(a) -> a");
assert_infer!("let id = fn(x) { x } id(1)", "Int");
assert_infer!(
| rust | Apache-2.0 | f424547f02e621f1c5f28749786e05eda7feb098 | 2026-01-04T15:40:22.554517Z | true |
gleam-lang/gleam | https://github.com/gleam-lang/gleam/blob/f424547f02e621f1c5f28749786e05eda7feb098/compiler-core/src/type_/pattern.rs | compiler-core/src/type_/pattern.rs | use ecow::eco_format;
use hexpm::version::{LowestVersion, Version};
use im::hashmap;
use itertools::Itertools;
use num_bigint::BigInt;
/// Type inference and checking of patterns used in case expressions
/// and variables bindings.
///
use super::*;
use crate::{
analyse::{self, Inferred, name::check_name_case},
ast::{
AssignName, BitArrayOption, BitArraySize, ImplicitCallArgOrigin, Layer, TailPattern,
TypedBitArraySize, UntypedPatternBitArraySegment,
},
parse::PatternPosition,
reference::ReferenceKind,
type_::expression::FunctionDefinition,
};
use std::sync::Arc;
/// Typing state for the pattern(s) of a single `case` clause or variable
/// binding: checks patterns against the subject's type, reports problems,
/// and records the variables the pattern binds.
pub struct PatternTyper<'a, 'b> {
    environment: &'a mut Environment<'b>,
    /// The function this pattern appears in; consulted for whether it has a
    /// JavaScript external implementation when tracking feature usage.
    current_function: &'a FunctionDefinition,
    hydrator: &'a Hydrator,
    /// Whether the pattern currently being typed is the clause's first
    /// pattern or one of its alternatives.
    mode: PatternMode,
    /// Names of the variables bound by the clause's initial pattern, used to
    /// check alternative patterns bind exactly the same names.
    initial_pattern_vars: HashSet<EcoString>,
    /// Variables which have been inferred to a specific variant of their type
    /// from this pattern-matching. Key is the variable name, Value is the inferred variant index.
    inferred_variant_variables: HashMap<EcoString, u16>,
    /// Sink for errors and warnings discovered while typing.
    problems: &'a mut Problems,
    /// The minimum Gleam version required to compile the typed pattern.
    pub minimum_required_version: Version,
    /// Set once any error has been reported for this pattern.
    pub error_encountered: bool,
    /// Variables which have been assigned in the current pattern. We can't
    /// register them immediately. If we're in a bit array, variables that are
    /// assigned in the pattern can be used as part of the pattern, e.g.
    /// `<<a, b:size(a)>>`. However, if we are not in a bit array pattern,
    /// variables cannot be used within the pattern. This is invalid:
    /// `#(size, <<a:size(size)>>)`. This is due to a limitation of Erlang.
    ///
    /// What we do instead is store the variables in this map. Each variable
    /// keeps track of whether it is in scope, so that we can correctly detect
    /// valid/invalid uses.
    variables: HashMap<EcoString, LocalVariable>,
    /// What kind of pattern we are typing
    position: PatternPosition,
}
/// A variable bound in the pattern currently being typed, held locally until
/// `register_variables` copies it into the environment.
#[derive(Debug)]
struct LocalVariable {
    // Where in the source the variable was bound.
    location: SrcSpan,
    // The syntax that introduced this variable.
    origin: VariableOrigin,
    type_: Arc<Type>,
    // Whether the variable has been referenced from within the pattern itself.
    usage: Usage,
    // Whether the variable may still be referenced from the pattern
    // currently being typed (see `PatternTyper::variables`).
    scope: Scope,
}
impl LocalVariable {
    /// True when the variable was bound in the bit array pattern currently
    /// being typed and so may still be referenced from it
    /// (see `PatternTyper::variables`).
    fn in_scope(&self) -> bool {
        // `matches!` is the idiomatic form of a boolean pattern test.
        matches!(self.scope, Scope::CurrentBitArrayPattern)
    }

    /// True when the variable has already been referenced from within the
    /// pattern it was bound in.
    fn was_used(&self) -> bool {
        matches!(self.usage, Usage::UsedInPattern)
    }
}
/// Whether a pattern-bound variable has been referenced from within the same
/// pattern it was bound in (e.g. `<<a, b:size(a)>>`).
#[derive(Debug, Clone, Copy)]
enum Usage {
    UsedInPattern,
    UnusedSoFar,
}
/// Whether a pattern-bound variable may still be referenced: only variables
/// bound in the bit array pattern currently being typed remain usable
/// (see `PatternTyper::variables`).
#[derive(Debug, Clone, Copy)]
enum Scope {
    CurrentBitArrayPattern,
    OtherPattern,
}
/// Which pattern of a clause is being typed: the first one, or one of the
/// clause's alternative patterns.
enum PatternMode {
    Initial,
    /// Carries the names bound so far in the alternative, so they can be
    /// checked against the initial pattern's variables.
    Alternative(Vec<EcoString>),
}
impl<'a, 'b> PatternTyper<'a, 'b> {
/// Create a pattern typer with empty state, starting in `Initial` mode.
pub fn new(
    environment: &'a mut Environment<'b>,
    current_function: &'a FunctionDefinition,
    hydrator: &'a Hydrator,
    problems: &'a mut Problems,
    position: PatternPosition,
) -> Self {
    Self {
        environment,
        current_function,
        hydrator,
        mode: PatternMode::Initial,
        initial_pattern_vars: HashSet::new(),
        inferred_variant_variables: HashMap::new(),
        // Baseline; may be raised as version-gated features are encountered.
        minimum_required_version: Version::new(0, 1, 0),
        problems,
        error_encountered: false,
        variables: HashMap::new(),
        position,
    }
}
/// Bind `name` with `type_` in the pattern currently being typed.
///
/// In the initial pattern this records the variable locally (registration
/// into the environment happens later, see `register_variables`). In an
/// alternative pattern it instead checks the variable was also bound by the
/// initial pattern and unifies the two types.
fn insert_variable(
    &mut self,
    name: &EcoString,
    type_: Arc<Type>,
    location: SrcSpan,
    origin: VariableOrigin,
) {
    self.check_name_case(location, name, Named::Variable);
    match &mut self.mode {
        PatternMode::Initial => {
            // Ensure there are no duplicate variable names in the pattern
            if self.initial_pattern_vars.contains(name) {
                self.error(convert_unify_error(
                    UnifyError::DuplicateVarInPattern { name: name.clone() },
                    location,
                ));
                return;
            }
            // We no longer have access to the variable from the subject of the pattern
            // so it doesn't need to be inferred any more.
            let _ = self.inferred_variant_variables.remove(name);
            // Record that this variable originated in this pattern so any
            // following alternative patterns can be checked to ensure they
            // have the same variables.
            let _ = self.initial_pattern_vars.insert(name.clone());
            // Held locally (not in the environment yet) so use inside or
            // outside a bit array pattern can be policed; see `variables`.
            _ = self.variables.insert(
                name.clone(),
                LocalVariable {
                    location,
                    origin: origin.clone(),
                    type_: type_.clone(),
                    usage: Usage::UnusedSoFar,
                    scope: Scope::CurrentBitArrayPattern,
                },
            );
        }
        PatternMode::Alternative(assigned) => {
            match self.environment.scope.get_mut(name) {
                // This variable was defined in the Initial multi-pattern
                Some(initial) if self.initial_pattern_vars.contains(name) => {
                    if assigned.contains(name) {
                        self.error(convert_unify_error(
                            UnifyError::DuplicateVarInPattern { name: name.clone() },
                            location,
                        ));
                        return;
                    }
                    assigned.push(name.clone());
                    let initial_type = initial.type_.clone();
                    // The variable must have the same type in every
                    // alternative pattern of the clause.
                    match unify(initial_type, type_.clone()) {
                        Ok(()) => {}
                        Err(error) => {
                            self.problems.error(convert_unify_error(error, location));
                            self.error_encountered = true;
                        }
                    };
                    unify_constructor_variants(Arc::make_mut(&mut initial.type_), &type_);
                }
                // This variable was not defined in the Initial multi-pattern
                _ => self.error(convert_unify_error(
                    UnifyError::ExtraVarInAlternativePattern { name: name.clone() },
                    location,
                )),
            }
        }
    }
}
/// Record that the case subject variable `name` is known to be variant
/// `variant_index` of its custom type within this pattern's scope.
///
/// In the initial pattern the narrowed type shadows the variable in the
/// environment; in alternative patterns the narrowing is kept only while
/// every alternative agrees on the same variant.
fn set_subject_variable_variant(&mut self, name: EcoString, variant_index: u16) {
    match &self.mode {
        PatternMode::Initial => {
            // If this name is reassigned in the pattern itself, we don't need to infer
            // it, since it isn't accessible in this scope anymore.
            if self.initial_pattern_vars.contains(&name) {
                return;
            }
            let variable = self
                .environment
                .scope
                .get(&name)
                .expect("Variable already exists in the case subjects");
            // The type in this scope is now separate from the parent scope, so we
            // remove any links to ensure that they aren't linked in any way and that
            // we don't accidentally set the variant of the variable outside of this scope
            let mut type_ = collapse_links(variable.type_.clone());
            Arc::make_mut(&mut type_).set_custom_type_variant(variant_index);
            // Mark this variable as having been inferred
            let _ = self
                .inferred_variant_variables
                .insert(name.clone(), variant_index);
            let origin = match &variable.variant {
                ValueConstructorVariant::LocalVariable { origin, .. } => origin.clone(),
                ValueConstructorVariant::ModuleConstant { .. }
                | ValueConstructorVariant::ModuleFn { .. }
                | ValueConstructorVariant::Record { .. } => VariableOrigin::generated(),
            };
            // This variable is only inferred in this branch of the case expression
            self.environment.insert_local_variable(
                name.clone(),
                variable.definition_location().span,
                origin,
                type_,
            );
        }
        PatternMode::Alternative(_) => {
            // If we haven't inferred this variable in all alternative patterns so far,
            // we can't set its variant here
            let Some(inferred_variant) = self.inferred_variant_variables.get(&name) else {
                return;
            };
            // If multiple variants are possible in this pattern, we can't infer it at all
            // and we have to remove the variant index
            if *inferred_variant != variant_index {
                // This variable's variant is no longer known
                let _ = self.inferred_variant_variables.remove(&name);
                let variable = self
                    .environment
                    .scope
                    .get_mut(&name)
                    .expect("Variable already exists in the case subjects");
                Arc::make_mut(&mut variable.type_).generalise_custom_type_variant();
            }
        }
    }
}
/// Type an alternative pattern of a clause, then check that it bound
/// exactly the same set of variables as the clause's initial pattern.
pub fn infer_alternative_multi_pattern(
    &mut self,
    multi_pattern: UntypedMultiPattern,
    subjects: &[TypedExpr],
    location: &SrcSpan,
) -> Vec<TypedPattern> {
    self.mode = PatternMode::Alternative(vec![]);
    let typed_multi = self.infer_multi_pattern(multi_pattern, subjects);
    // Don't pile a missing-variable error on top of an existing type error.
    if self.error_encountered {
        return typed_multi;
    }
    match &self.mode {
        PatternMode::Initial => panic!("Pattern mode switched from Alternative to Initial"),
        PatternMode::Alternative(assigned)
            if assigned.len() < self.initial_pattern_vars.len() =>
        {
            // Remove the names this alternative did bind; whatever is left
            // is missing from it.
            for name in assigned {
                let _ = self.initial_pattern_vars.remove(name);
            }
            self.error(Error::MissingVarInAlternativePattern {
                location: *location,
                // It is safe to use expect here as we checked the length above
                name: self
                    .initial_pattern_vars
                    .iter()
                    .next()
                    .expect("Getting undefined pattern variable")
                    .clone(),
            });
            typed_multi
        }
        PatternMode::Alternative(_) => typed_multi,
    }
}
/// Type each pattern of a clause's multi-pattern against the corresponding
/// case subject, registering any variables the patterns bind.
///
/// If the number of patterns does not match the number of subjects an
/// error is reported and an empty vector is returned.
pub fn infer_multi_pattern(
    &mut self,
    multi_pattern: UntypedMultiPattern,
    subjects: &[TypedExpr],
) -> Vec<TypedPattern> {
    // A clause must supply exactly one pattern per case subject.
    let expected = subjects.len();
    let given = multi_pattern.len();
    if given != expected {
        let first = multi_pattern
            .first()
            .expect("multi-pattern to contain at least one pattern");
        let last = multi_pattern
            .last()
            .expect("multi-pattern to contain at least one pattern");
        self.error(Error::IncorrectNumClausePatterns {
            location: first.location().merge(&last.location()),
            expected,
            given,
        });
        return Vec::new();
    }
    // Unify each pattern with the type of its matching subject, passing the
    // subject's variable name (if any) along for variant inference.
    let typed_multi: Vec<TypedPattern> = multi_pattern
        .into_iter()
        .zip(subjects)
        .map(|(pattern, subject)| {
            let subject_variable = Self::subject_variable(subject);
            self.unify(pattern, subject.type_(), subject_variable)
        })
        .collect();
    self.register_variables();
    typed_multi
}
/// Type a single pattern against the subject expression it is matched on,
/// registering any variables it binds.
pub fn infer_single_pattern(
    &mut self,
    pattern: UntypedPattern,
    subject: &TypedExpr,
) -> TypedPattern {
    // If the subject is a bare variable its name is passed along so the
    // pattern can narrow the variable's variant (see `unify`).
    let subject_variable = Self::subject_variable(subject);
    let typed_pattern = self.unify(pattern, subject.type_(), subject_variable);
    self.register_variables();
    typed_pattern
}
/// The name of the local variable a case subject refers to, if it is a
/// bare variable reference (possibly wrapped in `echo`). The name is used
/// for variant inference; any other kind of expression yields `None`.
fn subject_variable(subject: &TypedExpr) -> Option<EcoString> {
    match subject {
        TypedExpr::Var {
            constructor:
                ValueConstructor {
                    // Records should not be considered local variables
                    // See: https://github.com/gleam-lang/gleam/issues/3861
                    variant: ValueConstructorVariant::Record { .. },
                    ..
                },
            ..
        } => None,
        TypedExpr::Var { name, .. } => Some(name.clone()),
        // If the subject of a `case` expression is something like
        // `echo some_variable`, we still want to narrow the variant for
        // `some_variable`.
        TypedExpr::Echo {
            expression: Some(subject),
            ..
        } => Self::subject_variable(subject),
        TypedExpr::Int { .. }
        | TypedExpr::Float { .. }
        | TypedExpr::String { .. }
        | TypedExpr::Block { .. }
        | TypedExpr::Pipeline { .. }
        | TypedExpr::Fn { .. }
        | TypedExpr::List { .. }
        | TypedExpr::Call { .. }
        | TypedExpr::BinOp { .. }
        | TypedExpr::Case { .. }
        | TypedExpr::RecordAccess { .. }
        | TypedExpr::PositionalAccess { .. }
        | TypedExpr::ModuleSelect { .. }
        | TypedExpr::Tuple { .. }
        | TypedExpr::TupleIndex { .. }
        | TypedExpr::Todo { .. }
        | TypedExpr::Panic { .. }
        | TypedExpr::Echo { .. }
        | TypedExpr::BitArray { .. }
        | TypedExpr::RecordUpdate { .. }
        | TypedExpr::NegateBool { .. }
        | TypedExpr::NegateInt { .. }
        | TypedExpr::Invalid { .. } => None,
    }
}
/// Register the variables bound in this pattern in the environment,
/// starting usage tracking only for those not already referenced from
/// within the pattern itself.
fn register_variables(&mut self) {
    // Drain the local map so the typer is clean for any further pattern.
    for (name, variable) in std::mem::take(&mut self.variables) {
        let was_used = variable.was_used();
        let LocalVariable {
            location,
            origin,
            type_,
            usage: _,
            scope: _,
        } = variable;
        // If this variable has already been referenced in another part of
        // the pattern, we don't need to register it for usage tracking as
        // it has already been used.
        if !was_used {
            self.environment
                .init_usage(name.clone(), origin.clone(), location, self.problems);
        }
        self.environment
            .insert_local_variable(name, location, origin, type_);
    }
}
/// Type a bit array pattern by typing each of its segments in order,
/// flagging the final segment so it can be treated specially
/// (see `infer_pattern_segment`).
fn infer_pattern_bit_array(
    &mut self,
    mut segments: Vec<UntypedPatternBitArraySegment>,
    location: SrcSpan,
) -> TypedPattern {
    // Any variables from other parts of the pattern are no longer in scope.
    // Only variables from the bit array pattern itself can be used.
    for variable in self.variables.values_mut() {
        variable.scope = Scope::OtherPattern;
    }
    // Split off the final segment; all the others are typed with
    // `is_last_segment = false`, then it is typed with `true`.
    let last = segments.pop();
    let mut typed_segments = Vec::with_capacity(segments.len() + 1);
    for segment in segments {
        typed_segments.push(self.infer_pattern_segment(segment, false));
    }
    if let Some(segment) = last {
        typed_segments.push(self.infer_pattern_segment(segment, true));
    }
    TypedPattern::BitArray {
        location,
        segments: typed_segments,
    }
}
/// Type a single segment of a bit array pattern: default the type option
/// where the value makes it unambiguous, type the options and the value,
/// and reject unsupported shapes with errors.
fn infer_pattern_segment(
    &mut self,
    mut segment: UntypedPatternBitArraySegment,
    is_last_segment: bool,
) -> TypedPatternBitArraySegment {
    // If the segment doesn't have an explicit type option we add a default
    // one ourselves if the pattern is unambiguous: literal strings are
    // implicitly considered utf-8 encoded strings, while floats are
    // implicitly given the float type option.
    if !segment.has_type_option() {
        match segment.value_unwrapping_assign() {
            Pattern::String { location, .. } => {
                self.track_feature_usage(FeatureKind::UnannotatedUtf8StringSegment, *location);
                segment.options.push(BitArrayOption::Utf8 {
                    location: SrcSpan::default(),
                });
            }
            Pattern::Float { location, .. } => {
                self.track_feature_usage(FeatureKind::UnannotatedFloatSegment, *location);
                segment.options.push(BitArrayOption::Float {
                    location: SrcSpan::default(),
                })
            }
            // No default type option applies to any other pattern kind.
            Pattern::Int { .. }
            | Pattern::Variable { .. }
            | Pattern::BitArraySize(_)
            | Pattern::Assign { .. }
            | Pattern::Discard { .. }
            | Pattern::List { .. }
            | Pattern::Constructor { .. }
            | Pattern::Tuple { .. }
            | Pattern::BitArray { .. }
            | Pattern::StringPrefix { .. }
            | Pattern::Invalid { .. } => (),
        }
    }
    // Remembered here because `segment.options` is consumed just below.
    let has_non_utf8_string_option = segment.has_utf16_option() || segment.has_utf32_option();
    let options: Vec<_> = segment
        .options
        .into_iter()
        .map(|option| {
            analyse::infer_bit_array_option(option, |value, type_| {
                Ok(self.unify(value, type_, None))
            })
        })
        .try_collect()
        .expect("The function always returns Ok");
    self.check_pattern_segment_size_expression(&options);
    // Work out the segment's value type from its options; on error recover
    // with a fresh unbound type variable so typing can continue.
    let segment_type = match bit_array::type_options_for_pattern(
        &options,
        !is_last_segment,
        self.environment.target,
    ) {
        Ok(type_) => type_,
        Err(error) => {
            self.error(Error::BitArraySegmentError {
                error: error.error,
                location: error.location,
            });
            self.environment.new_unbound_var()
        }
    };
    // Track usage of the unaligned bit arrays feature on JavaScript so that
    // warnings can be emitted if the Gleam version constraint is too low
    if self.environment.target == Target::JavaScript
        && !self.current_function.has_javascript_external
    {
        for option in options.iter() {
            match option {
                // Use of the `bits` segment type
                BitArrayOption::Bits { location } => {
                    self.track_feature_usage(
                        FeatureKind::JavaScriptUnalignedBitArray,
                        *location,
                    );
                }
                // Int segments that aren't a whole number of bytes
                BitArrayOption::Size { value, .. } if segment_type.is_int() => match &**value {
                    Pattern::BitArraySize(BitArraySize::Int {
                        location,
                        int_value,
                        ..
                    }) if int_value % 8 != BigInt::ZERO => {
                        self.track_feature_usage(
                            FeatureKind::JavaScriptUnalignedBitArray,
                            *location,
                        );
                    }
                    Pattern::Int { .. }
                    | Pattern::Float { .. }
                    | Pattern::String { .. }
                    | Pattern::Variable { .. }
                    | Pattern::BitArraySize(_)
                    | Pattern::Assign { .. }
                    | Pattern::Discard { .. }
                    | Pattern::List { .. }
                    | Pattern::Constructor { .. }
                    | Pattern::Tuple { .. }
                    | Pattern::BitArray { .. }
                    | Pattern::StringPrefix { .. }
                    | Pattern::Invalid { .. } => (),
                },
                BitArrayOption::Bytes { .. }
                | BitArrayOption::Int { .. }
                | BitArrayOption::Float { .. }
                | BitArrayOption::Utf8 { .. }
                | BitArrayOption::Utf16 { .. }
                | BitArrayOption::Utf32 { .. }
                | BitArrayOption::Utf8Codepoint { .. }
                | BitArrayOption::Utf16Codepoint { .. }
                | BitArrayOption::Utf32Codepoint { .. }
                | BitArrayOption::Signed { .. }
                | BitArrayOption::Unsigned { .. }
                | BitArrayOption::Big { .. }
                | BitArrayOption::Little { .. }
                | BitArrayOption::Native { .. }
                | BitArrayOption::Size { .. }
                | BitArrayOption::Unit { .. } => (),
            }
        }
    }
    // A utf segment cannot be bound to a variable in a pattern; report it
    // and recover with a fresh type variable.
    let type_ = match segment.value.deref() {
        Pattern::Assign { pattern, .. } if pattern.is_discard() && segment_type.is_string() => {
            self.error(Error::BitArraySegmentError {
                error: bit_array::ErrorType::VariableUtfSegmentInPattern,
                location: segment.location,
            });
            self.environment.new_unbound_var()
        }
        Pattern::Variable { .. } if segment_type.is_string() => {
            self.error(Error::BitArraySegmentError {
                error: bit_array::ErrorType::VariableUtfSegmentInPattern,
                location: segment.location,
            });
            self.environment.new_unbound_var()
        }
        Pattern::Int { .. }
        | Pattern::Float { .. }
        | Pattern::String { .. }
        | Pattern::Variable { .. }
        | Pattern::BitArraySize(_)
        | Pattern::Assign { .. }
        | Pattern::Discard { .. }
        | Pattern::List { .. }
        | Pattern::Constructor { .. }
        | Pattern::Tuple { .. }
        | Pattern::BitArray { .. }
        | Pattern::StringPrefix { .. }
        | Pattern::Invalid { .. } => segment_type,
    };
    let typed_value = self.unify(*segment.value, type_.clone(), None);
    match &typed_value {
        // We can't directly match on the contents of a `Box`, so we must
        // use a guard here.
        Pattern::Assign {
            location, pattern, ..
        } if pattern.is_variable() => {
            // It is tricky to generate code on Erlang for a pattern like
            // `<<a as b>>`, since assignment patterns are not allowed in
            // bit array patterns in Erlang. Since there is basically no
            // reason to ever need to do this anyway, we simply emit an error
            // here.
            self.error(Error::DoubleVariableAssignmentInBitArray {
                location: *location,
            });
        }
        Pattern::Assign { location, .. } if has_non_utf8_string_option => {
            self.error(Error::NonUtf8StringAssignmentInBitArray {
                location: *location,
            });
        }
        Pattern::Int { .. }
        | Pattern::Float { .. }
        | Pattern::String { .. }
        | Pattern::Variable { .. }
        | Pattern::BitArraySize(_)
        | Pattern::Assign { .. }
        | Pattern::Discard { .. }
        | Pattern::List { .. }
        | Pattern::Constructor { .. }
        | Pattern::Tuple { .. }
        | Pattern::BitArray { .. }
        | Pattern::StringPrefix { .. }
        | Pattern::Invalid { .. } => {}
    };
    BitArraySegment {
        location: segment.location,
        value: Box::new(typed_value),
        options,
        type_,
    }
}
/// When we have an assignment or a case expression we unify the pattern with the
/// inferred type of the subject in order to determine what variables to insert
/// into the environment (or to detect a type error).
///
fn unify(
&mut self,
pattern: UntypedPattern,
type_: Arc<Type>,
// The name of the variable this pattern matches on, if any. Used for variant inference.
//
// Example:
// ```gleam
// case some_wibble {
// Wibble(..) -> {
// some_wibble.field_only_present_in_wibble
// }
// _ -> panic
// }
// ```
//
// Here, the pattern `Wibble(..)` has the subject variable `some_wibble`, meaning that
// in the inner scope, we can infer that the `some_wibble` variable is the `Wibble` variant
//
subject_variable: Option<EcoString>,
) -> TypedPattern {
match pattern {
Pattern::Discard { name, location, .. } => {
self.check_name_case(location, &name, Named::Discard);
let _ = self
.environment
.discarded_names
.insert(name.clone(), location);
Pattern::Discard {
type_,
name,
location,
}
}
Pattern::Invalid { location, .. } => Pattern::Invalid { type_, location },
Pattern::Variable {
name,
location,
origin,
..
} => match name.as_str() {
"true" | "false" => {
self.error(Error::LowercaseBoolPattern { location });
Pattern::Invalid { location, type_ }
}
_ => {
self.insert_variable(&name, type_.clone(), location, origin.clone());
Pattern::Variable {
type_,
name,
location,
origin,
}
}
},
Pattern::BitArraySize(size) => {
let location = size.location();
match self.bit_array_size(size, type_.clone()) {
Ok(size) => Pattern::BitArraySize(size),
Err(error) => {
self.error(error);
Pattern::Invalid { location, type_ }
}
}
}
Pattern::StringPrefix {
location,
left_location,
right_location,
left_side_string,
left_side_assignment,
right_side_assignment,
} => {
// The entire concatenate pattern must be a string
self.unify_types(type_, string(), location);
// The left hand side may assign a variable, which is the prefix of the string
if let Some((left, left_location)) = &left_side_assignment {
self.insert_variable(
left,
string(),
*left_location,
VariableOrigin {
syntax: VariableSyntax::AssignmentPattern,
declaration: self.position.to_declaration(),
},
);
}
// The right hand side may assign a variable, which is the suffix of the string
match &right_side_assignment {
AssignName::Variable(right) => {
self.insert_variable(
right,
string(),
right_location,
VariableOrigin {
syntax: VariableSyntax::Variable(right.clone()),
declaration: self.position.to_declaration(),
},
);
}
AssignName::Discard(right) => {
let _ = self
.environment
.discarded_names
.insert(right.clone(), right_location);
self.check_name_case(right_location, right, Named::Discard);
}
};
Pattern::StringPrefix {
location,
left_location,
right_location,
left_side_string,
left_side_assignment,
right_side_assignment,
}
}
Pattern::Assign {
name,
pattern,
location,
} => {
let pattern = self.unify(*pattern, type_, subject_variable);
if pattern.is_discard() {
self.problems.warning(Warning::UnusedDiscardPattern {
location,
name: name.clone(),
});
}
self.insert_variable(
&name,
pattern.type_().clone(),
location,
VariableOrigin {
syntax: VariableSyntax::AssignmentPattern,
declaration: self.position.to_declaration(),
},
);
Pattern::Assign {
name,
pattern: Box::new(pattern),
location,
}
}
Pattern::Int {
location,
value,
int_value,
} => {
self.unify_types(type_, int(), location);
if self.environment.target == Target::JavaScript
&& !self.current_function.has_javascript_external
{
check_javascript_int_safety(&int_value, location, self.problems);
}
Pattern::Int {
location,
value,
int_value,
}
}
Pattern::Float {
location,
value,
float_value,
} => {
self.unify_types(type_, float(), location);
check_float_safety(float_value, location, self.problems);
Pattern::Float {
location,
value,
float_value,
}
}
Pattern::String { location, value } => {
self.unify_types(type_, string(), location);
Pattern::String { location, value }
}
Pattern::List {
location,
elements,
tail,
..
} => match type_.named_type_arguments(
Publicity::Public,
PRELUDE_PACKAGE_NAME,
PRELUDE_MODULE_NAME,
"List",
1,
self.environment,
) {
Some(arguments) => {
let type_ = arguments
.first()
.expect("Failed to get type argument of List")
.clone();
let elements = elements
.into_iter()
.map(|element| self.unify(element, type_.clone(), None))
.collect();
let type_ = list(type_);
let tail = tail.map(|tail| {
| rust | Apache-2.0 | f424547f02e621f1c5f28749786e05eda7feb098 | 2026-01-04T15:40:22.554517Z | true |
gleam-lang/gleam | https://github.com/gleam-lang/gleam/blob/f424547f02e621f1c5f28749786e05eda7feb098/compiler-core/src/type_/fields.rs | compiler-core/src/type_/fields.rs | use super::Error;
use crate::{
ast::{CallArg, SrcSpan},
type_::error::IncorrectArityContext,
};
use ecow::EcoString;
use itertools::Itertools;
use std::collections::{HashMap, HashSet};
/// Records which argument positions of a callable carry which labels, so
/// labelled call arguments can be reordered into positional order.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct FieldMap {
    /// Number of accepted arguments, including unlabelled fields.
    pub arity: u32,
    /// Map of labels to argument indices
    pub fields: HashMap<EcoString, u32>,
}
/// Error returned by `FieldMap::insert` when the same label is used twice.
#[derive(Debug, Clone, Copy)]
pub struct DuplicateField;
impl FieldMap {
pub fn new(arity: u32) -> Self {
Self {
arity,
fields: HashMap::new(),
}
}
pub fn insert(&mut self, label: EcoString, index: u32) -> Result<(), DuplicateField> {
match self.fields.insert(label, index) {
Some(_) => Err(DuplicateField),
None => Ok(()),
}
}
pub fn into_option(self) -> Option<Self> {
if self.fields.is_empty() {
None
} else {
Some(self)
}
}
/// Reorder an argument list so that labelled fields supplied out-of-order are
/// in the correct order.
///
pub fn reorder<A>(
&self,
arguments: &mut Vec<CallArg<A>>,
location: SrcSpan,
context: IncorrectArityContext,
) -> Result<(), Error> {
let mut labelled_arguments_given = false;
let mut seen_labels = HashSet::new();
let mut unknown_labels = Vec::new();
let number_of_arguments = arguments.len();
if self.arity as usize != arguments.len() {
return Err(Error::IncorrectArity {
labels: self.missing_labels(arguments),
location,
context,
expected: self.arity as usize,
given: arguments.len(),
});
}
for argument in arguments.iter() {
match &argument.label {
Some(_) => {
labelled_arguments_given = true;
}
None => {
if labelled_arguments_given && !argument.is_implicit() {
return Err(Error::PositionalArgumentAfterLabelled {
location: argument.location,
});
}
}
}
}
// Keeps track of which labelled arguments need to be inserted into which indices
let mut labelled_arguments = HashMap::new();
// We iterate the argument in reverse order, because we have to remove elements
// from the `args` list quite a lot, and removing from the end of a list is more
// efficient than removing from the beginning or the middle.
let mut i = arguments.len();
while i > 0 {
i -= 1;
let (label, &location) =
match &arguments.get(i).expect("Field indexing to get label").label {
// A labelled argument, we may need to reposition it
Some(l) => (
l,
&arguments
.get(i)
.expect("Indexing in labelled field reordering")
.location,
),
// Not a labelled argument
None => {
continue;
}
};
let position = match self.fields.get(label) {
None => {
unknown_labels.push((label.clone(), location));
continue;
}
Some(&p) => p,
};
if seen_labels.contains(label) {
return Err(Error::DuplicateArgument {
location,
label: label.clone(),
});
}
let _ = seen_labels.insert(label.clone());
// Add this argument to the `labelled_arguments` map, and remove if from the
// existing arguments list. It will be reinserted later in the correct index
let _ = labelled_arguments.insert(position as usize, arguments.remove(i));
}
// The labelled arguments must be reinserted in order
for i in 0..number_of_arguments {
if let Some(argument) = labelled_arguments.remove(&i) {
arguments.insert(i, argument);
}
}
if unknown_labels.is_empty() {
Ok(())
} else {
Err(Error::UnknownLabels {
valid: self.fields.keys().cloned().collect(),
unknown: unknown_labels,
supplied: seen_labels.into_iter().collect(),
})
}
}
/// This returns an array of the labels that are unused given an argument
/// list.
/// The unused labels are in the order they are expected to be passed in
/// to a call using those.
///
/// ## Examples
///
/// ```gleam
/// pub fn wibble(label1 a, label2 b, label3 c) { todo }
///
/// wibble(1, label3: 2) // -> unused labels: [label2]
/// ```
///
pub fn missing_labels<A>(&self, arguments: &[CallArg<A>]) -> Vec<EcoString> {
    // The smallest label index tells us how many unlabelled parameters the
    // function has: if the first labelled parameter is third, the function
    // must take two unlabelled parameters before it.
    let positional_parameters = match self.fields.values().min() {
        Some(minimum) => *minimum as usize,
        None => return Vec::new(),
    };
    // Count the positional arguments actually supplied in the call, as any
    // beyond the unlabelled parameters will occupy labelled slots.
    let supplied_positionally = arguments
        .iter()
        .filter(|argument| argument.label.is_none() && !argument.is_use_implicit_callback())
        .count();
    // Labels the caller has already written out explicitly: those are
    // certainly not missing.
    let used_labels = arguments
        .iter()
        .filter_map(|argument| argument.label.as_ref())
        .collect::<HashSet<&EcoString>>();
    // Gather the labels not given explicitly, ordered by their position in
    // the function definition.
    let mut remaining: Vec<(&EcoString, u32)> = self
        .fields
        .iter()
        .filter(|(label, _)| !used_labels.contains(label))
        .map(|(label, position)| (label, *position))
        .collect();
    remaining.sort_by_key(|(_, position)| *position);
    // Positional arguments past the unlabelled parameters consume labelled
    // slots in order, so drop that many labels from the front.
    let consumed_by_positional = supplied_positionally.saturating_sub(positional_parameters);
    remaining
        .into_iter()
        .skip(consumed_by_positional)
        .map(|(label, _)| label.clone())
        .collect()
}
/// Builds the reverse lookup of this field map: from each argument index
/// to the label declared at that position.
pub fn indices_to_labels(&self) -> HashMap<u32, &EcoString> {
    let mut labels_by_index = HashMap::new();
    for (name, index) in self.fields.iter() {
        let _ = labels_by_index.insert(*index, name);
    }
    labels_by_index
}
}
/// Incrementally builds a `FieldMap` for a function or constructor
/// definition, validating each argument's label as it is added.
#[derive(Debug)]
pub struct FieldMapBuilder {
    // Index of the next argument to be added.
    index: u32,
    // Whether any labelled argument has been seen yet; used to reject an
    // unlabelled argument appearing after a labelled one.
    any_labels: bool,
    // The field map built up so far.
    field_map: FieldMap,
}
impl FieldMapBuilder {
pub fn new(size: u32) -> Self {
Self {
index: 0,
any_labels: false,
field_map: FieldMap::new(size),
}
}
pub fn add(&mut self, label: Option<&EcoString>, location: SrcSpan) -> Result<(), Error> {
match label {
Some(label) => self.labelled(label, location)?,
None => self.unlabelled(location)?,
}
self.index += 1;
Ok(())
}
fn labelled(&mut self, label: &EcoString, location: SrcSpan) -> Result<(), Error> {
if self.field_map.insert(label.clone(), self.index).is_err() {
return Err(Error::DuplicateField {
label: label.clone(),
location,
});
};
self.any_labels = true;
Ok(())
}
fn unlabelled(&mut self, location: SrcSpan) -> Result<(), Error> {
if self.any_labels {
return Err(Error::UnlabelledAfterlabelled { location });
}
Ok(())
}
pub fn finish(self) -> Option<FieldMap> {
self.field_map.into_option()
}
}
| rust | Apache-2.0 | f424547f02e621f1c5f28749786e05eda7feb098 | 2026-01-04T15:40:22.554517Z | false |
gleam-lang/gleam | https://github.com/gleam-lang/gleam/blob/f424547f02e621f1c5f28749786e05eda7feb098/compiler-core/src/type_/error.rs | compiler-core/src/type_/error.rs | use super::{
FieldAccessUsage,
expression::{ArgumentKind, CallKind},
};
use crate::{
ast::{BinOp, BitArraySegmentTruncation, Layer, SrcSpan, TodoKind},
build::Target,
exhaustiveness::ImpossibleBitArraySegmentPattern,
parse::LiteralFloatValue,
type_::{Type, expression::ComparisonOutcome},
};
use camino::Utf8PathBuf;
use ecow::EcoString;
use hexpm::version::Version;
use num_bigint::BigInt;
#[cfg(test)]
use pretty_assertions::assert_eq;
use std::sync::Arc;
/// Errors and warnings discovered when compiling a module.
///
#[derive(Debug, Eq, PartialEq, Clone, Default)]
pub struct Problems {
    // Errors, in the order they were registered.
    errors: Vec<Error>,
    // Warnings, in the order they were registered.
    warnings: Vec<Warning>,
}
impl Problems {
    /// Creates an empty collection of errors and warnings.
    pub fn new() -> Self {
        Self::default()
    }

    /// Sort the warnings and errors by their source location.
    pub fn sort(&mut self) {
        self.errors.sort_by_key(|error| error.start_location());
        self.warnings.sort_by_key(|warning| warning.location().start);
    }

    /// Register an error.
    pub fn error(&mut self, error: Error) {
        self.errors.push(error);
    }

    /// Register a warning.
    pub fn warning(&mut self, warning: Warning) {
        self.warnings.push(warning);
    }

    /// Take all the errors, leaving an empty vector in their place.
    pub fn take_errors(&mut self) -> Vec<Error> {
        std::mem::take(&mut self.errors)
    }

    /// Take all the warnings, leaving an empty vector in their place.
    pub fn take_warnings(&mut self) -> Vec<Warning> {
        std::mem::take(&mut self.warnings)
    }
}
/// This is used by the unknown record field error to tell if an unknown field
/// is a field appearing in another variant of the same type, to provide a
/// better error message explaining why it can't be accessed:
///
/// ```gleam
/// pub type Wibble {
///   Wibble(field: Int)
///   Wobble(thing: String)
/// }
///
/// Wobble("hello").field
/// //              ^^^^^^
/// ```
///
/// Here the error can be extra useful and explain that to access `field` all
/// variants should have that field at the same position and with the same type.
///
#[derive(Debug, Eq, PartialEq, Clone, Copy)]
pub enum UnknownField {
    /// The field we're trying to access appears in at least a variant, so it
    /// can be useful to explain why it cannot be accessed and how to fix it
    /// (adding it to all variants/making sure it has the same type/making sure
    /// it's in the same position).
    ///
    AppearsInAVariant,
    /// The field we are trying to access appears in a variant, but we can
    /// infer that the value we are accessing on is never the one that this
    /// value is, so we can give information accordingly.
    ///
    AppearsInAnImpossibleVariant,
    /// The field is not in any of the variants, this might truly be a typo and
    /// there's no need to add further explanations.
    ///
    TrulyUnknown,
    /// The type that the user is trying to access has no fields whatsoever,
    /// such as Int or fn(..) -> _
    ///
    NoFields,
}
/// A suggestion for an unknown module
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)]
pub enum ModuleSuggestion {
    /// A module which has a similar name, and an
    /// exported value matching the one being accessed
    Importable(EcoString),
    /// A module already imported in the current scope
    Imported(EcoString),
}
impl ModuleSuggestion {
    /// Renders a human readable hint for the unknown `module` name, telling
    /// the user which module to import or reference instead.
    pub fn suggestion(&self, module: &str) -> String {
        match self {
            ModuleSuggestion::Imported(name) => format!("Did you mean `{name}`?"),
            ModuleSuggestion::Importable(name) => {
                // Add a little extra information if the names don't match
                let imported_name = self.last_name_component();
                if imported_name == module {
                    format!("Did you mean to import `{name}`?")
                } else {
                    format!("Did you mean to import `{name}` and reference `{imported_name}`?")
                }
            }
        }
    }

    /// The final `/`-separated segment of the suggested module's name, which
    /// is the name the module would be referenced by once imported.
    pub fn last_name_component(&self) -> &str {
        let name = match self {
            ModuleSuggestion::Imported(name) | ModuleSuggestion::Importable(name) => name,
        };
        name.split('/').next_back().unwrap_or(name)
    }
}
#[derive(Debug, Eq, PartialEq, Clone)]
pub enum Error {
InvalidImport {
location: SrcSpan,
importing_module: EcoString,
imported_module: EcoString,
kind: InvalidImportKind,
},
BitArraySegmentError {
error: crate::bit_array::ErrorType,
location: SrcSpan,
},
UnknownLabels {
unknown: Vec<(EcoString, SrcSpan)>,
valid: Vec<EcoString>,
supplied: Vec<EcoString>,
},
UnknownVariable {
location: SrcSpan,
name: EcoString,
variables: Vec<EcoString>,
/// If there's a discarded variable with the same name in the same scope
/// this will contain its location.
discarded_location: Option<SrcSpan>,
type_with_name_in_scope: bool,
},
UnknownType {
location: SrcSpan,
name: EcoString,
hint: UnknownTypeHint,
},
UnknownModule {
location: SrcSpan,
name: EcoString,
suggestions: Vec<ModuleSuggestion>,
},
UnknownModuleType {
location: SrcSpan,
name: EcoString,
module_name: EcoString,
type_constructors: Vec<EcoString>,
value_with_same_name: bool,
},
UnknownModuleValue {
location: SrcSpan,
name: EcoString,
module_name: EcoString,
value_constructors: Vec<EcoString>,
type_with_same_name: bool,
context: ModuleValueUsageContext,
},
ModuleAliasUsedAsName {
location: SrcSpan,
name: EcoString,
},
NotFn {
location: SrcSpan,
type_: Arc<Type>,
},
UnknownRecordField {
location: SrcSpan,
type_: Arc<Type>,
label: EcoString,
fields: Vec<EcoString>,
usage: FieldAccessUsage,
unknown_field: UnknownField,
},
IncorrectArity {
location: SrcSpan,
expected: usize,
context: IncorrectArityContext,
given: usize,
labels: Vec<EcoString>,
},
UnsafeRecordUpdate {
location: SrcSpan,
reason: UnsafeRecordUpdateReason,
},
UnnecessarySpreadOperator {
location: SrcSpan,
arity: usize,
},
IncorrectTypeArity {
location: SrcSpan,
name: EcoString,
expected: usize,
given: usize,
},
CouldNotUnify {
location: SrcSpan,
situation: Option<UnifyErrorSituation>,
expected: Arc<Type>,
given: Arc<Type>,
},
RecursiveType {
location: SrcSpan,
},
DuplicateName {
location_a: SrcSpan,
location_b: SrcSpan,
name: EcoString,
},
DuplicateImport {
location: SrcSpan,
previous_location: SrcSpan,
name: EcoString,
},
DuplicateTypeName {
location: SrcSpan,
previous_location: SrcSpan,
name: EcoString,
},
DuplicateArgument {
location: SrcSpan,
label: EcoString,
},
DuplicateField {
location: SrcSpan,
label: EcoString,
},
PrivateTypeLeak {
location: SrcSpan,
leaked: Type,
},
UnexpectedLabelledArg {
location: SrcSpan,
label: EcoString,
},
PositionalArgumentAfterLabelled {
location: SrcSpan,
},
IncorrectNumClausePatterns {
location: SrcSpan,
expected: usize,
given: usize,
},
NonLocalClauseGuardVariable {
location: SrcSpan,
name: EcoString,
},
ExtraVarInAlternativePattern {
location: SrcSpan,
name: EcoString,
},
MissingVarInAlternativePattern {
location: SrcSpan,
name: EcoString,
},
DuplicateVarInPattern {
location: SrcSpan,
name: EcoString,
},
OutOfBoundsTupleIndex {
location: SrcSpan,
index: u64,
size: usize,
},
NotATuple {
location: SrcSpan,
given: Arc<Type>,
},
NotATupleUnbound {
location: SrcSpan,
},
RecordAccessUnknownType {
location: SrcSpan,
},
RecordUpdateInvalidConstructor {
location: SrcSpan,
},
UnexpectedTypeHole {
location: SrcSpan,
},
ReservedModuleName {
name: EcoString,
},
KeywordInModuleName {
name: EcoString,
keyword: EcoString,
},
NotExhaustivePatternMatch {
location: SrcSpan,
unmatched: Vec<EcoString>,
kind: PatternMatchKind,
},
/// A function was defined with multiple arguments with the same name
///
/// # Examples
///
/// ```gleam
/// fn main(x, x) { Nil }
/// ```
/// ```gleam
/// fn main() {
/// fn(x, x) { Nil }
/// }
/// ```
ArgumentNameAlreadyUsed {
location: SrcSpan,
name: EcoString,
},
/// A function was defined with an unlabelled argument after a labelled one.
UnlabelledAfterlabelled {
location: SrcSpan,
},
/// A type alias was defined directly or indirectly in terms of itself, which would
/// cause it to expand to infinite size.
/// e.g.
/// type ForkBomb = #(ForkBomb, ForkBomb)
RecursiveTypeAlias {
location: SrcSpan,
cycle: Vec<EcoString>,
},
/// A function has been given an external implementation but not all the
/// type annotations have been given. The annotations are required as we
/// cannot infer the types of external implementations.
ExternalMissingAnnotation {
location: SrcSpan,
kind: MissingAnnotation,
},
/// A function has been given without either a Gleam implementation or an
/// external one.
NoImplementation {
location: SrcSpan,
},
/// A function/constant that is used doesn't have an implementation for the
/// current compilation target.
UnsupportedExpressionTarget {
location: SrcSpan,
target: Target,
},
/// A function's JavaScript implementation has been given but it does not
/// have a valid module name.
InvalidExternalJavascriptModule {
location: SrcSpan,
module: EcoString,
name: EcoString,
},
/// A function's JavaScript implementation has been given but it does not
/// have a valid function name.
InvalidExternalJavascriptFunction {
location: SrcSpan,
function: EcoString,
name: EcoString,
},
/// A case expression is missing one or more patterns to match all possible
/// values of the type.
InexhaustiveCaseExpression {
location: SrcSpan,
missing: Vec<EcoString>,
},
/// A case expression is missing its body.
MissingCaseBody {
location: SrcSpan,
},
/// Let assignment's pattern does not match all possible values of the type.
InexhaustiveLetAssignment {
location: SrcSpan,
missing: Vec<EcoString>,
},
/// A type alias has a type variable but it is not used in the definition.
///
/// For example, here `unused` is not used
///
/// ```gleam
/// pub type Wibble(unused) =
/// Int
/// ```
UnusedTypeAliasParameter {
location: SrcSpan,
name: EcoString,
},
/// A definition has two type parameters with the same name.
///
/// ```gleam
/// pub type Wibble(a, a) =
/// Int
/// ```
/// ```gleam
/// pub type Wibble(a, a) {
/// Wibble
/// }
/// ```
DuplicateTypeParameter {
location: SrcSpan,
name: EcoString,
},
/// A public function doesn't have an implementation for the current target.
/// This is only raised when compiling a package with `TargetSupport::Enforced`, which is
/// typically the root package, deps not being enforced.
///
/// For example, if compiling to Erlang:
///
/// ```gleam
/// @external(javascript, "one", "two")
/// pub fn wobble() -> Int
/// ```
UnsupportedPublicFunctionTarget {
target: Target,
name: EcoString,
location: SrcSpan,
},
/// When there's something that is not a function to the left of the `<-`
/// operator in a use expression:
///
/// For example:
///
/// ```gleam
/// use <- "wibble"
/// todo
/// ```
NotFnInUse {
location: SrcSpan,
type_: Arc<Type>,
},
/// When the function to the right hand side of `<-` in a `use` expression
/// is called with the wrong number of arguments (given already takes into
/// account the use callback passed to the function).
///
UseFnIncorrectArity {
location: SrcSpan,
expected: usize,
given: usize,
},
/// When on the left hand side of `<-` in a `use` expression there is the
/// wrong number of patterns.
///
/// For example:
///
/// ```gleam
/// use _, _ <- result.try(res)
/// todo
/// ```
///
UseCallbackIncorrectArity {
call_location: SrcSpan,
pattern_location: SrcSpan,
expected: usize,
given: usize,
},
/// When on the right hand side of use there is a function that doesn't take
/// a callback function as its last argument.
///
/// For example:
///
/// ```gleam
/// use <- io.println
/// ```
///
UseFnDoesntTakeCallback {
location: SrcSpan,
actual_type: Option<Type>,
},
/// When the name assigned to a variable or function doesn't follow the gleam
/// naming conventions.
///
/// For example:
///
/// ```gleam
/// let myBadName = 42
/// ```
BadName {
location: SrcSpan,
kind: Named,
name: EcoString,
},
/// Occurs when all the variant types of a custom type are deprecated
///
/// ```gleam
/// type Wibble {
/// @deprecated("1")
/// Wobble1
/// @deprecated("1")
/// Wobble1
/// }
/// ```
AllVariantsDeprecated {
location: SrcSpan,
},
/// Occurs when any variant of a custom type is deprecated while
/// the custom type itself is deprecated
DeprecatedVariantOnDeprecatedType {
location: SrcSpan,
},
/// Occurs when a literal floating point has a value that is outside of the
/// range representable by floats: -1.7976931348623157e308 to
/// 1.7976931348623157e308.
LiteralFloatOutOfRange {
location: SrcSpan,
},
/// When the echo keyword is not followed by an expression to be printed.
/// The only place where echo is allowed to appear on its own is as a step
/// of a pipeline, otherwise omitting the expression will result in this
/// error. For example:
///
/// ```gleam
/// call(echo, 1, 2)
/// // ^^^^ Error!
/// ```
///
EchoWithNoFollowingExpression {
location: SrcSpan,
},
/// When someone tries concatenating two string values using the `+` operator.
///
/// ```gleam
/// "aaa" + "bbb"
/// //      ^ We want to suggest using `<>` instead!
/// ```
StringConcatenationWithAddInt {
location: SrcSpan,
},
/// When someone tries using a float operator on two ints.
///
/// ```gleam
/// 1 +. 3
/// //^ We want to suggest using `+` instead!
/// ```
FloatOperatorOnInts {
operator: BinOp,
location: SrcSpan,
},
/// When someone tries using an int operator on two floats.
///
/// ```gleam
/// 1.2 + 1.0
/// //  ^ We want to suggest using `+.` instead!
/// ```
IntOperatorOnFloats {
operator: BinOp,
location: SrcSpan,
},
DoubleVariableAssignmentInBitArray {
location: SrcSpan,
},
NonUtf8StringAssignmentInBitArray {
location: SrcSpan,
},
/// This happens when a private type is marked as opaque. Only public types
/// can be opaque.
///
/// ```gleam
/// opaque type Wibble {
/// Wobble
/// }
/// ```
///
PrivateOpaqueType {
location: SrcSpan,
},
SrcImportingDevDependency {
importing_module: EcoString,
imported_module: EcoString,
package: EcoString,
location: SrcSpan,
},
/// This happens when a type has no type parameters (for example `Int`) but
/// it is being used as a constructor: `Int()`, `Bool(a, b)`.
///
TypeUsedAsAConstructor {
location: SrcSpan,
name: EcoString,
},
/// The `@external` annotation on custom types can only be used for external
/// types, types with no constructors.
///
ExternalTypeWithConstructors {
location: SrcSpan,
},
LowercaseBoolPattern {
location: SrcSpan,
},
}
/// Where an unknown module value was referenced from, used to tailor the
/// error message.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ModuleValueUsageContext {
    /// Referenced in an unqualified import list: `import mod.{value}`
    UnqualifiedImport,
    /// Referenced with module access syntax: `mod.value`
    ModuleAccess,
}
/// Which part of an externally-implemented function is missing the type
/// annotation it requires.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum MissingAnnotation {
    /// A parameter is missing its annotation.
    Parameter,
    /// The return type is missing its annotation.
    Return,
}
/// The kind of construct an inexhaustive pattern match occurred in.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum PatternMatchKind {
    /// A `case` expression.
    Case,
    /// A `let` assignment.
    Assignment,
}
/// Whether an inefficient list-length check was testing for emptiness or
/// for non-emptiness.
#[derive(Debug, Clone, Copy, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
pub enum EmptyListCheckKind {
    /// Checking that the list is empty.
    Empty,
    /// Checking that the list is not empty.
    NonEmpty,
}
/// The kind of literal collection used as a `case` subject, reported by the
/// `CaseMatchOnLiteralCollection` warning.
#[derive(Debug, Clone, Copy, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
pub enum LiteralCollectionKind {
    /// A list literal.
    List,
    /// A tuple literal.
    Tuple,
    /// A record constructor literal.
    Record,
}
/// Whether an arity mismatch occurred in a pattern or in a function call.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum IncorrectArityContext {
    /// The wrong number of arguments appeared in a pattern.
    Pattern,
    /// The wrong number of arguments appeared in a function call.
    Function,
}
/// The kind of forbidden import between the project's source directories.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum InvalidImportKind {
    /// A `src` module importing a `test` module.
    SrcImportingTest,
    /// A `src` module importing a `dev` module.
    SrcImportingDev,
    /// A `dev` module importing a `test` module.
    DevImportingTest,
}
/// The kind of thing a name belongs to, used when reporting naming
/// problems such as `Error::BadName`.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Named {
    Type,
    TypeAlias,
    TypeVariable,
    CustomTypeVariant,
    Variable,
    Argument,
    Label,
    Constant,
    Function,
    Discard,
}

impl Named {
    /// A capitalised, human readable description of this kind of named
    /// thing, suitable for starting an error message sentence.
    pub fn as_str(self) -> &'static str {
        match self {
            // Value-level names.
            Named::Variable => "Variable",
            Named::Argument => "Argument",
            Named::Label => "Label",
            Named::Constant => "Constant",
            Named::Function => "Function",
            Named::Discard => "Discard",
            // Type-level names.
            Named::Type => "Type",
            Named::TypeAlias => "Type alias",
            Named::TypeVariable => "Type variable",
            Named::CustomTypeVariant => "Type variant",
        }
    }
}
/// Where and how a variable was introduced; carried by the `UnusedVariable`
/// warning so diagnostics can suggest how to silence it.
#[derive(Debug, Clone, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
pub struct VariableOrigin {
    /// The syntax used to define the variable.
    pub syntax: VariableSyntax,
    /// The construct that declared the variable.
    pub declaration: VariableDeclaration,
}
#[derive(Debug, Clone, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
/// The syntax used to define a variable. Used to determine how it can be
/// ignored when unused.
pub enum VariableSyntax {
    /// A variable that can be ignored by prefixing with an underscore: `_name`
    Variable(EcoString),
    /// A variable from label shorthand syntax, which can be ignored with an
    /// underscore: `label: _`
    LabelShorthand(EcoString),
    /// A variable from an assignment pattern, which can be ignored by removing
    /// the `as name` part.
    AssignmentPattern,
    /// A variable generated by the compiler. This should never need to be
    /// ignored.
    Generated,
}
#[derive(Debug, Clone, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
/// The source of a variable, such as a `let` assignment, or function parameter.
pub enum VariableDeclaration {
    /// Bound by a pattern in a `let` assignment.
    LetPattern,
    /// Bound by a pattern in a `use` expression.
    UsePattern,
    /// Bound by a pattern in a `case` clause.
    ClausePattern,
    /// Declared as a function parameter.
    FunctionParameter {
        /// The name of the function defining the parameter. This will be None
        /// for parameters introduced by anonymous functions: `fn(a) { a }`
        ///
        function_name: Option<EcoString>,
        /// The index of the parameter in the function's parameter list.
        ///
        index: usize,
    },
    /// Introduced by the compiler rather than written by the user.
    Generated,
}
impl VariableOrigin {
    /// A human readable suggestion for silencing the "unused variable"
    /// warning for a variable with this origin, or `None` for variables the
    /// compiler generated itself.
    pub fn how_to_ignore(&self) -> Option<String> {
        let message = match &self.syntax {
            VariableSyntax::Variable(name) => {
                format!("You can ignore it with an underscore: `_{name}`.")
            }
            VariableSyntax::LabelShorthand(label) => {
                format!("You can ignore it with an underscore: `{label}: _`.")
            }
            VariableSyntax::AssignmentPattern => "You can safely remove it.".to_string(),
            VariableSyntax::Generated => return None,
        };
        Some(message)
    }

    /// The origin of a variable introduced by the compiler itself.
    pub fn generated() -> Self {
        Self {
            syntax: VariableSyntax::Generated,
            declaration: VariableDeclaration::Generated,
        }
    }

    /// Whether this variable was declared as a function parameter.
    pub fn is_function_parameter(&self) -> bool {
        match self.declaration {
            VariableDeclaration::FunctionParameter { .. } => true,
            // Listed explicitly (no `_` catch-all) so that adding a new
            // declaration kind forces this match to be revisited.
            VariableDeclaration::LetPattern
            | VariableDeclaration::UsePattern
            | VariableDeclaration::ClausePattern
            | VariableDeclaration::Generated => false,
        }
    }
}
#[derive(Debug, Eq, PartialEq, Clone, serde::Serialize, serde::Deserialize)]
pub enum Warning {
Todo {
kind: TodoKind,
location: SrcSpan,
type_: Arc<Type>,
},
ImplicitlyDiscardedResult {
location: SrcSpan,
},
UnusedLiteral {
location: SrcSpan,
},
UnusedValue {
location: SrcSpan,
},
NoFieldsRecordUpdate {
location: SrcSpan,
},
AllFieldsRecordUpdate {
location: SrcSpan,
},
UnusedType {
location: SrcSpan,
imported: bool,
name: EcoString,
},
UnusedConstructor {
location: SrcSpan,
imported: bool,
name: EcoString,
},
UnusedImportedValue {
location: SrcSpan,
name: EcoString,
},
UnusedImportedModule {
location: SrcSpan,
name: EcoString,
},
UnusedImportedModuleAlias {
location: SrcSpan,
alias: EcoString,
module_name: EcoString,
},
UnusedPrivateModuleConstant {
location: SrcSpan,
name: EcoString,
},
UnusedPrivateFunction {
location: SrcSpan,
name: EcoString,
},
UnusedVariable {
location: SrcSpan,
origin: VariableOrigin,
},
UnnecessaryDoubleIntNegation {
location: SrcSpan,
},
UnnecessaryDoubleBoolNegation {
location: SrcSpan,
},
InefficientEmptyListCheck {
location: SrcSpan,
kind: EmptyListCheckKind,
},
TransitiveDependencyImported {
location: SrcSpan,
module: EcoString,
package: EcoString,
},
DeprecatedItem {
location: SrcSpan,
message: EcoString,
layer: Layer,
},
UnreachableCasePattern {
location: SrcSpan,
reason: UnreachablePatternReason,
},
UnusedDiscardPattern {
location: SrcSpan,
name: EcoString,
},
/// This happens when someone tries to write a case expression where one of
/// the subjects is a literal tuple, list or bit array for example:
///
/// ```gleam
/// case #(wibble, wobble) { ... }
/// ```
///
/// Matching on a literal collection of elements is redundant since we can
/// always pass the single items it's made of separated by a comma:
///
/// ```gleam
/// case wibble, wobble { ... }
/// ```
///
CaseMatchOnLiteralCollection {
kind: LiteralCollectionKind,
location: SrcSpan,
},
/// This happens if someone tries to match on some kind of literal value
/// like an Int, a String, an empty List etc.
///
/// ```gleam
/// case #() { ... }
/// ```
///
/// The whole match becomes redundant since one can already tell beforehand
/// the structure of the value being matched.
///
/// Note: for non-empty literal collection of values we want to provide a
/// better error message that suggests to drop the wrapper, for that
/// we have the `CaseMatchOnLiteralCollection` variant.
///
CaseMatchOnLiteralValue {
location: SrcSpan,
},
/// This happens when someone defines an external type (with no
/// constructors) and marks it as opaque:
///
/// ```gleam
/// opaque type External
/// ```
///
/// Since an external type already has no constructors, marking it as
/// opaque is redundant.
///
OpaqueExternalType {
location: SrcSpan,
},
/// This happens when an internal type is accidentally exposed in the public
/// API. Since internal types are excluded from documentation, completions
/// and the package interface, this would lead to poor developer experience.
///
/// ```gleam
/// @internal type Wibble
///
/// pub fn wibble(thing: Wibble) { todo }
/// // ^^^^^^^^^^^^^ There would be no documentation
/// // explaining what `Wibble` is in the
/// // package's doc site.
/// ```
InternalTypeLeak {
location: SrcSpan,
leaked: Type,
},
RedundantAssertAssignment {
location: SrcSpan,
},
AssertAssignmentOnImpossiblePattern {
location: SrcSpan,
reason: AssertImpossiblePattern,
},
/// When a `todo` or `panic` is used as a function instead of providing the
/// error message with the `as` syntax.
///
/// ```gleam
/// todo("this won't appear in the error message")
/// ```
///
TodoOrPanicUsedAsFunction {
kind: TodoOrPanic,
location: SrcSpan,
arguments_location: Option<SrcSpan>,
arguments: usize,
},
UnreachableCodeAfterPanic {
location: SrcSpan,
panic_position: PanicPosition,
},
/// When a function capture is used in a pipe to pipe into the first
/// argument of a function:
///
/// ```gleam
/// wibble |> wobble(_, 1)
/// ^ Redundant and can be removed
/// ```
///
RedundantPipeFunctionCapture {
location: SrcSpan,
},
/// When the `gleam` range specified in the package's `gleam.toml` is too
/// low and would include a version that's too low to support this feature.
///
/// For example, let's say that a package is saying `gleam = ">=1.1.0"`
/// but it is using label shorthand syntax: `wibble(label:)`.
/// That requires a version that is `>=1.4.0`, so the constraint expressed
/// in the `gleam.toml` is too permissive and if someone were to run this
/// code with v1.1.0 they would run into compilation errors since the
/// compiler cannot know of label shorthands!
///
FeatureRequiresHigherGleamVersion {
location: SrcSpan,
minimum_required_version: Version,
wrongfully_allowed_version: Version,
feature_kind: FeatureKind,
},
/// When targeting JavaScript and an `Int` value is specified that lies
/// outside the range `Number.MIN_SAFE_INTEGER` - `Number.MAX_SAFE_INTEGER`.
///
JavaScriptIntUnsafe {
location: SrcSpan,
},
/// When we are trying to use bool assert on a literal boolean. For example:
/// ```gleam
/// assert True
/// ^ The programmer knows this will never panic, so it's useless
/// ```
AssertLiteralBool {
location: SrcSpan,
},
/// When a segment has a constant value that is bigger than its size and we
/// know for certain is going to be truncated.
///
BitArraySegmentTruncatedValue {
truncation: BitArraySegmentTruncation,
location: SrcSpan,
},
/// In Gleam v1 it is possible to import one module twice using different aliases.
/// This is deprecated, and likely would be removed in a Gleam v2.
ModuleImportedTwice {
name: EcoString,
first: SrcSpan,
second: SrcSpan,
},
/// Top-level definition should not shadow an imported one.
/// This includes constant or function imports.
TopLevelDefinitionShadowsImport {
location: SrcSpan,
name: EcoString,
},
/// This warning is raised when we perform a comparison that the compiler
/// can tell is always going to succeed or fail. For example:
///
/// ```gleam
/// 1 == 1 // This always succeeds
/// 2 != 2 // This always fails
/// 1 > 10 // This always fails
/// a == a // This always succeeds
/// ```
RedundantComparison {
location: SrcSpan,
outcome: ComparisonOutcome,
},
/// When a function's argument is only ever used unchanged in recursive
/// calls. For example:
///
/// ```gleam
/// pub fn wibble(x, n) {
/// // ^ This argument is not needed,
/// case n {
/// 0 -> Nil
/// _ -> wibble(x, n - 1)
/// // ^ It's only used in recursive calls!
/// }
/// }
/// ```
///
UnusedRecursiveArgument {
location: SrcSpan,
},
}
/// Why a `let assert` pattern can never match, making the assertion certain
/// to panic at runtime.
#[derive(Debug, Eq, PartialEq, Clone, serde::Serialize, serde::Deserialize)]
pub enum AssertImpossiblePattern {
    /// When `let assert`-ing on a variant that's different from the inferred
    /// one.
    ///
    /// ```gleam
    /// let assert Error(_) = Ok(_)
    /// ```
    ///
    InferredVariant,
    /// When `let assert`-ing on a pattern that will never match because it's
    /// matching on impossible segment(s).
    ///
    /// ```gleam
    /// let assert <<-2:unsigned>> = bit_array
    /// ```
    ///
    ImpossibleSegments {
        /// The bit array segment patterns that can never match.
        segments: Vec<ImpossibleBitArraySegmentPattern>,
    },
}
/// A language feature that requires a minimum compiler version, used by the
/// `FeatureRequiresHigherGleamVersion` warning when a package's declared
/// `gleam` version constraint is more permissive than the features it uses.
/// See `FeatureKind::required_version` for the version each feature needs.
#[derive(Debug, Eq, Copy, PartialEq, Clone, serde::Serialize, serde::Deserialize)]
pub enum FeatureKind {
    LabelShorthandSyntax,
    ConstantStringConcatenation,
    ArithmeticInGuards,
    UnannotatedUtf8StringSegment,
    UnannotatedFloatSegment,
    NestedTupleAccess,
    InternalAnnotation,
    AtInJavascriptModules,
    RecordUpdateVariantInference,
    RecordAccessVariantInference,
    LetAssertWithMessage,
    VariantWithDeprecatedAnnotation,
    JavaScriptUnalignedBitArray,
    BoolAssert,
    ExternalCustomType,
    ConstantRecordUpdate,
    ExpressionInSegmentSize,
}
impl FeatureKind {
    /// The first compiler version in which this feature became available;
    /// a package using the feature must require at least this version.
    pub fn required_version(&self) -> Version {
        match self {
            FeatureKind::InternalAnnotation | FeatureKind::NestedTupleAccess => {
                Version::new(1, 1, 0)
            }
            FeatureKind::AtInJavascriptModules => Version::new(1, 2, 0),
            FeatureKind::ArithmeticInGuards => Version::new(1, 3, 0),
            FeatureKind::LabelShorthandSyntax | FeatureKind::ConstantStringConcatenation => {
                Version::new(1, 4, 0)
            }
            FeatureKind::UnannotatedUtf8StringSegment => Version::new(1, 5, 0),
            FeatureKind::RecordUpdateVariantInference
            | FeatureKind::RecordAccessVariantInference => Version::new(1, 6, 0),
            FeatureKind::VariantWithDeprecatedAnnotation | FeatureKind::LetAssertWithMessage => {
                Version::new(1, 7, 0)
            }
            FeatureKind::JavaScriptUnalignedBitArray => Version::new(1, 9, 0),
            FeatureKind::UnannotatedFloatSegment => Version::new(1, 10, 0),
            FeatureKind::BoolAssert => Version::new(1, 11, 0),
            FeatureKind::ExpressionInSegmentSize => Version::new(1, 12, 0),
            FeatureKind::ExternalCustomType | FeatureKind::ConstantRecordUpdate => {
                Version::new(1, 14, 0)
            }
        }
    }
}
/// Where a panicking expression sits relative to the unreachable code being
/// warned about in `UnreachableCodeAfterPanic`.
#[derive(Debug, Eq, PartialEq, Clone, Copy, serde::Serialize, serde::Deserialize)]
pub enum PanicPosition {
    /// When the unreachable part is a function argument, this means that one
    /// of the previous arguments must be a panic.
    PreviousFunctionArgument,
    /// When the unreachable part is a function call, this means that its last
    /// argument must be a panic.
    LastFunctionArgument,
    /// When the expression to be printed by echo panics.
    EchoExpression,
    /// Any expression that doesn't fall in the previous categories.
    PreviousExpression,
}
/// Whether a `todo` or a `panic` keyword was used as if it were a function,
/// reported by the `TodoOrPanicUsedAsFunction` warning.
#[derive(Debug, Eq, PartialEq, Clone, Copy, serde::Serialize, serde::Deserialize)]
pub enum TodoOrPanic {
    /// The `todo` keyword.
    Todo,
    /// The `panic` keyword.
    Panic,
}
#[derive(Debug, Clone, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
pub enum UnreachablePatternReason {
/// The clause is unreachable because a previous pattern
/// matches the same case.
DuplicatePattern,
/// The clause is unreachable because we have inferred the variant
| rust | Apache-2.0 | f424547f02e621f1c5f28749786e05eda7feb098 | 2026-01-04T15:40:22.554517Z | true |
gleam-lang/gleam | https://github.com/gleam-lang/gleam/blob/f424547f02e621f1c5f28749786e05eda7feb098/compiler-core/src/type_/printer.rs | compiler-core/src/type_/printer.rs | use bimap::BiMap;
use ecow::{EcoString, eco_format};
use im::HashMap;
use std::{collections::HashSet, sync::Arc};
use crate::{
ast::SrcSpan,
type_::{Type, TypeAliasConstructor, TypeVar},
};
/// This class keeps track of what names are used for modules in the current
/// scope, so they can be printed in errors, etc.
///
#[derive(Debug, Clone, PartialEq, Eq, Default)]
pub struct Names {
/// Types that exist in the current module, either defined or imported in an
/// unqualified fashion.
///
/// key: (Defining module name, type name)
/// value: Alias name
///
/// # Example 1
///
/// ```gleam
/// type Wibble = wobble.Woo
/// ```
/// would result in
/// - key: `("wibble", "Woo")`
/// - value: `"Wibble"`
///
/// # Example 2
///
/// ```gleam
/// import some/module.{type Wibble}
/// ```
/// would result in
/// - key: `("some/module", "Wibble")`
/// - value: `"Wibble"`
///
/// # Example 3
///
/// ```gleam
/// import some/module.{type Wibble as Wobble}
/// ```
/// would result in
/// - key: `("some/module", "Wibble")`
/// - value: `"Wobble"`
///
local_types: BiMap<(EcoString, EcoString), EcoString>,
/// Mapping of imported modules to the names they are locally known by
///
/// key: The name of the module
/// value: The name the module is aliased to
///
/// # Example 1
///
/// ```gleam
/// import mod1 as my_mod
/// ```
/// would result in:
/// - key: "mod1"
/// - value: "my_mod"
///
/// # Example 2
///
/// ```gleam
/// import mod1
/// ```
/// would result in:
/// - key: "mod1"
/// - value: "mod1"
///
imported_modules: HashMap<EcoString, (EcoString, SrcSpan)>,
/// Generic type parameters that have been annotated in the current
/// function.
///
/// key: The id of generic type that was annotated
/// value: The name that is used for the generic type in the annotation.
///
/// # Example 1
///
/// ```gleam
/// fn equal(x: something, y: something) -> Bool {
/// x == y
/// }
/// ```
///
/// key: <some id int>
/// value: `"something"`
///
type_variables: HashMap<u64, EcoString>,
/// Constructors which are imported in the current module in an
/// unqualified fashion.
///
/// key: (Defining module name, type name)
/// value: Alias name
///
/// # Example 1
///
/// ```gleam
/// import wibble.{Wobble}
/// ```
/// would result in
/// - key: `("wibble", "Wobble")`
/// - value: `"Wobble"`
///
/// # Example 2
///
/// ```gleam
/// import wibble.{Wobble as Woo}
/// ```
/// would result in
/// - key: `("wibble", "Wobble")`
/// - value: `"Woo"`
///
local_value_constructors: BiMap<(EcoString, EcoString), EcoString>,
/// A map containing information about public alias of internal types in
/// other packages. This is a common pattern in Gleam, in order to reexport
/// an internal type, without exposing its implementation details. Because
/// of this, we want to be able to properly handle this case, and use the
/// public alias rather than the internal underlying type. Since Gleam type
/// aliases are not part of the type system, we have to track them manually
/// here.
///
/// This is a mapping of internal types to their public aliases that we want
/// to favour over the internal types.
///
/// For example, if we had the following code:
///
/// ```gleam
/// // lustre/element.gleam
/// import lustre/internal
///
/// pub type Element(a) = internal.Element(a)
/// ```
///
/// This map would contain a key of `("lustre/internal", "Element")` with a
/// value of `("lustre/element", "Element")`. This can then be used to look
/// up the alias we want to print based on the type we are printing.
///
reexport_aliases: HashMap<(EcoString, EcoString), (EcoString, EcoString)>,
}
/// The `PartialEq` implementation for `Type` doesn't account for `TypeVar::Link`,
/// so we implement an equality check that does account for it here.
fn compare_arguments(arguments: &[Arc<Type>], parameters: &[Arc<Type>]) -> bool {
    // Lists of different lengths can never be pairwise equal; otherwise every
    // argument must be the same type as the corresponding parameter.
    arguments.len() == parameters.len()
        && arguments
            .iter()
            .zip(parameters.iter())
            .all(|(argument, parameter)| argument.same_as(parameter))
}
impl Names {
    /// Create an empty set of name information.
    pub fn new() -> Self {
        Self {
            local_types: Default::default(),
            imported_modules: Default::default(),
            type_variables: Default::default(),
            local_value_constructors: Default::default(),
            reexport_aliases: Default::default(),
        }
    }

    /// Record a named type in this module.
    ///
    /// Any type previously registered under the same local alias is removed,
    /// as it has been shadowed.
    pub fn named_type_in_scope(
        &mut self,
        module_name: EcoString,
        type_name: EcoString,
        local_alias: EcoString,
    ) {
        _ = self.local_types.remove_by_right(&local_alias);
        _ = self
            .local_types
            .insert((module_name, type_name), local_alias);
    }

    /// Record that `local_alias` is a name for the given type.
    ///
    /// The alias is only usable as a direct name for a named type when it
    /// exposes exactly the same type parameters in the same order; in every
    /// other case any previous registration of the alias is removed.
    pub fn type_in_scope(
        &mut self,
        local_alias: EcoString,
        type_: &Type,
        parameters: &[Arc<Type>],
    ) {
        match type_ {
            Type::Named {
                module,
                name,
                arguments,
                ..
            } if compare_arguments(arguments, parameters) => {
                self.named_type_in_scope(module.clone(), name.clone(), local_alias);
            }
            Type::Named { .. } | Type::Fn { .. } | Type::Var { .. } | Type::Tuple { .. } => {
                _ = self.local_types.remove_by_right(&local_alias);
            }
        }
    }

    /// Record a type variable in this module.
    pub fn type_variable_in_scope(&mut self, id: u64, local_alias: EcoString) {
        // The alias is not used after this point, so it can be moved straight
        // into the map without cloning.
        _ = self.type_variables.insert(id, local_alias);
    }

    /// Record an imported module in this module.
    ///
    /// Returns the location of the previous time this module was imported, if there was one.
    pub fn imported_module(
        &mut self,
        module_name: EcoString,
        module_alias: EcoString,
        location: SrcSpan,
    ) -> Option<SrcSpan> {
        self.imported_modules
            .insert(module_name, (module_alias, location))
            .map(|(_, location)| location)
    }

    /// Check whether a particular type alias is reexporting an internal type,
    /// and if so register it so we can print it correctly.
    pub fn maybe_register_reexport_alias(
        &mut self,
        package: &EcoString,
        alias_name: &EcoString,
        alias: &TypeAliasConstructor,
    ) {
        match alias.type_.as_ref() {
            Type::Named {
                publicity,
                package: type_package,
                module,
                name,
                arguments,
                ..
            } => {
                // We only count this alias as a reexport if it is:
                // - aliasing a type in the same package
                // - the type is internal
                // - the alias exposes the same type parameters as the internal type
                if type_package == package
                    && publicity.is_internal()
                    && compare_arguments(arguments, &alias.parameters)
                {
                    _ = self.reexport_aliases.insert(
                        (module.clone(), name.clone()),
                        (alias.module.clone(), alias_name.clone()),
                    );
                }
            }
            Type::Fn { .. } | Type::Var { .. } | Type::Tuple { .. } => {}
        }
    }

    /// Get the name and optional module qualifier for a named type.
    fn named_type<'a>(
        &'a self,
        module: &'a EcoString,
        name: &'a EcoString,
        print_mode: PrintMode,
    ) -> NameContextInformation<'a> {
        // When expanding aliases we skip any local names and print the
        // original type, qualified by its module where possible.
        if print_mode == PrintMode::ExpandAliases {
            if let Some((module, _)) = self.imported_modules.get(module) {
                return NameContextInformation::Qualified(module, name.as_str());
            };
            return NameContextInformation::Unimported(module, name);
        }
        let key = (module.clone(), name.clone());
        // There is a local name for this type, use that.
        if let Some(name) = self.local_types.get_by_left(&key) {
            return NameContextInformation::Unqualified(name.as_str());
        }
        // This is an internal type with a registered public re-export alias:
        // prefer the public alias over the internal name.
        if let Some((module, alias)) = self.reexport_aliases.get(&key) {
            if let Some((module, _)) = self.imported_modules.get(module) {
                return NameContextInformation::Qualified(module, alias);
            } else {
                return NameContextInformation::Unimported(module, alias);
            }
        }
        // This type is from a module that has been imported
        if let Some((module, _)) = self.imported_modules.get(module) {
            return NameContextInformation::Qualified(module, name.as_str());
        };
        NameContextInformation::Unimported(module, name)
    }

    /// Record a named value in this module.
    pub fn named_constructor_in_scope(
        &mut self,
        module_name: EcoString,
        value_name: EcoString,
        local_alias: EcoString,
    ) {
        _ = self.local_value_constructors.remove_by_right(&local_alias);
        // No clones needed here: the key parts and the alias can all be moved
        // into the map, consistent with `named_type_in_scope` above.
        _ = self
            .local_value_constructors
            .insert((module_name, value_name), local_alias);
    }

    /// Get the name and optional module qualifier for a named constructor.
    pub fn named_constructor<'a>(
        &'a self,
        module: &'a EcoString,
        name: &'a EcoString,
    ) -> NameContextInformation<'a> {
        let key = (module.clone(), name.clone());
        // There is a local name for this value, use that.
        if let Some(name) = self.local_value_constructors.get_by_left(&key) {
            return NameContextInformation::Unqualified(name.as_str());
        }
        // This value is from a module that has been imported
        if let Some((module, _)) = self.imported_modules.get(module) {
            return NameContextInformation::Qualified(module, name.as_str());
        };
        NameContextInformation::Unimported(module, name)
    }

    /// Whether the given module has been imported in this module.
    pub fn is_imported(&self, module: &str) -> bool {
        self.imported_modules.contains_key(module)
    }

    /// The programmer-annotated name for a type variable, if there is one.
    pub fn get_type_variable(&self, id: u64) -> Option<&EcoString> {
        self.type_variables.get(&id)
    }

    /// The registered public alias for an internal type, if there is one.
    pub fn reexport_alias(
        &self,
        module: EcoString,
        name: EcoString,
    ) -> Option<&(EcoString, EcoString)> {
        self.reexport_aliases.get(&(module, name))
    }
}
#[derive(Debug)]
pub enum NameContextInformation<'a> {
    /// This type is from a module that has not been imported in this module.
    /// Holds the module name and the type name.
    Unimported(&'a str, &'a str),
    /// This type has been imported in an unqualified fashion in this module.
    /// Holds the local name of the type.
    Unqualified(&'a str),
    /// This type is from a module that has been imported.
    /// Holds the module's local name and the type name.
    Qualified(&'a str, &'a str),
}
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum PrintMode {
    /// Prints the context-specific representation of a type, using any local
    /// aliases and module qualifiers that are in scope.
    Normal,
    /// Prints full detail of the given type, always qualified.
    /// Useful for providing more detail to the user.
    ///
    /// For example, with this code:
    /// ```gleam
    /// type A = Int
    /// ```
    /// If the type `gleam.Int` were printed using the `Normal` mode,
    /// we would print `A`, since that is the local alias for the `Int` type.
    ///
    /// However, if the user were hovering over the type `A` itself, it wouldn't be
    /// particularly helpful to print `A`.
    /// So with `ExpandAliases`, it would print `gleam.Int`,
    /// which tells the user exactly what type `A` represents.
    ///
    ExpandAliases,
}
/// A type printer that does not wrap and indent, but does take into account the
/// names that types and modules have been aliased with in the current module.
#[derive(Debug)]
pub struct Printer<'a> {
    /// The name information of the module the types are printed in.
    names: &'a Names,
    /// Counter used by `next_letter` to generate fresh type-variable names.
    uid: u64,
    /// Some type variables aren't bound to names, so when trying to print those,
    /// we need to create our own names which don't overlap with existing type variables.
    /// These two data structures store a mapping of IDs to created type-variable names,
    /// to ensure consistent printing, and the set of all printed names so that we don't
    /// create a type variable name which matches an existing one.
    ///
    /// Note: These are stored per printer, not per TypeNames struct, because:
    /// - It doesn't really matter what these are, as long as they are consistent.
    /// - We would need mutable access to the names struct, which isn't really possible
    ///   in many contexts.
    ///
    printed_type_variables: HashMap<u64, EcoString>,
    printed_type_variable_names: HashSet<EcoString>,
}
impl<'a> Printer<'a> {
    pub fn new(names: &'a Names) -> Self {
        Printer {
            names,
            uid: Default::default(),
            printed_type_variables: Default::default(),
            // Treat every annotated type variable in the module as taken so
            // freshly generated names cannot collide with them.
            printed_type_variable_names: names.type_variables.values().cloned().collect(),
        }
    }

    /// In the AST, type variables are represented by their IDs, not their names.
    /// This means that when we are printing a type variable, we either need to
    /// find its name that was given by the programmer, or generate a new one.
    /// Type variable names are local to functions, meaning there can be one
    /// named `a` in one function, and a different one named `a` in another
    /// function. However, there can't be two named `a` in the same function.
    ///
    /// By default, the printer avoids duplicating type variable names entirely.
    /// This is because we don't have easy access to information about which type
    /// variables belong to this function. In order to ensure no accidental
    /// collisions, we treat all type variables from the module as in scope, even
    /// though this isn't the case.
    ///
    /// When sufficient information is present to ensure type variables are not
    /// duplicated, `new_without_type_variables` can be used, in combination with
    /// `register_type_variable` in order to precisely control which variables
    /// are in scope.
    ///
    pub fn new_without_type_variables(names: &'a Names) -> Self {
        Printer {
            names,
            uid: Default::default(),
            printed_type_variables: Default::default(),
            printed_type_variable_names: Default::default(),
        }
    }

    /// Clear the registered type variable names. This allows the same `Printer`
    /// to be used in multiple different scopes, which have different sets of
    /// type variables. After clearing, the correct variables from the desired
    /// scope can be registered using `register_type_variable`.
    pub fn clear_type_variables(&mut self) {
        self.printed_type_variable_names.clear();
    }

    /// As explained in the documentation for `new_without_type_variables`, it
    /// is not always possible to determine which type variables are in scope.
    /// However, when it is possible, this function can be used to manually
    /// register which type variable names are in scope and cannot be used.
    pub fn register_type_variable(&mut self, name: EcoString) {
        _ = self.printed_type_variable_names.insert(name);
    }

    /// Print a type, using any local aliases and module qualifiers recorded
    /// in the `Names` this printer was constructed with.
    pub fn print_type(&mut self, type_: &Type) -> EcoString {
        let mut buffer = EcoString::new();
        self.print(type_, &mut buffer, PrintMode::Normal);
        buffer
    }

    /// Print the local name for a module: its alias if it has been imported,
    /// otherwise the last segment of its path.
    pub fn print_module(&self, module: &str) -> EcoString {
        match self.names.imported_modules.get(module) {
            Some((module, _)) => module.clone(),
            // `next_back` on the split is equivalent to `last` here, and
            // matches how unimported modules are handled in `print`.
            _ => module.split('/').next_back().unwrap_or(module).into(),
        }
    }

    /// Print a type fully qualified, ignoring any local aliases.
    pub fn print_type_without_aliases(&mut self, type_: &Type) -> EcoString {
        let mut buffer = EcoString::new();
        self.print(type_, &mut buffer, PrintMode::ExpandAliases);
        buffer
    }

    fn print(&mut self, type_: &Type, buffer: &mut EcoString, print_mode: PrintMode) {
        match type_ {
            Type::Named {
                name,
                arguments,
                module,
                ..
            } => {
                let (module, name) = match self.names.named_type(module, name, print_mode) {
                    NameContextInformation::Qualified(module, name) => (Some(module), name),
                    NameContextInformation::Unqualified(name) => (None, name),
                    // TODO: indicate that the module is not imported and as
                    // such needs to be, as well as how.
                    NameContextInformation::Unimported(module, name) => {
                        (module.split('/').next_back(), name)
                    }
                };
                if let Some(module) = module {
                    buffer.push_str(module);
                    buffer.push('.');
                }
                buffer.push_str(name);
                if !arguments.is_empty() {
                    buffer.push('(');
                    self.print_arguments(arguments, buffer, print_mode);
                    buffer.push(')');
                }
            }
            Type::Fn { arguments, return_ } => {
                buffer.push_str("fn(");
                self.print_arguments(arguments, buffer, print_mode);
                buffer.push_str(") -> ");
                self.print(return_, buffer, print_mode);
            }
            Type::Var { type_, .. } => match *type_.borrow() {
                TypeVar::Link { ref type_, .. } => self.print(type_, buffer, print_mode),
                TypeVar::Unbound { id, .. } | TypeVar::Generic { id, .. } => {
                    buffer.push_str(&self.type_variable(id))
                }
            },
            Type::Tuple { elements, .. } => {
                buffer.push_str("#(");
                self.print_arguments(elements, buffer, print_mode);
                buffer.push(')');
            }
        }
    }

    /// Print a value constructor, aliased or qualified as appropriate for the
    /// current module.
    pub fn print_constructor(&mut self, module: &EcoString, name: &EcoString) -> EcoString {
        let (module, name) = match self.names.named_constructor(module, name) {
            NameContextInformation::Qualified(module, name) => (Some(module), name),
            NameContextInformation::Unqualified(name) => (None, name),
            NameContextInformation::Unimported(module, name) => {
                (module.split('/').next_back(), name)
            }
        };
        match module {
            Some(module) => eco_format!("{module}.{name}"),
            None => name.into(),
        }
    }

    /// Print a comma-separated list of type arguments into `type_str`.
    fn print_arguments(
        &mut self,
        arguments: &[Arc<Type>],
        type_str: &mut EcoString,
        print_mode: PrintMode,
    ) {
        for (i, argument) in arguments.iter().enumerate() {
            self.print(argument, type_str, print_mode);
            if i < arguments.len() - 1 {
                type_str.push_str(", ");
            }
        }
    }

    /// A suitable name of a type variable.
    pub fn type_variable(&mut self, id: u64) -> EcoString {
        // Prefer the name the programmer gave this variable, if any.
        if let Some(name) = self.names.type_variables.get(&id) {
            return name.clone();
        }
        // Otherwise reuse the name this printer generated for it previously.
        if let Some(name) = self.printed_type_variables.get(&id) {
            return name.clone();
        }
        // Otherwise generate the first fresh name that isn't already taken.
        loop {
            let name = self.next_letter();
            if !self.printed_type_variable_names.contains(&name) {
                _ = self.printed_type_variable_names.insert(name.clone());
                _ = self.printed_type_variables.insert(id, name.clone());
                return name;
            }
        }
    }

    /// Generate the next type-variable name in the sequence
    /// `a, b, ..., z, aa, ab, ...` (bijective base 26).
    fn next_letter(&mut self) -> EcoString {
        let alphabet_length = 26;
        let char_offset = 97; // ASCII 'a'
        let mut chars = vec![];
        let mut n;
        let mut rest = self.uid;
        loop {
            n = rest % alphabet_length;
            rest /= alphabet_length;
            chars.push((n as u8 + char_offset) as char);
            if rest == 0 {
                break;
            }
            rest -= 1
        }
        self.uid += 1;
        chars.into_iter().rev().collect()
    }
}
#[test]
fn test_local_type() {
    let mut names = Names::new();
    names.named_type_in_scope("mod".into(), "Tiger".into(), "Cat".into());

    // `mod.Tiger` is locally aliased as `Cat`, so that is what gets printed.
    let tiger = Type::Named {
        module: "mod".into(),
        name: "Tiger".into(),
        arguments: vec![],
        publicity: crate::ast::Publicity::Public,
        package: "".into(),
        inferred_variant: None,
    };
    assert_eq!(Printer::new(&names).print_type(&tiger), "Cat");
}
#[test]
fn test_prelude_type() {
    let mut names = Names::new();
    names.named_type_in_scope("gleam".into(), "Int".into(), "Int".into());

    // The prelude type is in scope under its own name, so it is unqualified.
    let int = Type::Named {
        module: "gleam".into(),
        name: "Int".into(),
        arguments: vec![],
        publicity: crate::ast::Publicity::Public,
        package: "".into(),
        inferred_variant: None,
    };
    assert_eq!(Printer::new(&names).print_type(&int), "Int");
}
#[test]
fn test_shadowed_prelude_type() {
    let mut names = Names::new();
    names.named_type_in_scope("gleam".into(), "Int".into(), "Int".into());
    // `mod.Int` shadows the prelude `Int` under the same local name.
    names.named_type_in_scope("mod".into(), "Int".into(), "Int".into());

    // The shadowed prelude type must be printed fully qualified.
    let int = Type::Named {
        module: "gleam".into(),
        name: "Int".into(),
        arguments: vec![],
        publicity: crate::ast::Publicity::Public,
        package: "".into(),
        inferred_variant: None,
    };
    assert_eq!(Printer::new(&names).print_type(&int), "gleam.Int");
}
#[test]
fn test_generic_type_annotation() {
    let mut names = Names::new();
    names.type_variable_in_scope(0, "one".into());

    // The generic with id 0 was annotated as `one`, so that name is reused.
    let variable = Type::Var {
        type_: Arc::new(std::cell::RefCell::new(TypeVar::Generic { id: 0 })),
    };
    assert_eq!(Printer::new(&names).print_type(&variable), "one");
}
#[test]
fn test_generic_type_var() {
    let names = Names::new();
    let mut printer = Printer::new(&names);

    let unbound = |id| Type::Var {
        type_: Arc::new(std::cell::RefCell::new(TypeVar::Unbound { id })),
    };

    // Unnamed variables are assigned fresh names in alphabetical order.
    assert_eq!(printer.print_type(&unbound(0)), "a");
    assert_eq!(printer.print_type(&unbound(1)), "b");
}
#[test]
fn test_tuple_type() {
    // Helper to build a prelude type with the given name.
    let prelude = |name: &str| {
        Arc::new(Type::Named {
            module: "gleam".into(),
            name: name.into(),
            arguments: vec![],
            publicity: crate::ast::Publicity::Public,
            package: "".into(),
            inferred_variant: None,
        })
    };

    let names = Names::new();
    let tuple = Type::Tuple {
        elements: vec![prelude("Int"), prelude("String")],
    };
    // Neither element type has a local name, so both are printed qualified.
    assert_eq!(
        Printer::new(&names).print_type(&tuple),
        "#(gleam.Int, gleam.String)"
    );
}
#[test]
fn test_fn_type() {
    // Helper to build a prelude type with the given name.
    let prelude = |name: &str| {
        Arc::new(Type::Named {
            module: "gleam".into(),
            name: name.into(),
            arguments: vec![],
            publicity: crate::ast::Publicity::Public,
            package: "".into(),
            inferred_variant: None,
        })
    };

    let mut names = Names::new();
    names.named_type_in_scope("gleam".into(), "Int".into(), "Int".into());
    names.named_type_in_scope("gleam".into(), "Bool".into(), "Bool".into());

    // `Int` and `Bool` are in scope unqualified; `String` is not, so it is
    // printed with its module qualifier.
    let function = Type::Fn {
        arguments: vec![prelude("Int"), prelude("String")],
        return_: prelude("Bool"),
    };
    assert_eq!(
        Printer::new(&names).print_type(&function),
        "fn(Int, gleam.String) -> Bool"
    );
}
#[test]
fn test_module_alias() {
    let mut names = Names::new();
    let previous = names.imported_module("mod1".into(), "animals".into(), SrcSpan::new(50, 63));
    assert!(previous.is_none());

    // The module is imported under the alias `animals`, so the type is
    // qualified with that alias.
    let cat = Type::Named {
        module: "mod1".into(),
        name: "Cat".into(),
        arguments: vec![],
        publicity: crate::ast::Publicity::Public,
        package: "".into(),
        inferred_variant: None,
    };
    assert_eq!(Printer::new(&names).print_type(&cat), "animals.Cat");
}
#[test]
fn test_type_alias_and_generics() {
    let mut names = Names::new();
    names.named_type_in_scope("mod".into(), "Tiger".into(), "Cat".into());
    names.type_variable_in_scope(0, "one".into());

    let generic = Arc::new(Type::Var {
        type_: Arc::new(std::cell::RefCell::new(TypeVar::Generic { id: 0 })),
    });
    // Both the alias of the type and the annotated variable name are used.
    let tiger = Type::Named {
        module: "mod".into(),
        name: "Tiger".into(),
        arguments: vec![generic],
        publicity: crate::ast::Publicity::Public,
        package: "".into(),
        inferred_variant: None,
    };
    assert_eq!(Printer::new(&names).print_type(&tiger), "Cat(one)");
}
#[test]
fn test_unqualified_import_and_generic() {
    let mut names = Names::new();
    names.named_type_in_scope("mod".into(), "Cat".into(), "C".into());
    names.type_variable_in_scope(0, "one".into());

    let generic = Arc::new(Type::Var {
        type_: Arc::new(std::cell::RefCell::new(TypeVar::Generic { id: 0 })),
    });
    // The unqualified alias `C` and the annotated variable `one` are used.
    let cat = Type::Named {
        module: "mod".into(),
        name: "Cat".into(),
        arguments: vec![generic],
        publicity: crate::ast::Publicity::Public,
        package: "".into(),
        inferred_variant: None,
    };
    assert_eq!(Printer::new(&names).print_type(&cat), "C(one)");
}
#[test]
fn nested_module() {
    // An unimported module is printed using the last segment of its path.
    let cat = Type::Named {
        module: "one/two/three".into(),
        name: "Cat".into(),
        arguments: vec![],
        publicity: crate::ast::Publicity::Public,
        package: "".into(),
        inferred_variant: None,
    };
    let names = Names::new();
    assert_eq!(Printer::new(&names).print_type(&cat), "three.Cat");
}
#[test]
fn test_unqualified_import_and_module_alias() {
    let mut names = Names::new();
    let previous = names.imported_module("mod1".into(), "animals".into(), SrcSpan::new(76, 93));
    assert!(previous.is_none());
    // The type is additionally imported unqualified, under the alias `C`.
    _ = names
        .local_types
        .insert(("mod1".into(), "Cat".into()), "C".into());

    let cat = Type::Named {
        module: "mod1".into(),
        name: "Cat".into(),
        arguments: vec![],
        publicity: crate::ast::Publicity::Public,
        package: "".into(),
        inferred_variant: None,
    };
    // The unqualified alias takes precedence over the module qualifier.
    assert_eq!(Printer::new(&names).print_type(&cat), "C");
}
#[test]
fn test_module_imports() {
    let mut names = Names::new();
    let previous = names.imported_module("mod".into(), "animals".into(), SrcSpan::new(76, 93));
    assert!(previous.is_none());
    _ = names
        .local_types
        .insert(("mod2".into(), "Cat".into()), "Cat".into());

    // Helper building a `Cat` type from the given module.
    let cat = |module: &str| Type::Named {
        module: module.into(),
        name: "Cat".into(),
        arguments: vec![],
        publicity: crate::ast::Publicity::Public,
        package: "".into(),
        inferred_variant: None,
    };

    let mut printer = Printer::new(&names);
    // `mod` is imported as `animals`; `mod2.Cat` has an unqualified name.
    assert_eq!(printer.print_type(&cat("mod")), "animals.Cat");
    assert_eq!(printer.print_type(&cat("mod2")), "Cat");
}
#[test]
fn test_multiple_generic_annotations() {
    let mut names = Names::new();
    names.type_variable_in_scope(0, "one".into());
    names.type_variable_in_scope(1, "two".into());

    let generic = |id| {
        Arc::new(Type::Var {
            type_: Arc::new(std::cell::RefCell::new(TypeVar::Generic { id })),
        })
    };
    let tiger = Type::Named {
        module: "tigermodule".into(),
        name: "Tiger".into(),
        arguments: vec![generic(0), generic(1)],
        publicity: crate::ast::Publicity::Public,
        package: "".into(),
        inferred_variant: None,
    };

    let mut printer = Printer::new(&names);
    // Annotated variables keep their names; the unannotated one gets a
    // freshly generated `a`.
    assert_eq!(printer.print_type(&tiger), "tigermodule.Tiger(one, two)");
    assert_eq!(printer.print_type(&generic(2)), "a");
}
#[test]
fn test_variable_name_already_in_scope() {
    let mut names = Names::new();
    names.type_variable_in_scope(1, "a".into());
    names.type_variable_in_scope(2, "b".into());

    let mut printer = Printer::new(&names);
    let generic = |id| Type::Var {
        type_: Arc::new(std::cell::RefCell::new(TypeVar::Generic { id })),
    };

    // Generated names skip `a` and `b`, which are taken by annotations.
    assert_eq!(printer.print_type(&generic(0)), "c");
    assert_eq!(printer.print_type(&generic(1)), "a");
    assert_eq!(printer.print_type(&generic(2)), "b");
    assert_eq!(printer.print_type(&generic(3)), "d");
}
| rust | Apache-2.0 | f424547f02e621f1c5f28749786e05eda7feb098 | 2026-01-04T15:40:22.554517Z | false |
gleam-lang/gleam | https://github.com/gleam-lang/gleam/blob/f424547f02e621f1c5f28749786e05eda7feb098/compiler-core/src/type_/expression.rs | compiler-core/src/type_/expression.rs | use super::{pipe::PipeTyper, *};
use crate::{
STDLIB_PACKAGE_NAME,
analyse::{Inferred, infer_bit_array_option, name::check_argument_names},
ast::{
Arg, Assert, Assignment, AssignmentKind, BinOp, BitArrayOption, BitArraySegment,
CAPTURE_VARIABLE, CallArg, Clause, ClauseGuard, Constant, FunctionLiteralKind, HasLocation,
ImplicitCallArgOrigin, InvalidExpression, Layer, RECORD_UPDATE_VARIABLE,
RecordBeingUpdated, SrcSpan, Statement, TodoKind, TypeAst, TypedArg, TypedAssert,
TypedAssignment, TypedClause, TypedClauseGuard, TypedConstant, TypedExpr,
TypedMultiPattern, TypedStatement, USE_ASSIGNMENT_VARIABLE, UntypedArg, UntypedAssert,
UntypedAssignment, UntypedClause, UntypedClauseGuard, UntypedConstant,
UntypedConstantBitArraySegment, UntypedExpr, UntypedExprBitArraySegment,
UntypedMultiPattern, UntypedStatement, UntypedUse, UntypedUseAssignment, Use,
UseAssignment,
},
build::Target,
exhaustiveness::{self, CompileCaseResult, CompiledCase, Reachability},
parse::{LiteralFloatValue, PatternPosition},
reference::ReferenceKind,
};
use ecow::eco_format;
use hexpm::version::{LowestVersion, Version};
use im::hashmap;
use itertools::Itertools;
use num_bigint::BigInt;
use vec1::Vec1;
#[derive(Clone, Copy, Debug, Eq, PartialOrd, Ord, PartialEq, Serialize)]
pub struct Implementations {
    /// Whether the function has a pure-gleam implementation.
    ///
    /// It's important to notice that, even if all individual targets are
    /// supported, it would not be the same as being pure Gleam.
    /// Imagine this scenario:
    ///
    /// ```gleam
    /// @external(javascript, "wibble", "wobble")
    /// @external(erlang, "wibble", "wobble")
    /// pub fn func() -> Int
    /// ```
    ///
    /// `func` supports all _current_ Gleam targets; however, if a new target
    /// is added - say a WASM target - `func` wouldn't support it! On the other
    /// hand, a pure Gleam function will support all future targets.
    pub gleam: bool,
    /// Whether the function can run on the Erlang target, either because it
    /// has a Gleam body or an Erlang external implementation.
    pub can_run_on_erlang: bool,
    /// Whether the function can run on the JavaScript target, either because
    /// it has a Gleam body or a JavaScript external implementation.
    pub can_run_on_javascript: bool,
    /// Whether the function has an implementation that uses external erlang
    /// code.
    pub uses_erlang_externals: bool,
    /// Whether the function has an implementation that uses external javascript
    /// code.
    pub uses_javascript_externals: bool,
}
impl Implementations {
    /// The implementations of something written in pure Gleam: every target
    /// is supported and no externals are used.
    pub fn supporting_all() -> Self {
        Self {
            gleam: true,
            can_run_on_erlang: true,
            can_run_on_javascript: true,
            uses_erlang_externals: false,
            uses_javascript_externals: false,
        }
    }
}
/// The purity of a function.
///
/// This is not actually proper purity tracking, rather an approximation, which
/// is good enough for the purpose it is currently used for: warning for unused
/// pure functions. The current system contains some false negatives, i.e. some
/// cases where it will fail to emit a warning when it probably should.
///
/// If we wanted to properly track function side effects - say to perform
/// optimisations on pure Gleam code - we would probably need to lift that
/// tracking into the type system, the same way that variant inference currently
/// works. This would require quite a lot of work and doesn't seem a worthwhile
/// amount of effort for a single warning message, where a much simpler solution
/// is generally going to be good enough.
///
/// In the future we may want to implement a full side effect tracking system;
/// this current implementation will not be sufficient for anything beyond a
/// warning message to help people out in certain cases.
///
/// See `Purity::merge` for how purities combine when one function uses another.
///
#[derive(Debug, Clone, Copy, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
pub enum Purity {
    /// The function is in pure Gleam, and does not reference any language
    /// feature that can cause side effects, such as `panic`, `assert` or `echo`.
    /// It also does not call any impure functions.
    Pure,
    /// This function is part of the standard library, or an otherwise trusted
    /// source, and though it might use FFI, we can trust that the FFI function
    /// will not cause any side effects.
    TrustedPure,
    /// This function is impure because it either uses FFI, panics, uses `echo`,
    /// or calls another impure function.
    Impure,
    /// We don't know the purity of this function. This highlights the main issue
    /// with the current purity tracking system. In the following code for example:
    ///
    /// ```gleam
    /// let f = function.identity
    ///
    /// f(10)
    /// ```
    ///
    /// Since purity is not currently part of the type system, when analysing the
    /// call of the local `f` function, we now have no information about the
    /// purity of it, and therefore cannot infer the consequences of calling it.
    ///
    /// If there was a `purity` or `side_effects` field in the `Type::Fn` variant,
    /// we would be able to properly infer it.
    ///
    Unknown,
}
impl Purity {
    /// Whether this purity level is considered pure. `Unknown` is
    /// conservatively treated as not pure.
    pub fn is_pure(&self) -> bool {
        match self {
            Purity::Pure | Purity::TrustedPure => true,
            Purity::Impure | Purity::Unknown => false,
        }
    }

    /// Combine the purity of the current function (`self`) with the purity of
    /// something it uses, such as a function it calls (`other`).
    #[must_use]
    pub fn merge(self, other: Purity) -> Purity {
        match (self, other) {
            // If we call a trusted pure function, the current function remains pure
            (Purity::Pure, Purity::TrustedPure) => Purity::Pure,
            (Purity::Pure, other) => other,
            // If we call a pure function, the current function remains trusted pure
            (Purity::TrustedPure, Purity::Pure) => Purity::TrustedPure,
            (Purity::TrustedPure, other) => other,
            // Nothing can make an already impure function pure again
            (Purity::Impure, _) => Purity::Impure,
            // If we call an impure function from a function we don't know the
            // purity of, we are now certain that it is impure.
            (Purity::Unknown, Purity::Impure) => Purity::Impure,
            // Calling pure or unknown code leaves the purity unknown: we still
            // can't tell whether this function has side effects or not.
            // (Previously this arm returned `Impure`, which contradicted the
            // dedicated `(Unknown, Impure)` arm above and wrongly marked
            // unknown-purity functions as impure when they called pure code.)
            (Purity::Unknown, _) => Purity::Unknown,
        }
    }
}
/// Tracking whether the function being currently type checked has external
/// implementations or not.
/// This is used to determine whether an error should be raised in the case when
/// a value is used that does not have an implementation for the current target.
#[derive(Clone, Copy, Debug)]
pub struct FunctionDefinition {
    /// The function has { ... } after the function head
    pub has_body: bool,
    /// The function has @external(erlang, "...", "...")
    pub has_erlang_external: bool,
    /// The function has @external(javascript, "...", "...")
    pub has_javascript_external: bool,
}
impl FunctionDefinition {
    /// Whether this function has an `@external` implementation for the given
    /// compilation target.
    pub fn has_external_for_target(&self, target: Target) -> bool {
        match target {
            Target::JavaScript => self.has_javascript_external,
            Target::Erlang => self.has_erlang_external,
        }
    }
}
impl Implementations {
    /// Given the implementations of a function, update those taking into
    /// account the `implementations` of another function (or constant) used
    /// inside its body.
    pub fn update_from_use(
        &mut self,
        implementations: &Implementations,
        current_function_definition: &FunctionDefinition,
    ) {
        // With this pattern matching we won't forget to deal with new targets
        // when those are added :)
        let Implementations {
            gleam,
            uses_erlang_externals: other_uses_erlang_externals,
            uses_javascript_externals: other_uses_javascript_externals,
            can_run_on_erlang: other_can_run_on_erlang,
            can_run_on_javascript: other_can_run_on_javascript,
        } = implementations;
        let FunctionDefinition {
            has_body: _,
            has_erlang_external,
            has_javascript_external,
        } = current_function_definition;
        // If a pure-Gleam function uses a function that doesn't have a pure
        // Gleam implementation, then it's no longer pure-Gleam.
        self.gleam = self.gleam && *gleam;
        // A function can run on a target if it has an external implementation
        // for that target, or if the code that it uses can run on that target.
        self.can_run_on_erlang = *has_erlang_external
            || (self.can_run_on_erlang && (*gleam || *other_can_run_on_erlang));
        self.can_run_on_javascript = *has_javascript_external
            || (self.can_run_on_javascript && (*gleam || *other_can_run_on_javascript));
        // If a function uses a function that relies on external code (be it
        // javascript or erlang) then it's considered as using external code as
        // well.
        //
        // For example:
        // ```gleam
        // @external(erlang, "wibble", "wobble")
        // pub fn erlang_only_with_pure_gleam_default() -> Int {
        //   1 + 1
        // }
        //
        // pub fn main() { erlang_only_with_pure_gleam_default() }
        // ```
        // Both functions will end up using external erlang code and have the
        // following implementations:
        // `Implementations { gleam: true, uses_erlang_externals: true, uses_javascript_externals: false}`.
        // They have a pure gleam implementation and an erlang specific external
        // implementation.
        self.uses_erlang_externals = self.uses_erlang_externals || *other_uses_erlang_externals;
        self.uses_javascript_externals =
            self.uses_javascript_externals || *other_uses_javascript_externals;
    }
    /// Returns true if the current target is supported by the given
    /// implementations.
    /// If something has a pure gleam implementation then it supports all
    /// targets automatically.
    pub fn supports(&self, target: Target) -> bool {
        self.gleam
            || match target {
                Target::Erlang => self.can_run_on_erlang,
                Target::JavaScript => self.can_run_on_javascript,
            }
    }
}
/// This is used to tell apart regular function calls and `use` expressions:
/// a `use` is still typed as if it were a normal function call but we want to
/// be able to tell the difference in order to provide better error messages.
///
#[derive(Eq, PartialEq, Debug, Copy, Clone)]
pub enum CallKind {
    /// A regular function call written explicitly in the source code.
    Function,
    /// A call that comes from a `use` expression.
    Use {
        call_location: SrcSpan,
        assignments_location: SrcSpan,
        last_statement_location: SrcSpan,
    },
}
/// This is used to tell apart regular call arguments and the callback that is
/// implicitly passed to a `use` function call.
/// Both are going to be typed as usual but we want to tell them apart in order
/// to report better error messages for `use` expressions.
///
#[derive(Eq, PartialEq, Debug, Copy, Clone)]
pub enum ArgumentKind {
    /// An argument written explicitly by the programmer.
    Regular,
    /// The callback implicitly passed to the function called by a `use`
    /// expression.
    UseCallback {
        function_location: SrcSpan,
        assignments_location: SrcSpan,
        last_statement_location: SrcSpan,
    },
}
#[derive(Debug)]
pub(crate) struct ExprTyper<'a, 'b> {
    /// The environment the expression is typed in.
    pub(crate) environment: &'a mut Environment<'b>,
    /// The minimum Gleam version required to compile the typed expression.
    pub minimum_required_version: Version,
    /// This is set to true if the previous expression that has been typed is
    /// determined to always panic.
    /// For example when typing a literal `panic`, this flag will be set to true.
    /// The same goes, for example, if the branches of a case expression all
    /// panic.
    pub(crate) previous_panics: bool,
    /// This is used to track if we've already warned for unreachable code.
    /// After emitting the first unreachable code warning we never emit another
    /// one to avoid flooding with repetitive warnings.
    pub(crate) already_warned_for_unreachable_code: bool,
    /// The target/externals support of the function currently being typed.
    pub(crate) implementations: Implementations,
    /// The purity of the function currently being typed.
    pub(crate) purity: Purity,
    /// The body/externals information of the function currently being typed.
    pub(crate) current_function_definition: FunctionDefinition,
    /// Type hydrator for creating types from annotations.
    pub(crate) hydrator: Hydrator,
    /// Accumulated errors and warnings found while typing the expression.
    pub(crate) problems: &'a mut Problems,
}
impl<'a, 'b> ExprTyper<'a, 'b> {
    /// Create a typer for the body of the given function `definition`.
    ///
    /// The starting `Implementations` and `Purity` are derived from whether
    /// the function has a Gleam body and/or external implementations.
    pub fn new(
        environment: &'a mut Environment<'b>,
        definition: FunctionDefinition,
        problems: &'a mut Problems,
    ) -> Self {
        let mut hydrator = Hydrator::new();
        let implementations = Implementations {
            // We start assuming the function is pure Gleam and narrow it down
            // if we run into functions/constants that have only external
            // implementations for some of the targets.
            gleam: definition.has_body,
            can_run_on_erlang: definition.has_body || definition.has_erlang_external,
            can_run_on_javascript: definition.has_body || definition.has_javascript_external,
            uses_erlang_externals: definition.has_erlang_external,
            uses_javascript_externals: definition.has_javascript_external,
        };
        // Only the external for the current compile target matters when
        // deciding the starting purity.
        let uses_externals = match environment.target {
            Target::Erlang => implementations.uses_erlang_externals,
            Target::JavaScript => implementations.uses_javascript_externals,
        };
        let purity = if is_trusted_pure_module(environment) {
            // The standard library uses a lot of FFI, but as we are the maintainers we know that
            // it can be trusted to be pure.
            Purity::TrustedPure
        } else if uses_externals {
            Purity::Impure
        } else {
            Purity::Pure
        };
        // Function bodies may contain `_` type holes in annotations.
        hydrator.permit_holes(true);
        Self {
            hydrator,
            previous_panics: false,
            already_warned_for_unreachable_code: false,
            environment,
            implementations,
            purity,
            current_function_definition: definition,
            minimum_required_version: Version::new(0, 1, 0),
            problems,
        }
    }
fn in_new_scope<T, E>(
&mut self,
process_scope: impl FnOnce(&mut Self) -> Result<T, E>,
) -> Result<T, E> {
self.scoped(|this| {
let result = process_scope(this);
let was_successful = result.is_ok();
(result, was_successful)
})
}
fn value_in_new_scope<A>(&mut self, process_scope: impl FnOnce(&mut Self) -> A) -> A {
self.scoped(|this| (process_scope(this), true))
}
fn expr_in_new_scope(
&mut self,
process_scope: impl FnOnce(&mut Self) -> TypedExpr,
) -> TypedExpr {
self.scoped(|this| {
let expr = process_scope(this);
let was_successful = !expr.is_invalid();
(expr, was_successful)
})
}
    /// Open a new scope in both the environment and the hydrator, run
    /// `process_scope`, then close both scopes. The `bool` returned by the
    /// closure tells the environment whether the scope was typed
    /// successfully.
    fn scoped<A>(&mut self, process_scope: impl FnOnce(&mut Self) -> (A, bool)) -> A {
        // Create new scope
        let environment_reset_data = self.environment.open_new_scope();
        let hydrator_reset_data = self.hydrator.open_new_scope();
        // Process the scope
        let (result, was_successful) = process_scope(self);
        // Close scope, discarding any scope local state
        self.environment
            .close_scope(environment_reset_data, was_successful, self.problems);
        self.hydrator.close_scope(hydrator_reset_data);
        result
    }
    /// Construct a `Type` from a type annotation AST, delegating to this
    /// expression's hydrator.
    pub fn type_from_ast(&mut self, ast: &TypeAst) -> Result<Arc<Type>, Error> {
        self.hydrator
            .type_from_ast(ast, self.environment, self.problems)
    }
    /// Instantiate a (possibly generic) type for a particular usage. The
    /// shared `ids` map ensures repeated type variables are instantiated
    /// consistently within one instantiation.
    fn instantiate(&mut self, t: Arc<Type>, ids: &mut im::HashMap<u64, Arc<Type>>) -> Arc<Type> {
        self.environment.instantiate(t, ids, &self.hydrator)
    }
    /// Create a fresh unbound type variable.
    pub fn new_unbound_var(&mut self) -> Arc<Type> {
        self.environment.new_unbound_var()
    }
    /// Infer the type of a single untyped expression, returning an `Err` if
    /// it cannot be typed.
    pub fn infer_or_error(&mut self, expr: UntypedExpr) -> Result<TypedExpr, Error> {
        // Anything following an expression that always panics is unreachable.
        if self.previous_panics {
            self.warn_for_unreachable_code(expr.location(), PanicPosition::PreviousExpression);
        }
        match expr {
            UntypedExpr::Todo {
                location,
                message: label,
                kind,
                ..
            } => Ok(self.infer_todo(location, kind, label)),
            UntypedExpr::Panic {
                location, message, ..
            } => Ok(self.infer_panic(location, message)),
            UntypedExpr::Echo {
                location,
                keyword_end,
                expression,
                message,
            } => Ok(self.infer_echo(location, keyword_end, expression, message)),
            UntypedExpr::Var { location, name, .. } => {
                self.infer_var(name, location, ReferenceRegistration::Register)
            }
            UntypedExpr::Int {
                location,
                value,
                int_value,
                ..
            } => {
                // Int literals get an extra safety check on the JavaScript
                // target, unless an external implementation replaces this
                // function there.
                if self.environment.target == Target::JavaScript
                    && !self.current_function_definition.has_javascript_external
                {
                    check_javascript_int_safety(&int_value, location, self.problems);
                }
                Ok(self.infer_int(value, int_value, location))
            }
            UntypedExpr::Block {
                statements,
                location,
            } => Ok(self.infer_block(statements, location)),
            UntypedExpr::Tuple {
                location, elements, ..
            } => Ok(self.infer_tuple(elements, location)),
            UntypedExpr::Float {
                location,
                value,
                float_value,
            } => {
                check_float_safety(float_value, location, self.problems);
                Ok(self.infer_float(value, float_value, location))
            }
            UntypedExpr::String {
                location, value, ..
            } => Ok(self.infer_string(value, location)),
            UntypedExpr::PipeLine { expressions } => Ok(self.infer_pipeline(expressions)),
            UntypedExpr::Fn {
                location,
                kind,
                arguments,
                body,
                return_annotation,
                ..
            } => Ok(self.infer_fn(arguments, &[], body, kind, return_annotation, location)),
            UntypedExpr::Case {
                location,
                subjects,
                clauses,
                ..
            } => Ok(self.infer_case(subjects, clauses, location)),
            UntypedExpr::List {
                location,
                elements,
                tail,
            } => Ok(self.infer_list(elements, tail, location)),
            UntypedExpr::Call {
                location,
                fun,
                arguments,
                ..
            } => Ok(self.infer_call(*fun, arguments, location, CallKind::Function)),
            UntypedExpr::BinOp {
                location,
                name,
                name_location,
                left,
                right,
            } => Ok(self.infer_binop(name, name_location, *left, *right, location)),
            UntypedExpr::FieldAccess {
                label_location,
                label,
                container,
                location,
            } => Ok(self.infer_field_access(
                *container,
                location,
                label,
                label_location,
                FieldAccessUsage::Other,
            )),
            UntypedExpr::TupleIndex {
                location,
                index,
                tuple,
                ..
            } => self.infer_tuple_index(*tuple, index, location),
            UntypedExpr::BitArray { location, segments } => {
                self.infer_bit_array(segments, location)
            }
            UntypedExpr::RecordUpdate {
                location,
                constructor,
                record,
                arguments,
            } => self.infer_record_update(*constructor, record, arguments, location),
            UntypedExpr::NegateBool { location, value } => {
                Ok(self.infer_negate_bool(location, *value))
            }
            UntypedExpr::NegateInt { location, value } => {
                Ok(self.infer_negate_int(location, *value))
            }
        }
    }
    /// Type a `|>` pipeline by handing it over to the pipe typer, which
    /// rewrites it into a series of assignments.
    fn infer_pipeline(&mut self, expressions: Vec1<UntypedExpr>) -> TypedExpr {
        PipeTyper::infer(self, expressions)
    }
    /// Type a `todo` expression, warning that the code is unfinished. The
    /// todo is typed as a fresh unbound variable so it can appear anywhere.
    fn infer_todo(
        &mut self,
        location: SrcSpan,
        kind: TodoKind,
        message: Option<Box<UntypedExpr>>,
    ) -> TypedExpr {
        // Type the todo as whatever it would need to be to type check.
        let type_ = self.new_unbound_var();
        // Emit a warning that there is a todo in the code.
        let warning_location = match kind {
            TodoKind::Keyword | TodoKind::IncompleteUse | TodoKind::EmptyBlock => location,
            // For an empty function, highlight the function itself.
            TodoKind::EmptyFunction { function_location } => function_location,
        };
        self.problems.warning(Warning::Todo {
            kind,
            location: warning_location,
            type_: type_.clone(),
        });
        // A todo crashes at runtime, so the surrounding function is impure.
        self.purity = Purity::Impure;
        // The optional message must be a `String`.
        let message = message.map(|message| Box::new(self.infer_and_unify(*message, string())));
        TypedExpr::Todo {
            location,
            type_,
            message,
        }
    }
    /// Type a `panic` expression. It is typed as a fresh unbound variable so
    /// it can appear anywhere, and marks all following code as unreachable
    /// via `previous_panics`.
    fn infer_panic(&mut self, location: SrcSpan, message: Option<Box<UntypedExpr>>) -> TypedExpr {
        let type_ = self.new_unbound_var();
        // Panicking is a side effect, so the function is impure.
        self.purity = Purity::Impure;
        // The optional message must be a `String`.
        let message = message.map(|message| Box::new(self.infer_and_unify(*message, string())));
        self.previous_panics = true;
        TypedExpr::Panic {
            location,
            type_,
            message,
        }
    }
    /// Type an `echo` expression. The echo takes on the type of the printed
    /// expression; an `echo` with no following expression is an error here
    /// (a bare `echo` as a pipeline step is handled by the pipe typer).
    fn infer_echo(
        &mut self,
        location: SrcSpan,
        keyword_end: u32,
        expression: Option<Box<UntypedExpr>>,
        message: Option<Box<UntypedExpr>>,
    ) -> TypedExpr {
        self.environment.echo_found = true;
        // Printing is a side effect.
        self.purity = Purity::Impure;
        let expression = if let Some(expression) = expression {
            let expression = self.infer(*expression);
            if self.previous_panics {
                self.warn_for_unreachable_code(location, PanicPosition::EchoExpression);
            }
            expression
        } else {
            // Point the error at the `echo` keyword itself.
            let location = SrcSpan {
                start: location.start,
                end: keyword_end,
            };
            self.problems
                .error(Error::EchoWithNoFollowingExpression { location });
            self.error_expr(location)
        };
        TypedExpr::Echo {
            location,
            type_: expression.type_(),
            expression: Some(Box::new(expression)),
            message: message.map(|message| Box::new(self.infer_and_unify(*message, string()))),
        }
    }
pub(crate) fn warn_for_unreachable_code(
&mut self,
location: SrcSpan,
panic_position: PanicPosition,
) {
// We don't want to warn twice for unreachable code inside the same
// block, so we have to keep track if we've already emitted a warning of
// this kind.
if !self.already_warned_for_unreachable_code {
self.already_warned_for_unreachable_code = true;
self.problems.warning(Warning::UnreachableCodeAfterPanic {
location,
panic_position,
})
}
}
fn infer_string(&mut self, value: EcoString, location: SrcSpan) -> TypedExpr {
TypedExpr::String {
location,
value,
type_: string(),
}
}
fn infer_int(&mut self, value: EcoString, int_value: BigInt, location: SrcSpan) -> TypedExpr {
TypedExpr::Int {
location,
value,
int_value,
type_: int(),
}
}
fn infer_float(
&mut self,
value: EcoString,
float_value: LiteralFloatValue,
location: SrcSpan,
) -> TypedExpr {
TypedExpr::Float {
location,
value,
float_value,
type_: float(),
}
}
    /// Emit a warning if the given expression should not be discarded.
    /// e.g. because it's a literal (why was it made in the first place?)
    /// e.g. because it's of the `Result` type (errors should be handled)
    fn expression_discarded(&mut self, discarded: &TypedExpr) {
        if discarded.is_literal() {
            self.problems.warning(Warning::UnusedLiteral {
                location: discarded.location(),
            });
        } else if discarded.type_().is_result() {
            self.problems.warning(Warning::ImplicitlyDiscardedResult {
                location: discarded.location(),
            });
        } else if discarded.is_pure_value_constructor() {
            self.problems.warning(Warning::UnusedValue {
                location: discarded.location(),
            })
        }
    }
pub(crate) fn infer_statements(
&mut self,
untyped: Vec1<UntypedStatement>,
) -> Vec1<TypedStatement> {
let count = untyped.len();
let location = SrcSpan::new(
untyped.first().location().start,
untyped.last().location().end,
);
self.infer_iter_statements(location, count, untyped.into_iter())
}
// Helper to create a new error expr.
fn error_expr(&mut self, location: SrcSpan) -> TypedExpr {
TypedExpr::Invalid {
location,
type_: self.new_unbound_var(),
extra_information: None,
}
}
    /// Helper to create a new error expr carrying optional extra information
    /// about why the expression is invalid. The invalid expression gets a
    /// fresh unbound type so typing can continue around it.
    fn error_expr_with_information(
        &mut self,
        location: SrcSpan,
        extra_information: Option<InvalidExpression>,
    ) -> TypedExpr {
        TypedExpr::Invalid {
            location,
            type_: self.new_unbound_var(),
            extra_information,
        }
    }
    /// Type a sequence of statements.
    ///
    /// `location` spans the whole sequence and `count` is the total number
    /// of statements; the count is used to detect the final expression,
    /// whose value is the sequence's result and so must not trigger
    /// "discarded value" warnings.
    fn infer_iter_statements<StatementsIter: Iterator<Item = UntypedStatement>>(
        &mut self,
        location: SrcSpan,
        count: usize,
        mut untyped: StatementsIter,
    ) -> Vec1<TypedStatement> {
        // 1-based index of the statement currently being typed.
        let mut i = 0;
        let mut statements: Vec<TypedStatement> = Vec::with_capacity(count);
        while let Some(statement) = untyped.next() {
            i += 1;
            match statement {
                Statement::Use(use_) => {
                    // A `use` swallows all the following statements into its
                    // callback, so we hand over the rest of the iterator.
                    let statement = self.infer_use(use_, location, untyped.collect());
                    statements.push(statement);
                    break; // Inferring the use has consumed the rest of the exprs
                }
                Statement::Expression(expression) => {
                    let location = expression.location();
                    let expression = match self.infer_or_error(expression) {
                        Ok(expression) => expression,
                        Err(error) => {
                            // Record the error and continue with an invalid
                            // placeholder so later statements still get typed.
                            self.problems.error(error);
                            self.error_expr(location)
                        }
                    };
                    // This isn't the final expression in the sequence, so call the
                    // `expression_discarded` function to see if anything is being
                    // discarded that we think shouldn't be.
                    if i < count {
                        self.expression_discarded(&expression);
                    }
                    statements.push(Statement::Expression(expression));
                }
                Statement::Assignment(assignment) => {
                    let assignment = self.infer_assignment(*assignment);
                    statements.push(Statement::Assignment(Box::new(assignment)));
                }
                Statement::Assert(assert) => {
                    let assert = self.infer_assert(assert);
                    statements.push(Statement::Assert(assert));
                }
            }
        }
        Vec1::try_from_vec(statements).expect("empty sequence")
    }
    /// Type a `use` expression by desugaring it into a function call: the
    /// statements following the `use` become the body of a callback function
    /// that is appended as the final, implicit argument of the call on the
    /// right hand side of `<-`.
    fn infer_use(
        &mut self,
        use_: UntypedUse,
        sequence_location: SrcSpan,
        mut following_expressions: Vec<UntypedStatement>,
    ) -> TypedStatement {
        let use_call_location = use_.call.location();
        let mut call = get_use_expression_call(*use_.call);
        let assignments = UseAssignments::from_use_expression(use_.assignments);
        let assignments_count = assignments.body_assignments.len();
        let mut statements = assignments.body_assignments;
        if following_expressions.is_empty() {
            // A `use` with nothing after it gets an implicit todo as its
            // callback body (and a warning via `TodoKind::IncompleteUse`).
            let todo = Statement::Expression(UntypedExpr::Todo {
                location: use_.location,
                message: None,
                kind: TodoKind::IncompleteUse,
            });
            statements.push(todo);
        } else {
            statements.append(&mut following_expressions);
        }
        let statements = Vec1::try_from_vec(statements).expect("safe: todo added above");
        // We need this to report good error messages in case there's a type error
        // in use. We consider `use` to be the last statement of a block since
        // it consumes everything that comes below it and returns a single value.
        let last_statement_location = statements
            .iter()
            .find_or_last(|statement| statement.is_use())
            .expect("safe: iter from non empty vec")
            .location();
        let first = statements.first().location();
        // Collect the following expressions into a function to be passed as a
        // callback to the use's call function.
        let callback = UntypedExpr::Fn {
            arguments: assignments.function_arguments,
            location: SrcSpan::new(first.start, sequence_location.end),
            end_of_head_byte_index: sequence_location.end,
            return_annotation: None,
            kind: FunctionLiteralKind::Use {
                location: use_.location,
            },
            body: statements,
        };
        // Add this new callback function to the arguments to function call
        call.arguments.push(CallArg {
            label: None,
            location: SrcSpan::new(first.start, sequence_location.end),
            value: callback,
            // This argument is implicitly given by Gleam's use syntax so we
            // mark it as such.
            implicit: Some(ImplicitCallArgOrigin::Use),
        });
        let call_location = SrcSpan {
            start: use_.location.start,
            end: sequence_location.end,
        };
        // We use `stacker` to prevent overflowing the stack when many `use`
        // expressions are chained. See https://github.com/gleam-lang/gleam/issues/4287
        let infer_call = || {
            self.infer_call(
                *call.function,
                call.arguments,
                call_location,
                CallKind::Use {
                    call_location: use_call_location,
                    assignments_location: use_.assignments_location,
                    last_statement_location,
                },
            )
        };
        let call = stacker::maybe_grow(64 * 1024, 1024 * 1024, infer_call);
        // After typing the call we know that the last argument must be an
        // anonymous function and the first assignments in its body are the
        // typed assignments on the left hand side of a `use`.
        let assignments = extract_typed_use_call_assignments(&call, assignments_count);
        Statement::Use(Use {
            call: Box::new(call),
            location: use_.location,
            right_hand_side_location: use_.right_hand_side_location,
            assignments_location: use_.assignments_location,
            assignments,
        })
    }
    /// Type a `!` boolean negation, delegating to the helper that tracks how
    /// many consecutive negations there are.
    fn infer_negate_bool(&mut self, location: SrcSpan, value: UntypedExpr) -> TypedExpr {
        self.infer_multiple_negate_bool(location, 1, location, value)
    }
fn infer_multiple_negate_bool(
&mut self,
starting_location: SrcSpan,
negations: usize,
location: SrcSpan,
value: UntypedExpr,
) -> TypedExpr {
// If we're typing a double negation we just keep going increasing the
// number of consecutive negations, inferring the wrapped value.
if let UntypedExpr::NegateBool {
location: inner_location,
value,
} = value
{
return TypedExpr::NegateBool {
location,
value: Box::new(self.infer_multiple_negate_bool(
starting_location,
negations + 1,
inner_location,
| rust | Apache-2.0 | f424547f02e621f1c5f28749786e05eda7feb098 | 2026-01-04T15:40:22.554517Z | true |
gleam-lang/gleam | https://github.com/gleam-lang/gleam/blob/f424547f02e621f1c5f28749786e05eda7feb098/compiler-core/src/type_/pipe.rs | compiler-core/src/type_/pipe.rs | use self::expression::CallKind;
use super::*;
use crate::ast::{
FunctionLiteralKind, ImplicitCallArgOrigin, PIPE_VARIABLE, PipelineAssignmentKind, Statement,
TypedPipelineAssignment, UntypedExpr,
};
use vec1::Vec1;
/// Typer for a `|>` pipeline, which is rewritten into a series of
/// assignments to a special pipe variable plus one final expression.
#[derive(Debug)]
pub(crate) struct PipeTyper<'a, 'b, 'c> {
    /// Total number of expressions in the pipeline, including the first.
    size: usize,
    /// Type of the value produced by the previous pipeline step, i.e. the
    /// value being piped into the next step.
    argument_type: Arc<Type>,
    /// Location of the previous pipeline step.
    argument_location: SrcSpan,
    /// Location of the entire pipeline.
    location: SrcSpan,
    /// The typed assignment for the pipeline's leftmost value.
    first_value: TypedPipelineAssignment,
    /// One assignment per intermediate step; the last step becomes the
    /// pipeline's `finally` expression instead.
    assignments: Vec<(TypedPipelineAssignment, PipelineAssignmentKind)>,
    expr_typer: &'a mut ExprTyper<'b, 'c>,
}
impl<'a, 'b, 'c> PipeTyper<'a, 'b, 'c> {
    /// Build a typer for a pipeline whose first (leftmost) value has already
    /// been typed. `size` counts every expression in the pipeline and `end`
    /// is the byte offset at which the pipeline ends.
    fn new(expr_typer: &'a mut ExprTyper<'b, 'c>, size: usize, first: TypedExpr, end: u32) -> Self {
        let first_type = first.type_();
        let first_location = first.location();
        // This also registers the pipe variable in the environment so the
        // next step can refer to the first value.
        let first_value = new_pipeline_assignment(expr_typer, first);
        Self {
            size,
            expr_typer,
            argument_type: first_type,
            argument_location: first_location,
            location: SrcSpan {
                start: first_location.start,
                end,
            },
            assignments: Vec::with_capacity(size),
            first_value,
        }
    }
    /// Type an entire `|>` pipeline.
    pub fn infer(
        expr_typer: &'a mut ExprTyper<'b, 'c>,
        expressions: Vec1<UntypedExpr>,
    ) -> TypedExpr {
        // The scope is reset as pipelines are rewritten into a series of
        // assignments, and we don't want these variables to leak out of the
        // pipeline.
        let scope = expr_typer.environment.scope.clone();
        let result = PipeTyper::run(expr_typer, expressions);
        expr_typer.environment.scope = scope;
        result
    }
    /// Type the pipeline's first expression, then feed the remaining steps
    /// through `infer_expressions`.
    fn run(expr_typer: &'a mut ExprTyper<'b, 'c>, expressions: Vec1<UntypedExpr>) -> TypedExpr {
        let size = expressions.len();
        let end = expressions.last().location().end;
        let mut expressions = expressions.into_iter();
        let first = expressions.next().expect("Empty pipeline in typer");
        let first = expr_typer.infer(first);
        Self::new(expr_typer, size, first, end).infer_expressions(expressions)
    }
    /// Type every remaining step and assemble the final
    /// `TypedExpr::Pipeline` value.
    fn infer_expressions(
        mut self,
        expressions: impl IntoIterator<Item = UntypedExpr>,
    ) -> TypedExpr {
        let (finally, finally_kind) = self.infer_each_expression(expressions);
        let assignments = std::mem::take(&mut self.assignments);
        TypedExpr::Pipeline {
            location: self.location,
            first_value: self.first_value,
            assignments,
            finally: Box::new(finally),
            finally_kind,
        }
    }
    /// Type each pipeline step, rewriting `left |> right` into the matching
    /// function-call shape. Intermediate steps are pushed as assignments;
    /// the last step is returned to become the pipeline's `finally`.
    fn infer_each_expression(
        &mut self,
        expressions: impl IntoIterator<Item = UntypedExpr>,
    ) -> (TypedExpr, PipelineAssignmentKind) {
        let mut finally = None;
        for (i, call) in expressions.into_iter().enumerate() {
            if self.expr_typer.previous_panics {
                self.expr_typer
                    .warn_for_unreachable_code(call.location(), PanicPosition::PreviousExpression);
            }
            self.warn_if_call_first_argument_is_hole(&call);
            let (kind, call) = match call {
                // left |> fn(x) { ... } or a capture like left |> f(_, 2):
                // the anonymous function is called with the piped value.
                func @ UntypedExpr::Fn { location, kind, .. } => {
                    let (func, arguments, return_type) = self.expr_typer.do_infer_call(
                        func,
                        vec![self.untyped_left_hand_value_variable_call_argument()],
                        location,
                        CallKind::Function,
                    );
                    self.expr_typer.purity =
                        self.expr_typer.purity.merge(func.called_function_purity());
                    let kind = match kind {
                        FunctionLiteralKind::Capture { hole } => {
                            PipelineAssignmentKind::Hole { hole }
                        }
                        FunctionLiteralKind::Anonymous { .. } | FunctionLiteralKind::Use { .. } => {
                            PipelineAssignmentKind::FunctionCall
                        }
                    };
                    (
                        kind,
                        TypedExpr::Call {
                            location,
                            arguments,
                            type_: return_type,
                            fun: Box::new(func),
                        },
                    )
                }
                // left |> right(..args)
                //          ^^^^^ This is `fun`
                UntypedExpr::Call {
                    fun,
                    arguments,
                    location,
                    ..
                } => {
                    let fun = self.expr_typer.infer(*fun);
                    match fun.type_().fn_types() {
                        // Rewrite as right(..args)(left)
                        Some((fn_arguments, _)) if fn_arguments.len() == arguments.len() => {
                            // We are calling the return value of another function.
                            // Without lifting purity tracking into the type system,
                            // we have no idea whether it's pure or not!
                            self.expr_typer.purity = self.expr_typer.purity.merge(Purity::Unknown);
                            (
                                PipelineAssignmentKind::FunctionCall,
                                self.infer_apply_to_call_pipe(fun, arguments, location),
                            )
                        }
                        // Rewrite as right(left, ..args)
                        _ => {
                            self.expr_typer.purity =
                                self.expr_typer.purity.merge(fun.called_function_purity());
                            (
                                PipelineAssignmentKind::FirstArgument {
                                    second_argument: arguments.first().map(|arg| arg.location),
                                },
                                self.infer_insert_pipe(fun, arguments, location),
                            )
                        }
                    }
                }
                UntypedExpr::Echo {
                    location,
                    keyword_end: _,
                    expression: None,
                    message,
                } => {
                    self.expr_typer.environment.echo_found = true;
                    self.expr_typer.purity = Purity::Impure;
                    // An echo that is not followed by an expression that is
                    // used as a pipeline's step is just like the identity
                    // function.
                    // So it gets the type of the value coming from the previous
                    // step of the pipeline.
                    (
                        PipelineAssignmentKind::Echo,
                        TypedExpr::Echo {
                            location,
                            expression: None,
                            type_: self.argument_type.clone(),
                            message: message.map(|message| {
                                Box::new(self.expr_typer.infer_and_unify(*message, string()))
                            }),
                        },
                    )
                }
                // right(left)
                UntypedExpr::Int { .. }
                | UntypedExpr::Float { .. }
                | UntypedExpr::String { .. }
                | UntypedExpr::Block { .. }
                | UntypedExpr::Var { .. }
                | UntypedExpr::List { .. }
                | UntypedExpr::BinOp { .. }
                | UntypedExpr::PipeLine { .. }
                | UntypedExpr::Case { .. }
                | UntypedExpr::FieldAccess { .. }
                | UntypedExpr::Tuple { .. }
                | UntypedExpr::TupleIndex { .. }
                | UntypedExpr::Todo { .. }
                | UntypedExpr::Panic { .. }
                | UntypedExpr::Echo { .. }
                | UntypedExpr::BitArray { .. }
                | UntypedExpr::RecordUpdate { .. }
                | UntypedExpr::NegateBool { .. }
                | UntypedExpr::NegateInt { .. } => (
                    PipelineAssignmentKind::FunctionCall,
                    self.infer_apply_pipe(call),
                ),
            };
            // `expressions` excludes the pipeline's first value, so the last
            // step is the one with 0-based index `size - 2`.
            if i + 2 == self.size {
                finally = Some((call, kind));
            } else {
                self.push_assignment(call, kind);
            }
        }
        finally.expect("Empty pipeline in typer")
    }
    /// Create a call argument that can be used to refer to the value on the
    /// left hand side of the pipe
    fn typed_left_hand_value_variable_call_argument(&self) -> CallArg<TypedExpr> {
        CallArg {
            label: None,
            location: self.argument_location,
            value: self.typed_left_hand_value_variable(),
            // This argument is given implicitly by the pipe, not explicitly by
            // the programmer.
            implicit: Some(ImplicitCallArgOrigin::Pipe),
        }
    }
    /// Create a call argument that can be used to refer to the value on the
    /// left hand side of the pipe
    fn untyped_left_hand_value_variable_call_argument(&self) -> CallArg<UntypedExpr> {
        CallArg {
            label: None,
            location: self.argument_location,
            value: self.untyped_left_hand_value_variable(),
            // This argument is given implicitly by the pipe, not explicitly by
            // the programmer.
            implicit: Some(ImplicitCallArgOrigin::Pipe),
        }
    }
    /// Create a variable that can be used to refer to the value on the left
    /// hand side of the pipe
    fn typed_left_hand_value_variable(&self) -> TypedExpr {
        TypedExpr::Var {
            location: self.argument_location,
            name: PIPE_VARIABLE.into(),
            constructor: ValueConstructor::local_variable(
                self.argument_location,
                VariableOrigin::generated(),
                self.argument_type.clone(),
            ),
        }
    }
    /// Create a variable that can be used to refer to the value on the left
    /// hand side of the pipe
    fn untyped_left_hand_value_variable(&self) -> UntypedExpr {
        UntypedExpr::Var {
            location: self.argument_location,
            name: PIPE_VARIABLE.into(),
        }
    }
    /// Push an assignment for the value on the left hand side of the pipe
    fn push_assignment(&mut self, expression: TypedExpr, kind: PipelineAssignmentKind) {
        // The step's result becomes the piped value for the next step.
        self.argument_type = expression.type_();
        self.argument_location = expression.location();
        let assignment = new_pipeline_assignment(self.expr_typer, expression);
        self.assignments.push((assignment, kind));
    }
    /// Attempt to infer a |> b(..c) as b(..c)(a)
    fn infer_apply_to_call_pipe(
        &mut self,
        function: TypedExpr,
        arguments: Vec<CallArg<UntypedExpr>>,
        location: SrcSpan,
    ) -> TypedExpr {
        // First type the inner call b(..c)...
        let (function, arguments, type_) = self.expr_typer.do_infer_call_with_known_fun(
            function,
            arguments,
            location,
            CallKind::Function,
        );
        let function = TypedExpr::Call {
            location,
            type_,
            arguments,
            fun: Box::new(function),
        };
        // ...then call its result with the piped value.
        let arguments = vec![self.untyped_left_hand_value_variable_call_argument()];
        // TODO: use `.with_unify_error_situation(UnifyErrorSituation::PipeTypeMismatch)`
        // This will require the typing of the arguments to be lifted up out of
        // the function below. If it is not we don't know if the error comes
        // from incorrect usage of the pipe or if it originates from the
        // argument expressions.
        let (function, arguments, type_) = self.expr_typer.do_infer_call_with_known_fun(
            function,
            arguments,
            location,
            CallKind::Function,
        );
        TypedExpr::Call {
            location,
            type_,
            arguments,
            fun: Box::new(function),
        }
    }
    /// Attempt to infer a |> b(c) as b(a, c)
    fn infer_insert_pipe(
        &mut self,
        function: TypedExpr,
        mut arguments: Vec<CallArg<UntypedExpr>>,
        location: SrcSpan,
    ) -> TypedExpr {
        arguments.insert(0, self.untyped_left_hand_value_variable_call_argument());
        // TODO: use `.with_unify_error_situation(UnifyErrorSituation::PipeTypeMismatch)`
        // This will require the typing of the arguments to be lifted up out of
        // the function below. If it is not we don't know if the error comes
        // from incorrect usage of the pipe or if it originates from the
        // argument expressions.
        let (fun, arguments, type_) = self.expr_typer.do_infer_call_with_known_fun(
            function,
            arguments,
            location,
            CallKind::Function,
        );
        TypedExpr::Call {
            location,
            type_,
            arguments,
            fun: Box::new(fun),
        }
    }
    /// Attempt to infer a |> b as b(a)
    /// b is the `function` argument.
    fn infer_apply_pipe(&mut self, function: UntypedExpr) -> TypedExpr {
        let function_location = function.location();
        let function = Box::new(self.expr_typer.infer(function));
        self.expr_typer.purity = self
            .expr_typer
            .purity
            .merge(function.called_function_purity());
        let return_type = self.expr_typer.new_unbound_var();
        // Ensure that the function accepts one argument of the correct type
        let unification_result = unify(
            function.type_(),
            fn_(vec![self.argument_type.clone()], return_type.clone()),
        );
        match unification_result {
            Ok(_) => (),
            Err(error) => {
                let error = if self.check_if_pipe_type_mismatch(&error) {
                    convert_unify_error(error, function.location())
                        .with_unify_error_situation(UnifyErrorSituation::PipeTypeMismatch)
                } else {
                    convert_unify_error(flip_unify_error(error), function.location())
                };
                self.expr_typer.problems.error(error);
            }
        };
        TypedExpr::Call {
            location: function_location,
            type_: return_type,
            fun: function,
            arguments: vec![self.typed_left_hand_value_variable_call_argument()],
        }
    }
    /// Heuristic for error reporting: treat the unify error as a pipe type
    /// mismatch when both sides are functions of the same arity whose first
    /// arguments don't unify — i.e. the piped value is what doesn't fit.
    fn check_if_pipe_type_mismatch(&mut self, error: &UnifyError) -> bool {
        let types = match error {
            UnifyError::CouldNotUnify {
                expected, given, ..
            } => (expected.as_ref(), given.as_ref()),
            UnifyError::ExtraVarInAlternativePattern { .. }
            | UnifyError::MissingVarInAlternativePattern { .. }
            | UnifyError::DuplicateVarInPattern { .. }
            | UnifyError::RecursiveType => return false,
        };
        match types {
            (Type::Fn { arguments: a, .. }, Type::Fn { arguments: b, .. })
                if a.len() == b.len() =>
            {
                match (a.first(), b.first()) {
                    (Some(a), Some(b)) => unify(a.clone(), b.clone()).is_err(),
                    _ => false,
                }
            }
            _ => false,
        }
    }
    /// Warn when a pipeline step is a capture whose first argument is the
    /// unlabelled capture hole (e.g. `a |> f(_, b)`), which the
    /// `RedundantPipeFunctionCapture` warning flags as redundant.
    fn warn_if_call_first_argument_is_hole(&mut self, call: &UntypedExpr) {
        if let UntypedExpr::Fn { kind, body, .. } = &call
            && kind.is_capture()
            && let Statement::Expression(UntypedExpr::Call { arguments, .. }) = body.first()
        {
            match arguments.as_slice() {
                // If the first argument is labelled, we don't warn the user
                // as they might be intentionally adding it to provide more
                // information about exactly which argument is being piped into.
                [first] | [first, ..] if first.is_capture_hole() && first.label.is_none() => self
                    .expr_typer
                    .problems
                    .warning(Warning::RedundantPipeFunctionCapture {
                        location: first.location,
                    }),
                _ => (),
            }
        }
    }
}
/// Create the typed assignment that binds one pipeline step's result to the
/// special pipe variable, registering that variable in the environment so
/// the following step can refer to it.
fn new_pipeline_assignment(
    expr_typer: &mut ExprTyper<'_, '_>,
    expression: TypedExpr,
) -> TypedPipelineAssignment {
    let location = expression.location();
    let type_ = expression.type_();
    // Insert the variable for use in type checking the rest of the pipeline
    expr_typer.environment.insert_local_variable(
        PIPE_VARIABLE.into(),
        location,
        VariableOrigin::generated(),
        type_,
    );
    TypedPipelineAssignment {
        name: PIPE_VARIABLE.into(),
        location,
        value: Box::new(expression),
    }
}
| rust | Apache-2.0 | f424547f02e621f1c5f28749786e05eda7feb098 | 2026-01-04T15:40:22.554517Z | false |
gleam-lang/gleam | https://github.com/gleam-lang/gleam/blob/f424547f02e621f1c5f28749786e05eda7feb098/compiler-core/src/type_/hydrator.rs | compiler-core/src/type_/hydrator.rs | use super::*;
use crate::{
analyse::name::check_name_case,
ast::{Layer, TypeAst, TypeAstConstructor, TypeAstFn, TypeAstHole, TypeAstTuple, TypeAstVar},
reference::ReferenceKind,
};
use std::sync::Arc;
use im::hashmap;
/// The Hydrator takes an AST representing a type (i.e. a type annotation
/// for a function argument) and returns a Type for that annotation.
///
/// If a valid Type cannot be constructed it returns an error.
///
/// It keeps track of any type variables created. This is useful for:
///
/// - Determining if a generic type variable should be made into an
///   unbound type variable during type instantiation.
/// - Ensuring that the same type is constructed if the programmer
///   uses the same name for a type variable multiple times.
///
#[derive(Debug)]
pub struct Hydrator {
    /// Type variables created from names in annotations, so repeated uses of
    /// the same name resolve to the same variable.
    created_type_variables: im::HashMap<EcoString, CreatedTypeVariable>,
    /// A rigid type is a generic type that was specified as being generic in
    /// an annotation. As such it should never be instantiated into an unbound
    /// variable. This type_id => name map is used for reporting the original
    /// annotated name on error.
    rigid_type_names: im::HashMap<u64, EcoString>,
    /// Whether an unknown name in an annotation may create a new variable.
    permit_new_type_variables: bool,
    /// Whether type holes are allowed in the annotation being hydrated.
    permit_holes: bool,
}
/// Snapshot of the hydrator's state taken when opening a scope and restored
/// verbatim when that scope is closed.
#[derive(Debug)]
pub struct ScopeResetData {
    created_type_variables: im::HashMap<EcoString, CreatedTypeVariable>,
    rigid_type_names: im::HashMap<u64, EcoString>,
}
impl Default for Hydrator {
fn default() -> Self {
Self::new()
}
}
impl Hydrator {
pub fn new() -> Self {
Self {
created_type_variables: hashmap![],
rigid_type_names: hashmap![],
permit_new_type_variables: true,
permit_holes: false,
}
}
    /// The type variables created from annotation names so far. Cloning is
    /// cheap because `im::HashMap` is a persistent data structure.
    pub fn named_type_variables(&self) -> im::HashMap<EcoString, CreatedTypeVariable> {
        self.created_type_variables.clone()
    }
pub fn open_new_scope(&mut self) -> ScopeResetData {
let created_type_variables = self.created_type_variables.clone();
let rigid_type_names = self.rigid_type_names.clone();
ScopeResetData {
created_type_variables,
rigid_type_names,
}
}
    /// Restore the state captured by the matching `open_new_scope`,
    /// discarding any type variables created within the scope.
    pub fn close_scope(&mut self, data: ScopeResetData) {
        self.created_type_variables = data.created_type_variables;
        self.rigid_type_names = data.rigid_type_names;
    }
    /// Forbid annotations from introducing any further type variables.
    pub fn disallow_new_type_variables(&mut self) {
        self.permit_new_type_variables = false
    }
    /// Set whether type holes are permitted in annotations.
    pub fn permit_holes(&mut self, flag: bool) {
        self.permit_holes = flag
    }
    /// A rigid type is a generic type that was specified as being generic in
    /// an annotation. As such it should never be instantiated into an unbound
    /// variable.
    ///
    /// Returns true if the type variable with the given id is rigid.
    pub fn is_rigid(&self, id: &u64) -> bool {
        self.rigid_type_names.contains_key(id)
    }
    /// A copy of the type_id => name map for rigid (annotated generic) type
    /// variables; cloning is cheap because the map is persistent.
    pub fn rigid_names(&self) -> im::HashMap<u64, EcoString> {
        self.rigid_type_names.clone()
    }
pub fn type_from_option_ast(
&mut self,
ast: &Option<TypeAst>,
environment: &mut Environment<'_>,
problems: &mut Problems,
) -> Result<Arc<Type>, Error> {
match ast {
Some(ast) => self.type_from_ast(ast, environment, problems),
None => Ok(environment.new_unbound_var()),
}
}
/// Construct a Type from an AST Type annotation.
///
/// As a side effect this registers type references for tooling, emits
/// deprecation warnings, tracks usage of unqualified constructors, and may
/// create new named type variables (when permitted).
pub fn type_from_ast(
    &mut self,
    ast: &TypeAst,
    environment: &mut Environment<'_>,
    problems: &mut Problems,
) -> Result<Arc<Type>, Error> {
    match ast {
        TypeAst::Constructor(TypeAstConstructor {
            location,
            name_location,
            module,
            name,
            arguments,
            start_parentheses,
        }) => {
            // Hydrate the type argument AST into types
            let mut argument_types = Vec::with_capacity(arguments.len());
            for argument in arguments {
                let type_ = self.type_from_ast(argument, environment, problems)?;
                argument_types.push((argument.location(), type_));
            }
            // Look up the constructor
            let TypeConstructor {
                parameters,
                type_: return_type,
                deprecation,
                ..
            } = environment
                .get_type_constructor(module, name)
                .map_err(|e| {
                    convert_get_type_constructor_error(
                        e,
                        location,
                        module.as_ref().map(|(_, location)| *location),
                    )
                })?
                .clone();
            // Record the reference for go-to-definition / rename tooling.
            // A constructor whose result type has no named type name (e.g.
            // an alias to a non-named type) is tracked via the call graph
            // instead.
            if let Some((type_module, type_name)) = return_type.named_type_name() {
                let reference_kind = if module.is_some() {
                    ReferenceKind::Qualified
                } else if name != &type_name {
                    // Referenced under a different name than the type's own:
                    // this annotation went through a type alias.
                    ReferenceKind::Alias
                } else {
                    ReferenceKind::Unqualified
                };
                environment.references.register_type_reference(
                    type_module,
                    type_name,
                    name,
                    *name_location,
                    reference_kind,
                );
            } else {
                environment
                    .references
                    .register_type_reference_in_call_graph(name.clone());
            }
            // Warn (but do not fail) when the referenced type is deprecated.
            match deprecation {
                Deprecation::NotDeprecated => {}
                Deprecation::Deprecated { message } => {
                    problems.warning(Warning::DeprecatedItem {
                        location: *location,
                        message: message.clone(),
                        layer: Layer::Type,
                    })
                }
            }
            // Register the type constructor as being used if it is unqualified.
            // We do not track use of qualified type constructors as they may be
            // used in another module.
            if module.is_none() {
                environment.increment_usage(name);
            }
            // Ensure that the correct number of arguments have been given
            // to the constructor.
            //
            // This is a special case for when a type is being called as a
            // type constructor. For example: `Int()` or `Bool(a, b)`
            if let Some(start_parentheses) = start_parentheses
                && parameters.is_empty()
            {
                return Err(Error::TypeUsedAsAConstructor {
                    location: SrcSpan::new(*start_parentheses, location.end),
                    name: name.clone(),
                });
            } else if arguments.len() != parameters.len() {
                return Err(Error::IncorrectTypeArity {
                    location: *location,
                    name: name.clone(),
                    expected: parameters.len(),
                    given: arguments.len(),
                });
            }
            // Instantiate the constructor type for this specific usage
            let mut type_vars = hashmap![];
            #[allow(clippy::needless_collect)] // Not needless, used for side effects
            let parameter_types: Vec<_> = parameters
                .into_iter()
                .map(|type_| environment.instantiate(type_, &mut type_vars, self))
                .collect();
            // Instantiated with the same `type_vars` map so the return type
            // shares fresh variables with the parameters.
            let return_type = environment.instantiate(return_type, &mut type_vars, self);
            // Unify argument types with instantiated parameter types so that the correct types
            // are inserted into the return type
            for (parameter, (location, argument)) in
                parameter_types.into_iter().zip(argument_types)
            {
                unify(parameter, argument).map_err(|e| convert_unify_error(e, location))?;
            }
            Ok(return_type)
        }
        TypeAst::Tuple(TypeAstTuple { elements, .. }) => Ok(tuple(
            elements
                .iter()
                .map(|type_| self.type_from_ast(type_, environment, problems))
                .try_collect()?,
        )),
        TypeAst::Fn(TypeAstFn {
            arguments, return_, ..
        }) => {
            let arguments = arguments
                .iter()
                .map(|type_| self.type_from_ast(type_, environment, problems))
                .try_collect()?;
            let return_ = self.type_from_ast(return_, environment, problems)?;
            Ok(fn_(arguments, return_))
        }
        TypeAst::Var(TypeAstVar { name, location }) => {
            match self.created_type_variables.get_mut(name) {
                // A variable of this name already exists in this scope:
                // reuse it and bump its usage count (used for unused
                // variable reporting).
                Some(var) => {
                    var.usage_count += 1;
                    Ok(var.type_.clone())
                }
                // First sight of this name: create a fresh generic variable,
                // recording it as rigid under the annotation's name.
                None if self.permit_new_type_variables => {
                    // A badly cased name is reported but does not abort
                    // hydration.
                    if let Err(error) = check_name_case(*location, name, Named::TypeVariable) {
                        problems.error(error);
                    }
                    let t = environment.new_generic_var();
                    let _ = self
                        .rigid_type_names
                        .insert(environment.previous_uid(), name.clone());
                    environment
                        .names
                        .type_variable_in_scope(environment.previous_uid(), name.clone());
                    let _ = self.created_type_variables.insert(
                        name.clone(),
                        CreatedTypeVariable {
                            type_: t.clone(),
                            usage_count: 1,
                        },
                    );
                    Ok(t)
                }
                // New variables are not permitted here: report, hinting at a
                // same-named value in scope or at known module types.
                None => {
                    let hint = match environment.scope.contains_key(name) {
                        true => UnknownTypeHint::ValueInScopeWithSameName,
                        false => UnknownTypeHint::AlternativeTypes(
                            environment.module_types.keys().cloned().collect(),
                        ),
                    };
                    Err(Error::UnknownType {
                        name: name.clone(),
                        location: *location,
                        hint,
                    })
                }
            }
        }
        TypeAst::Hole(TypeAstHole { .. }) if self.permit_holes => {
            Ok(environment.new_unbound_var())
        }
        TypeAst::Hole(TypeAstHole { location, .. }) => Err(Error::UnexpectedTypeHole {
            location: *location,
        }),
    }
}
/// Remove all rigid type name mappings.
///
/// NOTE(review): the name contains a typo ("ridgid" rather than "rigid");
/// renaming would break existing callers, so it is documented rather than
/// fixed here.
pub fn clear_ridgid_type_names(&mut self) {
    self.rigid_type_names.clear();
}
/// All the type variables that were created but never used.
pub fn unused_type_variables(&self) -> impl Iterator<Item = &EcoString> {
    // A usage count of zero means the variable was introduced (e.g. by
    // `add_type_variable`) but never referenced by an annotation.
    self.created_type_variables
        .iter()
        .filter_map(|(name, variable)| (variable.usage_count == 0).then_some(name))
}
/// Create a new type variable with the given name.
///
/// Returns `Err` holding the new variable's type when a variable with this
/// name already existed (the previous entry is replaced), `Ok` otherwise.
pub fn add_type_variable(
    &mut self,
    name: &EcoString,
    environment: &mut Environment<'_>,
) -> Result<Arc<Type>, Arc<Type>> {
    let type_ = environment.new_generic_var();
    environment
        .names
        .type_variable_in_scope(environment.previous_uid(), name.clone());
    let variable = CreatedTypeVariable {
        type_: type_.clone(),
        usage_count: 0,
    };
    if self
        .created_type_variables
        .insert(name.clone(), variable)
        .is_some()
    {
        Err(type_)
    } else {
        Ok(type_)
    }
}
}
/// A type variable introduced by an annotation, together with how many
/// times it has been referenced so unused variables can be reported.
#[derive(Debug, Clone)]
pub struct CreatedTypeVariable {
    // The generic type the variable was created as.
    pub type_: Arc<Type>,
    // Number of times the variable has been referenced.
    pub usage_count: usize,
}
| rust | Apache-2.0 | f424547f02e621f1c5f28749786e05eda7feb098 | 2026-01-04T15:40:22.554517Z | false |
gleam-lang/gleam | https://github.com/gleam-lang/gleam/blob/f424547f02e621f1c5f28749786e05eda7feb098/compiler-core/src/type_/pretty.rs | compiler-core/src/type_/pretty.rs | use super::{Type, TypeVar};
use crate::{
docvec,
pretty::{nil, *},
};
use ecow::EcoString;
use std::sync::Arc;
#[cfg(test)]
use super::*;
#[cfg(test)]
use std::cell::RefCell;
#[cfg(test)]
use pretty_assertions::assert_eq;
// Indentation applied when a rendered argument list breaks across lines.
const INDENT: isize = 2;
/// Pretty printer for types, assigning readable letter names (`a`, `b`,
/// ...) to type variables as they are first encountered.
#[derive(Debug, Default)]
pub struct Printer {
    // Names already assigned to type variable ids.
    names: im::HashMap<u64, EcoString>,
    // Counter used to generate the next fresh variable name.
    uid: u64,
    // A mapping of printed type names to the module that they are defined in.
    printed_types: im::HashMap<EcoString, EcoString>,
}
impl Printer {
    pub fn new() -> Self {
        Default::default()
    }

    /// Pre-assign names for type variable ids so those variables print with
    /// their given names rather than freshly generated letters.
    pub fn with_names(&mut self, names: im::HashMap<u64, EcoString>) {
        self.names = names;
    }

    /// Render a Type as a well formatted string.
    ///
    /// `initial_indent` is the column the output starts at; the document is
    /// nested by that amount and wrapped at 80 columns.
    pub fn pretty_print(&mut self, type_: &Type, initial_indent: usize) -> String {
        let mut buffer = String::with_capacity(initial_indent);
        for _ in 0..initial_indent {
            buffer.push(' ');
        }
        buffer
            .to_doc()
            .append(self.print(type_))
            .nest(initial_indent as isize)
            .to_pretty_string(80)
    }

    // TODO: have this function return a Document that borrows from the Type.
    // Is this possible? The lifetime would have to go through the Arc<Refcell<Type>>
    // for TypeVar::Link'd types.
    pub fn print<'a>(&mut self, type_: &Type) -> Document<'a> {
        match type_ {
            Type::Named {
                name,
                arguments,
                module,
                ..
            } => {
                // Print the bare name unless a same-named type from a
                // different module was already printed, in which case
                // qualify with the module to disambiguate.
                let doc = if self.name_clashes_if_unqualified(name, module) {
                    qualify_type_name(module, name)
                } else {
                    let _ = self.printed_types.insert(name.clone(), module.clone());
                    name.to_doc()
                };
                if arguments.is_empty() {
                    doc
                } else {
                    doc.append("(")
                        .append(self.arguments_to_gleam_doc(arguments))
                        .append(")")
                }
            }
            Type::Fn { arguments, return_ } => "fn("
                .to_doc()
                .append(self.arguments_to_gleam_doc(arguments))
                .append(") ->")
                .append(
                    break_("", " ")
                        .append(self.print(return_))
                        .nest(INDENT)
                        .group(),
                ),
            Type::Var { type_, .. } => self.type_var_doc(&type_.borrow()),
            Type::Tuple { elements, .. } => {
                self.arguments_to_gleam_doc(elements).surround("#(", ")")
            }
        }
    }

    // Whether printing this type name unqualified would collide with a
    // previously printed same-named type from a different module.
    // NOTE(review): takes `&mut self` but performs no mutation; `&self`
    // appears sufficient — confirm before changing.
    fn name_clashes_if_unqualified(&mut self, type_: &EcoString, module: &str) -> bool {
        match self.printed_types.get(type_) {
            None => false,
            Some(previous_module) if module == previous_module => false,
            Some(_different_module) => true,
        }
    }

    fn type_var_doc<'a>(&mut self, type_: &TypeVar) -> Document<'a> {
        match type_ {
            // A linked variable prints as the type it resolved to.
            TypeVar::Link { type_, .. } => self.print(type_),
            TypeVar::Unbound { id, .. } | TypeVar::Generic { id, .. } => self.generic_type_var(*id),
        }
    }

    /// Print a type variable by id, assigning the next free letter name
    /// (`a` ... `z`, `aa`, ...) on first sight and reusing it afterwards.
    pub fn generic_type_var<'a>(&mut self, id: u64) -> Document<'a> {
        match self.names.get(&id) {
            Some(n) => {
                // Record the name under the empty module so any later named
                // type with the same name gets qualified.
                let _ = self.printed_types.insert(n.clone(), "".into());
                n.to_doc()
            }
            None => {
                let n = self.next_letter();
                let _ = self.names.insert(id, n.clone());
                let _ = self.printed_types.insert(n.clone(), "".into());
                n.to_doc()
            }
        }
    }

    // Produce the next name in the sequence a, b, ... z, aa, ab, ... — a
    // bijective base-26 encoding of `self.uid`, like spreadsheet columns.
    fn next_letter(&mut self) -> EcoString {
        let alphabet_length = 26;
        let char_offset = 97; // ASCII 'a'
        let mut chars = vec![];
        let mut n;
        let mut rest = self.uid;
        loop {
            n = rest % alphabet_length;
            rest /= alphabet_length;
            chars.push((n as u8 + char_offset) as char);
            if rest == 0 {
                break;
            }
            rest -= 1 // Makes the encoding bijective so "aa" follows "z".
        }
        self.uid += 1;
        chars.into_iter().rev().collect()
    }

    // Render a comma separated argument list that may break across lines,
    // with a trailing comma only in the broken form.
    fn arguments_to_gleam_doc(&mut self, arguments: &[Arc<Type>]) -> Document<'static> {
        if arguments.is_empty() {
            return nil();
        }
        let arguments = join(
            arguments.iter().map(|type_| self.print(type_).group()),
            break_(",", ", "),
        );
        break_("", "")
            .append(arguments)
            .nest(INDENT)
            .append(break_(",", ""))
            .group()
    }
}
// Render a type name qualified with its module, e.g. `module.TypeName`.
fn qualify_type_name(module: &str, type_name: &str) -> Document<'static> {
    docvec![EcoString::from(module), ".", EcoString::from(type_name)]
}
#[test]
fn next_letter_test() {
    // The printer must yield a..z, then aa..az, then ba..bz, in order.
    let mut printer = Printer::new();

    // Single letters: a through z.
    for c in b'a'..=b'z' {
        let expected = (c as char).to_string();
        assert_eq!(printer.next_letter().as_str(), expected.as_str());
    }

    // Two-letter names follow: aa..az, then ba..bz.
    for first in [b'a', b'b'] {
        for second in b'a'..=b'z' {
            let expected = format!("{}{}", first as char, second as char);
            assert_eq!(printer.next_letter().as_str(), expected.as_str());
        }
    }
}
#[test]
fn pretty_print_test() {
macro_rules! assert_string {
($src:expr, $type_:expr $(,)?) => {
let mut printer = Printer::new();
assert_eq!($type_.to_string(), printer.pretty_print(&$src, 0),);
};
}
assert_string!(
Type::Named {
module: "whatever".into(),
package: "whatever".into(),
name: "Int".into(),
publicity: Publicity::Public,
arguments: vec![],
inferred_variant: None,
},
"Int",
);
assert_string!(
Type::Named {
module: "themodule".into(),
package: "whatever".into(),
name: "Pair".into(),
publicity: Publicity::Public,
arguments: vec![
Arc::new(Type::Named {
module: "whatever".into(),
package: "whatever".into(),
name: "Int".into(),
publicity: Publicity::Public,
arguments: vec![],
inferred_variant: None,
}),
Arc::new(Type::Named {
module: "whatever".into(),
package: "whatever".into(),
name: "Bool".into(),
publicity: Publicity::Public,
arguments: vec![],
inferred_variant: None,
}),
],
inferred_variant: None,
},
"Pair(Int, Bool)",
);
assert_string!(
Type::Fn {
arguments: vec![
Arc::new(Type::Named {
arguments: vec![],
module: "whatever".into(),
package: "whatever".into(),
name: "Int".into(),
publicity: Publicity::Public,
inferred_variant: None,
}),
Arc::new(Type::Named {
arguments: vec![],
module: "whatever".into(),
package: "whatever".into(),
name: "Bool".into(),
publicity: Publicity::Public,
inferred_variant: None,
}),
],
return_: Arc::new(Type::Named {
arguments: vec![],
module: "whatever".into(),
package: "whatever".into(),
name: "Bool".into(),
publicity: Publicity::Public,
inferred_variant: None,
}),
},
"fn(Int, Bool) -> Bool",
);
assert_string!(
Type::Var {
type_: Arc::new(RefCell::new(TypeVar::Link {
type_: Arc::new(Type::Named {
arguments: vec![],
module: "whatever".into(),
package: "whatever".into(),
name: "Int".into(),
publicity: Publicity::Public,
inferred_variant: None,
}),
})),
},
"Int",
);
assert_string!(
Type::Var {
type_: Arc::new(RefCell::new(TypeVar::Unbound { id: 2231 })),
},
"a",
);
assert_string!(
fn_(
vec![Arc::new(Type::Var {
type_: Arc::new(RefCell::new(TypeVar::Unbound { id: 78 })),
})],
Arc::new(Type::Var {
type_: Arc::new(RefCell::new(TypeVar::Unbound { id: 2 })),
}),
),
"fn(a) -> b",
);
assert_string!(
fn_(
vec![Arc::new(Type::Var {
type_: Arc::new(RefCell::new(TypeVar::Generic { id: 78 })),
})],
Arc::new(Type::Var {
type_: Arc::new(RefCell::new(TypeVar::Generic { id: 2 })),
}),
),
"fn(a) -> b",
);
}
#[test]
fn function_test() {
assert_eq!(pretty_print(fn_(vec![], int())), "fn() -> Int");
assert_eq!(
pretty_print(fn_(vec![int(), int(), int()], int())),
"fn(Int, Int, Int) -> Int"
);
assert_eq!(
pretty_print(fn_(
vec![
float(),
float(),
float(),
float(),
float(),
float(),
float(),
float(),
float(),
float(),
float(),
float(),
float()
],
float()
)),
"fn(
Float,
Float,
Float,
Float,
Float,
Float,
Float,
Float,
Float,
Float,
Float,
Float,
Float,
) -> Float"
);
assert_eq!(
pretty_print(fn_(
vec![
tuple(vec![float(), float(), float(), float(), float(), float()]),
float(),
float(),
float(),
float(),
float(),
float(),
float()
],
float()
)),
"fn(
#(Float, Float, Float, Float, Float, Float),
Float,
Float,
Float,
Float,
Float,
Float,
Float,
) -> Float"
);
assert_eq!(
pretty_print(fn_(
vec![tuple(vec![
float(),
float(),
float(),
float(),
float(),
float()
]),],
tuple(vec![
tuple(vec![float(), float(), float(), float(), float(), float()]),
tuple(vec![float(), float(), float(), float(), float(), float()]),
]),
)),
"fn(#(Float, Float, Float, Float, Float, Float)) ->
#(
#(Float, Float, Float, Float, Float, Float),
#(Float, Float, Float, Float, Float, Float),
)"
);
}
// Test helper: render a type with a fresh printer and no initial indent.
#[cfg(test)]
fn pretty_print(type_: Arc<Type>) -> String {
    let mut printer = Printer::new();
    printer.pretty_print(&type_, 0)
}
| rust | Apache-2.0 | f424547f02e621f1c5f28749786e05eda7feb098 | 2026-01-04T15:40:22.554517Z | false |
gleam-lang/gleam | https://github.com/gleam-lang/gleam/blob/f424547f02e621f1c5f28749786e05eda7feb098/compiler-core/src/type_/tests/accessors.rs | compiler-core/src/type_/tests/accessors.rs | use crate::assert_module_infer;
#[test]
fn bug_3629() {
assert_module_infer!(
("imported", "pub type Wibble"),
r#"
import imported
pub type Exp {
One(field: imported.Wibble)
Two(field: imported.Wibble)
}
pub fn main() {
let exp = One(todo)
exp.field
}
"#,
vec![
("One", "fn(Wibble) -> Exp"),
("Two", "fn(Wibble) -> Exp"),
("main", "fn() -> Wibble")
],
);
}
| rust | Apache-2.0 | f424547f02e621f1c5f28749786e05eda7feb098 | 2026-01-04T15:40:22.554517Z | false |
gleam-lang/gleam | https://github.com/gleam-lang/gleam/blob/f424547f02e621f1c5f28749786e05eda7feb098/compiler-core/src/type_/tests/errors.rs | compiler-core/src/type_/tests/errors.rs | use crate::{
assert_error, assert_internal_module_error, assert_js_module_error, assert_module_error,
assert_module_syntax_error,
};
#[test]
fn bit_array_invalid_type() {
assert_module_error!(
"fn x() { \"test\" }
fn main() {
let a = <<1:size(x())>>
a
}"
);
}
#[test]
fn bit_arrays2() {
assert_error!("let <<x:utf8>> = <<1>>");
}
#[test]
fn bit_arrays3() {
assert_error!("let <<x:utf16>> = <<1>>");
}
#[test]
fn bit_arrays4() {
assert_error!("let <<x:utf32>> = <<1>>");
}
#[test]
fn bit_array_float() {
assert_error!("case <<1>> { <<a:float>> if a > 1 -> 1 _ -> 2 }");
}
#[test]
fn bit_array_binary() {
assert_error!("case <<1>> { <<a:bytes>> if a > 1 -> 1 _ -> 2 }");
}
#[test]
fn bit_array_guard() {
assert_error!("case <<1>> { <<a:utf16_codepoint>> if a == \"test\" -> 1 _ -> 2 }");
}
#[test]
fn bit_array_segment_nosize() {
assert_error!("case <<1>> { <<_:bytes, _:bytes>> -> 1 }");
}
#[test]
fn bit_array_segment_nosize2() {
assert_error!("case <<1>> { <<_:bits, _:bytes>> -> 1 }");
}
#[test]
fn bit_array_segment_nosize3() {
assert_error!("case <<1>> { <<_:bytes, _:bits>> -> 1 }");
}
#[test]
fn bit_array_segment_conflicting_options_int() {
assert_error!("let x = <<1:int-bytes>> x");
}
#[test]
fn bit_array_segment_conflicting_options_bit_array() {
assert_error!("case <<1>> { <<1:bits-bytes>> -> 1 }");
}
#[test]
fn bit_array_segment_conflicting_signedness1() {
assert_error!("let x = <<1:signed-unsigned>> x");
}
#[test]
fn bit_array_segment_conflicting_signedness2() {
assert_error!("case <<1>> { <<1:unsigned-signed>> -> 1 }");
}
#[test]
fn bit_array_segment_conflicting_endianness1() {
assert_error!("let x = <<1:big-little>> x");
}
#[test]
fn bit_array_segment_conflicting_endianness2() {
assert_error!("case <<1>> { <<1:native-big>> -> 1 }");
}
#[test]
fn bit_array_segment_size() {
assert_error!("let x = <<1:8-size(5)>> x");
}
#[test]
fn bit_array_segment_size2() {
assert_error!("case <<1>> { <<1:size(2)-size(8)>> -> 1 }");
}
#[test]
fn bit_array_segment_unit_unit() {
assert_error!("let x = <<1:unit(2)-unit(5)>> x");
}
#[test]
fn bit_array_segment_type_does_not_allow_unit_codepoint_utf8() {
assert_error!("let x = <<1:utf8_codepoint-unit(5)>> x");
}
#[test]
fn bit_array_segment_type_does_not_allow_unit_codepoint_utf16() {
assert_error!("let x = <<1:utf16_codepoint-unit(5)>> x");
}
#[test]
fn bit_array_segment_type_does_not_allow_unit_codepoint_utf32() {
assert_error!("case <<1>> { <<1:utf32_codepoint-unit(2)>> -> 1 }");
}
#[test]
fn bit_array_segment_type_does_not_allow_unit_codepoint_utf8_2() {
assert_error!("let x = <<1:utf8_codepoint-size(5)>> x");
}
#[test]
fn bit_array_segment_type_does_not_allow_unit_codepoint_utf16_2() {
assert_error!("let x = <<1:utf16_codepoint-size(5)>> x");
}
#[test]
fn bit_array_segment_type_does_not_allow_unit_codepoint_utf32_2() {
assert_error!("case <<1>> { <<1:utf32_codepoint-size(5)>> -> 1 }");
}
#[test]
fn bit_array_segment_type_does_not_allow_unit_utf8_2() {
assert_error!("let x = <<1:utf8-unit(5)>> x");
}
#[test]
fn bit_array_segment_type_does_not_allow_unit_utf16() {
assert_error!("let x = <<1:utf16-unit(5)>> x");
}
#[test]
fn bit_array_segment_type_does_not_allow_unit_utf32() {
assert_error!("case <<1>> { <<1:utf32-unit(2)>> -> 1 }");
}
#[test]
fn bit_array_segment_type_does_not_allow_size_utf8() {
assert_error!("let x = <<1:utf8-size(5)>> x");
}
#[test]
fn bit_array_segment_type_does_not_allow_size_utf16() {
assert_error!("let x = <<1:utf16-size(5)>> x");
}
#[test]
fn bit_array_segment_type_does_not_allow_size_utf32() {
assert_error!("case <<1>> { <<1:utf32-size(5)>> -> 1 }");
}
#[test]
fn bit_array_segment_type_does_not_allow_variable_string() {
assert_error!("case <<>> { <<a:utf8>> -> 1 _ -> 2 }");
}
#[test]
fn bit_array_segment_type_does_not_allow_aliased_variable_string() {
assert_error!("case <<>> { <<_ as a:utf8>> -> 1 _ -> 2 }");
}
#[test]
fn bit_array_segment_unit_no_size() {
assert_error!("let x = <<1:unit(5)>> x");
}
#[test]
fn bit_array_size_not_int() {
assert_error!("let x = <<1:size(\"1\")>> x");
}
#[test]
fn bit_array_size_not_int_variable() {
assert_error!("let a = 2.0 case <<1>> { <<1:size(a)>> -> a }");
}
#[test]
fn bit_array_float_size() {
// float given invalid size
assert_error!("let x = <<1:8-float>> x");
}
#[test]
fn bit_array_bits_option_in_value() {
assert_error!("let x = <<<<1:1>>:bytes>> x");
}
#[test]
fn add_int_float() {
assert_error!("1 + 1.0");
}
#[test]
fn add_f_int_float() {
assert_error!("1 +. 1.0");
}
#[test]
fn int_eq_float() {
assert_error!("1 == 1.0");
}
#[test]
fn int_gt_float() {
assert_error!("1 > 1.0");
}
#[test]
fn float_gtf_int() {
assert_error!("1.0 >. 1");
}
#[test]
fn fn0_eq_fn1() {
assert_error!("fn() { 1 } == fn(x) { x + 1 }");
}
#[test]
fn unknown_variable() {
assert_error!("x");
}
#[test]
fn unknown_variable_type() {
assert_error!("Int");
}
#[test]
fn unknown_module() {
assert_module_error!("import xpto");
}
#[test]
fn unknown_variable_2() {
assert_error!("case 1 { x -> 1 1 -> x }");
}
#[test]
fn unknown_variable_3() {
assert_error!("let add = fn(x, y) { x + y } 1 |> add(unknown)");
}
#[test]
fn incorrect_arity_error() {
assert_error!("let id = fn(x) { x } id()");
}
#[test]
fn incorrect_arity_error_2() {
assert_error!("let id = fn(x) { x } id(1, 2)");
}
#[test]
fn case_clause_mismatch() {
assert_error!("case 1 { a -> 1 b -> 2.0 }");
}
#[test]
fn case_subject_pattern_unify() {
assert_error!("case 1.0 { 1 -> 1 }");
}
#[test]
fn case_subject_pattern_unify_2() {
assert_error!("case 1 { 1.0 -> 1 }");
}
#[test]
fn case_operator_unify_situation() {
assert_error!("case 1, 2.0 { a, b -> a + b }");
}
#[test]
fn case_could_not_unify() {
assert_error!("case 1, 2.0 { a, b -> a 1, 2 -> 0 }");
}
#[test]
fn assigned_function_annotation() {
assert_error!("let f = fn(x: Int) { x } f(1.0)");
}
#[test]
fn function_return_annotation() {
assert_error!("fn() -> Int { 2.0 }");
}
#[test]
fn function_arg_and_return_annotation() {
assert_error!("fn(x: Int) -> Float { x }");
}
// https://github.com/gleam-lang/gleam/issues/1378
#[test]
fn function_return_annotation_mismatch_with_pipe() {
assert_module_error!(
"pub fn main() -> String {
1
|> add_two
}
fn add_two(i: Int) -> Int {
i + 2
}"
);
}
#[test]
fn functions_called_outside_module() {
assert_module_syntax_error!("const first = list.at([1], 0)");
}
#[test]
fn pipe_mismatch_error() {
assert_module_error!(
"pub fn main() -> String {
Orange
|> eat_veggie
}
type Fruit{ Orange }
type Veg{ Lettuce }
fn eat_veggie(v: Veg) -> String {
\"Ok\"
}"
);
}
#[test]
fn pipe_value_type_mismatch_error() {
assert_module_error!(
"pub fn main() -> String {
eat_veggie
|> Orange
}
type Fruit{ Orange }
type Veg{ Lettuce }
fn eat_veggie(v: Veg) -> String {
\"Ok\"
}"
);
}
#[test]
fn case_tuple_guard() {
assert_error!("case #(1, 2, 3) { x if x == #(1, 1.0) -> 1 }");
}
#[test]
fn case_list_guard() {
assert_error!("case [1] { x if x == [1, 2.0] -> 1 _ -> 2 }");
}
#[test]
fn case_tuple_guard_2() {
assert_error!("case #(1, 2) { x if x == #(1, 1.0) -> 1 }");
}
#[test]
fn case_int_tuple_guard() {
assert_error!("case 1 { x if x == #() -> 1 }");
}
#[test]
fn wrong_number_of_subjects() {
assert_error!("case 1 { _, _ -> 1 }");
}
#[test]
fn wrong_number_of_subjects_alternative_patterns() {
assert_error!("case 1 { _, _ | _ | _, _, _ -> 1 }");
}
#[test]
fn recursive_var() {
assert_error!("let id = fn(x) { x(x) } 1");
}
#[test]
fn true_fn() {
assert_error!("let True(x) = 1");
}
#[test]
fn ok_2_args() {
assert_error!("let Ok(1, x) = 1");
}
#[test]
fn access_int() {
assert_error!("let x = 1 x.whatever");
}
#[test]
fn tuple_2_3() {
assert_error!("#(1, 2) == #(1, 2, 3)");
}
#[test]
fn tuple_int_float() {
assert_error!("#(1.0, 2, 3) == #(1, 2, 3)");
}
#[test]
fn tuple_int() {
assert_error!("let #(a, b) = 1");
}
#[test]
fn int_float_list() {
assert_error!("[1.0] == [1]");
}
#[test]
fn guard_int_float_eq_vars() {
assert_error!("let x = 1 let y = 1.0 case x { _ if x == y -> 1 }");
}
#[test]
fn guard_float_int_eq_vars() {
assert_error!("let x = 1.0 let y = 1 case x { _ if x == y -> 1 }");
}
#[test]
fn guard_if_float() {
assert_error!("let x = 1.0 case x { _ if x -> 1 }");
}
#[test]
fn case() {
assert_error!("case #(1, 1.0) { #(x, _) | #(_, x) -> 1 }");
}
#[test]
fn case2() {
assert_error!("case [3.33], 1 { x, y if x > y -> 1 }");
}
#[test]
fn case3() {
assert_error!("case 1, 2.22, \"three\" { x, _, y if x > y -> 1 }");
}
#[test]
fn case4() {
assert_error!("case [3.33], 1 { x, y if x >= y -> 1 }");
}
#[test]
fn case5() {
assert_error!("case 1, 2.22, \"three\" { x, _, y if x >= y -> 1 }");
}
#[test]
fn case6() {
assert_error!("case [3.33], 1 { x, y if x < y -> 1 }");
}
#[test]
fn case7() {
assert_error!("case 1, 2.22, \"three\" { x, _, y if x < y -> 1 }");
}
#[test]
fn case8() {
assert_error!("case [3.33], 1 { x, y if x <= y -> 1 }");
}
#[test]
fn case9() {
assert_error!("case 1, 2.22, \"three\" { x, _, y if x <= y -> 1 }");
}
#[test]
fn case10() {
assert_error!("case [3], 1.1 { x, y if x >. y -> 1 }");
}
#[test]
fn case11() {
assert_error!("case 2.22, 1, \"three\" { x, _, y if x >. y -> 1 }");
}
#[test]
fn case12() {
assert_error!("case [3], 1.1 { x, y if x >=. y -> 1 }");
}
#[test]
fn case13() {
assert_error!("case 2.22, 1, \"three\" { x, _, y if x >=. y -> 1 }");
}
#[test]
fn case14() {
assert_error!("case [3], 1.1 { x, y if x <. y -> 1 }");
}
#[test]
fn case15() {
assert_error!("case 2.22, 1, \"three\" { x, _, y if x <. y -> 1 }");
}
#[test]
fn case16() {
assert_error!("case [3], 1.1 { x, y if x <=. y -> 1 }");
}
#[test]
fn case17() {
assert_error!("case 2.22, 1, \"three\" { x, _, y if x <=. y -> 1 }");
}
#[test]
fn case18() {
assert_error!("case 1 { x if x == \"x\" -> 1 }");
}
#[test]
fn case19() {
assert_error!("case [1] { [x] | x -> 1 }");
}
#[test]
fn case20() {
assert_error!("case [1] { [x] | [] as x -> 1 }");
}
#[test]
fn extra_var_inalternative() {
assert_error!("case [1] { [x] | [x, y] -> 1 }");
}
#[test]
fn extra_var_inalternative2() {
assert_error!("case #(1, 2) { #(1, y) | #(x, y) -> 1 }");
}
#[test]
fn extra_var_inalternative3() {
assert_error!("let x = 1 case #(1, 2) { #(1, y) | #(x, y) -> 1 }");
}
#[test]
fn tuple_arity() {
// https://github.com/gleam-lang/gleam/issues/714
assert_error!("case #(1, 2) { #(1, _, _, _) -> 1 }");
}
#[test]
fn duplicate_vars() {
assert_error!("case #(1, 2) { #(x, x) -> 1 }");
}
#[test]
fn duplicate_vars_2() {
assert_error!("case [3.33], 1 { x, x -> 1 }");
}
#[test]
fn duplicate_vars_3() {
assert_error!("case [1, 2, 3] { [x, x, y] -> 1 }");
}
#[test]
fn tuple_index_out_of_bounds() {
assert_error!("#(0, 1).2");
}
#[test]
fn tuple_index_not_a_tuple() {
assert_error!("Nil.2");
}
#[test]
fn tuple_index_not_a_tuple_unbound() {
assert_error!("fn(a) { a.2 }");
}
#[test]
fn unknown_accessed_type() {
assert_error!("fn(a) { a.field }");
}
#[test]
fn unknown_field() {
assert_error!("fn(a: a) { a.field }");
}
#[test]
fn field_not_in_all_variants() {
assert_module_error!(
"
pub type Person {
Teacher(name: String, age: Int, title: String)
Student(name: String, age: Int)
}
pub fn get_title(person: Person) { person.title }"
);
}
#[test]
fn field_not_in_any_variant() {
assert_module_error!(
"
pub type Person {
Teacher(name: String, age: Int, title: String)
Student(name: String, age: Int)
}
pub fn get_height(person: Person) { person.height }"
);
}
#[test]
fn field_type_different_between_variants() {
assert_module_error!(
"
pub type Shape {
Square(x: Int, y: Int)
Rectangle(x: String, y: String)
}
pub fn get_x(shape: Shape) { shape.x }
"
);
}
#[test]
fn accessor_multiple_variants_multiple_positions() {
// We cannot access fields on custom types with multiple variants where they are in different positions e.g. 2nd and 3rd
assert_module_error!(
"
pub type Person {
Teacher(name: String, title: String, age: Int)
Student(name: String, age: Int)
}
pub fn get_name(person: Person) { person.name }
pub fn get_age(person: Person) { person.age }"
);
}
#[test]
fn accessor_multiple_variants_multiple_positions2() {
// We cannot access fields on custom types with multiple variants where they are in different positions e.g. 1st and 3rd
assert_module_error!(
"
pub type Person {
Teacher(title: String, age: Int, name: String)
Student(name: String, age: Int)
}
pub fn get_name(person: Person) { person.name }
pub fn get_age(person: Person) { person.age }"
);
}
#[test]
fn record_access_on_inferred_variant_when_field_is_in_other_variants() {
assert_module_error!(
"
pub type Wibble {
Wibble(wibble: Int)
Wobble(wobble: Int)
}
pub fn main() {
let always_wibble = Wibble(10)
always_wibble.wobble
}
"
);
}
#[test]
fn module_could_not_unify() {
assert_module_error!("fn go() { 1 + 2.0 }");
}
#[test]
fn module_could_not_unify2() {
assert_module_error!("fn go() { 1 + 2.0 }");
}
#[test]
fn module_could_not_unify3() {
assert_module_error!(
"
fn id(x: a, y: a) { x }
pub fn x() { id(1, 1.0) }"
);
}
#[test]
fn module_could_not_unify4() {
assert_module_error!(
"
fn wobble() -> Int {
5
}
fn run(one: fn() -> String) {
one()
}
fn demo() {
run(wobble)
}"
);
}
#[test]
fn module_could_not_unify5() {
assert_module_error!(
"
fn wobble(x: Int) -> Int {
x * 5
}
fn run(one: fn(String) -> Int) {
one(\"one.\")
}
fn demo() {
run(wobble)
}"
);
}
#[test]
fn module_could_not_unify6() {
assert_module_error!("fn main() { let x: String = 5 x }");
}
#[test]
fn module_could_not_unify7() {
assert_module_error!("fn main() { let assert 5 = \"\" }");
}
#[test]
fn module_could_not_unify8() {
assert_module_error!("fn main() { let x: #(x, x) = #(5, 5.0) x }");
}
#[test]
fn module_could_not_unify9() {
assert_module_error!("fn main() { let assert [1, 2, ..x]: List(String) = [1,2,3] x }");
}
#[test]
fn module_could_not_unify10() {
assert_module_error!(
"fn main() {
let #(y, [..x]): #(x, List(x)) = #(\"one\", [1,2,3])
x
}"
);
}
#[test]
fn module_could_not_unify11() {
assert_module_error!(
"
pub type Box(inner) {
Box(inner)
}
pub fn create_int_box(value: Int) {
let x: Box(Float) = Box(value)
x
}"
);
}
#[test]
fn module_could_not_unify12() {
assert_module_error!(
"
pub type Person {
Person(name: String, age: Int)
}
pub fn create_person(age: Float) {
let x: Person = Person(name: \"Quinn\", age: age)
x
}"
);
}
#[test]
fn module_arity_error() {
assert_module_error!("fn go(x: List(a, b)) -> Int { 1 }");
}
#[test]
fn module_private_type_leak_1() {
assert_module_error!(
r#"type PrivateType
@external(erlang, "a", "b")
pub fn leak_type() -> PrivateType
"#
);
}
#[test]
fn module_private_type_leak_2() {
assert_module_error!(
r#"type PrivateType
@external(erlang, "a", "b")
fn go() -> PrivateType
pub fn leak_type() { go() }"#
);
}
#[test]
fn module_private_type_leak_3() {
assert_module_error!(
r#"type PrivateType
@external(erlang, "a", "b")
fn go() -> PrivateType
pub fn leak_type() { [go()] }"#
);
}
#[test]
fn module_private_type_leak_4() {
assert_module_error!(
r#"type PrivateType
@external(erlang, "a", "b")
pub fn go(x: PrivateType) -> Int"#
);
}
#[test]
fn module_private_type_leak_5() {
assert_module_error!(
r#"type PrivateType
pub type LeakType { Variant(PrivateType) }"#
);
}
// https://github.com/gleam-lang/gleam/issues/3387
// Private types should not leak even in internal modules
#[test]
fn module_private_type_leak_6() {
assert_internal_module_error!(
r#"type PrivateType
pub type LeakType { Variant(PrivateType) }"#
);
}
// --- Argument label and unknown-name errors -----------------------------
#[test]
fn unexpected_labelled_arg() {
// Label supplied for a function whose parameter has no label.
assert_module_error!(r#"fn id(x) { x } fn y() { id(x: 4) }"#);
}
#[test]
fn unexpected_arg_with_label_shorthand() {
// Same as above but using the `x:` label shorthand syntax.
assert_module_error!(
r#"
fn id(x) { x }
fn y() {
let x = 4
id(x:)
}
"#
);
}
#[test]
fn positional_argument_after_labelled() {
assert_module_error!(
r#"type X { X(a: Int, b: Int, c: Int) }
fn x() { X(b: 1, a: 1, 1) }"#
);
}
#[test]
fn positional_argument_after_one_using_label_shorthand() {
assert_module_error!(
r#"type X { X(a: Int, b: Int, c: Int) }
fn x() {
let b = 1
let a = 1
X(b:, a:, 1)
}"#
);
}
#[test]
fn unknown_type() {
// `x` is an undeclared type in a constructor field annotation.
assert_module_error!(r#"type Thing { Thing(unknown: x) }"#);
}
#[test]
fn unknown_type_in_alias() {
// We cannot refer to unknown types in an alias
assert_module_error!("type IntMap = IllMap(Int, Int)");
}
#[test]
fn unknown_type_in_alias2() {
// We cannot refer to unknown types in an alias
assert_module_error!("type IntMap = Map(Inf, Int)");
}
#[test]
fn unknown_type_var_in_alias2() {
// We cannot use undeclared type vars in a type alias
assert_module_error!("type X = List(a)");
}
// NOTE(review): "gaurd" is a typo for "guard" in this test's name; left
// as-is since renaming a test would orphan its stored snapshot — confirm
// before fixing.
#[test]
fn module_non_local_gaurd_var() {
// A guard may not reference a function; `one` here is not a local var.
assert_module_error!(
r#"fn one() { 1 }
fn main() { case 1 { _ if one -> 1 } }"#
);
}
#[test]
fn unknown_record_field() {
// An unknown field should report the possible fields' labels
assert_module_error!(
"
pub type Box(a) { Box(inner: a) }
pub fn main(box: Box(Int)) { box.unknown }
"
);
}
#[test]
fn unknown_record_field_2() {
// An unknown field should report the possible fields' labels
assert_module_error!(
"
pub type Box(a) { Box(inner: a) }
pub fn main(box: Box(Box(Int))) { box.inner.unknown }"
);
}
#[test]
fn unnecessary_spread_operator() {
// `..` in a pattern that already names every field is an error.
assert_module_error!(
"
type Triple {
Triple(a: Int, b: Int, c: Int)
}
fn main() {
let triple = Triple(1,2,3)
let Triple(a, b, c, ..) = triple
}"
);
}
#[test]
fn duplicate_var_in_record_pattern() {
// Duplicate var in record
assert_module_error!(
r#"type X { X(a: Int, b: Int, c: Int) }
fn x() {
case X(1,2,3) { X(x, y, x) -> 1 }
}"#
);
}
#[test]
fn duplicate_label_shorthands_in_record_pattern() {
// Duplicate var in record
assert_module_error!(
r#"type X { X(a: Int, b: Int, c: Int) }
fn x() {
case X(1,2,3) { X(a:, b:, c: a) -> 1 }
}"#
);
}
#[test]
fn guard_record_wrong_arity() {
// Constructor in guard clause errors
assert_module_error!(
r#"type X { X(a: Int, b: Float) }
fn x() {
case X(1, 2.0) { x if x == X(1) -> 1 _ -> 2 }
}"#
);
}
#[test]
fn subject_int_float_guard_tuple() {
// Constructor arguments swapped (Float, Int) inside a guard comparison.
assert_module_error!(
r#"type X { X(a: Int, b: Float) }
fn x() { case X(1, 2.0) { x if x == X(2.0, 1) -> 1 _ -> 2 } }"#
);
}
#[test]
fn type_variables_in_body() {
// Type variables are shared between function annotations and let annotations within their body
assert_module_error!(
"
pub type Box(a) {
Box(value: a)
}
pub fn go(box1: Box(a), box2: Box(b)) {
let _: Box(a) = box2
let _: Box(b) = box1
5
}"
);
}
// --- Duplicate top-level definition errors ------------------------------
// A module may not define two values or two types with the same name,
// in any combination of fn / external fn / const / type / alias.
#[test]
fn duplicate_function_names() {
// We cannot declare two functions with the same name in a module
assert_module_error!(
"fn dupe() { 1 }
fn dupe() { 2 }"
);
}
#[test]
fn duplicate_function_names_2() {
// Different types to force a unify error if we don't detect the
// duplicate during refactoring.
assert_module_error!(
"fn dupe() { 1 }
fn dupe() { 2.0 }"
);
}
#[test]
fn duplicate_function_names_3() {
// Duplicates with different arities are still duplicates.
assert_module_error!(
"fn dupe() { 1 }
fn dupe(x) { x }"
);
}
#[test]
fn duplicate_function_names_4() {
// A regular fn followed by an external fn of the same name.
assert_module_error!(
r#"fn dupe() { 1 }
@external(erlang, "a", "b")
fn dupe(x) -> x
"#
);
}
#[test]
fn duplicate_function_names_5() {
// External fn first, regular fn second — same error either order.
assert_module_error!(
r#"
@external(erlang, "a", "b")
fn dupe(x) -> x
fn dupe() { 1 }
"#
);
}
#[test]
fn duplicate_constructors() {
// We cannot declare two type constructors with the same name in a module
assert_module_error!(
"type Box { Box(x: Int) }
type Boxy { Box(Int) }"
);
}
#[test]
fn duplicate_constructors2() {
// We cannot declare two type constructors with the same name in a module
assert_module_error!(
"type Boxy { Box(Int) }
type Box { Box(x: Int) }"
);
}
#[test]
fn duplicate_constructors3() {
// We cannot declare two type constructors with the same name in a module
assert_module_error!("type Boxy { Box(Int) Box(Float) }");
}
#[test]
fn duplicate_alias_names() {
// We cannot reuse an alias name in the same module
assert_module_error!("type X = Int type X = Int");
}
#[test]
fn duplicate_custom_type_names() {
// We cannot declare two types with the same name in a module
assert_module_error!("type DupType { A } type DupType { B }");
}
#[test]
fn duplicate_const_names() {
// We cannot declare two const with the same name in a module
assert_module_error!(
"const duplicate = 1
pub const duplicate = 1"
);
}
#[test]
fn duplicate_const_and_function_names_const_fn() {
// We cannot declare const and functions with the same name in a module
// https://github.com/gleam-lang/gleam/issues/2069
assert_module_error!(
"const duplicate = 1
fn duplicate() { 2 }"
);
}
// The following tests cover every ordered pairing of const / fn /
// external-fn sharing a name.
#[test]
fn duplicate_const_const() {
assert_module_error!(
"const wibble = 1
const wibble = 2"
);
}
#[test]
fn duplicate_fn_fn() {
assert_module_error!(
"fn wibble() { 1 }
fn wibble() { 2 }"
);
}
#[test]
fn duplicate_extfn_extfn() {
assert_module_error!(
r#"
@external(erlang, "module1", "function1")
fn wibble() -> Float
@external(erlang, "module2", "function2")
fn wibble() -> Float
"#
);
}
#[test]
fn duplicate_extfn_fn() {
assert_module_error!(
"
@external(erlang, \"module1\", \"function1\")
fn wibble() -> Float
fn wibble() { 2 }"
);
}
#[test]
fn duplicate_fn_extfn() {
assert_module_error!(
"fn wibble() { 1 }
@external(erlang, \"module2\", \"function2\")
fn wibble() -> Float
"
);
}
#[test]
fn duplicate_const_extfn() {
assert_module_error!(
"const wibble = 1
@external(erlang, \"module2\", \"function2\")
fn wibble() -> Float
"
);
}
#[test]
fn duplicate_extfn_const() {
assert_module_error!(
"
@external(erlang, \"module1\", \"function1\")
fn wibble() -> Float
const wibble = 2"
);
}
#[test]
fn duplicate_const_fn() {
assert_module_error!(
"const wibble = 1
fn wibble() { 2 }"
);
}
#[test]
fn duplicate_fn_const() {
assert_module_error!(
"fn wibble() { 1 }
const wibble = 2"
);
}
// --- Naming convention errors -------------------------------------------
// Gleam requires snake_case for values/labels and UpperCamelCase for
// types/constructors; each snippet violates this in a different position
// (consts, params, labels, constructors, patterns, type parameters, ...).
#[test]
fn invalid_const_name() {
assert_module_error!("const myInvalid_Constant = 42");
}
#[test]
fn invalid_parameter_name() {
assert_module_error!("fn add(numA: Int, num_b: Int) { numA + num_b }");
}
#[test]
fn invalid_parameter_name2() {
assert_module_error!("fn pass(label paramName: Bool) { paramName }");
}
#[test]
fn invalid_parameter_name3() {
// Same rule inside an anonymous function (checked via expression-level
// `assert_error!` rather than the module-level macro).
assert_error!("let add = fn(numA: Int, num_b: Int) { numA + num_b }");
}
#[test]
fn invalid_parameter_discard_name() {
// Discard names (leading underscore) must still be snake_case.
assert_module_error!("fn ignore(_ignoreMe: Bool) { 98 }");
}
#[test]
fn invalid_parameter_discard_name2() {
assert_module_error!("fn ignore(labelled_discard _ignoreMe: Bool) { 98 }");
}
#[test]
fn invalid_parameter_discard_name3() {
assert_error!("let ignore = fn(_ignoreMe: Bool) { 98 }");
}
#[test]
fn invalid_parameter_label() {
assert_module_error!("fn func(thisIsALabel param: Int) { param }");
}
#[test]
fn invalid_parameter_label2() {
assert_module_error!("fn ignore(thisIsALabel _ignore: Int) { 25 }");
}
#[test]
fn invalid_constructor_name() {
// Constructors must be UpperCamelCase with no underscores.
assert_module_error!("type MyType { Int_Value(Int) }");
}
#[test]
fn invalid_constructor_arg_name() {
assert_module_error!("type IntWrapper { IntWrapper(innerInt: Int) }");
}
#[test]
fn invalid_custom_type_name() {
assert_module_error!("type Boxed_value { Box(Int) }");
}
#[test]
fn invalid_type_alias_name() {
assert_module_error!("type Fancy_Bool = Bool");
}
#[test]
fn invalid_function_name() {
assert_module_error!("fn doStuff() {}");
}
#[test]
fn invalid_variable_name() {
assert_error!("let theAnswer = 42");
}
#[test]
fn invalid_variable_discard_name() {
assert_error!("let _boringNumber = 72");
}
#[test]
fn invalid_use_name() {
// `use` bindings follow the same snake_case rule.
assert_module_error!(
"fn use_test(f) { f(Nil) }
pub fn main() { use useVar <- use_test() }"
);
}
#[test]
fn invalid_use_discard_name() {
assert_module_error!(
"fn use_test(f) { f(Nil) }
pub fn main() { use _discardVar <- use_test() }"
);
}
#[test]
fn invalid_pattern_assignment_name() {
assert_error!("let assert 42 as theAnswer = 42");
}
#[test]
fn invalid_list_pattern_name() {
assert_error!("let assert [theElement] = [9.4]");
}
#[test]
fn invalid_list_pattern_discard_name() {
assert_error!("let assert [_elemOne] = [False]");
}
#[test]
fn invalid_constructor_pattern_name() {
assert_module_error!(
"pub type Box { Box(Int) } pub fn main() { let Box(innerValue) = Box(203) }"
);
}
#[test]
fn invalid_constructor_pattern_discard_name() {
assert_module_error!(
"pub type Box { Box(Int) } pub fn main() { let Box(_ignoredInner) = Box(203)}"
);
}
#[test]
fn invalid_tuple_pattern_name() {
assert_error!("let #(a, secondValue) = #(1, 2)");
}
#[test]
fn invalid_tuple_pattern_discard_name() {
assert_error!("let #(a, _secondValue) = #(1, 2)");
}
#[test]
fn invalid_bit_array_pattern_name() {
assert_error!("let assert <<bitValue>> = <<73>>");
}
#[test]
fn invalid_bit_array_pattern_discard_name() {
assert_error!("let assert <<_iDontCare>> = <<97>>");
}
#[test]
fn invalid_string_prefix_pattern_name() {
assert_error!(r#"let assert "prefix" <> coolSuffix = "prefix-suffix""#);
}
#[test]
fn invalid_string_prefix_pattern_discard_name() {
assert_error!(r#"let assert "prefix" <> _boringSuffix = "prefix-suffix""#);
}
#[test]
fn invalid_string_prefix_pattern_alias() {
assert_error!(r#"let assert "prefix" as thePrefix <> _suffix = "prefix-suffix""#);
}
#[test]
fn invalid_case_variable_name() {
assert_error!("case 21 { twentyOne -> {Nil} }");
}
#[test]
fn invalid_case_variable_discard_name() {
assert_error!("case 21 { _twentyOne -> {Nil} }");
}
#[test]
fn invalid_type_parameter_name() {
// Type parameters are values-namespace names: snake_case required.
assert_module_error!("type Wrapper(innerType) {}");
}
#[test]
fn invalid_type_alias_parameter_name() {
assert_module_error!("type GleamOption(okType) = Result(okType, Nil)");
}
#[test]
fn invalid_function_type_parameter_name() {
assert_module_error!("fn identity(value: someType) { value }");
}
#[test]
fn correct_pipe_arity_error_location() {
// https://github.com/gleam-lang/gleam/issues/672
assert_module_error!(
"fn x(x, y) { x }
fn main() { 1 |> x() }"
);
}
// --- Constant annotation / value mismatch errors ------------------------
#[test]
fn const_annotation_wrong() {
assert_module_error!("pub const group_id: Int = \"42\"");
}
#[test]
fn const_annotation_wrong_2() {
assert_module_error!("pub const numbers: List(Int) = [1, 2, 2.3]");
}
#[test]
fn const_annotation_wrong_3() {
assert_module_error!("pub const numbers: List(Int) = [1.1, 2.2, 3.3]");
}
#[test]
fn const_annotation_wrong_4() {
assert_module_error!("pub const pair: #(Int, Float) = #(4.1, 1)");
}
// The `const_multiple_errors_*` tests check that one bad constant does
// not suppress the errors for the constants that follow it.
#[test]
fn const_multiple_errors_mismatched_types() {
assert_module_error!(
"const mismatched_types: String = 7
const invalid_annotation: MyInvalidType = \"str\""
);
}
#[test]
fn const_multiple_errors_invalid_annotation() {
assert_module_error!(
"const invalid_annotation: MyInvalidType = \"str\"
const invalid_value: String = MyInvalidValue"
);
}
#[test]
fn const_multiple_errors_invalid_value() {
assert_module_error!(
"const invalid_value: String = MyInvalidValue
const invalid_unannotated_value = [1, 2.0]"
);
}
#[test]
fn const_multiple_errors_invalid_unannotated_value() {
assert_module_error!(
"const invalid_unannotated_value = [1, 2.0]
const invalid_everything: MyInvalidType = MyInvalidValue"
);
}
#[test]
fn const_multiple_errors_invalid_annotation_and_value() {
assert_module_error!(
"const invalid_everything: MyInvalidType = MyInvalidValue
const mismatched_types: String = 7"
);
}
#[test]
fn const_multiple_errors_are_local_with_annotation() {
// Errors in one constant must not poison later constants that refer
// to it — each assignment should report its own local error.
assert_module_error!(
"const num: String = 7
const tpl: String = #(Ok(1), MyInvalidType, 3)
const assignment1: String = num
const assignment2: String = tpl"
);
}
#[test]
fn const_multiple_errors_are_local_with_inferred_value() {
assert_module_error!(
"const str: MyInvalidType = \"str\"
const assignment: String = str"
);
}
#[test]
fn const_multiple_errors_are_local_with_unbound_value() {
assert_module_error!(
"const lst = [1, 2.0]
const unbound: MyInvalidType = MyInvalidType
const assignment1: String = lst
const assignment2: String = unbound"
);
}
#[test]
fn const_usage_wrong() {
// Comparing an Int against a tuple-typed constant.
assert_module_error!(
"const pair = #(1, 2.0)
fn main() { 1 == pair }"
);
}
// NOTE(review): "heterogenus" is a typo for "heterogeneous"; kept as-is
// because renaming the test would orphan its stored snapshot — confirm
// before fixing.
#[test]
fn const_heterogenus_list() {
assert_module_error!("const pair = [1, 1.0]");
}
#[test]
fn custom_type_module_constants() {
// Referencing a constructor through an unknown module in a constant.
assert_module_error!(
r#"type X { X }
const x = unknown.X"#
);
}
#[test]
fn unknown_label() {
// Label `c` does not exist on constructor X.
assert_module_error!(
r#"type X { X(a: Int, b: Float) }
fn x() {
let x = X(a: 1, c: 2.0)
x
}"#
);
}
#[test]
fn unknown_label_shorthand() {
// Same as above, via the `c:` label shorthand.
assert_module_error!(
r#"type X { X(a: Int, b: Float) }
fn x() {
let c = 2.0
let x = X(a: 1, c:)
x
}"#
);
}
#[test]
fn wrong_type_var() {
// A unification error should show the type var as named by user
// See https://github.com/gleam-lang/gleam/issues/1256
assert_module_error!(
r#"fn wibble(x: String) { x }
fn multi_result(x: some_name) {
wibble(x)
}"#
);
}
#[test]
fn wrong_type_arg() {
assert_module_error!(
r#"
fn wibble(x: List(Int)) { x }
fn main(y: List(something)) {
wibble(y)
}"#
);
}
#[test]
fn wrong_type_ret() {
// See https://github.com/gleam-lang/gleam/pull/1407#issuecomment-1001162876
assert_module_error!(
r#"pub fn main(x: something) -> Int {
let y = x
y
}"#
);
}
// --- Record update (`Constructor(..record, ...)`) error tests -----------
#[test]
fn wrong_type_update() {
// A variable of the wrong type given to a record update
assert_module_error!(
"
pub type Person {
Person(name: String, age: Int)
}
pub type Box(a) {
Box(a)
}
pub fn update_person(person: Person, box: Box(a)) {
Person(..box)
}"
);
}
#[test]
fn unknown_variable_update() {
// An undefined variable given to a record update
assert_module_error!(
"
pub type Person {
Person(name: String, age: Int)
}
pub fn update_person() {
Person(..person)
}"
);
}
#[test]
fn unknown_field_update() {
// An unknown field given to a record update
assert_module_error!(
"
pub type Person {
Person(name: String)
}
pub fn update_person(person: Person) {
Person(..person, one: 5)
}"
);
}
#[test]
fn unknown_field_update2() {
// An unknown field given to a record update
assert_module_error!(
"
pub type Person {
Person(name: String, age: Int, size: Int)
}
pub fn update_person(person: Person) {
Person(..person, size: 66, one: 5, age: 3)
}"
);
}
#[test]
fn unknown_constructor_update() {
// An unknown record constructor being used in a record update
assert_module_error!(
"
pub type Person {
Person(name: String, age: Int)
}
pub fn update_person(person: Person) {
NotAPerson(..person)
}"
);
}
#[test]
fn not_a_constructor_update() {
// Something other than a record constructor being used in a record update
assert_module_error!(
"
pub type Person {
Person(name: String, age: Int)
}
pub fn identity(a) { a }
pub fn update_person(person: Person) {
identity(..person)
}"
);
}
| rust | Apache-2.0 | f424547f02e621f1c5f28749786e05eda7feb098 | 2026-01-04T15:40:22.554517Z | true |
gleam-lang/gleam | https://github.com/gleam-lang/gleam/blob/f424547f02e621f1c5f28749786e05eda7feb098/compiler-core/src/type_/tests/exhaustiveness.rs | compiler-core/src/type_/tests/exhaustiveness.rs | use crate::{assert_error, assert_module_error, assert_no_warnings, assert_warning};
// --- Basic exhaustiveness checks ----------------------------------------
// `assert_no_warnings!` expects the case expression to be exhaustive and
// redundancy-free; `assert_module_error!` expects an inexhaustive-match
// error. Macros are defined elsewhere in this crate (presumably
// snapshot-based — the embedded Gleam source must stay byte-identical).
#[test]
fn whatever() {
// Catch-all `_` alone is exhaustive for any type.
assert_no_warnings!(
"
pub fn main(x) {
case x {
_ -> 0
}
}
"
);
}
#[test]
fn nil() {
// Nil has a single value, so one arm suffices.
assert_no_warnings!(
"
pub fn main(x) {
case x {
Nil -> 0
}
}
"
);
}
#[test]
fn bool() {
assert_no_warnings!(
"
pub fn main(x) {
case x {
True -> 1
False -> 0
}
}
"
);
}
#[test]
fn bool_true() {
// Missing the False arm.
assert_module_error!(
"
pub fn main(x) {
case x {
True -> 1
}
}
"
);
}
#[test]
fn bool_false() {
// Missing the True arm.
assert_module_error!(
"
pub fn main(x) {
case x {
False -> 1
}
}
"
);
}
#[test]
fn result() {
assert_no_warnings!(
"
pub fn main(x) {
case x {
Ok(_) -> 1
Error(_) -> 2
}
}
"
);
}
#[test]
fn result_ok() {
assert_module_error!(
"
pub fn main(x) {
case x {
Ok(_) -> 1
}
}
"
);
}
#[test]
fn result_error() {
assert_module_error!(
"
pub fn main(x) {
case x {
Error(_) -> 1
}
}
"
);
}
#[test]
fn result_nil() {
assert_no_warnings!(
"
pub fn main(x) {
case x {
Ok(Nil) -> 1
Error(Nil) -> 2
}
}
"
);
}
#[test]
fn result_nil_ok() {
assert_module_error!(
"
pub fn main(x) {
case x {
Ok(Nil) -> 1
}
}
"
);
}
#[test]
fn result_nil_error() {
assert_module_error!(
"
pub fn main(x) {
case x {
Error(Nil) -> 1
}
}
"
);
}
// The `result_bool_*` tests enumerate every proper subset of the four
// Ok/Error x True/False combinations; only the full set is exhaustive.
#[test]
fn result_bool() {
assert_no_warnings!(
"
pub fn main(x) {
case x {
Ok(True) -> 1
Ok(False) -> 3
Error(True) -> 2
Error(False) -> 4
}
}
"
);
}
#[test]
fn result_bool_1() {
assert_module_error!(
"
pub fn main(x) {
case x {
Ok(False) -> 1
Error(True) -> 2
Error(False) -> 3
}
}
"
);
}
#[test]
fn result_bool_2() {
assert_module_error!(
"
pub fn main(x) {
case x {
Ok(True) -> 1
Error(True) -> 2
Error(False) -> 3
}
}
"
);
}
#[test]
fn result_bool_3() {
assert_module_error!(
"
pub fn main(x) {
case x {
Ok(True) -> 1
Ok(False) -> 2
Error(False) -> 3
}
}
"
);
}
#[test]
fn result_bool_4() {
assert_module_error!(
"
pub fn main(x) {
case x {
Ok(True) -> 1
Ok(False) -> 2
Error(True) -> 3
}
}
"
);
}
#[test]
fn result_bool_5() {
assert_module_error!(
"
pub fn main(x) {
case x {
Ok(True) -> 1
Ok(False) -> 2
}
}
"
);
}
#[test]
fn result_bool_6() {
assert_module_error!(
"
pub fn main(x) {
case x {
Error(True) -> 1
Error(False) -> 2
}
}
"
);
}
#[test]
fn result_bool_7() {
assert_module_error!(
"
pub fn main(x) {
case x {
Error(True) -> 1
}
}
"
);
}
#[test]
fn result_bool_8() {
assert_module_error!(
"
pub fn main(x) {
case x {
Ok(False) -> 1
}
}
"
);
}
// --- List, String and BitArray exhaustiveness ---------------------------
// Lists are infinite-depth: exhaustiveness needs both `[]` and a
// rest-pattern arm. Strings and bit arrays have infinitely many values,
// so literal arms alone are never exhaustive — a catch-all is required.
#[test]
fn list() {
assert_no_warnings!(
"
pub fn main(x) {
case x {
[_, ..] -> 1
[] -> 2
}
}
"
);
}
#[test]
fn list_empty() {
assert_module_error!(
"
pub fn main(x) {
case x {
[] -> 1
}
}
"
);
}
#[test]
fn list_non_empty() {
assert_module_error!(
"
pub fn main(x) {
case x {
[_, ..] -> 1
}
}
"
);
}
#[test]
fn list_one() {
assert_module_error!(
"
pub fn main(x) {
case x {
[_] -> 1
}
}
"
);
}
#[test]
fn list_one_two() {
assert_module_error!(
"
pub fn main(x) {
case x {
[_] -> 1
[_, _] -> 1
}
}
"
);
}
#[test]
fn list_zero_one_two() {
// Fixed lengths 0, 1, 2 without a rest arm: still inexhaustive.
assert_module_error!(
"
pub fn main(x) {
case x {
[] -> 1
[_] -> 1
[_, _] -> 1
}
}
"
);
}
#[test]
fn list_zero_one_two_any() {
assert_no_warnings!(
"
pub fn main(x) {
case x {
[] -> 1
[_] -> 1
[_, _] -> 1
[_, _, ..] -> 1
}
}
"
);
}
#[test]
fn list_zero_two_any() {
// Length-1 lists are uncovered here.
assert_module_error!(
"
pub fn main(x) {
case x {
[] -> 1
[_, _] -> 1
[_, _, ..] -> 1
}
}
"
);
}
#[test]
fn string() {
assert_no_warnings!(
r#"
pub fn main(x) {
case x {
"" -> 1
"a" -> 1
"b" -> 1
_ -> 1
}
}
"#
);
}
#[test]
fn string_1() {
assert_module_error!(
r#"
pub fn main(x) {
case x {
"" -> 1
}
}
"#
);
}
#[test]
fn string_2() {
assert_module_error!(
r#"
pub fn main(x) {
case x {
"a" -> 1
}
}
"#
);
}
#[test]
fn string_3() {
assert_module_error!(
r#"
pub fn main(x) {
case x {
"a" -> 1
"b" -> 1
}
}
"#
);
}
#[test]
fn bit_array() {
assert_no_warnings!(
r#"
pub fn main(x) {
case x {
<<>> -> 1
<<1>> -> 1
<<2>> -> 1
_ -> 1
}
}
"#
);
}
#[test]
fn bit_array_1() {
assert_module_error!(
r#"
pub fn main(x) {
case x {
<<>> -> 1
<<1>> -> 1
<<2>> -> 1
}
}
"#
);
}
#[test]
fn bit_array_2() {
assert_module_error!(
r#"
pub fn main(x) {
case x {
<<>> -> 1
<<1>> -> 1
}
}
"#
);
}
// --- Int / Float exhaustiveness -----------------------------------------
// Numeric types have unbounded value sets, so literal arms alone are
// never exhaustive without a trailing catch-all.
#[test]
fn int() {
assert_no_warnings!(
r#"
pub fn main(x) {
case x {
0 -> 1
1 -> 1
2 -> 1
_ -> 1
}
}
"#
);
}
#[test]
fn int_1() {
assert_module_error!(
r#"
pub fn main(x) {
case x {
0 -> 1
1 -> 1
2 -> 1
}
}
"#
);
}
#[test]
fn int_2() {
assert_module_error!(
r#"
pub fn main(x) {
case x {
0 -> 1
1 -> 1
}
}
"#
);
}
#[test]
fn float() {
assert_no_warnings!(
r#"
pub fn main(x) {
case x {
0.0 -> 1
1.1 -> 1
2.2 -> 1
_ -> 1
}
}
"#
);
}
#[test]
fn float_1() {
assert_module_error!(
r#"
pub fn main(x) {
case x {
0.0 -> 1
1.1 -> 1
2.2 -> 1
}
}
"#
);
}
#[test]
fn float_2() {
assert_module_error!(
r#"
pub fn main(x) {
case x {
0.0 -> 1
1.1 -> 1
}
}
"#
);
}
// Nested exhaustiveness: Bool elements inside list patterns.
#[test]
fn list_bool_1() {
// `[False]` is uncovered.
assert_module_error!(
r#"
pub fn main(x) {
case x {
[] -> 1
[True] -> 2
[_, _, ..] -> 2
}
}
"#
);
}
#[test]
fn list_bool_2() {
// `[False]` and `[_, True]` are uncovered.
assert_module_error!(
r#"
pub fn main(x) {
case x {
[] -> 1
[True] -> 2
[_, False] -> 2
[_, _, _, ..] -> 2
}
}
"#
);
}
// --- Field discards, labels, guards and custom types --------------------
#[test]
fn discard_all_fields() {
// `Thing(..)` covers every value of a single-constructor type.
assert_no_warnings!(
r#"
pub type Thing {
Thing(a: Bool, b: Bool)
}
pub fn main(x) {
case x {
Thing(..) -> 1
}
}
"#
);
}
#[test]
fn discard_1() {
assert_no_warnings!(
r#"
pub type Thing {
Thing(a: Bool, b: Bool)
}
pub fn main(x) {
case x {
Thing(a: True, ..) -> 1
Thing(a: False, ..) -> 1
}
}
"#
);
}
#[test]
fn discard_2() {
assert_module_error!(
r#"
pub type Thing {
Thing(a: Bool, b: Bool)
}
pub fn main(x) {
case x {
Thing(a: True, ..) -> 1
}
}
"#
);
}
#[test]
fn discard_3() {
assert_module_error!(
r#"
pub type Thing {
Thing(a: Bool, b: Bool)
}
pub fn main(x) {
case x {
Thing(a: False, ..) -> 1
}
}
"#
);
}
// NOTE(review): discard_4 is byte-identical to discard_2, and discard_5
// to discard_3 — presumably a copy-paste leftover; the intended variation
// (perhaps on field `b`) may be missing. Worth confirming upstream.
#[test]
fn discard_4() {
assert_module_error!(
r#"
pub type Thing {
Thing(a: Bool, b: Bool)
}
pub fn main(x) {
case x {
Thing(a: True, ..) -> 1
}
}
"#
);
}
#[test]
fn discard_5() {
assert_module_error!(
r#"
pub type Thing {
Thing(a: Bool, b: Bool)
}
pub fn main(x) {
case x {
Thing(a: False, ..) -> 1
}
}
"#
);
}
#[test]
fn discard_6() {
// Positional (unlabelled) field pattern with a trailing discard.
assert_module_error!(
r#"
pub type Thing {
Thing(a: Bool, b: Bool)
}
pub fn main(x) {
case x {
Thing(False, ..) -> 1
}
}
"#
);
}
#[test]
fn label_1() {
// Out-of-order labelled patterns: `Thing(True, True)` and
// `Thing(False, False)` remain uncovered.
assert_module_error!(
r#"
pub type Thing {
Thing(a: Bool, b: Bool)
}
pub fn main(x) {
case x {
Thing(a: False, b: True) -> 1
Thing(b: False, a: True) -> 1
}
}
"#
);
}
#[test]
fn guard() {
// A guarded arm cannot count towards exhaustiveness.
assert_module_error!(
r#"
pub fn main(x, y) {
case x {
_ if y -> 1
}
}
"#
);
}
#[test]
fn guard_1() {
// `True` is only matched when the guard holds, so it is uncovered.
assert_module_error!(
r#"
pub fn main(x, y) {
case x {
True if y -> 1
False -> 2
}
}
"#
);
}
#[test]
fn custom_1() {
assert_module_error!(
r#"
pub type Type {
One
Two
}
pub fn main(x) {
case x {
One -> 1
}
}
"#
);
}
#[test]
fn custom_2() {
// Recursive type: `Three(Two)` / `Three(Three(_))` are uncovered.
assert_module_error!(
r#"
pub type Type {
One
Two
Three(Type)
}
pub fn main(x) {
case x {
One -> 1
Two -> 2
Three(One) -> 4
}
}
"#
);
}
// --- Redundant (unreachable) arm warnings -------------------------------
// `assert_warning!` expects analysis to succeed but emit a warning —
// here, that an arm can never match because earlier arms cover it.
#[test]
fn redundant_1() {
assert_warning!(
r#"
pub fn main(x) {
case x {
_ -> 1
_ -> 2
}
}
"#
);
}
#[test]
fn redundant_2() {
assert_warning!(
r#"
pub fn main(x) {
case x {
True -> 1
False -> 2
True -> 3
}
}
"#
);
}
// https://github.com/gleam-lang/gleam/issues/2651
#[test]
fn redundant_3() {
// Everything after the `_` catch-all is unreachable.
assert_warning!(
r#"
pub fn main(x) {
case x {
59 -> "gleew"
14 -> "glabber"
1 -> ""
_ -> "glooper"
2 -> ""
3 -> "glen"
4 -> "glew"
}
}
"#
);
}
#[test]
fn redundant_4() {
assert_warning!(
r#"
pub fn main(x) {
case x {
"P" -> 4
_ -> 3
"geeper!" -> 5
}
}
"#
);
}
#[test]
fn redundant_5() {
// Duplicate literal arm (`"P"`) before the catch-all.
assert_warning!(
r#"
pub fn main(x) {
case x {
"P" -> 4
"" -> 65
"P" -> 19
_ -> 3
}
}
"#
);
}
// Literal equality is by value, not by spelling: differently formatted
// numerals that denote the same number make the later arm redundant.
#[test]
fn redundant_int_with_underscores() {
assert_warning!(
r#"
pub fn main(x) {
case x {
10 -> "ten"
1_0 -> "also ten"
_ -> "other"
}
}
"#
);
}
#[test]
fn redundant_int_with_multiple_underscores() {
assert_warning!(
r#"
pub fn main(x) {
case x {
1_000_000 -> "one million"
1000000 -> "also one million"
_ -> "other"
}
}
"#
);
}
#[test]
fn redundant_float_with_different_formatting() {
assert_warning!(
r#"
pub fn main(x) {
case x {
1.0 -> "one"
1.00 -> "also one"
_ -> "other"
}
}
"#
);
}
#[test]
fn redundant_float_with_no_trailing_decimal() {
assert_warning!(
r#"
pub fn main(x) {
case x {
1.0 -> "one"
1. -> "another one"
_ -> "other"
}
}
"#
);
}
#[test]
fn redundant_float_with_underscore() {
assert_warning!(
r#"
pub fn main(x) {
case x {
10.0 -> "ten"
1_0.0 -> "also ten"
_ -> "other"
}
}
"#
);
}
#[test]
fn redundant_float_scientific_notation() {
assert_warning!(
r#"
pub fn main(x) {
case x {
10.0 -> "ten"
1.0e1 -> "also ten"
_ -> "other"
}
}
"#
);
}
#[test]
fn redundant_float_scientific_notation_and_underscore() {
assert_warning!(
r#"
pub fn main(x) {
case x {
1.0e2 -> "one hundred"
1_0_0.0 -> "one hundred again"
_ -> "other"
}
}
"#
);
}
#[test]
fn let_1() {
// Plain `let` (without `assert`) must be exhaustive; `True` is not.
assert_module_error!(
r#"
pub fn main(x) {
let True = x
0
}
"#
);
}
#[test]
fn tuple_0() {
// Tuple subject: `#(False, _)` is uncovered.
assert_module_error!(
r#"
pub fn main(x, y) {
case #(x, y) {
#(True, _) -> 1
}
}
"#
);
}
// https://github.com/gleam-lang/gleam/issues/2577
#[test]
fn nested_type_parameter_usage() {
assert_module_error!(
r#"
pub type Returned(a) {
Returned(List(a))
}
fn wibble(user: Returned(#())) -> Int {
let Returned([#()]) = user
1
}
"#
);
}
#[test]
fn empty_case_of_external() {
// This external type has no known constructors, and we want to make sure
// that an empty case expression is not valid for it.
assert_module_error!(
r#"
pub type Thingy
pub fn main(x: Thingy) {
case x {
}
}
"#
);
}
#[test]
fn empty_case_of_generic() {
// This generic type has no known constructors, and we want to make sure
// that an empty case expression is not valid for it.
assert_module_error!(
r#"
pub fn main(x: something) {
case x {
}
}
"#
);
}
#[test]
fn reference_absent_type() {
// This test is here because this code previously caused the compiler
// to crash, and we want to make sure that it doesn't break again
assert_module_error!(
"
type Wibble {
One(Int)
Two(Absent)
}
pub fn main(wibble) {
case wibble {
One(x) -> x
}
}
"
);
}
#[test]
fn case_error_prints_module_names() {
assert_module_error!(
("wibble", "pub type Wibble { Wibble Wobble }"),
"
import wibble
pub type Things { Thing1 Thing2(Int) }
pub fn main(wobble_thing) {
case wobble_thing {
#(wibble.Wibble, Thing1) -> Nil
}
}
",
);
}
#[test]
fn case_error_prints_module_alias() {
assert_module_error!(
("wibble", "pub type Wibble { Wibble Wobble }"),
"
import wibble as wobble
pub fn main(wibble) {
case wibble {
wobble.Wibble -> Nil
}
}
",
);
}
#[test]
fn case_error_prints_unqualified_value() {
assert_module_error!(
("wibble", "pub type Wibble { Wibble Wobble }"),
"
import wibble.{Wibble, Wobble}
pub fn main(wibble) {
case wibble {
Wibble -> Nil
}
}
",
);
}
#[test]
fn case_error_prints_aliased_unqualified_value() {
assert_module_error!(
("wibble", "pub type Wibble { Wibble Wobble }"),
"
import wibble.{Wibble, Wobble as Wubble}
pub fn main(wibble) {
case wibble {
Wibble -> Nil
}
}
",
);
}
#[test]
fn case_error_prints_prelude_module_unqualified() {
assert_module_error!(
"
pub fn main(result: Result(Nil, Nil)) {
case result {
Ok(Nil) -> Nil
}
}
"
);
}
#[test]
fn case_error_prints_prelude_module_when_shadowed() {
assert_module_error!(
"
import gleam
type MyResult { Ok Error }
pub fn main(res: Result(Int, Nil)) {
case res {
gleam.Ok(n) -> Nil
}
}
"
);
}
#[test]
fn case_error_prints_module_when_shadowed() {
assert_module_error!(
("mod", "pub type Wibble { Wibble Wobble }"),
"
import mod.{Wibble}
type Wibble { Wibble Wobble }
pub fn main() {
let wibble = mod.Wibble
case wibble {
mod.Wobble -> Nil
}
}
"
);
}
#[test]
fn case_error_prints_module_when_aliased_and_shadowed() {
assert_module_error!(
("mod", "pub type Wibble { Wibble Wobble }"),
"
import mod.{Wibble as Wobble}
type Wibble { Wobble Wubble }
pub fn main() {
let wibble = mod.Wibble
case wibble {
mod.Wobble -> Nil
}
}
"
);
}
#[test]
fn case_error_prints_unqualifed_when_aliased() {
assert_module_error!(
("mod", "pub type Wibble { Wibble Wobble }"),
"
import mod.{Wibble as Wobble}
type Wibble { Wibble Wubble }
pub fn main() {
let wibble = mod.Wibble
case wibble {
mod.Wobble -> Nil
}
}
"
);
}
// The following few tests all verify that the compiler provides useful errors
// when there are no case arms, instead of just suggesting `_` as it did previously.
#[test]
fn empty_case_of_bool() {
assert_module_error!(
"
pub fn main(b: Bool) {
case b {}
}
"
);
}
#[test]
fn empty_case_of_custom_type() {
assert_module_error!(
"
pub type Wibble { Wibble Wobble Wubble }
pub fn main(wibble: Wibble) {
case wibble {}
}
"
);
}
#[test]
fn empty_case_of_list() {
assert_error!(
"
let list = []
case list {}
"
);
}
#[test]
fn empty_case_of_int() {
assert_error!(
"
let num = 24
case num {}
"
);
}
#[test]
fn empty_case_of_float() {
assert_error!(
"
let age = 10.6
case age {}
"
);
}
#[test]
fn empty_case_of_string() {
assert_error!(
r#"
let name = "John Doe"
case name {}
"#
);
}
#[test]
fn empty_case_of_multi_pattern() {
// Multi-subject case (`case a, b`) with no arms.
assert_module_error!(
"
pub fn main(a: Result(a, b), b: Bool) {
case a, b {}
}
"
);
}
// --- Inexhaustive multi-subject patterns --------------------------------
#[test]
fn inexhaustive_multi_pattern() {
// `Ok(_), _` is uncovered.
assert_error!(
"
let a = Ok(1)
let b = True
case a, b {
Error(_), _ -> Nil
}
"
);
}
#[test]
fn inexhaustive_multi_pattern2() {
assert_module_error!(
"
pub fn main(a: Result(Int, Nil), b: Bool) {
case a, b {
Ok(1), True -> Nil
}
}
"
);
}
#[test]
fn inexhaustive_multi_pattern3() {
assert_error!(
"
let a = Ok(1)
let b = True
case a, b {
_, False -> Nil
}
"
);
}
#[test]
fn inexhaustive_multi_pattern4() {
// Three subjects (Int, Float, Bool) with a single literal arm.
assert_module_error!(
"
pub fn main(c: Bool) {
let a = 12
let b = 3.14
case a, b, c {
1, 2.0, True -> Nil
}
}
"
);
}
#[test]
fn inexhaustive_multi_pattern5() {
assert_module_error!(
"
pub fn main(c: Bool) {
let a = 12
let b = 3.14
case a, b, c {
12, _, False -> Nil
}
}
"
);
}
// --- Variant inference --------------------------------------------------
// When the compiler can infer that a value is a specific variant of a
// multi-variant type, matching only that variant is exhaustive, and arms
// for the other variants are reported as unreachable.
#[test]
fn inferred_variant() {
// `wibble` is known to be the `Wibble` variant: no `Wobble` arm needed.
assert_no_warnings!(
"
pub type Wibble {
Wibble(Bool)
Wobble(Int)
}
pub fn main() {
let wibble = Wibble(False)
case wibble {
Wibble(True) -> 1
Wibble(False) -> 0
}
}
",
);
}
#[test]
fn inferred_variant2() {
// Inference still applies when combined with a second Bool subject.
assert_no_warnings!(
"
pub type Wibble {
Wibble
Wobble
}
pub fn main(b: Bool) {
let wibble = Wibble
case wibble, b {
Wibble, True -> True
Wibble, False -> False
}
}
",
);
}
#[test]
fn inferred_variant3() {
// A plain `let` destructure of an inferred variant is exhaustive.
assert_no_warnings!(
"
pub type Wibble {
Wibble(Int, Float, Bool)
Wobble(String)
}
pub fn main() {
let wibble = Wibble(1, 3.14, False)
let Wibble(_int, _float, _bool) = wibble
}
",
);
}
#[test]
fn other_variant_unreachable_when_inferred() {
// The `Wibble` arm can never match a value inferred to be `Wobble`.
assert_warning!(
"
pub type Wibble {
Wibble
Wobble
}
pub fn main() {
let always_wobble = Wobble
case always_wobble {
Wibble -> panic
Wobble -> Nil
}
}
"
);
}
#[test]
fn other_variant_unreachable_when_inferred2() {
// Same, with an or-pattern covering the two impossible variants.
assert_warning!(
"
pub type Wibble {
Wibble
Wobble
Wubble
}
pub fn main() {
let always_wobble = Wobble
case always_wobble {
Wibble | Wubble -> panic
Wobble -> Nil
}
}
"
);
}
// --- String prefix (`<>`) pattern reachability --------------------------
// A literal or prefix arm is unreachable when an earlier, unguarded arm
// matches a prefix of it; a guard on the earlier arm keeps it reachable.
#[test]
fn unreachable_string_pattern_after_prefix() {
// "wib" <> rest already matches "wibble".
assert_warning!(
r#"pub fn main() {
let string = ""
case string {
"wib" <> rest -> rest
"wibble" -> "a"
_ -> "b"
}
}"#
);
}
#[test]
fn reachable_string_pattern_after_prefix() {
// The guard may fail, so "wibble" can still be reached.
assert_no_warnings!(
r#"pub fn main() {
let string = ""
case string {
"wib" <> rest if True -> rest
"wibble" -> "a"
_ -> "b"
}
}"#
);
}
#[test]
fn reachable_string_pattern_after_prefix_1() {
// "wib" is shorter than the prefix "wibble", so it is reachable.
assert_no_warnings!(
r#"pub fn main() {
let string = ""
case string {
"wibble" <> rest -> rest
"wib" -> "a"
_ -> "b"
}
}"#
);
}
#[test]
fn unreachable_prefix_pattern_after_prefix() {
assert_warning!(
r#"pub fn main() {
let string = ""
case string {
"wib" <> rest -> rest
"wibble" <> rest -> rest
_ -> "a"
}
}"#
);
}
#[test]
fn reachable_prefix_pattern_after_prefix() {
assert_no_warnings!(
r#"pub fn main() {
let string = ""
case string {
"wib" <> rest if True -> rest
"wibble" <> rest -> rest
_ -> "a"
}
}"#
);
}
#[test]
fn reachable_prefix_pattern_after_prefix_1() {
assert_no_warnings!(
r#"pub fn main() {
let string = ""
case string {
"wibble" <> rest -> rest
"wib" <> rest -> rest
_ -> "a"
}
}"#
);
}
#[test]
fn multiple_unreachable_prefix_patterns() {
assert_warning!(
r#"pub fn main() {
let string = ""
case string {
"wib" <> rest -> rest
"wibble" <> rest -> rest
"wibblest" <> rest -> rest
_ -> "a"
}
}"#
);
}
#[test]
fn multiple_unreachable_prefix_patterns_1() {
// First arm is guarded, so only the later overlap is unreachable.
assert_warning!(
r#"pub fn main() {
let string = ""
case string {
"wib" <> rest if True -> rest
"wibble" <> rest -> rest
"wibblest" <> rest -> rest
_ -> "a"
}
}"#
);
}
// --- Bit array segment exhaustiveness and redundancy --------------------
// Arms overlap when their segment patterns describe the same bit widths;
// variable `size(..)` expressions only alias when they refer to the same
// binding at the same position.
#[test]
fn bit_array_bits_catches_everything() {
// `<<_:bits>>` matches any bit array, making later arms redundant.
assert_warning!(
r#"pub fn main() {
let bit_array = <<>>
case bit_array {
<<_:bits>> -> 1
<<1>> -> 2
_ -> 2
}
}"#
);
}
#[test]
fn bit_array_bytes_needs_catch_all() {
// `<<_:bytes>>` misses bit arrays whose length is not a whole byte.
assert_module_error!(
r#"pub fn main() {
let bit_array = <<>>
case bit_array {
<<_:bytes>> -> 1
}
}"#
);
}
#[test]
fn bit_array_overlapping_patterns_are_redundant() {
// size(16) and size(8)-unit(2) are both 16 bits after the same `1`.
assert_warning!(
r#"pub fn main() {
let bit_array = <<>>
case bit_array {
<<1, a:size(16)>> -> a
<<1, b:size(8)-unit(2)>> -> b
_ -> 2
}
}"#
);
}
#[test]
fn bit_array_similar_overlapping_patterns_are_not_redundant() {
// Different leading literals (1 vs 2): no overlap, no warning.
assert_no_warnings!(
r#"pub fn main() {
let bit_array = <<>>
case bit_array {
<<1, a:size(16)>> -> a
<<2, b:size(8)-unit(2)>> -> b
_ -> 2
}
}"#
);
}
#[test]
fn bit_array_overlapping_redundant_patterns_with_variable_size() {
// Both arms use the same outer `len` binding, so widths coincide.
assert_warning!(
r#"pub fn main() {
let bit_array = <<>>
let len = 3
case bit_array {
<<a:size(len), _:size(16)>> -> a
<<_:size(len), b:size(8)-unit(2)>> -> b
_ -> 2
}
}"#
);
}
#[test]
fn bit_array_overlapping_redundant_patterns_with_variable_size_2() {
assert_warning!(
r#"pub fn main() {
let bit_array = <<>>
case bit_array {
<<len, _:size(len)-unit(3)>> -> 1
<<len, _:size(len)-unit(2), 1:size(len)>> -> 2
_ -> 2
}
}"#
);
}
#[test]
fn bit_array_overlapping_patterns_with_variable_size_not_redundant() {
// First arm constrains the payload to the literal 1; second does not.
assert_no_warnings!(
r#"pub fn main() {
let bit_array = <<>>
case bit_array {
<<len, 1:size(len)-unit(3)>> -> 1
<<len, _:size(len)-unit(2), 1:size(len)>> -> 2
_ -> 2
}
}"#
);
}
#[test]
fn bit_array_patterns_with_different_length_with_same_name_are_not_redundant() {
assert_no_warnings!(
r#"pub fn main() {
let bit_array = <<>>
let len = 10
case bit_array {
<<_, _:size(len)-unit(3)>> -> 1
// Down here len is not the same as the len above, so the branch below is
// not redundant!
<<len, _:size(len)-unit(3)>> -> 2
_ -> 2
}
}"#
);
}
#[test]
fn bit_array_patterns_with_different_length_with_same_name_are_not_redundant_1() {
assert_no_warnings!(
r#"pub fn main() {
let bit_array = <<>>
let len = 10
case bit_array {
<<len, _:size(len)-unit(3)>> -> 1
// Down here len is not the same as the len above, so the branch below is
// not redundant!
<<_, _:size(len)-unit(3)>> -> 2
_ -> 2
}
}"#
);
}
#[test]
fn bit_array_patterns_with_different_length_with_same_name_are_not_redundant_2() {
assert_no_warnings!(
r#"pub fn main() {
let bit_array = <<>>
case bit_array {
<<_, len, _:size(len)>> -> 1
// Down here len is not the same as the len above, so the branch below is
// not redundant!
<<len, _, _:size(len)>> -> 2
_ -> 2
}
}"#
);
}
#[test]
fn same_catch_all_bytes_are_redundant() {
// Two `:bytes` catch-alls: the second can never match.
assert_warning!(
r#"pub fn main() {
let bit_array = <<>>
case bit_array {
<<_:bytes>> -> <<>>
<<a:bytes>> -> a
_ -> <<>>
}
}"#
);
}
#[test]
fn different_catch_all_bytes_are_not_redundant() {
assert_no_warnings!(
r#"pub fn main() {
let bit_array = <<>>
case bit_array {
<<_, _:bytes>> -> <<>>
<<_:bytes>> -> <<>>
_ -> <<>>
}
}"#
);
}
// https://github.com/gleam-lang/gleam/issues/2616
// The same literal appearing twice in one alternative pattern (`2 | 2`) makes
// the duplicate unreachable: warning expected.
#[test]
fn duplicated_alternative_patterns() {
assert_warning!(
"
pub fn main() {
let x = 1
case x {
2 | 2 -> 2
_ -> panic
}
}
"
);
}
// https://github.com/gleam-lang/gleam/issues/2616
// `2` was already matched by an earlier branch, so the `2` alternative in the
// second branch is unreachable: warning expected.
#[test]
fn duplicated_pattern_in_alternative() {
assert_warning!(
"
pub fn main() {
let x = 1
case x {
2 -> x
1 | 2 -> x - 4
_ -> panic
}
}
"
);
}
// https://github.com/gleam-lang/gleam/issues/2616
// Several of the alternatives (1, 3, 5) repeat earlier single-literal
// branches: the checker should warn about the duplicates.
#[test]
fn duplicated_pattern_with_multiple_alternatives() {
assert_warning!(
"
pub fn main() {
let x = 1
case x {
1 -> 1
3 -> 3
5 -> 5
1 | 2 | 3 | 4 | 5 -> x - 1
_ -> panic
}
}
"
);
}
// A multi-subject clause (`1, 2`) repeated verbatim is unreachable the second
// time: warning expected.
#[test]
fn unreachable_multi_pattern() {
assert_warning!(
"
pub fn main() {
let x = 1
let y = 2
case x, y {
1, 2 -> True
1, 2 -> False
_, _ -> panic
}
}
"
);
}
// The `1, 2` alternative of the second clause duplicates the first clause, so
// it is unreachable: warning expected.
#[test]
fn unreachable_alternative_multi_pattern() {
assert_warning!(
"
pub fn main() {
let x = 1
let y = 2
case x, y {
1, 2 -> True
3, 4 | 1, 2 -> False
_, _ -> panic
}
}
"
);
}
// https://github.com/gleam-lang/gleam/issues/4586
// Regression test: binding `a` twice within one alternative (`#(a, a as b)`)
// must produce a type error, not crash the compiler.
#[test]
fn compiler_does_not_crash_when_defining_duplicate_alternative_variables() {
assert_error!(
"
case todo {
#(a, b) | #(a, a as b) -> todo
}
"
);
}
// https://github.com/gleam-lang/gleam/issues/4626
// Matching on an opaque type from another module with an empty case must
// report missing patterns without leaking the opaque constructors' details.
#[test]
fn correct_missing_patterns_for_opaque_type() {
assert_module_error!(
(
"mod",
"pub opaque type Wibble { Wibble(Int) Wobble(String) }"
),
"
import mod
pub fn main(w: mod.Wibble) {
case w {}
}
"
);
}
// Inside the defining module the opaque constructors are visible, so the
// missing-patterns error can (and should) name them.
#[test]
fn correct_missing_patterns_for_opaque_type_in_definition_module() {
assert_module_error!(
"
pub opaque type Wibble { Wibble(Int) Wobble(String) }
pub fn main(w: Wibble) {
case w {}
}
"
);
}
#[test]
// https://github.com/gleam-lang/gleam/issues/4278
// The reported set of missing patterns should be minimal — not a redundant
// enumeration of every Bool × Int combination.
fn redundant_missing_patterns() {
assert_module_error!(
r#"
fn wibble(b: Bool, i: Int) {
case b, i {
False, 1 -> todo
True, 2 -> todo
}
}
pub fn main() { wibble(False, 1) }"#
);
}
| rust | Apache-2.0 | f424547f02e621f1c5f28749786e05eda7feb098 | 2026-01-04T15:40:22.554517Z | false |
gleam-lang/gleam | https://github.com/gleam-lang/gleam/blob/f424547f02e621f1c5f28749786e05eda7feb098/compiler-core/src/type_/tests/assert.rs | compiler-core/src/type_/tests/assert.rs | use crate::{assert_error, assert_infer, assert_module_infer, assert_warning};
#[test]
fn bool_value() {
assert_infer!(
"
let value = True
assert value
",
"Nil"
);
}
#[test]
fn equality_check() {
assert_infer!(
"
let value = 10
assert value == 10
",
"Nil"
);
}
#[test]
fn comparison() {
assert_infer!(
"
let value = 4
assert value < 5
",
"Nil"
);
}
#[test]
fn function_call() {
assert_module_infer!(
"
fn bool() {
True
}
pub fn main() {
assert bool()
}
",
vec![("main", "fn() -> Nil")]
);
}
#[test]
fn bool_literal() {
assert_warning!(
"
pub fn main() {
assert True
}
"
);
}
#[test]
fn negation_of_bool_literal() {
assert_warning!(
"
pub fn main() {
assert !False
}
"
);
}
#[test]
fn equality_check_on_literals() {
assert_warning!(
"
pub fn main() {
assert 1 == 2
}
"
);
}
#[test]
fn comparison_on_literals() {
assert_warning!(
"
pub fn main() {
assert 1 < 2
}
"
);
}
#[test]
fn with_message() {
assert_infer!(r#"assert True as "This should never panic""#, "Nil");
}
#[test]
fn compound_message() {
assert_infer!(
r#"assert 1 == 2 as { "one" <> " is never equal to " <> "two" }"#,
"Nil"
);
}
#[test]
fn mismatched_types() {
assert_error!("assert 10");
}
#[test]
fn wrong_message_type() {
assert_error!("assert True as 10");
}
| rust | Apache-2.0 | f424547f02e621f1c5f28749786e05eda7feb098 | 2026-01-04T15:40:22.554517Z | false |
gleam-lang/gleam | https://github.com/gleam-lang/gleam/blob/f424547f02e621f1c5f28749786e05eda7feb098/compiler-core/src/type_/tests/use_.rs | compiler-core/src/type_/tests/use_.rs | use crate::{assert_error, assert_infer, assert_module_error, assert_module_infer, assert_warning};
// `use <- pair()` desugars to passing the trailing block as the callback
// argument; the callback's return feeds the tuple construction.
#[test]
fn arity_1() {
assert_module_infer!(
r#"
pub fn main() {
use <- pair()
123
}
fn pair(f) {
let x = f()
#(x, x)
}
"#,
vec![("main", "fn() -> #(Int, Int)")],
)
}
// `use` with one explicit argument before the callback slot.
#[test]
fn arity_2() {
assert_module_infer!(
r#"
pub fn main() {
use <- pair(1.0)
123
}
fn pair(x, f) {
let y = f()
#(x, y)
}
"#,
vec![("main", "fn() -> #(Float, Int)")],
)
}
// `use` with two explicit arguments before the callback slot.
#[test]
fn arity_3() {
assert_module_infer!(
r#"
pub fn main() {
use <- trip(1.0, "")
123
}
fn trip(x, y, f) {
let z = f()
#(x, y, z)
}
"#,
vec![("main", "fn() -> #(Float, String, Int)")],
)
}
// The callee of `use` may be a local variable holding a function.
#[test]
fn call_is_variable() {
assert_infer!(
r#"
let call = fn(f) { f() }
use <- call
123
"#,
"Int"
);
}
// The callee may be an anonymous function literal written inline.
#[test]
fn call_is_literal() {
assert_infer!(
r#"
use <- fn(f) { f() }
123.0
"#,
"Float"
);
}
// The callee may be a function capture (`f(_, 123)`); the callback fills the
// captured hole.
#[test]
fn call_is_capture() {
assert_infer!(
r#"
let f = fn(a, b) { a() + b }
use <- f(_, 123)
123
"#,
"Int"
);
}
// The callee of `use` must be callable; a number is a type error.
#[test]
fn invalid_call_is_number() {
assert_error!(
r#"
use <- 123
123
"#
);
}
// The callback takes no parameters here, but `f` invokes it with two
// arguments: arity mismatch error expected.
#[test]
fn wrong_arity() {
assert_error!(
r#"
let f = fn(callback) { callback(1, 2) }
use <- f
123
"#
);
}
// `f`'s last parameter is an Int, not a function: `use` cannot target it.
#[test]
fn use_with_function_that_doesnt_take_callback_as_last_arg_1() {
assert_error!(
r#"
let f = fn(a) { a + 1 }
use <- f
123
"#
);
}
// `f` takes no arguments at all, so there is no callback slot.
#[test]
fn use_with_function_that_doesnt_take_callback_as_last_arg_2() {
assert_error!(
r#"
let f = fn() { 1 }
use <- f
123
"#
);
}
// All of `f`'s parameters are Ints; the final slot filled by `use` is not a
// function.
#[test]
fn use_with_function_that_doesnt_take_callback_as_last_arg_3() {
assert_error!(
r#"
let f = fn(a, b) { a + b }
use <- f(1)
123
"#
);
}
// Too few explicit arguments: `use` only supplies the single trailing
// callback, so one of `f`'s parameters is left unfilled.
#[test]
fn wrong_arity_less_than_required() {
assert_error!(
r#"
let f = fn(a, b) { 1 }
use <- f
123
"#
);
}
// Same under-application with a three-parameter function and one explicit arg.
#[test]
fn wrong_arity_less_than_required_2() {
assert_error!(
r#"
let f = fn(a, b, c) { 1 }
use <- f(1)
123
"#
);
}
// Too many arguments: the explicit args plus the callback exceed `f`'s arity.
#[test]
fn wrong_arity_more_than_required() {
assert_error!(
r#"
let f = fn(a, b) { 1 }
use <- f(1, 2)
123
"#
);
}
// Over-application by two: still an arity error.
#[test]
fn wrong_arity_more_than_required_2() {
assert_error!(
r#"
let f = fn(a, b) { 1 }
use <- f(1, 2, 3)
123
"#
);
}
// A `use` with nothing after it has an empty callback body: warning expected.
#[test]
fn no_callback_body() {
assert_warning!(
r#"
pub fn main() {
let thingy = fn(f) { f() }
use <- thingy()
}
"#
);
}
// The callback must return Int (`f() + 1`) but the body yields Nil: error.
#[test]
fn invalid_callback_type() {
assert_error!(
r#"
let x = fn(f) { f() + 1 }
use <- x()
Nil
"#
);
}
// The callback must return String but the body is an Int expression: error.
#[test]
fn invalid_callback_type_2() {
assert_error!(
r#"
let x = fn(f) { "Hello, " <> f() }
use <- x()
let n = 1
n + 2
"#
);
}
// Nested `use`s with conflicting required callback return types (String vs
// Int): fault-tolerant analysis must report the mismatch.
#[test]
fn invalid_callback_type_3() {
assert_error!(
r#"
let x = fn(f) { "Hello, " <> f() }
let y = fn(f) { 1 + f() }
use <- x()
use <- y()
let n = 1
n + 1
"#
);
}
// Three nested `use`s, each demanding a different callback return type
// (String, Int, Float): error expected.
#[test]
fn invalid_callback_type_4() {
assert_error!(
r#"
let x = fn(f) { "Hello, " <> f() }
let y = fn(f) { 1 + f() }
let z = fn(f) { 1.0 +. f() }
use <- x()
use <- y()
let n = 1
use <- z()
1.0
"#
);
}
// The callback is invoked with zero arguments but the `use` declares one
// pattern (`_`): arity mismatch error.
#[test]
fn wrong_callback_arity() {
assert_error!(
r#"
let x = fn(f) { "Hello, " <> f() }
use _ <- x()
"Giacomo!"
"#
);
}
// The callback is invoked with one argument but the `use` declares none.
#[test]
fn wrong_callback_arity_2() {
assert_error!(
r#"
let x = fn(f) { "Hello, " <> f(1) }
use <- x()
"Giacomo!"
"#
);
}
// The callback is invoked with one argument but the `use` declares two.
#[test]
fn wrong_callback_arity_3() {
assert_error!(
r#"
let x = fn(f) { "Hello, " <> f(1) }
use _, _ <- x()
"Giacomo!"
"#
);
}
// The callback argument is an Int but the body uses it as a String: error.
#[test]
fn wrong_callback_arg() {
assert_error!(
r#"
let x = fn(f) { "Hello, " <> f(1) }
use n <- x()
n <> "Giacomo!"
"#
);
}
// An explicit (wrong) annotation on the `use` pattern conflicts with the
// callback's actual Int argument: error expected.
#[test]
fn wrong_callback_arg_with_wrong_annotation() {
assert_error!(
r#"
let x = fn(f) { "Hello, " <> f(1) }
use n: String <- x()
n <> "Giacomo!"
"#
);
}
// A constructor pattern in the `use` whose field literal has the wrong type
// (String where Int expected) is a module error.
#[test]
fn wrong_callback_arg_2() {
assert_module_error!(
r#"
pub type Box {
Box(Int)
}
pub fn main() {
let x = fn(f) { "Hello, " <> f(Box(1)) }
use Box("hi") <- x()
"Giacomo!"
}
"#
);
}
// The callback receives a bare Int, so a `Box(1)` pattern doesn't match the
// argument's type: module error expected.
#[test]
fn wrong_callback_arg_3() {
assert_module_error!(
r#"
pub type Box {
Box(Int)
}
pub fn main() {
let x = fn(f) { "Hello, " <> f(1) }
use Box(1) <- x()
"Giacomo!"
}
"#
);
}
// A discard pattern (`_`) is valid in the `use` argument position.
#[test]
fn discard() {
assert_infer!(
r#"
let x = fn(f) { f(123) }
use _ <- x()
Nil
"#,
"Nil",
);
}
// A named discard (`_wibble`) also works.
#[test]
fn discard_named() {
assert_infer!(
r#"
let x = fn(f) { f(123) }
use _wibble <- x()
Nil
"#,
"Nil",
);
}
// A function body consisting solely of a `use` leaves the callback empty:
// warning expected.
#[test]
fn just_use_in_fn_body() {
assert_warning!(
r#"
pub fn main() {
use <- wibble()
}
fn wibble(f) {
f()
}
"#
);
}
// Labelled arguments work in the callee of a `use`; the callback fills the
// remaining (labelled) parameter.
#[test]
fn labels() {
assert_module_infer!(
r#"
pub fn main() {
use x <- apply(arg: 1)
x
}
fn apply(fun fun, arg arg) {
fun(arg)
}
"#,
vec![("main", "fn() -> Int")],
);
}
// A constructor pattern can destructure the callback argument directly.
#[test]
fn patterns() {
assert_module_infer!(
r#"
pub fn main() {
use Box(x) <- apply(Box(1))
x
}
type Box(a) {
Box(a)
}
fn apply(arg, fun) {
fun(arg)
}
"#,
vec![("main", "fn() -> Int")],
);
}
// Multiple patterns destructure each callback argument in turn.
#[test]
fn multiple_patterns() {
assert_module_infer!(
r#"
pub fn main() {
use Box(x), Box(y), Box(z) <- apply(Box(1))
x + y + z
}
type Box(a) {
Box(a)
}
fn apply(arg, fun) {
fun(arg, arg, arg)
}
"#,
vec![("main", "fn() -> Int")],
);
}
// An explicit type annotation on a `use` pattern is accepted when it matches.
#[test]
fn typed_pattern() {
assert_module_infer!(
r#"
pub fn main() {
use Box(x): Box(Int), Box(y), Box(z) <- apply(Box(1))
x + y + z
}
type Box(a) {
Box(a)
}
fn apply(arg, fun) {
fun(arg, arg, arg)
}
"#,
vec![("main", "fn() -> Int")],
);
}
// A mismatching annotation (`Box(Bool)` vs the actual `Box(Int)`) is an error.
#[test]
fn typed_pattern_wrong_type() {
assert_module_error!(
r#"
pub fn main() {
use Box(x): Box(Bool), Box(y), Box(z) <- apply(Box(1))
x + y + z
}
type Box(a) {
Box(a)
}
fn apply(arg, fun) {
fun(arg, arg, arg)
}
"#
);
}
// Fault tolerance: multiple bad statements after a `use` are all analysed and
// reported rather than analysis stopping at the first error.
#[test]
fn multiple_bad_statement_use_fault_tolerance() {
assert_error!(
r#"
let x = fn(f) { f() + 1 }
use <- x()
1 + 2.0
3.0 + 4
5
"#
);
}
| rust | Apache-2.0 | f424547f02e621f1c5f28749786e05eda7feb098 | 2026-01-04T15:40:22.554517Z | false |
gleam-lang/gleam | https://github.com/gleam-lang/gleam/blob/f424547f02e621f1c5f28749786e05eda7feb098/compiler-core/src/type_/tests/imports.rs | compiler-core/src/type_/tests/imports.rs | use crate::{assert_module_error, assert_module_infer};
// https://github.com/gleam-lang/gleam/issues/1760
// An unqualified value import may share its name with the module it comes
// from without a clash.
#[test]
fn import_value_with_same_name_as_imported_module() {
assert_module_infer!(
("other", "pub const other = 1"),
"
import other.{other}
pub const a = other
",
vec![("a", "Int")],
);
}
// A record constructor from a nested module path can be used in a constant.
#[test]
fn imported_constant_record() {
assert_module_infer!(
("one/two", "pub type Thing { Thing(Int) }"),
"
import one/two
pub const a = two.Thing(1)
",
vec![("a", "Thing")],
);
}
// Private constructors of another module cannot be referenced qualified…
#[test]
fn using_private_constructor() {
assert_module_error!(
("one", "type Two { Two }"),
"import one
pub fn main() {
one.Two
}",
);
}
// …nor used in a pattern position.
#[test]
fn using_private_constructor_pattern() {
assert_module_error!(
("one", "type Two { Two }"),
"import one
pub fn main(x) {
let one.Two = x
}",
);
}
// Opaque types hide their constructors even though the type itself is public.
#[test]
fn using_opaque_constructor() {
assert_module_error!(
("one", "pub opaque type Two { Two }"),
"import one
pub fn main() {
one.Two
}",
);
}
// Private functions are not accessible through a qualified reference.
#[test]
fn using_private_function() {
assert_module_error!(
("one", "fn two() { 2 }"),
"import one
pub fn main() {
one.two
}",
);
}
// Private type aliases: neither qualified nor unqualified access is allowed.
#[test]
fn using_private_type_alias() {
assert_module_error!(
("one", "type X = Int"),
"import one
pub fn main() {
one.X
}",
);
}
#[test]
fn using_private_unqualified_type_alias() {
assert_module_error!(
("one", "type X = Int"),
"import one.{X}
pub fn main() {
X
}",
);
}
// Private external (bodyless) types behave the same way.
#[test]
fn using_private_external_type() {
assert_module_error!(
("one", "type X"),
"import one
pub fn main() {
one.X
}",
);
}
#[test]
fn using_private_unqualified_external_type() {
assert_module_error!(
("one", "type X"),
"import one.{X}
pub fn main() {
X
}",
);
}
// Private custom types with constructors are also inaccessible from outside.
#[test]
fn using_private_custom_type() {
assert_module_error!(
("one", "type X { Y }"),
"import one
pub fn main() {
one.X
}",
);
}
#[test]
fn using_private_unqualified_custom_type() {
assert_module_error!(
("one", "type X { Y }"),
"import one.{X}
pub fn main() {
X
}",
);
}
// Unqualified imports of private items fail in the same ways as qualified.
#[test]
fn unqualified_using_private_constructor() {
assert_module_error!(
("one", "type Two { Two }"),
"import one.{Two}
pub fn main() {
Two
}",
);
}
#[test]
fn unqualified_using_private_constructor_pattern() {
assert_module_error!(
("one", "type Two { Two }"),
"import one.{Two}
pub fn main(x) {
let Two = x
}",
);
}
#[test]
fn unqualified_using_opaque_constructor() {
assert_module_error!(
("one", "pub opaque type Two { Two }"),
"import one.{Two}
pub fn main() {
Two
}",
);
}
#[test]
fn unqualified_using_private_function() {
assert_module_error!(
("one", "fn two() { 2 }"),
"import one.{two}
pub fn main() {
two
}",
);
}
// `import one.{type One}` brings the type (not the value) into scope, and it
// resolves through its alias to Int.
#[test]
fn import_type() {
assert_module_infer!(
("one", "pub type One = Int"),
"import one.{type One}
pub fn main() -> One {
todo
}
",
vec![("main", "fn() -> Int")],
);
}
// Importing the same name both as a value and a type duplicates it: error.
#[test]
fn import_type_duplicate() {
assert_module_error!(
("one", "pub type One = Int"),
"import one.{One, type One}
pub fn main() -> One {
todo
}
",
);
}
// Two `type One as MyOne` entries collide even when aliased identically.
#[test]
fn import_type_duplicate_with_as() {
assert_module_error!(
("one", "pub type One = Int"),
"import one.{type One as MyOne, type One as MyOne}
pub type X = One
",
);
}
// The same collision is reported across a multi-line import list.
#[test]
fn import_type_duplicate_with_as_multiline() {
assert_module_error!(
("one", "pub type One = Int"),
"import one.{
type One as MyOne,
type One as MyOne
}
pub type X = One
",
);
}
// https://github.com/gleam-lang/gleam/issues/2379
// Importing both the constructor `X` and `type X` of the same item must not
// be treated as a conflict (value and type namespaces are separate).
#[test]
fn deprecated_type_import_conflict() {
assert_module_infer!(
("one", "pub type X { X }"),
"import one.{X, type X}",
vec![]
);
}
// Value and type may even share the same alias, as they live in different
// namespaces.
#[test]
fn aliased_unqualified_type_and_value() {
assert_module_infer!(
("one", "pub type X { X }"),
"import one.{X as XX, type X as XX}",
vec![]
);
}
// Importing a type-alias from one module and a same-named value from another
// is fine: different namespaces again.
#[test]
fn deprecated_type_import_conflict_two_modules() {
assert_module_infer!(
("one", "pub type X { X }"),
("two", "pub type X { X }"),
"
import one.{type X as Y}
import two.{X}
",
vec![]
);
}
// Importing only the constructor does not bring the type into scope, so using
// `Wibble` in type position is an error.
#[test]
fn imported_constructor_instead_of_type() {
assert_module_error!(
("module", "pub type Wibble { Wibble }"),
"import module.{Wibble}
pub fn main(x: Wibble) {
todo
}",
);
}
// Fault tolerance: a failing import must not stop analysis of the rest of the
// module (the `1 + Nil` error should still be found).
#[test]
fn import_errors_do_not_block_analysis() {
// An error in an import doesn't stop the rest of the module being analysed
assert_module_error!(
"import unknown_module
pub fn main() {
1 + Nil
}"
);
}
// One unknown unqualified import must not invalidate later valid ones in the
// same import list (the `Integer` alias should still resolve).
#[test]
fn unqualified_import_errors_do_not_block_later_unqualified() {
assert_module_error!(
"import gleam.{Unknown, type Int as Integer}
pub fn main() -> Integer {
Nil
}"
);
}
// A module alias is not a value: referring to `two` bare is an error.
#[test]
fn module_alias_used_as_a_name() {
assert_module_error!(
("one/two", ""),
"
import one/two
pub fn main() {
two
}
"
);
}
| rust | Apache-2.0 | f424547f02e621f1c5f28749786e05eda7feb098 | 2026-01-04T15:40:22.554517Z | false |
gleam-lang/gleam | https://github.com/gleam-lang/gleam/blob/f424547f02e621f1c5f28749786e05eda7feb098/compiler-core/src/type_/tests/dead_code_detection.rs | compiler-core/src/type_/tests/dead_code_detection.rs | use crate::{assert_no_warnings, assert_warning};
#[test]
fn unused_recursive_function() {
assert_warning!(
"
fn unused(value: Int) -> Int {
case value {
0 -> 0
_ -> unused(value - 1)
}
}
"
);
}
#[test]
fn unused_mutually_recursive_functions() {
assert_warning!(
"
fn wibble(value: Int) -> Int {
wobble(value)
}
fn wobble(value: Int) -> Int {
wibble(value)
}
"
);
}
#[test]
fn constant_only_referenced_by_unused_function() {
assert_warning!(
"
const value = 10
fn unused() {
value
}
"
);
}
#[test]
fn constant_only_referenced_by_unused_constant() {
assert_warning!(
"
const value = 10
const value_twice = #(value, value)
"
);
}
#[test]
fn constant_referenced_by_public_constant() {
assert_no_warnings!(
"
const value = 10
pub const value_twice = #(value, value)
"
);
}
#[test]
fn type_variant_only_referenced_by_unused_function() {
assert_warning!(
"
type Wibble {
Wibble
Wobble
}
fn unused() {
Wibble
}
pub fn used() {
let _ = Wobble
Nil
}
"
);
}
#[test]
fn type_marked_as_used_if_variant_used() {
assert_no_warnings!(
"
type PrivateType {
PrivateConstructor
}
pub fn public_function() {
let _constructor = PrivateConstructor
Nil
}
"
);
}
#[test]
fn type_and_variant_unused() {
assert_warning!(
"
type PrivateType {
PrivateConstructor
}
"
);
}
#[test]
fn type_used_by_public_alias() {
assert_no_warnings!(
"
type PrivateType
pub type PublicAlias = PrivateType
"
);
}
// An import whose only reference lives in dead code is itself unused:
// warning expected.
#[test]
fn imported_module_only_referenced_by_unused_function() {
assert_warning!(
(
"wibble",
"
pub type Wibble {
Wibble(Int)
}
"
),
"
import wibble
fn unused() {
wibble.Wibble
}
"
);
}
// The same applies when the import is aliased (`as wobble`).
#[test]
fn imported_module_alias_only_referenced_by_unused_function() {
assert_warning!(
(
"wibble",
"
pub type Wibble {
Wibble(Int)
}
"
),
"
import wibble as wobble
fn unused() {
wobble.Wibble
}
"
);
}
// The unqualified `type Wibble` is used by `main`, but the aliased module
// name `wobble` is only used by dead code: the alias should be flagged.
#[test]
fn imported_module_alias_only_referenced_by_unused_function_with_unqualified() {
assert_warning!(
(
"wibble",
"
pub type Wibble {
Wibble(Int)
}
"
),
"
import wibble.{type Wibble} as wobble
fn unused() {
wobble.Wibble
}
pub fn main() -> Wibble {
panic
}
"
);
}
// An import is live when referenced from a public function…
#[test]
fn imported_module_used_by_public_function() {
assert_no_warnings!(
(
"thepackage",
"wibble",
"
pub type Wibble {
Wibble(Int)
}
"
),
"
import wibble
pub fn main() {
wibble.Wibble(4)
}
"
);
}
// …or from a public type annotation…
#[test]
fn imported_module_used_in_type() {
assert_no_warnings!(
(
"thepackage",
"wibble",
"
pub type Wibble {
Wibble(Int)
}
"
),
"
import wibble
pub fn main() -> wibble.Wibble {
panic
}
"
);
}
// …or a public constant…
#[test]
fn imported_module_used_by_public_constant() {
assert_no_warnings!(
(
"thepackage",
"wibble",
"
pub type Wibble {
Wibble(Int)
}
"
),
"
import wibble
pub const value = wibble.Wibble(42)
"
);
}
// …or a public custom type's field type…
#[test]
fn imported_module_used_in_type_variant() {
assert_no_warnings!(
(
"thepackage",
"wibble",
"
pub type Wibble {
Wibble(Int)
}
"
),
"
import wibble
pub type Wobble {
Wobble(w: wibble.Wibble)
}
"
);
}
// …or a public type alias.
#[test]
fn imported_module_used_in_type_alias() {
assert_no_warnings!(
(
"thepackage",
"wibble",
"
pub type Wibble {
Wibble(Int)
}
"
),
"
import wibble
pub type Wobble = wibble.Wibble
"
);
}
// Unqualified imports (values and types) are flagged when only reachable via
// dead code…
#[test]
fn imported_value_only_referenced_by_unused_function() {
assert_warning!(
(
"wibble",
"
pub type Wibble {
Wibble(Int)
}
"
),
"
import wibble.{Wibble}
fn unused() {
Wibble
}
"
);
}
#[test]
fn imported_type_only_referenced_by_unused_function() {
assert_warning!(
(
"wibble",
"
pub type Wibble
"
),
"
import wibble.{type Wibble}
fn unused() -> Wibble {
panic
}
"
);
}
// …and live when reachable from the public surface.
#[test]
fn imported_value_used_by_public_function() {
assert_no_warnings!(
("thepackage", "wibble", "pub type Wibble { Wibble }"),
"
import wibble.{Wibble}
pub fn main() {
Wibble
}
"
);
}
#[test]
fn imported_type_used_by_public_function() {
assert_no_warnings!(
("thepackage", "wibble", "pub type Wibble { Wibble }"),
"
import wibble.{type Wibble}
pub fn main() -> Wibble {
wibble.Wibble
}
"
);
}
// A type used only in a parameter annotation still counts as used.
#[test]
fn imported_type_used_by_public_function_parameter() {
assert_no_warnings!(
("thepackage", "wibble", "pub type Wibble { Wibble }"),
"
import wibble.{type Wibble}
pub fn main(a: Wibble) {
a
}
"
);
}
// A private type alias with no references is dead code.
#[test]
fn unused_type_alias() {
assert_warning!(
"
type Wibble = Int
"
);
}
// An alias referenced only from a dead private function is also dead.
#[test]
fn private_type_alias_only_referenced_by_unused_function() {
assert_warning!(
"
type Wibble = Int
fn unused() -> Wibble {
10
}
"
);
}
// Using the underlying type (Int) directly does NOT count as using the alias:
// the alias is still flagged.
#[test]
fn private_type_alias_underlying_type_referenced_by_public_function() {
assert_warning!(
"
type Wibble = Int
pub fn used() -> Int {
10
}
"
);
}
// Referencing the alias by name from a public function makes it live.
#[test]
fn private_type_alias_referenced_by_public_function() {
assert_no_warnings!(
"
type Wibble = Int
pub fn used() -> Wibble {
10
}
"
);
}
// A local `pub const wibble` shadows the unqualified import, leaving the
// import itself unused: warning expected.
#[test]
fn shadowed_imported_value_marked_unused() {
assert_warning!(
(
"wibble",
"
pub const wibble = 1
"
),
"
import wibble.{wibble}
pub const wibble = 2
"
);
}
// Here the RHS reads the imported `wibble` before the shadowing definition
// takes effect, so the import IS used (the warning comes from elsewhere).
#[test]
fn used_shadowed_imported_value() {
assert_warning!(
(
"thepackage",
"wibble",
"
pub const wibble = 1
"
),
"
import wibble.{wibble}
pub const wibble = wibble
"
);
}
// `wibble.wobble` is record access on the local variable `wibble`, not module
// access — the imported module is genuinely unused: warning expected.
#[test]
fn imported_module_marked_unused_when_shadowed_in_record_access() {
assert_warning!(
(
"wibble",
"
pub const wibble = 1
"
),
"
import wibble
type Wibble {
Wibble(wobble: Int)
}
pub fn main() {
let wibble = Wibble(10)
// This does not reference the `wibble` module!
wibble.wobble
}
"
);
}
// The converse: `wibble.wibble` resolves to the module, so the local variable
// `wibble` is the unused one.
#[test]
fn local_variable_marked_unused_when_shadowed_in_module_access() {
assert_warning!(
(
"wibble",
"
pub const wibble = 1
"
),
"
import wibble
pub fn main() {
let wibble = 10
// This does not reference the `wibble` variable!
wibble.wibble
}
"
);
}
#[test]
// https://github.com/gleam-lang/gleam/issues/3552
// The local type alias shadows the imported type name but NOT the imported
// constructor, which `main` calls — so the constructor import is used.
fn constructor_used_if_type_alias_shadows_it() {
assert_warning!(
(
"wibble",
"
pub type Wibble {
Wibble(String)
}
"
),
r#"
import wibble.{Wibble}
type Wibble =
wibble.Wibble
pub fn main() {
Wibble("hello")
}
"#
);
}
// Only the constructor import is used here; the `type Wibble` import is dead:
// warning expected.
#[test]
fn imported_type_and_constructor_with_same_name() {
assert_warning!(
(
"wibble",
"
pub type Wibble {
Wibble
}
"
),
"
import wibble.{type Wibble, Wibble}
pub fn main() {
Wibble
}
"
);
}
// Only the type import is used (the constructor is reached qualified), so the
// unqualified constructor import is dead: warning expected.
#[test]
fn imported_type_and_constructor_with_same_name2() {
assert_warning!(
(
"wibble",
"
pub type Wibble {
Wibble
}
"
),
"
import wibble.{type Wibble, Wibble}
pub fn main() -> Wibble {
wibble.Wibble
}
"
);
}
// Both the type and constructor imports are used: no warnings.
#[test]
fn imported_type_and_constructor_with_same_name3() {
assert_no_warnings!(
(
"thepackage",
"wibble",
"
pub type Wibble {
Wibble
}
"
),
"
import wibble.{type Wibble, Wibble}
pub fn main() -> Wibble {
Wibble
}
"
);
}
| rust | Apache-2.0 | f424547f02e621f1c5f28749786e05eda7feb098 | 2026-01-04T15:40:22.554517Z | false |
gleam-lang/gleam | https://github.com/gleam-lang/gleam/blob/f424547f02e621f1c5f28749786e05eda7feb098/compiler-core/src/type_/tests/guards.rs | compiler-core/src/type_/tests/guards.rs | use crate::{assert_module_error, assert_module_infer};
// Chained record field access (`a.b.c.d`) is allowed in a guard expression.
#[test]
fn nested_record_access() {
assert_module_infer!(
r#"
pub type A {
A(b: B)
}
pub type B {
B(c: C)
}
pub type C {
C(d: Bool)
}
pub fn a(a: A) {
case a {
_ if a.b.c.d -> 1
_ -> 0
}
}
"#,
vec![
("A", "fn(B) -> A"),
("B", "fn(C) -> B"),
("C", "fn(Bool) -> C"),
("a", "fn(A) -> Int"),
],
);
}
// Field access on a String in a guard is a type error (Strings have no
// fields).
#[test]
fn string_variable_access() {
assert_module_error!(
r#"
pub fn a(a: String) {
case a {
_ if a.b -> 1
_ -> 0
}
}
"#
);
}
// A qualified zero-argument constructor (`wibble.Wobble`) may appear in a
// guard comparison.
#[test]
fn qualified_record() {
assert_module_infer!(
("wibble", "pub type Wibble { Wibble Wobble }"),
"
import wibble
pub fn main(wibble: wibble.Wibble) {
case wibble {
w if w == wibble.Wobble -> True
_ -> False
}
}
",
vec![("main", "fn(Wibble) -> Bool")]
);
}
// A qualified constructor call with arguments is also valid in a guard.
#[test]
fn qualified_record_with_arguments() {
assert_module_infer!(
(
"wibble",
"pub type Wibble { Wibble(Int) Wobble(Int, Float) }"
),
"
import wibble
pub fn main(wibble: wibble.Wibble) {
case wibble {
w if w == wibble.Wobble(1, 3.8) -> True
_ -> False
}
}
",
vec![("main", "fn(Wibble) -> Bool")]
);
}
| rust | Apache-2.0 | f424547f02e621f1c5f28749786e05eda7feb098 | 2026-01-04T15:40:22.554517Z | false |
gleam-lang/gleam | https://github.com/gleam-lang/gleam/blob/f424547f02e621f1c5f28749786e05eda7feb098/compiler-core/src/type_/tests/let_assert.rs | compiler-core/src/type_/tests/let_assert.rs | use crate::{assert_error, assert_infer};
// `let assert` with list patterns: the overall expression takes the type of
// the trailing expression (here the literal `1`).
#[test]
fn empty_list() {
assert_infer!("let assert [] = [] 1", "Int");
}
// A single-element pattern binds `a` to the element type.
#[test]
fn list_one() {
assert_infer!("let assert [a] = [1] a", "Int");
}
// Extra literal elements in the pattern still type-check (refutability is a
// runtime concern for `let assert`).
#[test]
fn list_two() {
assert_infer!("let assert [a, 2] = [1] a", "Int");
}
// The spread (`..`) pattern and its discard variants are accepted.
#[test]
fn list_spread() {
assert_infer!("let assert [a, ..] = [1] a", "Int");
}
#[test]
fn list_spread_discard() {
assert_infer!("let assert [a, .._] = [1] a", "Int");
}
#[test]
fn list_spread_discard_comma_after() {
assert_infer!("let assert [a, .._,] = [1] a", "Int");
}
// Inside a function, destructuring an argument infers a generic list type…
#[test]
fn in_fn() {
assert_infer!("fn(x) { let assert [a] = x a }", "fn(List(a)) -> a");
}
// …which is specialised to Int when the element is used arithmetically.
#[test]
fn in_fn_list_int() {
assert_infer!("fn(x) { let assert [a] = x a + 1 }", "fn(List(Int)) -> Int");
}
// Discard patterns (named and anonymous) are allowed with `let assert`.
#[test]
fn discard_named() {
assert_infer!("let assert _x = 1 2.0", "Float");
}
#[test]
fn discard() {
assert_infer!("let assert _ = 1 2.0", "Float");
}
// Tuple destructuring binds each element to its positional type.
#[test]
fn tuple() {
assert_infer!("let assert #(tag, x) = #(1.0, 1) x", "Int");
}
#[test]
fn tuple_in_fn() {
assert_infer!("fn(x) { let assert #(a, b) = x a }", "fn(#(a, b)) -> a");
}
// An explicit annotation on the pattern is accepted.
#[test]
fn annotation() {
assert_infer!("let assert 5: Int = 5 5", "Int");
}
// The value of a `let assert` statement itself is the matched value's type.
#[test]
fn new_syntax() {
assert_infer!("let assert Ok(x) = Error(1)", "Result(a, Int)");
}
#[test]
fn expression() {
assert_infer!("let assert x = 1", "Int");
}
// `let assert` inside a block yields the block's value…
#[test]
fn expression1() {
assert_infer!("let assert x = { let assert x = 1 }", "Int");
}
#[test]
fn expression2() {
assert_infer!("let assert x = { let assert x = 1. }", "Float");
}
// A bare literal pattern is fine too.
#[test]
fn expression3() {
assert_infer!("let assert 1 = 1", "Int");
}
// The optional `as "message"` clause type-checks on `let assert`.
#[test]
fn message() {
assert_infer!(
r#"
let assert Ok(inner) = Ok(10) as "This clearly never fails"
inner
"#,
"Int"
);
}
// The message must be a String — an Int is a type error.
#[test]
fn non_string_message() {
assert_error!("let assert 1 = 2 as 3");
}
| rust | Apache-2.0 | f424547f02e621f1c5f28749786e05eda7feb098 | 2026-01-04T15:40:22.554517Z | false |
gleam-lang/gleam | https://github.com/gleam-lang/gleam/blob/f424547f02e621f1c5f28749786e05eda7feb098/compiler-core/src/type_/tests/externals.rs | compiler-core/src/type_/tests/externals.rs | use crate::{
assert_js_module_error, assert_js_module_infer, assert_module_error, assert_module_infer,
};
// https://github.com/gleam-lang/gleam/issues/2324
// Calling a JavaScript-only external from a module compiled for Erlang is an
// error; the same module compiles fine for the JavaScript target.
#[test]
fn javascript_only_function_used_by_erlang_module() {
let module = r#"@external(javascript, "one", "two")
fn js_only() -> Int
pub fn main() {
js_only()
}
"#;
assert_module_error!(module);
assert_js_module_infer!(module, vec![("main", "fn() -> Int")]);
}
// The mirror case: an Erlang-only external fails on the JavaScript target but
// works on Erlang.
#[test]
fn erlang_only_function_used_by_javascript_module() {
let module = r#"@external(erlang, "one", "two")
fn erlang_only() -> Int
pub fn main() {
erlang_only()
}
"#;
assert_js_module_error!(module);
assert_module_infer!(module, vec![("main", "fn() -> Int")]);
}
// A target-specific external that is never called must not be an error on the
// other target.
#[test]
fn unused_javascript_only_function_is_not_rejected_on_erlang_target() {
assert_module_infer!(
r#"@external(javascript, "one", "two")
fn js_only() -> Int
pub fn main() {
10
}
"#,
vec![("main", "fn() -> Int")]
);
}
#[test]
fn unused_erlang_only_function_is_not_rejected_on_javascript_target() {
assert_js_module_infer!(
r#"@external(erlang, "one", "two")
fn erlang_only() -> Int
pub fn main() {
10
}
"#,
vec![("main", "fn() -> Int")]
);
}
// A function with a JS external AND a Gleam body calling an Erlang-only
// external supports both targets: the body runs on Erlang, the external on JS.
#[test]
fn erlang_only_function_with_javascript_external() {
let module = r#"
@external(erlang, "one", "two")
fn erlang_only() -> Int
@external(javascript, "one", "two")
fn all_targets() -> Int {
erlang_only()
}
pub fn main() {
all_targets()
}
"#;
let expected = vec![("main", "fn() -> Int")];
assert_module_infer!(module, expected.clone());
assert_js_module_infer!(module, expected);
}
// Mirror case: Erlang external + body calling a JS-only external also covers
// both targets.
#[test]
fn javascript_only_function_with_erlang_external() {
let module = r#"
@external(javascript, "one", "two")
fn javascript_only() -> Int
@external(erlang, "one", "two")
fn all_targets() -> Int {
javascript_only()
}
pub fn main() {
all_targets()
}
"#;
let expected = vec![("main", "fn() -> Int")];
assert_module_infer!(module, expected.clone());
assert_js_module_infer!(module, expected);
}
// A JS external with a body calling another JS-only function: the body is the
// Erlang fallback, and it cannot compile for Erlang — error on that target.
#[test]
fn javascript_only_function_with_javascript_external() {
let module = r#"@external(javascript, "one", "two")
fn javascript_only() -> Int
@external(javascript, "one", "two")
pub fn uh_oh() -> Int {
javascript_only()
}
"#;
assert_js_module_infer!(module, vec![("uh_oh", "fn() -> Int")]);
assert_module_error!(module);
}
// Mirror case for an Erlang external whose fallback body is Erlang-only.
#[test]
fn erlang_only_function_with_erlang_external() {
let module = r#"@external(erlang, "one", "two")
fn erlang_only() -> Int
@external(erlang, "one", "two")
pub fn uh_oh() -> Int {
erlang_only()
}
"#;
assert_js_module_error!(module);
assert_module_infer!(module, vec![("uh_oh", "fn() -> Int")]);
}
// `@target(erlang)` restricts a function to Erlang, but its body must still
// only call things available on Erlang — calling a JS-only external is an
// error when compiling for Erlang.
#[test]
fn erlang_targeted_function_cant_contain_javascript_only_function() {
let module = r#"@target(erlang)
pub fn erlang_only() -> Int {
javascript_only()
}
@external(javascript, "one", "two")
fn javascript_only() -> Int
"#;
assert_js_module_infer!(module, vec![]);
assert_module_error!(module);
}
// Mirror case for `@target(javascript)` calling an Erlang-only external.
#[test]
fn javascript_targeted_function_cant_contain_erlang_only_function() {
let module = r#"@target(javascript)
pub fn javascript_only() -> Int {
erlang_only()
}
@external(erlang, "one", "two")
fn erlang_only() -> Int
"#;
assert_module_infer!(module, vec![]);
assert_js_module_error!(module);
}
// Target support is checked across modules: importing and calling a JS-only
// function from an Erlang-compiled module is an error.
#[test]
fn imported_javascript_only_function() {
assert_module_error!(
(
"module",
r#"@external(javascript, "one", "two")
pub fn javascript_only() -> Int"#
),
"import module
pub fn main() {
module.javascript_only()
}",
);
}
// Target restrictions also propagate through constants holding function
// references.
#[test]
fn javascript_only_constant() {
assert_module_error!(
(
"module",
r#"@external(javascript, "one", "two")
fn javascript_only() -> Int
const constant = javascript_only
pub const javascript_only_constant = constant
"#
),
"import module
pub fn main() {
module.javascript_only_constant()
}",
);
}
// A public JS-only external errors when the module is compiled for Erlang
// (its public API would be unusable there), but is fine for JS.
#[test]
fn public_javascript_external() {
let module = r#"@external(javascript, "one", "two")
pub fn main() -> Int
"#;
assert_module_error!(module);
assert_js_module_infer!(module, vec![("main", "fn() -> Int")]);
}
// Mirror case for a public Erlang-only external.
#[test]
fn public_erlang_external() {
let module = r#"@external(erlang, "one", "two")
pub fn main() -> Int
"#;
assert_module_infer!(module, vec![("main", "fn() -> Int")]);
assert_js_module_error!(module);
}
// Even an UNUSED unqualified import of a target-incompatible function is an
// error — importing it at all is the problem.
#[test]
fn unsupported_target_for_unused_import() {
// If we import a function which doesn't support the current target,
// even if we don't use it, the compiler should error
assert_module_error!(
(
"mod",
r#"@external(javascript, "wibble", "wobble") pub fn wobble()"#
),
"import mod.{wobble}"
);
}
// Importing a function compatible with the current target is fine, including
// re-exporting it via a constant.
#[test]
fn supported_target_for_imported_value() {
assert_module_infer!(
(
"mod",
r#"@external(erlang, "wibble", "wobble") pub fn wobble() -> Int"#
),
"import mod.{wobble}
pub const wobble = wobble",
vec![("wobble", "fn() -> Int")],
);
}
// JavaScript external module paths may carry explicit .mjs / .cjs extensions.
#[test]
fn javascript_mjs() {
assert_js_module_infer!(
r#"@external(javascript, "one.mjs", "two")
pub fn main() -> Int
"#,
vec![("main", "fn() -> Int")]
);
}
#[test]
fn javascript_cjs() {
assert_js_module_infer!(
r#"@external(javascript, "one.cjs", "two")
pub fn main() -> Int
"#,
vec![("main", "fn() -> Int")]
);
}
| rust | Apache-2.0 | f424547f02e621f1c5f28749786e05eda7feb098 | 2026-01-04T15:40:22.554517Z | false |
gleam-lang/gleam | https://github.com/gleam-lang/gleam/blob/f424547f02e621f1c5f28749786e05eda7feb098/compiler-core/src/type_/tests/assignments.rs | compiler-core/src/type_/tests/assignments.rs | use crate::assert_infer;
// `let` inference: the whole expression takes the type of the final
// expression after the binding(s).
#[test]
fn let_() {
assert_infer!("let x = 1 2", "Int");
}
#[test]
fn let_1() {
assert_infer!("let x = 1 x", "Int");
}
#[test]
fn let_2() {
assert_infer!("let x = 2.0 x", "Float");
}
// A binding may reference an earlier binding.
#[test]
fn let_3() {
assert_infer!("let x = 2 let y = x y", "Int");
}
// An `as` pattern alias on a nested tuple binds the aliased sub-pattern.
#[test]
fn let_4() {
assert_infer!(
"let #(#(_, _) as x, _) = #(#(0, 1.0), []) x",
"#(Int, Float)"
);
}
// Explicit annotations on `let` patterns are accepted and checked.
#[test]
fn let_5() {
assert_infer!("let x: String = \"\" x", "String");
}
#[test]
fn let_6() {
assert_infer!("let x: #(Int, Int) = #(5, 5) x", "#(Int, Int)",);
}
#[test]
fn let_7() {
assert_infer!("let x: #(Int, Float) = #(5, 5.0) x", "#(Int, Float)",);
}
// Annotated `let assert` with a list spread: the rest binding is a List.
#[test]
fn let_8() {
assert_infer!("let assert [1, 2, ..x]: List(Int) = [1,2,3] x", "List(Int)",);
}
// Annotated tuple patterns containing spreads type-check element-wise.
#[test]
fn let_9() {
assert_infer!(
"let assert #(5, [..x]): #(Int, List(Int)) = #(5, [1,2,3]) x",
"List(Int)",
);
}
#[test]
fn let_10() {
assert_infer!(
"let assert #(5.0, [..x]): #(Float, List(Int)) = #(5.0, [1,2,3]) x",
"List(Int)",
);
}
// A `_` placeholder in an annotation leaves that part to inference.
#[test]
fn let_11() {
assert_infer!("let x: List(_) = [] x", "List(a)");
}
#[test]
fn let_12() {
assert_infer!("let x: List(_) = [1] x", "List(Int)");
}
// `let assert` destructuring: element and rest bindings infer as expected.
#[test]
fn let_13() {
assert_infer!("let assert [a] = [1] a", "Int");
}
#[test]
fn let_14() {
assert_infer!("let assert [a, 2] = [1] a", "Int");
}
#[test]
fn let_15() {
assert_infer!("let assert [a, .. b] = [1] a", "Int");
}
#[test]
fn let_16() {
assert_infer!("let assert [a, .. _] = [1] a", "Int");
}
// Inside a function the pattern generalises over the element type…
#[test]
fn let_17() {
assert_infer!("fn(x) { let assert [a] = x a }", "fn(List(a)) -> a");
}
// …and is specialised by later arithmetic use.
#[test]
fn let_18() {
assert_infer!("fn(x) { let assert [a] = x a + 1 }", "fn(List(Int)) -> Int");
}
// Discard bindings don't affect the result type.
#[test]
fn let_19() {
assert_infer!("let _x = 1 2.0", "Float");
}
#[test]
fn let_20() {
assert_infer!("let _ = 1 2.0", "Float");
}
// Tuple destructuring in a plain `let`.
#[test]
fn let_21() {
assert_infer!("let #(tag, x) = #(1.0, 1) x", "Int");
}
#[test]
fn let_22() {
assert_infer!("fn(x) { let #(a, b) = x a }", "fn(#(a, b)) -> a");
}
#[test]
fn let_23() {
assert_infer!("let assert [] = [] 1", "Int");
}
// `Ok(..)` spread shorthand matches while discarding the payload.
#[test]
fn let_24() {
assert_infer!("let assert Ok(..) = Ok(10)", "Result(Int, a)");
}
// A string prefix pattern with an `as` alias binds the literal part.
#[test]
fn let_25() {
assert_infer!("let assert \"hello\" as a <> _ = \"\" a", "String");
}
// https://github.com/gleam-lang/gleam/issues/1991
// A shadowing `let` inside a block must not leak out: the outer `x` stays Int.
#[test]
fn no_scoped_var_collision() {
assert_infer!("let x = 1 { let x = 1.0 } x", "Int");
}
| rust | Apache-2.0 | f424547f02e621f1c5f28749786e05eda7feb098 | 2026-01-04T15:40:22.554517Z | false |
gleam-lang/gleam | https://github.com/gleam-lang/gleam/blob/f424547f02e621f1c5f28749786e05eda7feb098/compiler-core/src/type_/tests/functions.rs | compiler-core/src/type_/tests/functions.rs | use crate::{assert_module_error, assert_module_infer};
// https://github.com/gleam-lang/gleam/issues/1860
#[test]
fn unlabelled_after_labelled() {
assert_module_error!(
"fn main(wibble wibber, wobber) {
Nil
}"
);
}
// https://github.com/gleam-lang/gleam/issues/1860
#[test]
fn unlabelled_after_labelled_with_type() {
assert_module_error!(
"fn main(wibble wibber, wobber: Int) {
Nil
}"
);
}
// https://github.com/gleam-lang/gleam/issues/1860
#[test]
fn unlabelled_after_labelled_external() {
assert_module_error!(
r#"
@external(erlang, "", "")
fn main(wibble x: Int, y: Int) -> Int
"#
);
}
// https://github.com/gleam-lang/gleam/issues/1860
#[test]
fn all_labelled() {
assert_module_infer!(
r#"pub fn prepend(to list: List(a), this item: a) -> List(a) {
[item, ..list]
}
"#,
vec![(r#"prepend"#, r#"fn(List(a), a) -> List(a)"#)]
);
}
// https://github.com/gleam-lang/gleam/issues/1814
#[test]
fn out_of_order_generalisation() {
assert_module_infer!(
r#"
pub fn main() {
call(fn() {
"Hello"
})
}
fn call(f: fn() -> a) {
f()
}
"#,
vec![(r#"main"#, r#"fn() -> String"#)]
);
}
// https://github.com/gleam-lang/gleam/issues/2275
#[test]
fn bug_2275() {
assert_module_infer!(
r#"
pub fn zero() {
one()
}
fn one() {
one()
two()
}
fn two() {
two
Nil
}
"#,
vec![(r#"zero"#, r#"fn() -> Nil"#)]
);
}
// https://github.com/gleam-lang/gleam/issues/2275
#[test]
fn bug_2275_2_self_references() {
assert_module_infer!(
r#"
pub fn zero() {
one()
}
fn one() {
one()
two()
}
fn two() {
two
two
Nil
}
"#,
vec![(r#"zero"#, r#"fn() -> Nil"#)]
);
}
// https://github.com/gleam-lang/gleam/issues/2275
#[test]
fn bug_2275_again() {
assert_module_infer!(
r#"
pub fn aaa(input) {
case [] {
[] -> input
_ -> {
let input2 = bbb()
aaa(input2)
}
}
}
pub fn bbb() {
ccc() + bbb()
}
pub fn ccc() {
ccc() + bbb()
}
"#,
vec![
(r#"aaa"#, r#"fn(Int) -> Int"#),
(r#"bbb"#, r#"fn() -> Int"#),
(r#"ccc"#, r#"fn() -> Int"#),
]
);
}
#[test]
fn deprecated_function() {
assert_module_infer!(
r#"
@deprecated("use wibble instead")
pub fn main() {
Nil
}"#,
vec![(r#"main"#, r#"fn() -> Nil"#)]
);
}
// https://github.com/gleam-lang/gleam/issues/2303
#[test]
fn recursive_type() {
assert_module_error!(
r#"
pub fn one(x) {
two([x])
}
pub fn two(x) {
one(x)
}
"#
);
}
#[test]
fn no_impl_function_fault_tolerance() {
// A function not having an implementation does not stop analysis.
assert_module_error!(
r#"
pub fn no_impl() -> Nil
pub type X = UnknownType
"#
);
}
#[test]
fn bad_body_function_fault_tolerance() {
// A function having an invalid body does not stop analysis.
assert_module_error!(
r#"
pub fn bad(x: Int) -> Float {
// Invalid body.
"" + ""
}
pub fn user() -> Float {
// This checks that the bad function is still usable, the types coming from
// its annotations. This function is valid.
bad(1)
}
// Another bad function to make sure that analysis has not stopped. This error
// should also be emitted.
pub fn bad_2() {
bad(Nil)
}
"#
);
}
#[test]
fn annotation_mismatch_function_fault_tolerance() {
// A function having an invalid body does not stop analysis.
assert_module_error!(
r#"
pub fn bad(x: Int) -> Float {
// This does not match the return annotation
1
}
pub fn user() -> Float {
// This checks that the bad function is still usable, the types coming from
// its annotations. This function is valid.
bad(1)
}
// Another bad function to make sure that analysis has not stopped. This error
// should also be emitted.
pub fn bad_2() {
bad(Nil)
}
"#
);
}
#[test]
fn invalid_javascript_external_do_not_stop_analysis() {
// Both these have errors! We do not stop on the first one.
assert_module_error!(
r#"
@external(javascript, "somemodule", "() => 123")
pub fn one() -> Nil {
Nil
}
pub fn two() -> Nil {
""
}
"#
);
}
#[test]
fn multiple_bad_statement_assignment_fault_tolerance() {
assert_module_error!(
r#"
pub fn main() {
let a = 1 + 2.0
let b = 3 + 4.0
let c = a + b
}
"#
);
}
#[test]
fn multiple_bad_statement_assignment_with_annotation_fault_tolerance() {
assert_module_error!(
r#"
pub fn main() {
let a: Int = "not an int"
let b: String = 1
let c = a + 2
}
"#
);
}
#[test]
fn multiple_bad_statement_assignment_with_annotation_fault_tolerance2() {
assert_module_error!(
r#"
pub fn main() {
// Since the value is invalid the type is the annotation
let a: Int = Junk
let b: String = 1
let c = a + 2
}
"#
);
}
#[test]
fn multiple_bad_statement_assignment_with_pattern_fault_tolerance2() {
assert_module_error!(
r#"
pub fn main() {
// Since the pattern is invalid no variable is created
let Junk(a) = 7
// Pattern is valid but does not type check
let Ok(b) = 1
let c = a + b
}
"#
);
}
#[test]
fn multiple_bad_statement_expression_fault_tolerance() {
assert_module_error!(
r#"
pub fn main() {
1 + 2.0
3 + 4.0
let c = 1 + 2
}
"#
);
}
#[test]
fn function_call_incorrect_arg_types_fault_tolerance() {
assert_module_error!(
r#"
fn add(x: Int, y: Int) {
x + y
}
pub fn main() {
add(1.0, 1.0)
}
"#
);
}
#[test]
fn function_call_incorrect_arity_fault_tolerance() {
assert_module_error!(
r#"
fn add(x: Int, y: Int) {
x + y
}
pub fn main() {
add(1.0)
}
"#
);
}
#[test]
fn function_call_incorrect_arity_with_labels_fault_tolerance() {
assert_module_error!(
r#"
fn wibble(wibble arg1: fn() -> Int, wobble arg2: Int) -> Int {
arg1() + arg2
}
pub fn main() {
wibble(wobble: "")
}
"#
);
}
#[test]
fn function_call_incorrect_arity_with_label_shorthand_fault_tolerance() {
assert_module_error!(
r#"
fn wibble(wibble arg1: fn() -> Int, wobble arg2: Int) -> Int {
arg1() + arg2
}
pub fn main() {
let wobble = ""
wibble(wobble:)
}
"#
);
}
#[test]
fn function_call_incorrect_arity_with_labels_fault_tolerance2() {
assert_module_error!(
r#"
fn wibble(wibble arg1: fn() -> Int, wobble arg2: Int, wabble arg3: Int) -> Int {
arg1() + arg2 + arg3
}
pub fn main() {
wibble(fn() {""}, wobble: "")
}
"#
);
}
#[test]
fn function_call_incorrect_arity_with_label_shorthand_fault_tolerance2() {
assert_module_error!(
r#"
fn wibble(wibble arg1: fn() -> Int, wobble arg2: Int, wabble arg3: Int) -> Int {
arg1() + arg2 + arg3
}
pub fn main() {
let wobble = ""
wibble(fn() {""}, wobble:)
}
"#
);
}
#[test]
fn case_clause_pattern_fault_tolerance() {
assert_module_error!(
r#"
pub fn main() {
let wibble = True
case wibble {
True -> 0
Wibble -> 1
Wibble2 -> 2
_ -> 3
}
}
"#
);
}
#[test]
fn case_clause_guard_fault_tolerance() {
assert_module_error!(
r#"
pub fn main() {
let wibble = True
case wibble {
a if a == Wibble -> 0
b if b == Wibble -> 0
_ -> 1
}
}
"#
);
}
#[test]
fn case_clause_then_fault_tolerance() {
assert_module_error!(
r#"
pub fn main() {
let wibble = True
case wibble {
True -> {
1.0 + 1.0
}
_ -> {
1.0 + 1.0
}
}
}
"#
);
}
// https://github.com/gleam-lang/gleam/issues/2504
#[test]
fn provide_arg_type_to_fn_implicit_ok() {
assert_module_infer!(
r#"
pub fn main() {
let z = #(1,2)
fn(x) { x.0 }(z)
}
"#,
vec![("main", "fn() -> Int")]
);
}
#[test]
fn provide_arg_type_to_fn_explicit_ok() {
assert_module_infer!(
r#"
pub fn main() {
let z = #(1,2)
fn(x: #(Int, Int)) { x.0 }(z)
}
"#,
vec![("main", "fn() -> Int")]
);
}
#[test]
fn provide_arg_type_to_fn_implicit_error() {
assert_module_error!(
r#"
pub fn main() {
let z = #(1,2)
fn(x) { x.2 }(z)
}
"#
);
}
#[test]
fn provide_arg_type_to_fn_explicit_error() {
assert_module_error!(
r#"
pub fn main() {
let z = #(1,2)
fn(x: #(Int, Int)) { x.2 }(z)
}
"#
);
}
#[test]
fn provide_arg_type_to_fn_arg_infer_error() {
assert_module_error!(
r#"
pub fn main() {
fn(x) { x.2 }(z)
}
"#
);
}
#[test]
fn provide_arg_type_to_fn_not_a_tuple() {
assert_module_error!(
r#"
pub fn main() {
let z = "not a tuple"
fn(x) { x.2 }(z)
}
"#
);
}
#[test]
fn provide_two_args_type_to_fn() {
assert_module_infer!(
r#"
pub fn main() {
let a = #(1,2)
let b = #(1,2)
fn(x, y) { x.0 + y.1 }(a, b)
}
"#,
vec![("main", "fn() -> Int")]
);
}
#[test]
fn provide_one_arg_type_to_two_args_fn() {
assert_module_error!(
r#"
pub fn main() {
let a = #(1,2)
fn(x, y) { x.0 + y.1 }(a)
}
"#
);
}
#[test]
fn provide_two_args_type_to_fn_wrong_types() {
assert_module_error!(
r#"
pub fn main() {
let a = 1
let b = "not an int"
fn(x, y) { x + y }(a, b)
}
"#
);
}
| rust | Apache-2.0 | f424547f02e621f1c5f28749786e05eda7feb098 | 2026-01-04T15:40:22.554517Z | false |
gleam-lang/gleam | https://github.com/gleam-lang/gleam/blob/f424547f02e621f1c5f28749786e05eda7feb098/compiler-core/src/type_/tests/echo.rs | compiler-core/src/type_/tests/echo.rs | use crate::assert_module_infer;
#[test]
pub fn echo_has_same_type_as_printed_expression() {
assert_module_infer!(
r#"
pub fn main() {
echo 1
}
"#,
vec![("main", "fn() -> Int")]
);
}
#[test]
pub fn echo_has_same_type_as_printed_expression_2() {
assert_module_infer!(
r#"
pub fn main() {
let wibble = todo
echo wibble
}
"#,
vec![("main", "fn() -> a")]
);
}
#[test]
pub fn echo_in_pipeline_acts_as_the_identity_function() {
assert_module_infer!(
r#"
pub fn main() {
[1, 2, 3]
|> echo
}
"#,
vec![("main", "fn() -> List(Int)")]
);
}
#[test]
pub fn echo_in_pipeline_acts_as_the_identity_function_2() {
assert_module_infer!(
r#"
pub fn main() {
1
|> echo
|> fn(_: Int) { True }
}
"#,
vec![("main", "fn() -> Bool")]
);
}
#[test]
pub fn echo_in_pipeline_acts_as_the_identity_function_3() {
assert_module_infer!(
r#"
pub fn main() {
[1, 2, 3]
|> echo
|> echo
|> wibble
}
fn wibble(_: List(Int)) -> List(String) { todo }
"#,
vec![("main", "fn() -> List(String)")]
);
}
| rust | Apache-2.0 | f424547f02e621f1c5f28749786e05eda7feb098 | 2026-01-04T15:40:22.554517Z | false |
gleam-lang/gleam | https://github.com/gleam-lang/gleam/blob/f424547f02e621f1c5f28749786e05eda7feb098/compiler-core/src/type_/tests/pipes.rs | compiler-core/src/type_/tests/pipes.rs | use crate::{assert_module_error, assert_module_infer, assert_no_warnings};
// https://github.com/gleam-lang/gleam/issues/2392
#[test]
fn empty_list() {
assert_module_infer!(
"
pub fn a() {
fn(_) { Nil }
}
pub fn b(_) {
fn(_) { Nil }
}
pub fn c() {
Nil
|> b(
Nil
|> a(),
)
}",
vec![
("a", "fn() -> fn(a) -> Nil"),
("b", "fn(a) -> fn(b) -> Nil"),
("c", "fn() -> Nil"),
]
);
}
// https://github.com/gleam-lang/gleam/pull/3406#discussion_r1683068647
#[test]
fn pipe_rewrite_with_missing_argument() {
assert_module_infer!(
r#"
pub fn main() {
let f = fn(a, b) { fn(c) { a + b + c } }
1 |> f(2)
}
"#,
vec![("main", "fn() -> fn(Int) -> Int")]
);
}
#[test]
fn pipe_regression_gh3515() {
// https://github.com/gleam-lang/gleam/issues/3515
assert_module_infer!(
r#"
fn relu(t) {
fn(theta: String) {
// use t and theta and return a Float
0.0
}
}
pub fn k_relu(k: Int) {
fn(t: Float) {
fn(theta: String) {
case k {
0 -> t
_ -> {
// following code is OK on gleam 1.3.2,
// but raised error on gleam 1.4.1
// The key here is that it is not a direct function call,
// but a "var" call, which points to the same function.
let next_layer = theta |> relu(t) |> k_relu(k - 1)
theta |> next_layer
}
}
}
}
}"#,
vec![("k_relu", "fn(Int) -> fn(Float) -> fn(String) -> Float")],
);
}
#[test]
fn pipe_callback_var_function1() {
assert_module_infer!(
r#"
pub fn main() {
let f = fn(a) { fn(b) { #(a, b) } }
let x = 1 |> f()
}
"#,
vec![("main", "fn() -> fn(a) -> #(Int, a)")],
);
}
#[test]
fn pipe_callback_var_function2() {
assert_module_infer!(
r#"
pub fn main() {
let f = fn(a) { fn(b) { #(a, b) } }
let x = 1 |> f(1)
}
"#,
vec![("main", "fn() -> #(Int, Int)")],
);
}
#[test]
fn pipe_callback_correct_arity1() {
assert_module_infer!(
r#"
fn callback(a: Int) {
fn() -> String {
"Called"
}
}
pub fn main() {
let x = 1 |> callback()
}
"#,
vec![("main", "fn() -> fn() -> String")],
);
}
#[test]
fn pipe_callback_correct_arity2() {
assert_module_infer!(
r#"
fn callback(a: Float) {
fn(b: Int) -> String {
"Called"
}
}
pub fn main() {
let x = 1 |> callback(2.5)
}
"#,
vec![("main", "fn() -> String")],
);
}
#[test]
fn pipe_callback_wrong_arity() {
assert_module_error!(
r#"
fn callback(a: Int) {
fn() -> String {
"Called"
}
}
pub fn main() {
let x = 1 |> callback(2)
}
"#
);
}
#[test]
fn no_warnings_when_piping_into_labelled_capture_as_first_argument() {
assert_no_warnings!(
"
fn wibble(label1 a, label2 b, lots c, of d, labels e) {
a + b * c - d / e
}
pub fn main() {
1 |> wibble(label1: _, label2: 2, lots: 3, of: 4, labels: 5)
}
"
);
}
#[test]
fn no_warnings_when_piping_into_labelled_capture_as_only_argument() {
assert_no_warnings!(
"
fn wibble(descriptive_label value) {
value
}
pub fn main() {
42 |> wibble(descriptive_label: _)
}
"
);
}
| rust | Apache-2.0 | f424547f02e621f1c5f28749786e05eda7feb098 | 2026-01-04T15:40:22.554517Z | false |
gleam-lang/gleam | https://github.com/gleam-lang/gleam/blob/f424547f02e621f1c5f28749786e05eda7feb098/compiler-core/src/type_/tests/conditional_compilation.rs | compiler-core/src/type_/tests/conditional_compilation.rs | use crate::assert_module_infer;
#[test]
fn excluded_error() {
assert_module_infer!(
"@target(javascript)
pub type X = Y
pub const x = 1
",
vec![("x", "Int")],
);
}
#[test]
fn alias() {
assert_module_infer!(
"@target(erlang)
pub type X = Int
pub const x: X = 1
",
vec![("x", "Int")],
);
}
#[test]
fn alias_in_block() {
assert_module_infer!(
"@target(erlang)
pub type X = Int
@target(erlang)
pub const x: X = 1
",
vec![("x", "Int")],
);
}
#[test]
fn generalising() {
assert_module_infer!(
"
@target(erlang)
pub fn id(x) { x }
@target(erlang)
pub fn x() { id(1) }
",
vec![("id", "fn(a) -> a"), ("x", "fn() -> Int")],
);
}
#[test]
fn excluded_generalising() {
assert_module_infer!(
"
@target(javascript)
pub fn id(x) { x }
@target(javascript)
pub fn x() { id(1) }
pub const y = 1
",
vec![("y", "Int")],
);
}
#[test]
fn included_const_ref_earlier() {
assert_module_infer!(
"
@target(erlang)
const x = 1
pub fn main() { x }
",
vec![("main", "fn() -> Int")],
);
}
#[test]
fn included_const_ref_later() {
assert_module_infer!(
"pub fn main() { x }
@target(erlang)
const x = 1
",
vec![("main", "fn() -> Int")],
);
}
#[test]
fn target_does_not_need_to_be_the_first_attribute() {
// In previous versions of Gleam the `@target` attribute had to be the
// first attribute.
assert_module_infer!(
r#"
@external(erlang, "blah", "wub")
@target(erlang)
pub fn main() -> Int
"#,
vec![("main", "fn() -> Int")],
);
}
| rust | Apache-2.0 | f424547f02e621f1c5f28749786e05eda7feb098 | 2026-01-04T15:40:22.554517Z | false |
gleam-lang/gleam | https://github.com/gleam-lang/gleam/blob/f424547f02e621f1c5f28749786e05eda7feb098/compiler-core/src/type_/tests/version_inference.rs | compiler-core/src/type_/tests/version_inference.rs | use hexpm::version::Version;
use super::compile_module;
fn infer_version(module: &str) -> Version {
compile_module("test_module", module, None, vec![])
.expect("module to compile")
.type_info
.minimum_required_version
}
#[test]
fn internal_annotation_on_constant_requires_v1_1() {
let version = infer_version(
"
@internal
pub const wibble = 1
",
);
assert_eq!(version, Version::new(1, 1, 0));
}
#[test]
fn internal_annotation_on_type_requires_v1_1() {
let version = infer_version(
"
@internal
pub type Wibble
",
);
assert_eq!(version, Version::new(1, 1, 0));
}
#[test]
fn internal_annotation_on_function_requires_v1_1() {
let version = infer_version(
"
@internal
pub fn wibble() {}
",
);
assert_eq!(version, Version::new(1, 1, 0));
}
#[test]
fn nested_tuple_access_requires_v1_1() {
let version = infer_version(
"
pub fn main() {
let tuple = #(1, #(1, 1))
tuple.1.0
}
",
);
assert_eq!(version, Version::new(1, 1, 0));
}
#[test]
fn javascript_external_module_with_at_requires_v1_2() {
let version = infer_version(
"
@external(javascript, \"module@module\", \"func\")
pub fn main() {}
",
);
assert_eq!(version, Version::new(1, 2, 0));
}
#[test]
fn int_plus_in_guards_requires_v1_3() {
let version = infer_version(
"
pub fn main() {
case todo {
_ if 1 + 1 == 2 -> todo
_ -> todo
}
}
",
);
assert_eq!(version, Version::new(1, 3, 0));
}
#[test]
fn float_plus_in_guards_requires_v1_3() {
let version = infer_version(
"
pub fn main() {
case todo {
_ if 1.0 +. 1.0 == 2.0 -> todo
_ -> todo
}
}
",
);
assert_eq!(version, Version::new(1, 3, 0));
}
#[test]
fn int_minus_in_guards_requires_v1_3() {
let version = infer_version(
"
pub fn main() {
case todo {
_ if 1 - 1 == 0 -> todo
_ -> todo
}
}
",
);
assert_eq!(version, Version::new(1, 3, 0));
}
#[test]
fn float_minus_in_guards_requires_v1_3() {
let version = infer_version(
"
pub fn main() {
case todo {
_ if 1.0 -. 1.0 == 0.0 -> todo
_ -> todo
}
}
",
);
assert_eq!(version, Version::new(1, 3, 0));
}
#[test]
fn int_multiplication_in_guards_requires_v1_3() {
let version = infer_version(
"
pub fn main() {
case todo {
_ if 1 * 1 == 0 -> todo
_ -> todo
}
}
",
);
assert_eq!(version, Version::new(1, 3, 0));
}
#[test]
fn float_multiplication_in_guards_requires_v1_3() {
let version = infer_version(
"
pub fn main() {
case todo {
_ if 1.0 *. 1.0 == 0.0 -> todo
_ -> todo
}
}
",
);
assert_eq!(version, Version::new(1, 3, 0));
}
#[test]
fn int_divide_in_guards_requires_v1_3() {
let version = infer_version(
"
pub fn main() {
case todo {
_ if 1 / 1 == 0 -> todo
_ -> todo
}
}
",
);
assert_eq!(version, Version::new(1, 3, 0));
}
#[test]
fn float_divide_in_guards_requires_v1_3() {
let version = infer_version(
"
pub fn main() {
case todo {
_ if 1.0 /. 1.0 == 0.0 -> todo
_ -> todo
}
}
",
);
assert_eq!(version, Version::new(1, 3, 0));
}
#[test]
fn int_remainder_in_guards_requires_v1_3() {
let version = infer_version(
"
pub fn main() {
case todo {
_ if 1 % 1 == 0 -> todo
_ -> todo
}
}
",
);
assert_eq!(version, Version::new(1, 3, 0));
}
#[test]
fn label_shorthand_in_constand_requires_v1_4() {
let version = infer_version(
"
pub type Wibble { Wibble(wibble: Int) }
pub const wibble = 1
pub const wobble = Wibble(wibble:)
",
);
assert_eq!(version, Version::new(1, 4, 0));
}
#[test]
fn label_shorthand_in_call_requires_v1_4() {
let version = infer_version(
"
pub type Wibble { Wibble(wibble: Int) }
pub fn main() {
let wibble = 1
Wibble(wibble:)
}
",
);
assert_eq!(version, Version::new(1, 4, 0));
}
#[test]
fn label_shorthand_in_pattern_requires_v1_4() {
let version = infer_version(
"
pub type Wibble { Wibble(wibble: Int) }
pub fn main() {
case Wibble(1) {
Wibble(wibble:) -> todo
}
}
",
);
assert_eq!(version, Version::new(1, 4, 0));
}
#[test]
fn label_shorthand_in_record_update_requires_v1_4() {
let version = infer_version(
"
pub type Vec2 { Vec2(x: Int, y: Int) }
pub fn main() {
let x = 1
Vec2(..Vec2(0, 0), x:)
}
",
);
assert_eq!(version, Version::new(1, 4, 0));
}
#[test]
fn constant_string_concatenation_requires_v1_4() {
let version = infer_version("pub const string = \"wibble\" <> \"wobble\"");
assert_eq!(version, Version::new(1, 4, 0));
}
#[test]
fn missing_utf_8_option_in_bit_array_segment_requires_v1_5() {
let version = infer_version(
"
pub fn main() {
<<\"hello\", \" world!\">>
}
",
);
assert_eq!(version, Version::new(1, 5, 0));
}
#[test]
fn missing_utf_8_option_in_bit_array_constant_segment_requires_v1_5() {
let version = infer_version("const bits = <<\"hello\", \" world!\">>");
assert_eq!(version, Version::new(1, 5, 0));
}
#[test]
fn missing_utf_8_option_in_bit_array_pattern_segment_requires_v1_5() {
let version = infer_version(
"
pub fn main() {
case todo {
<<\"hello\", \" world!\">> -> todo
_ -> todo
}
}
",
);
assert_eq!(version, Version::new(1, 5, 0));
}
#[test]
fn missing_float_option_in_bit_array_segment_requires_v1_10() {
let version = infer_version(
"
pub fn main() {
<<1.2>>
}
",
);
assert_eq!(version, Version::new(1, 10, 0));
}
#[test]
fn missing_float_option_in_bit_array_constant_segment_requires_v1_10() {
let version = infer_version("const bits = <<1.2>>");
assert_eq!(version, Version::new(1, 10, 0));
}
#[test]
fn missing_float_option_in_bit_array_pattern_segment_requires_v1_10() {
let version = infer_version(
"
pub fn main() {
case todo {
<<1.11>> -> todo
_ -> todo
}
}
",
);
assert_eq!(version, Version::new(1, 10, 0));
}
#[test]
fn const_record_update_requires_v1_14() {
let version = infer_version(
"
pub type Wibble { Wibble(a: Int, b: Int) }
const base = Wibble(1, 2)
const wobble = Wibble(..base, a: 3)
",
);
assert_eq!(version, Version::new(1, 14, 0));
}
#[test]
fn inference_picks_the_bigger_of_two_versions() {
let version = infer_version(
"
pub fn main() {
case todo {
<<\"hello\", \" world!\">> -> todo
_ if 1 + 1 == 2-> todo
_ -> todo
}
}
",
);
assert_eq!(version, Version::new(1, 5, 0));
}
#[test]
fn inference_picks_the_bigger_of_two_versions_2() {
let version = infer_version(
"
@external(javascript, \"module@module\", \"func\")
pub fn main() {
let tuple = #(1, #(1, 1))
tuple.1.0
}
",
);
assert_eq!(version, Version::new(1, 2, 0));
}
#[test]
fn bool_assert_requires_v1_11() {
let version = infer_version(
"
pub fn main() {
assert 1 != 2
}
",
);
assert_eq!(version, Version::new(1, 11, 0));
}
#[test]
fn expression_in_expression_segment_size_requires_v1_12() {
let version = infer_version(
"
pub fn main() {
<<1:size(3 * 8)>>
}
",
);
assert_eq!(version, Version::new(1, 12, 0));
}
#[test]
fn expression_in_pattern_segment_size_requires_v1_12() {
let version = infer_version(
"
pub fn main(x) {
case x {
<<_:size(3*8)>> -> 1
_ -> 2
}
}",
);
assert_eq!(version, Version::new(1, 12, 0));
}
| rust | Apache-2.0 | f424547f02e621f1c5f28749786e05eda7feb098 | 2026-01-04T15:40:22.554517Z | false |
gleam-lang/gleam | https://github.com/gleam-lang/gleam/blob/f424547f02e621f1c5f28749786e05eda7feb098/compiler-core/src/type_/tests/custom_types.rs | compiler-core/src/type_/tests/custom_types.rs | use crate::{assert_module_error, assert_module_infer, assert_warning};
// https://github.com/gleam-lang/gleam/issues/2215
#[test]
fn generic_phantom() {
assert_module_infer!(
r#"
pub type Test(a) {
MakeTest(field: Test(Int))
}
"#,
vec![("MakeTest", "fn(Test(Int)) -> Test(a)")]
);
}
#[test]
fn deprecated_type() {
assert_warning!(
r#"
@deprecated("Dont use this!")
pub type Cat {
Cat(name: String, cuteness: Int)
}
pub fn name() -> String {
let c = Cat("Numi", 20)
c.name
}
"#
);
}
#[test]
fn deprecated_all_varients_type() {
assert_module_error!(
r#"
pub type Numbers {
@deprecated("1")
One
@deprecated("2")
Two
}
"#
);
}
#[test]
fn deprecated_varients_type() {
assert_warning!(
r#"
pub type Numbers {
@deprecated("1")
One
Two
}
pub fn num() {
let _one = One
let _two = Two
Nil
}
"#
);
}
#[test]
fn depreacted_type_deprecate_varient_err() {
assert_module_error!(
r#"
@deprecated("2")
pub type Numbers {
@deprecated("1")
One
Two
}
pub fn num() {
let _two = Two
Nil
}
"#
);
}
#[test]
fn fault_tolerance() {
// An error in a custom type does not stop analysis
assert_module_error!(
r#"
pub type Cat {
Cat(UnknownType)
}
pub type Kitten = AnotherUnknownType
"#
);
}
#[test]
fn duplicate_variable_error_does_not_stop_analysis() {
// Both these aliases have errors! We do not stop on the first one.
assert_module_error!(
r#"
type Two(a, a) {
Two(a, a)
}
type Three(a, a) {
Three
}
"#
);
}
#[test]
fn conflict_with_import() {
// We cannot declare a type with the same name as an imported type
assert_module_error!(
("wibble", "pub type A { B }"),
"import wibble.{type A} type A { C }",
);
}
#[test]
fn generic_record_update1() {
// A record update on polymorphic types with a field of different type
assert_module_infer!(
"
pub type Box(a) {
Box(value: a, i: Int)
}
pub fn update_box(box: Box(Int), value: String) {
Box(..box, value: value)
}",
vec![
("Box", "fn(a, Int) -> Box(a)"),
("update_box", "fn(Box(Int), String) -> Box(String)")
]
);
}
#[test]
fn generic_record_update2() {
// A record update on polymorphic types with generic fields of a different type
assert_module_infer!(
"
pub type Box(a) {
Box(value: a, i: Int)
}
pub fn update_box(box: Box(a), value: b) {
Box(..box, value: value)
}",
vec![
("Box", "fn(a, Int) -> Box(a)"),
("update_box", "fn(Box(a), b) -> Box(b)")
]
);
}
#[test]
fn inferred_variant_record_update_change_type_parameter() {
assert_module_infer!(
r#"
pub type Box(a) {
Locked(password: String, value: a)
Unlocked(password: String, value: a)
}
pub fn main() {
let box = Locked("ungu€$$4bLe", 11)
case box {
Locked(..) as box -> Locked(..box, value: True)
Unlocked(..) as box -> Unlocked(..box, value: False)
}
}
"#,
vec![
("Locked", "fn(String, a) -> Box(a)"),
("Unlocked", "fn(String, a) -> Box(a)"),
("main", "fn() -> Box(Bool)")
]
);
}
#[test]
fn pattern_match_correct_labeled_field() {
assert_module_error!(
r#"
type Fish {
Starfish()
Jellyfish(name: String, jiggly: Bool)
}
fn handle_fish(fish: Fish) {
case fish {
Starfish() -> False
Jellyfish(jiggly:) -> jiggly // <- error is here
}
}
"#
);
}
#[test]
fn pattern_match_correct_pos_field() {
assert_module_error!(
r#"
type Fish {
Starfish()
Jellyfish(String, Bool)
}
fn handle_fish(fish: Fish) {
case fish {
Starfish() -> False
Jellyfish(jiggly) -> jiggly
}
}
"#
);
}
| rust | Apache-2.0 | f424547f02e621f1c5f28749786e05eda7feb098 | 2026-01-04T15:40:22.554517Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.