repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
gitui-org/gitui | https://github.com/gitui-org/gitui/blob/d68f366b1b7106223a0b5ad2481a782a7bd68883/filetreelist/src/treeitems_iter.rs | filetreelist/src/treeitems_iter.rs | use crate::{filetreeitems::FileTreeItems, item::FileTreeItem};
/// Iterator over the *visible* entries of a [`FileTreeItems`],
/// yielding at most `max_amount` items starting from a given index.
pub struct TreeItemsIterator<'a> {
// tree being traversed (borrowed for the iterator's lifetime)
tree: &'a FileTreeItems,
// absolute index of the item currently being inspected
index: usize,
// number of items yielded so far; `None` until the first `next()` call
increments: Option<usize>,
// upper bound on how many items this iterator will yield
max_amount: usize,
}
impl<'a> TreeItemsIterator<'a> {
pub const fn new(
tree: &'a FileTreeItems,
start: usize,
max_amount: usize,
) -> Self {
TreeItemsIterator {
max_amount,
increments: None,
index: start,
tree,
}
}
}
impl<'a> Iterator for TreeItemsIterator<'a> {
// yields the absolute index of the item together with the item itself
type Item = (usize, &'a FileTreeItem);
fn next(&mut self) -> Option<Self::Item> {
// stop once `max_amount` items have been produced
// (`None` counts as zero — iteration not yet started)
if self.increments.unwrap_or_default() < self.max_amount {
let items = &self.tree.tree_items;
// on the very first call the start index itself must be inspected;
// on later calls we advance past the previously returned item
let mut init = self.increments.is_none();
if let Some(i) = self.increments.as_mut() {
*i += 1;
} else {
self.increments = Some(0);
}
loop {
if !init {
self.index += 1;
}
init = false;
// ran off the end of the tree — iteration is over
if self.index >= self.tree.len() {
break;
}
let elem = &items[self.index];
// skip items whose info marks them as not visible
if elem.info().is_visible() {
return Some((self.index, &items[self.index]));
}
}
}
None
}
}
| rust | MIT | d68f366b1b7106223a0b5ad2481a782a7bd68883 | 2026-01-04T15:40:16.730844Z | false |
sharkdp/hexyl | https://github.com/sharkdp/hexyl/blob/2e2643782d6ced9b5ac75596169a79127d8e535a/src/lib.rs | src/lib.rs | pub(crate) mod colors;
pub(crate) mod input;
pub use colors::*;
pub use input::*;
use std::io::{self, BufReader, Read, Write};
use clap::ValueEnum;
/// Numeral base used when rendering byte values in the hex panels.
pub enum Base {
// 8 digits per byte, e.g. `01001000`
Binary,
// 3 digits per byte, e.g. `110`
Octal,
// 3 digits per byte, e.g. `072`
Decimal,
// 2 digits per byte, e.g. `48`
Hexadecimal,
}
/// Classification of a byte value, used to pick its color and
/// replacement character.
#[derive(Copy, Clone)]
pub enum ByteCategory {
// 0x00
Null,
// graphic ASCII (0x21..=0x7e)
AsciiPrintable,
// ASCII whitespace (space, tab, newline, ...)
AsciiWhitespace,
// remaining ASCII control bytes
AsciiOther,
// 0x80 and above
NonAscii,
}
/// Controls the C-include-style output mode
/// (`unsigned char x[] = { ... };`).
pub enum IncludeMode {
File(String), // filename used to derive the C variable name
// input comes from stdin: emit only the byte list, no variable wrapper
Stdin,
// emit only the brace-less byte slice
Slice,
// normal hex-dump output (include mode disabled)
Off,
}
/// Mapping from byte values to the characters shown in the character panel.
#[derive(Copy, Clone, Debug, Default, ValueEnum)]
#[non_exhaustive]
pub enum CharacterTable {
/// Show printable ASCII characters as-is, '⋄' for NULL bytes, ' ' for
/// space, '_' for other ASCII whitespace, '•' for other ASCII characters,
/// and '×' for non-ASCII bytes.
#[default]
Default,
/// Show printable ASCII as-is, ' ' for space, '.' for everything else.
Ascii,
/// Show printable EBCDIC as-is, ' ' for space, '.' for everything else.
#[value(name = "codepage-1047")]
CP1047,
/// Uses code page 437 (for non-ASCII bytes).
#[value(name = "codepage-437")]
CP437,
/// Uses braille characters for non-printable bytes.
Braille,
}
/// Color scheme applied to bytes in both the hex and character panels.
#[derive(Copy, Clone, Debug, Default, ValueEnum)]
#[non_exhaustive]
pub enum ColorScheme {
/// Show the default colors: bright black for NULL bytes, green for ASCII
/// space characters and non-printable ASCII, cyan for printable ASCII characters,
/// and yellow for non-ASCII bytes.
#[default]
Default,
/// Show bright black for NULL bytes, cyan for printable ASCII characters, a gradient
/// from pink to violet for non-printable ASCII characters and a heatmap-like gradient
/// from red to yellow to white for non-ASCII bytes.
Gradient,
}
/// Byte ordering used when printing multi-byte groups.
#[derive(Copy, Clone, Debug, Default, ValueEnum)]
pub enum Endianness {
/// Print out groups in little-endian format.
Little,
/// Print out groups in big-endian format.
#[default]
Big,
}
/// State machine for "squeezing" runs of identical lines into a single `*`.
#[derive(PartialEq)]
enum Squeezer {
// current line is the first repeated one: print the `*` marker line
Print,
// still inside a repeated run: suppress output entirely
Delete,
// watching for repetition but nothing to squeeze yet
Ignore,
// squeezing turned off by the user
Disabled,
}
/// Newtype wrapper around a raw byte, providing category/color/char mapping.
#[derive(Copy, Clone)]
struct Byte(u8);
impl Byte {
// Classify the byte; the order of checks matters (null before
// graphic/whitespace, ASCII before non-ASCII).
fn category(self) -> ByteCategory {
if self.0 == 0x00 {
ByteCategory::Null
} else if self.0.is_ascii_graphic() {
ByteCategory::AsciiPrintable
} else if self.0.is_ascii_whitespace() {
ByteCategory::AsciiWhitespace
} else if self.0.is_ascii() {
ByteCategory::AsciiOther
} else {
ByteCategory::NonAscii
}
}
// Return the ANSI escape sequence (as raw bytes) used to color this byte
// under the given color scheme.
fn color(self, color_scheme: ColorScheme) -> &'static [u8] {
use crate::ByteCategory::*;
match color_scheme {
ColorScheme::Default => match self.category() {
Null => COLOR_NULL.as_bytes(),
AsciiPrintable => COLOR_ASCII_PRINTABLE.as_bytes(),
AsciiWhitespace => COLOR_ASCII_WHITESPACE.as_bytes(),
AsciiOther => COLOR_ASCII_OTHER.as_bytes(),
NonAscii => COLOR_NONASCII.as_bytes(),
},
ColorScheme::Gradient => match self.category() {
Null => COLOR_NULL_RGB,
// plain space gets the first printable-gradient entry
AsciiWhitespace if self.0 == b' ' => &COLOR_GRADIENT_ASCII_PRINTABLE[0],
// printable range is indexed relative to b' ' (0x20)
AsciiPrintable => &COLOR_GRADIENT_ASCII_PRINTABLE[(self.0 - b' ') as usize],
AsciiWhitespace | AsciiOther => {
if self.0 == 0x7f {
COLOR_DEL
} else {
// non-printables 0x01..=0x1f map to gradient index byte-1
&COLOR_GRADIENT_ASCII_NONPRINTABLE[self.0 as usize - 1]
}
}
// non-ASCII bytes 0x80..=0xff map to gradient index byte-128
NonAscii => &COLOR_GRADIENT_NONASCII[(self.0 - 128) as usize],
},
}
}
// Return the character displayed for this byte in the character panel,
// according to the selected character table.
fn as_char(self, character_table: CharacterTable) -> char {
use crate::ByteCategory::*;
match character_table {
CharacterTable::Default => match self.category() {
Null => '⋄',
AsciiPrintable => self.0 as char,
AsciiWhitespace if self.0 == 0x20 => ' ',
AsciiWhitespace => '_',
AsciiOther => '•',
NonAscii => '×',
},
CharacterTable::Ascii => match self.category() {
Null => '.',
AsciiPrintable => self.0 as char,
AsciiWhitespace if self.0 == 0x20 => ' ',
AsciiWhitespace => '.',
AsciiOther => '.',
NonAscii => '.',
},
// table-driven lookups: one char per byte value
CharacterTable::CP1047 => CP1047[self.0 as usize],
CharacterTable::CP437 => CP437[self.0 as usize],
CharacterTable::Braille => match self.category() {
// null is important enough to get its own symbol
Null => '⋄',
AsciiPrintable => self.0 as char,
AsciiWhitespace if self.0 == b' ' => ' ',
// `\t`, `\n` and `\r` are important enough to get their own symbols
AsciiWhitespace if self.0 == b'\t' => '→',
AsciiWhitespace if self.0 == b'\n' => '↵',
AsciiWhitespace if self.0 == b'\r' => '←',
AsciiWhitespace | AsciiOther | NonAscii => {
/// Adjust the bits from the original number to a new number.
///
/// Bit positions in braille are adjusted as follows:
///
/// ```text
/// 0 3 => 0 1
/// 1 4 => 2 3
/// 2 5 => 4 5
/// 6 7 => 6 7
/// ```
fn to_braille_bits(byte: u8) -> u8 {
let mut out = 0;
for (from, to) in [0, 3, 1, 4, 2, 5, 6, 7].into_iter().enumerate() {
out |= (byte >> from & 1) << to;
}
out
}
// 0x2800 is the base of the Unicode braille block; the
// remapped bits select the dot pattern, so every byte
// value yields a valid char and unwrap cannot fail
char::from_u32(0x2800 + to_braille_bits(self.0) as u32).unwrap()
}
},
}
}
}
/// The four characters needed to draw one horizontal border line.
struct BorderElements {
left_corner: char,
horizontal_line: char,
column_separator: char,
right_corner: char,
}
/// Style of the frame drawn around the output.
#[derive(Clone, Copy, Debug, Default, ValueEnum)]
pub enum BorderStyle {
/// Draw a border with Unicode characters.
#[default]
Unicode,
/// Draw a border with ASCII characters.
Ascii,
/// Do not draw a border at all.
None,
}
impl BorderStyle {
    /// Characters for the top border line, or `None` when borderless.
    fn header_elems(&self) -> Option<BorderElements> {
        let (left_corner, horizontal_line, column_separator, right_corner) = match self {
            Self::Unicode => ('┌', '─', '┬', '┐'),
            Self::Ascii => ('+', '-', '+', '+'),
            Self::None => return None,
        };
        Some(BorderElements {
            left_corner,
            horizontal_line,
            column_separator,
            right_corner,
        })
    }
    /// Characters for the bottom border line, or `None` when borderless.
    fn footer_elems(&self) -> Option<BorderElements> {
        let (left_corner, horizontal_line, column_separator, right_corner) = match self {
            Self::Unicode => ('└', '─', '┴', '┘'),
            Self::Ascii => ('+', '-', '+', '+'),
            Self::None => return None,
        };
        Some(BorderElements {
            left_corner,
            horizontal_line,
            column_separator,
            right_corner,
        })
    }
    /// Vertical separator drawn at the outer edges of a line.
    fn outer_sep(&self) -> char {
        match self {
            Self::Unicode => '│',
            Self::Ascii => '|',
            Self::None => ' ',
        }
    }
    /// Vertical separator drawn between panels inside a line.
    fn inner_sep(&self) -> char {
        match self {
            Self::Unicode => '┊',
            Self::Ascii => '|',
            Self::None => ' ',
        }
    }
}
/// Builder for [`Printer`]; all options start at sensible defaults
/// (see [`PrinterBuilder::new`]) and can be overridden fluently.
pub struct PrinterBuilder<'a, Writer: Write> {
writer: &'a mut Writer,
show_color: bool,
show_char_panel: bool,
show_position_panel: bool,
border_style: BorderStyle,
use_squeeze: bool,
// number of hex data panels per line
panels: u64,
// octets grouped together between spaces
group_size: u8,
base: Base,
endianness: Endianness,
character_table: CharacterTable,
include_mode: IncludeMode,
color_scheme: ColorScheme,
}
impl<'a, Writer: Write> PrinterBuilder<'a, Writer> {
/// Create a builder with defaults: colored two-panel hex output with
/// character and position panels, Unicode borders, and squeezing on.
pub fn new(writer: &'a mut Writer) -> Self {
PrinterBuilder {
writer,
show_color: true,
show_char_panel: true,
show_position_panel: true,
border_style: BorderStyle::Unicode,
use_squeeze: true,
panels: 2,
group_size: 1,
base: Base::Hexadecimal,
endianness: Endianness::Big,
character_table: CharacterTable::Default,
include_mode: IncludeMode::Off,
color_scheme: ColorScheme::Default,
}
}
pub fn show_color(mut self, show_color: bool) -> Self {
self.show_color = show_color;
self
}
pub fn show_char_panel(mut self, show_char_panel: bool) -> Self {
self.show_char_panel = show_char_panel;
self
}
pub fn show_position_panel(mut self, show_position_panel: bool) -> Self {
self.show_position_panel = show_position_panel;
self
}
pub fn with_border_style(mut self, border_style: BorderStyle) -> Self {
self.border_style = border_style;
self
}
pub fn enable_squeezing(mut self, enable: bool) -> Self {
self.use_squeeze = enable;
self
}
pub fn num_panels(mut self, num: u64) -> Self {
self.panels = num;
self
}
pub fn group_size(mut self, num: u8) -> Self {
self.group_size = num;
self
}
pub fn with_base(mut self, base: Base) -> Self {
self.base = base;
self
}
pub fn endianness(mut self, endianness: Endianness) -> Self {
self.endianness = endianness;
self
}
pub fn character_table(mut self, character_table: CharacterTable) -> Self {
self.character_table = character_table;
self
}
pub fn include_mode(mut self, include: IncludeMode) -> Self {
self.include_mode = include;
self
}
pub fn color_scheme(mut self, color_scheme: ColorScheme) -> Self {
self.color_scheme = color_scheme;
self
}
/// Consume the builder and construct the [`Printer`], precomputing the
/// per-byte string tables so the hot printing path never formats.
pub fn build(self) -> Printer<'a, Writer> {
Printer {
idx: 0,
// one line holds 8 bytes per panel
line_buf: vec![0x0; 8 * self.panels as usize],
writer: self.writer,
show_char_panel: self.show_char_panel,
show_position_panel: self.show_position_panel,
show_color: self.show_color,
curr_color: None,
color_scheme: self.color_scheme,
border_style: self.border_style,
// 256-entry lookup table: byte value -> formatted string in `base`
byte_hex_panel: (0u8..=u8::MAX)
.map(|i| match self.base {
Base::Binary => format!("{i:08b}"),
Base::Octal => format!("{i:03o}"),
Base::Decimal => format!("{i:03}"),
Base::Hexadecimal => format!("{i:02x}"),
})
.collect(),
// 256-entry lookup table: byte value -> display character
byte_char_panel: (0u8..=u8::MAX)
.map(|i| format!("{}", Byte(i).as_char(self.character_table)))
.collect(),
// hex table used for the position panel (always hexadecimal)
byte_hex_panel_g: (0u8..=u8::MAX).map(|i| format!("{i:02x}")).collect(),
squeezer: if self.use_squeeze {
Squeezer::Ignore
} else {
Squeezer::Disabled
},
display_offset: 0,
panels: self.panels,
squeeze_byte: 0x00,
group_size: self.group_size,
// digits needed per byte in the chosen base
base_digits: match self.base {
Base::Binary => 8,
Base::Octal => 3,
Base::Decimal => 3,
Base::Hexadecimal => 2,
},
endianness: self.endianness,
include_mode: self.include_mode,
}
}
}
/// Stateful hex-dump printer; construct via [`PrinterBuilder`].
pub struct Printer<'a, Writer: Write> {
// byte offset of the start of the current line
idx: u64,
/// the buffer containing all the bytes in a line for character printing
line_buf: Vec<u8>,
writer: &'a mut Writer,
show_char_panel: bool,
show_position_panel: bool,
show_color: bool,
// last color escape written, to avoid re-emitting identical escapes
curr_color: Option<&'static [u8]>,
color_scheme: ColorScheme,
border_style: BorderStyle,
// 256-entry byte -> formatted-string table for the hex panels
byte_hex_panel: Vec<String>,
// 256-entry byte -> display-character table for the char panel
byte_char_panel: Vec<String>,
// same as previous but in Fixed(242) gray color, for position panel
byte_hex_panel_g: Vec<String>,
squeezer: Squeezer,
// value added to printed positions (user-facing offset shift)
display_offset: u64,
/// The number of panels to draw.
panels: u64,
// the byte pattern of a squeezed line, repeated to fill a usize
squeeze_byte: usize,
/// The number of octets per group.
group_size: u8,
/// The number of digits used to write the base.
base_digits: u8,
/// Whether to show groups in little or big endian format.
endianness: Endianness,
/// Whether to output in C include file style.
include_mode: IncludeMode,
}
impl<'a, Writer: Write> Printer<'a, Writer> {
/// Set the value added to every printed file position.
pub fn display_offset(&mut self, display_offset: u64) -> &mut Self {
self.display_offset = display_offset;
self
}
// Width in characters of one hex data panel (including spacing).
fn panel_sz(&self) -> usize {
// add one to include the trailing space of a group
let group_sz = self.base_digits as usize * self.group_size as usize + 1;
let group_per_panel = 8 / self.group_size as usize;
// add one to include the leading space
1 + group_sz * group_per_panel
}
// Draw one full horizontal border line (header or footer).
fn write_border(&mut self, border_elements: BorderElements) -> io::Result<()> {
let h = border_elements.horizontal_line;
let c = border_elements.column_separator;
let l = border_elements.left_corner;
let r = border_elements.right_corner;
let h8 = h.to_string().repeat(8);
let h_repeat = h.to_string().repeat(self.panel_sz());
if self.show_position_panel {
write!(self.writer, "{l}{h8}{c}")?;
} else {
write!(self.writer, "{l}")?;
}
// segments over the hex data panels
for _ in 0..self.panels - 1 {
write!(self.writer, "{h_repeat}{c}")?;
}
if self.show_char_panel {
write!(self.writer, "{h_repeat}{c}")?;
} else {
write!(self.writer, "{h_repeat}")?;
}
// segments over the character panels (8 columns each)
if self.show_char_panel {
for _ in 0..self.panels - 1 {
write!(self.writer, "{h8}{c}")?;
}
writeln!(self.writer, "{h8}{r}")?;
} else {
writeln!(self.writer, "{r}")?;
}
Ok(())
}
/// Print the top border (no-op for `BorderStyle::None`).
pub fn print_header(&mut self) -> io::Result<()> {
if let Some(e) = self.border_style.header_elems() {
self.write_border(e)?
}
Ok(())
}
/// Print the bottom border (no-op for `BorderStyle::None`).
pub fn print_footer(&mut self) -> io::Result<()> {
if let Some(e) = self.border_style.footer_elems() {
self.write_border(e)?
}
Ok(())
}
// Print the left border plus (optionally) the position column for the
// current line. Prints `*` instead of a position while squeezing.
fn print_position_panel(&mut self) -> io::Result<()> {
self.writer.write_all(
self.border_style
.outer_sep()
.encode_utf8(&mut [0; 4])
.as_bytes(),
)?;
if self.show_color {
self.writer.write_all(COLOR_OFFSET.as_bytes())?;
}
if self.show_position_panel {
match self.squeezer {
Squeezer::Print => {
self.writer.write_all(b"*")?;
if self.show_color {
self.writer.write_all(COLOR_RESET.as_bytes())?;
}
self.writer.write_all(b" ")?;
}
Squeezer::Ignore | Squeezer::Disabled | Squeezer::Delete => {
let byte_index: [u8; 8] = (self.idx + self.display_offset).to_be_bytes();
// skip leading zero bytes, but keep at least 4 bytes
// (8 hex digits) of position
let mut i = 0;
while byte_index[i] == 0x0 && i < 4 {
i += 1;
}
for &byte in byte_index.iter().skip(i) {
self.writer
.write_all(self.byte_hex_panel_g[byte as usize].as_bytes())?;
}
if self.show_color {
self.writer.write_all(COLOR_RESET.as_bytes())?;
}
}
}
self.writer.write_all(
self.border_style
.outer_sep()
.encode_utf8(&mut [0; 4])
.as_bytes(),
)?;
}
Ok(())
}
// Print the character-panel cell for line-buffer position `i`,
// plus a panel separator when `i` ends a panel.
fn print_char(&mut self, i: u64) -> io::Result<()> {
match self.squeezer {
Squeezer::Print | Squeezer::Delete => self.writer.write_all(b" ")?,
Squeezer::Ignore | Squeezer::Disabled => {
if let Some(&b) = self.line_buf.get(i as usize) {
// only emit a color escape when the color changes
if self.show_color && self.curr_color != Some(Byte(b).color(self.color_scheme))
{
self.writer.write_all(Byte(b).color(self.color_scheme))?;
self.curr_color = Some(Byte(b).color(self.color_scheme));
}
self.writer
.write_all(self.byte_char_panel[b as usize].as_bytes())?;
} else {
self.squeezer = Squeezer::Print;
}
}
}
// last position of the last panel: close with the outer border
if i == 8 * self.panels - 1 {
if self.show_color {
self.writer.write_all(COLOR_RESET.as_bytes())?;
self.curr_color = None;
}
self.writer.write_all(
self.border_style
.outer_sep()
.encode_utf8(&mut [0; 4])
.as_bytes(),
)?;
} else if i % 8 == 7 {
// end of an inner panel: use the inner separator
if self.show_color {
self.writer.write_all(COLOR_RESET.as_bytes())?;
self.curr_color = None;
}
self.writer.write_all(
self.border_style
.inner_sep()
.encode_utf8(&mut [0; 4])
.as_bytes(),
)?;
}
Ok(())
}
/// Print the whole character panel for the current line buffer.
pub fn print_char_panel(&mut self) -> io::Result<()> {
for i in 0..self.line_buf.len() {
self.print_char(i as u64)?;
}
Ok(())
}
// Print one byte cell of the hex panel (or padding while squeezing),
// plus group spacing and panel separators.
fn print_byte(&mut self, i: usize, b: u8) -> io::Result<()> {
match self.squeezer {
Squeezer::Print => {
// with the position panel hidden, the `*` marker moves into
// the first hex cell
if !self.show_position_panel && i == 0 {
if self.show_color {
self.writer.write_all(COLOR_OFFSET.as_bytes())?;
}
self.writer
.write_all(self.byte_char_panel[b'*' as usize].as_bytes())?;
if self.show_color {
self.writer.write_all(COLOR_RESET.as_bytes())?;
}
} else if i % (self.group_size as usize) == 0 {
self.writer.write_all(b" ")?;
}
// blank space the width of one byte in the current base
for _ in 0..self.base_digits {
self.writer.write_all(b" ")?;
}
}
Squeezer::Delete => self.writer.write_all(b" ")?,
Squeezer::Ignore | Squeezer::Disabled => {
// leading space before every group
if i % (self.group_size as usize) == 0 {
self.writer.write_all(b" ")?;
}
if self.show_color && self.curr_color != Some(Byte(b).color(self.color_scheme)) {
self.writer.write_all(Byte(b).color(self.color_scheme))?;
self.curr_color = Some(Byte(b).color(self.color_scheme));
}
self.writer
.write_all(self.byte_hex_panel[b as usize].as_bytes())?;
}
}
// byte is last in panel
if i % 8 == 7 {
if self.show_color {
self.curr_color = None;
self.writer.write_all(COLOR_RESET.as_bytes())?;
}
self.writer.write_all(b" ")?;
// byte is last in last panel
if i as u64 % (8 * self.panels) == 8 * self.panels - 1 {
self.writer.write_all(
self.border_style
.outer_sep()
.encode_utf8(&mut [0; 4])
.as_bytes(),
)?;
} else {
self.writer.write_all(
self.border_style
.inner_sep()
.encode_utf8(&mut [0; 4])
.as_bytes(),
)?;
}
}
Ok(())
}
// Reverse the bytes of every group in place; a short trailing group
// is reversed over the bytes it actually has.
fn reorder_buffer_to_little_endian(&self, buf: &mut [u8]) {
let n = buf.len();
let group_sz = self.group_size as usize;
for idx in (0..n).step_by(group_sz) {
let remaining = n - idx;
let total = remaining.min(group_sz);
buf[idx..idx + total].reverse();
}
}
/// Print the hex panels for the current line buffer, honoring the
/// configured endianness.
pub fn print_bytes(&mut self) -> io::Result<()> {
let mut buf = self.line_buf.clone();
if matches!(self.endianness, Endianness::Little) {
self.reorder_buffer_to_little_endian(&mut buf);
};
for (i, &b) in buf.iter().enumerate() {
self.print_byte(i, b)?;
}
Ok(())
}
/// Loop through the given `Reader`, printing until the `Reader` buffer
/// is exhausted.
pub fn print_all<Reader: Read>(&mut self, reader: Reader) -> io::Result<()> {
let mut is_empty = true;
let mut buf = BufReader::new(reader);
// special handler for include mode
match &self.include_mode {
// Input from a file
// Output like `unsigned char <filename>[] = { ... }; unsigned int <filename>_len = ...;`
IncludeMode::File(filename) => {
// convert non-alphanumeric characters to '_'
let var_name = filename
.chars()
.map(|c| if c.is_alphanumeric() { c } else { '_' })
.collect::<String>();
writeln!(self.writer, "unsigned char {}[] = {{", var_name)?;
let total_bytes = self.print_bytes_in_include_style(&mut buf)?;
writeln!(self.writer, "}};")?;
writeln!(
self.writer,
"unsigned int {}_len = {};",
var_name, total_bytes
)?;
return Ok(());
}
IncludeMode::Stdin | IncludeMode::Slice => {
self.print_bytes_in_include_style(&mut buf)?;
return Ok(());
}
IncludeMode::Off => {}
}
// `leftover` becomes Some(n) when the final (short) line holds n bytes
let leftover = loop {
// read a maximum of 8 * self.panels bytes from the reader
if let Ok(n) = buf.read(&mut self.line_buf) {
if n > 0 && n < 8 * self.panels as usize {
// if less are read, that indicates end of file after
if is_empty {
self.print_header()?;
is_empty = false;
}
let mut leftover = n;
// loop until input is ceased
if let Some(s) = loop {
if let Ok(n) = buf.read(&mut self.line_buf[leftover..]) {
leftover += n;
// there is no more input being read
if n == 0 {
self.line_buf.resize(leftover, 0);
break Some(leftover);
}
// amount read has exceeded line buffer
if leftover >= 8 * self.panels as usize {
break None;
}
}
} {
break Some(s);
};
} else if n == 0 {
// if no bytes are read, that indicates end of file
if self.squeezer == Squeezer::Delete {
// empty the last line when ending is squeezed
self.line_buf.clear();
break Some(0);
}
break None;
}
}
if is_empty {
self.print_header()?;
}
// squeeze is active, check if the line is the same
// skip print if still squeezed, otherwise print and deactivate squeeze
if matches!(self.squeezer, Squeezer::Print | Squeezer::Delete) {
if self
.line_buf
.chunks_exact(std::mem::size_of::<usize>())
.all(|w| usize::from_ne_bytes(w.try_into().unwrap()) == self.squeeze_byte)
{
if self.squeezer == Squeezer::Delete {
self.idx += 8 * self.panels;
continue;
}
} else {
self.squeezer = Squeezer::Ignore;
}
}
// print the line
self.print_position_panel()?;
self.print_bytes()?;
if self.show_char_panel {
self.print_char_panel()?;
}
self.writer.write_all(b"\n")?;
if is_empty {
self.writer.flush()?;
is_empty = false;
}
// increment index to next line
self.idx += 8 * self.panels;
// change from print to delete if squeeze is still active
if self.squeezer == Squeezer::Print {
self.squeezer = Squeezer::Delete;
}
// repeat the first byte in the line until it's a usize
// compare that usize with each usize chunk in the line
// if they are all the same, change squeezer to print
let repeat_byte = (self.line_buf[0] as usize) * (usize::MAX / 255);
if !matches!(self.squeezer, Squeezer::Disabled | Squeezer::Delete)
&& self
.line_buf
.chunks_exact(std::mem::size_of::<usize>())
.all(|w| usize::from_ne_bytes(w.try_into().unwrap()) == repeat_byte)
{
self.squeezer = Squeezer::Print;
self.squeeze_byte = repeat_byte;
};
};
// special ending
if is_empty {
// no input at all: print an empty frame with a notice
self.base_digits = 2;
self.print_header()?;
if self.show_position_panel {
write!(self.writer, "{0:9}", "│")?;
}
write!(
self.writer,
"{0:2}{1:2$}{0}{0:>3$}",
"│",
"No content",
self.panel_sz() - 1,
self.panel_sz() + 1,
)?;
if self.show_char_panel {
write!(self.writer, "{0:>9}{0:>9}", "│")?;
}
writeln!(self.writer)?;
} else if let Some(n) = leftover {
// last line is incomplete
// print the real bytes, then pad the rest of the line with blanks
self.squeezer = Squeezer::Ignore;
self.print_position_panel()?;
self.print_bytes()?;
self.squeezer = Squeezer::Print;
for i in n..8 * self.panels as usize {
self.print_byte(i, 0)?;
}
if self.show_char_panel {
self.squeezer = Squeezer::Ignore;
self.print_char_panel()?;
self.squeezer = Squeezer::Print;
for i in n..8 * self.panels as usize {
self.print_char(i as u64)?;
}
}
self.writer.write_all(b"\n")?;
}
self.print_footer()?;
self.writer.flush()?;
Ok(())
}
/// Print the bytes in C include file style
/// Return the number of bytes read
fn print_bytes_in_include_style<Reader: Read>(
&mut self,
buf: &mut BufReader<Reader>,
) -> Result<usize, io::Error> {
let mut buffer = [0; 1024];
let mut total_bytes = 0;
let mut is_first_chunk = true;
// bytes emitted so far; 12 values per output line
let mut line_counter = 0;
loop {
match buf.read(&mut buffer) {
Ok(0) => break, // EOF
Ok(bytes_read) => {
total_bytes += bytes_read;
for &byte in &buffer[..bytes_read] {
if line_counter % 12 == 0 {
if !is_first_chunk || line_counter > 0 {
writeln!(self.writer, ",")?;
}
// indentation of first line
write!(self.writer, "  ")?;
is_first_chunk = false;
} else {
write!(self.writer, ", ")?;
}
write!(self.writer, "0x{:02x}", byte)?;
line_counter += 1;
}
}
Err(e) => return Err(e),
}
}
writeln!(self.writer)?;
Ok(total_bytes)
}
}
#[cfg(test)]
mod tests {
use std::io;
use std::str;
use super::*;
fn assert_print_all_output<Reader: Read>(input: Reader, expected_string: String) {
let mut output = vec![];
let mut printer = PrinterBuilder::new(&mut output)
.show_color(false)
.show_char_panel(true)
.show_position_panel(true)
.with_border_style(BorderStyle::Unicode)
.enable_squeezing(true)
.num_panels(2)
.group_size(1)
.with_base(Base::Hexadecimal)
.endianness(Endianness::Big)
.character_table(CharacterTable::Default)
.include_mode(IncludeMode::Off)
.color_scheme(ColorScheme::Default)
.build();
printer.print_all(input).unwrap();
let actual_string: &str = str::from_utf8(&output).unwrap();
assert_eq!(actual_string, expected_string,)
}
#[test]
fn empty_file_passes() {
let input = io::empty();
let expected_string = "\
┌────────┬─────────────────────────┬─────────────────────────┬────────┬────────┐
│ │ No content │ │ │ │
└────────┴─────────────────────────┴─────────────────────────┴────────┴────────┘
"
.to_owned();
assert_print_all_output(input, expected_string);
}
#[test]
fn short_input_passes() {
let input = io::Cursor::new(b"spam");
let expected_string = "\
┌────────┬─────────────────────────┬─────────────────────────┬────────┬────────┐
│00000000│ 73 70 61 6d ┊ │spam ┊ │
└────────┴─────────────────────────┴─────────────────────────┴────────┴────────┘
"
.to_owned();
assert_print_all_output(input, expected_string);
}
#[test]
fn display_offset() {
let input = io::Cursor::new(b"spamspamspamspamspam");
let expected_string = "\
┌────────┬─────────────────────────┬─────────────────────────┬────────┬────────┐
│deadbeef│ 73 70 61 6d 73 70 61 6d ┊ 73 70 61 6d 73 70 61 6d │spamspam┊spamspam│
│deadbeff│ 73 70 61 6d ┊ │spam ┊ │
| rust | Apache-2.0 | 2e2643782d6ced9b5ac75596169a79127d8e535a | 2026-01-04T15:43:36.733781Z | true |
sharkdp/hexyl | https://github.com/sharkdp/hexyl/blob/2e2643782d6ced9b5ac75596169a79127d8e535a/src/tests.rs | src/tests.rs | use super::*;
#[test]
// Each decimal unit is 1000x the previous, each binary unit 1024x.
fn unit_multipliers() {
use Unit::*;
assert_eq!(Kilobyte.get_multiplier(), 1000 * Byte.get_multiplier());
assert_eq!(Megabyte.get_multiplier(), 1000 * Kilobyte.get_multiplier());
assert_eq!(Gigabyte.get_multiplier(), 1000 * Megabyte.get_multiplier());
assert_eq!(Terabyte.get_multiplier(), 1000 * Gigabyte.get_multiplier());
assert_eq!(Kibibyte.get_multiplier(), 1024 * Byte.get_multiplier());
assert_eq!(Mebibyte.get_multiplier(), 1024 * Kibibyte.get_multiplier());
assert_eq!(Gibibyte.get_multiplier(), 1024 * Mebibyte.get_multiplier());
assert_eq!(Tebibyte.get_multiplier(), 1024 * Gibibyte.get_multiplier());
}
#[test]
// A leading `+`/`-` determines the offset kind and is stripped from the
// remainder; a bare sign or empty input is rejected.
fn test_process_sign() {
use ByteOffsetKind::*;
use ByteOffsetParseError::*;
assert_eq!(process_sign_of("123"), Ok(("123", ForwardFromBeginning)));
assert_eq!(process_sign_of("+123"), Ok(("123", ForwardFromLastOffset)));
assert_eq!(process_sign_of("-123"), Ok(("123", BackwardFromEnd)));
assert_eq!(process_sign_of("-"), Err(EmptyAfterSign));
assert_eq!(process_sign_of("+"), Err(EmptyAfterSign));
assert_eq!(process_sign_of(""), Err(Empty));
}
#[test]
// Only strings with a `0x` prefix are treated as hex; malformed hex
// yields `Some(Err(..))`, non-hex input yields `None`.
fn test_parse_as_hex() {
assert_eq!(try_parse_as_hex_number("73"), None);
assert_eq!(try_parse_as_hex_number("0x1337"), Some(Ok(0x1337)));
assert!(matches!(try_parse_as_hex_number("0xnope"), Some(Err(_))));
assert!(matches!(try_parse_as_hex_number("0x-1"), Some(Err(_))));
}
#[test]
// Splitting "<number><unit>" into its parts and the error cases around it.
fn extract_num_and_unit() {
use ByteOffsetParseError::*;
use Unit::*;
// byte is default unit
assert_eq!(extract_num_and_unit_from("4"), Ok((4, Byte)));
// blocks are returned without customization
assert_eq!(
extract_num_and_unit_from("2blocks"),
Ok((2, Block { custom_size: None }))
);
// no normalization is performed
assert_eq!(extract_num_and_unit_from("1024kb"), Ok((1024, Kilobyte)));
// unit without number results in error
assert_eq!(
extract_num_and_unit_from("gib"),
Err(EmptyWithUnit("gib".to_string()))
);
// empty string results in error
assert_eq!(extract_num_and_unit_from(""), Err(Empty));
// an invalid unit results in an error
assert_eq!(
extract_num_and_unit_from("25litres"),
Err(InvalidUnit("litres".to_string()))
);
}
#[test]
// End-to-end tests of `parse_byte_offset`: sign handling, hex prefix,
// decimal/binary unit suffixes, block sizes, and every error variant.
fn test_parse_byte_offset() {
use ByteOffsetParseError::*;
// asserts that parsing succeeds with the given kind/value,
// optionally under a custom block size
macro_rules! success {
($input: expr, $expected_kind: ident $expected_value: expr) => {
success!($input, $expected_kind $expected_value; block_size: DEFAULT_BLOCK_SIZE)
};
($input: expr, $expected_kind: ident $expected_value: expr; block_size: $block_size: expr) => {
assert_eq!(
parse_byte_offset($input, PositiveI64::new($block_size).unwrap()),
Ok(
ByteOffset {
value: NonNegativeI64::new($expected_value).unwrap(),
kind: ByteOffsetKind::$expected_kind,
}
),
);
};
}
// asserts that parsing fails with the given error
macro_rules! error {
($input: expr, $expected_err: expr) => {
assert_eq!(
parse_byte_offset($input, PositiveI64::new(DEFAULT_BLOCK_SIZE).unwrap()),
Err($expected_err),
);
};
}
success!("0", ForwardFromBeginning 0);
success!("1", ForwardFromBeginning 1);
success!("1", ForwardFromBeginning 1);
success!("100", ForwardFromBeginning 100);
success!("+100", ForwardFromLastOffset 100);
success!("0x0", ForwardFromBeginning 0);
success!("0xf", ForwardFromBeginning 15);
success!("0xdeadbeef", ForwardFromBeginning 3_735_928_559);
success!("1KB", ForwardFromBeginning 1000);
success!("2MB", ForwardFromBeginning 2000000);
success!("3GB", ForwardFromBeginning 3000000000);
success!("4TB", ForwardFromBeginning 4000000000000);
success!("+4TB", ForwardFromLastOffset 4000000000000);
success!("1GiB", ForwardFromBeginning 1073741824);
success!("2TiB", ForwardFromBeginning 2199023255552);
success!("+2TiB", ForwardFromLastOffset 2199023255552);
success!("0xff", ForwardFromBeginning 255);
success!("0xEE", ForwardFromBeginning 238);
success!("+0xFF", ForwardFromLastOffset 255);
success!("1block", ForwardFromBeginning 512; block_size: 512);
success!("2block", ForwardFromBeginning 1024; block_size: 512);
success!("1block", ForwardFromBeginning 4; block_size: 4);
success!("2block", ForwardFromBeginning 8; block_size: 4);
// empty string is invalid
error!("", Empty);
// These are also bad.
error!("+", EmptyAfterSign);
error!("-", EmptyAfterSign);
error!("K", InvalidNumAndUnit("K".to_owned()));
error!("k", InvalidNumAndUnit("k".to_owned()));
error!("m", InvalidNumAndUnit("m".to_owned()));
error!("block", EmptyWithUnit("block".to_owned()));
// leading/trailing space is invalid
error!(" 0", InvalidNumAndUnit(" 0".to_owned()));
error!("0 ", InvalidUnit(" ".to_owned()));
// Signs after the hex prefix make no sense
error!("0x-12", SignFoundAfterHexPrefix('-'));
// This was previously accepted but shouldn't be.
error!("0x+12", SignFoundAfterHexPrefix('+'));
// invalid suffix
error!("1234asdf", InvalidUnit("asdf".to_owned()));
// bad numbers
error!("asdf1234", InvalidNumAndUnit("asdf1234".to_owned()));
error!("a1s2d3f4", InvalidNumAndUnit("a1s2d3f4".to_owned()));
// multiplication overflows u64
error!("20000000TiB", UnitMultiplicationOverflow);
assert!(
match parse_byte_offset("99999999999999999999", PositiveI64::new(512).unwrap()) {
// We can't check against the kind of the `ParseIntError`, so we'll just make sure it's the
// same as trying to do the parse directly.
Err(ParseNum(e)) => e == "99999999999999999999".parse::<i64>().unwrap_err(),
_ => false,
}
);
}
| rust | Apache-2.0 | 2e2643782d6ced9b5ac75596169a79127d8e535a | 2026-01-04T15:43:36.733781Z | false |
sharkdp/hexyl | https://github.com/sharkdp/hexyl/blob/2e2643782d6ced9b5ac75596169a79127d8e535a/src/main.rs | src/main.rs | use std::fs::File;
use std::io::{self, prelude::*, BufWriter, SeekFrom};
use std::num::{NonZeroI64, NonZeroU64};
use std::path::PathBuf;
use clap::builder::ArgPredicate;
use clap::{ArgAction, CommandFactory, Parser, ValueEnum};
use clap_complete::aot::{generate, Shell};
use anyhow::{anyhow, bail, Context, Result};
use const_format::formatcp;
use thiserror::Error as ThisError;
use terminal_size::terminal_size;
use hexyl::{
Base, BorderStyle, CharacterTable, ColorScheme, Endianness, IncludeMode, Input, PrinterBuilder,
};
use hexyl::{
COLOR_ASCII_OTHER, COLOR_ASCII_PRINTABLE, COLOR_ASCII_WHITESPACE, COLOR_NONASCII, COLOR_NULL,
COLOR_RESET,
};
#[cfg(test)]
mod tests;
// Default size (in bytes) of the `block` unit used by offset parsing.
const DEFAULT_BLOCK_SIZE: i64 = 512;
// Long help texts for CLI options; kept as constants because they span
// multiple lines and are referenced from the clap derive attributes below.
const LENGTH_HELP_TEXT: &str = "Only read N bytes from the input. The N argument can also include \
a unit with a decimal prefix (kB, MB, ..) or binary prefix (kiB, \
MiB, ..), or can be specified using a hex number. The short \
option '-l' can be used as an alias.
Examples: --length=64, --length=4KiB, --length=0xff";
const SKIP_HELP_TEXT: &str = "Skip the first N bytes of the input. The N argument can also \
include a unit (see `--length` for details).
A negative value is valid and will seek from the end of the file.";
const BLOCK_SIZE_HELP_TEXT: &str = "Sets the size of the `block` unit to SIZE.
Examples: --block-size=1024, --block-size=4kB";
const DISPLAY_OFFSET_HELP_TEXT: &str = "Add N bytes to the displayed file position. The N \
argument can also include a unit (see `--length` for \
details).
A negative value is valid and calculates an offset relative to the end of the file.";
const TERMINAL_WIDTH_HELP_TEXT: &str = "Sets the number of terminal columns to be displayed.
Since the terminal width may not be an evenly divisible by the width per hex data column, this \
will use the greatest number of hex data panels that can \
fit in the requested width but still leave some space to \
the right.
Cannot be used with other width-setting options.";
// Command-line interface definition (clap derive).
// NOTE: `///` doc comments on fields/variants become user-visible help text,
// so only `//` comments are used for maintainer notes here.
#[derive(Debug, Parser)]
#[command(version, about, max_term_width(90))]
struct Opt {
    /// The file to display. If no FILE argument is given, read from STDIN.
    #[arg(value_name("FILE"))]
    file: Option<PathBuf>,
    // `-n/--length`, with `-c`/`--bytes` and `-l` accepted as aliases.
    #[arg(
        help(LENGTH_HELP_TEXT),
        short('n'),
        long,
        visible_short_alias('c'),
        visible_alias("bytes"),
        short_alias('l'),
        value_name("N")
    )]
    length: Option<String>,
    #[arg(help(SKIP_HELP_TEXT), short, long, value_name("N"))]
    skip: Option<String>,
    // Stored as a string so units ("4kB") and hex values can be parsed later.
    #[arg(
        help(BLOCK_SIZE_HELP_TEXT),
        long,
        default_value(formatcp!("{DEFAULT_BLOCK_SIZE}")),
        value_name("SIZE")
    )]
    block_size: String,
    /// Displays all input data. Otherwise any number of groups of output lines
    /// which would be identical to the preceding group of lines, are replaced
    /// with a line comprised of a single asterisk.
    #[arg(short('v'), long)]
    no_squeezing: bool,
    /// When to use colors.
    // `--plain` implicitly forces `--color=never`.
    #[arg(
        long,
        value_enum,
        default_value_t,
        value_name("WHEN"),
        default_value_if("plain", ArgPredicate::IsPresent, Some("never"))
    )]
    color: ColorWhen,
    /// Whether to draw a border.
    // `--plain` implicitly forces `--border=none`.
    #[arg(
        long,
        value_enum,
        default_value_t,
        value_name("STYLE"),
        default_value_if("plain", ArgPredicate::IsPresent, Some("none"))
    )]
    border: BorderStyle,
    /// Display output with --no-characters, --no-position, --border=none, and
    /// --color=never.
    #[arg(short, long)]
    plain: bool,
    /// Do not show the character panel on the right.
    #[arg(long)]
    no_characters: bool,
    /// Show the character panel on the right. This is the default, unless
    /// --no-characters has been specified.
    // Unit-typed field: it exists only so `-C` can override `--no-characters`
    // via clap's `overrides_with`; the flag's value itself is never read.
    #[arg(
        short('C'),
        long,
        action(ArgAction::SetTrue),
        overrides_with("no_characters")
    )]
    characters: (),
    /// Defines how bytes are mapped to characters.
    #[arg(long, value_enum, default_value_t, value_name("FORMAT"))]
    character_table: CharacterTable,
    /// Defines the color scheme for the characters.
    #[arg(long, value_enum, default_value_t, value_name("FORMAT"))]
    color_scheme: ColorScheme,
    /// Whether to display the position panel on the left.
    #[arg(short('P'), long)]
    no_position: bool,
    #[arg(
        help(DISPLAY_OFFSET_HELP_TEXT),
        short('o'),
        long,
        default_value("0"),
        value_name("N")
    )]
    display_offset: String,
    /// Sets the number of hex data panels to be displayed. `--panels=auto` will
    /// display the maximum number of hex data panels based on the current
    /// terminal width. By default, hexyl will show two panels, unless the
    /// terminal is not wide enough for that.
    // String (not an int type) because the sentinel value "auto" is accepted.
    #[arg(long, value_name("N"))]
    panels: Option<String>,
    /// Number of bytes/octets that should be grouped together. You can use the
    /// '--endianness' option to control the ordering of the bytes within a
    /// group. '--groupsize' can be used as an alias (xxd-compatibility).
    #[arg(
        short('g'),
        long,
        value_enum,
        default_value_t,
        alias("groupsize"),
        value_name("N")
    )]
    group_size: GroupSize,
    /// Whether to print out groups in little-endian or big-endian format. This
    /// option only has an effect if the '--group-size' is larger than 1. '-e'
    /// can be used as an alias for '--endianness=little'.
    #[arg(long, value_enum, default_value_t, value_name("FORMAT"))]
    endianness: Endianness,
    /// An alias for '--endianness=little'.
    #[arg(short('e'), hide(true), overrides_with("endianness"))]
    little_endian_format: bool,
    /// Sets the base used for the bytes. The possible options are binary,
    /// octal, decimal, and hexadecimal.
    #[arg(short('b'), long, default_value("hexadecimal"), value_name("B"))]
    base: String,
    #[arg(
        help(TERMINAL_WIDTH_HELP_TEXT),
        long,
        value_name("N"),
        conflicts_with("panels")
    )]
    terminal_width: Option<NonZeroU64>,
    /// Print a table showing how different types of bytes are colored.
    #[arg(long)]
    print_color_table: bool,
    /// Output in C include file style (similar to xxd -i).
    #[arg(
        short('i'),
        long("include"),
        help = "Output in C include file style",
        conflicts_with("little_endian_format"),
        conflicts_with("endianness")
    )]
    include_mode: bool,
    /// Show shell completion for a certain shell
    #[arg(long, value_name("SHELL"))]
    completion: Option<Shell>,
}
// Parsed value of `--color`. The `///` variant docs double as clap's
// per-value help text, so they are user-facing strings.
#[derive(Clone, Debug, Default, ValueEnum)]
enum ColorWhen {
    /// Always use colorized output.
    #[default]
    Always,
    /// Only displays colors if the output goes to an interactive terminal.
    Auto,
    /// Do not use colorized output.
    Never,
    /// Override the NO_COLOR environment variable.
    Force,
}
// Parsed value of `--group-size`. The `#[value(name = "…")]` attributes make
// the CLI accept the numerals "1", "2", "4", "8" rather than variant names;
// the `///` docs are clap's per-value help text.
#[derive(Clone, Debug, Default, ValueEnum)]
enum GroupSize {
    /// Grouped together every byte/octet.
    #[default]
    #[value(name = "1")]
    One,
    /// Grouped together every 2 bytes/octets.
    #[value(name = "2")]
    Two,
    /// Grouped together every 4 bytes/octets.
    #[value(name = "4")]
    Four,
    /// Grouped together every 8 bytes/octets.
    #[value(name = "8")]
    Eight,
}
impl From<GroupSize> for u8 {
fn from(number: GroupSize) -> Self {
match number {
GroupSize::One => 1,
GroupSize::Two => 2,
GroupSize::Four => 4,
GroupSize::Eight => 8,
}
}
}
/// CLI entry point: parses options, sets up the input reader (file or STDIN,
/// honoring `--skip`/`--length`), resolves all display settings, and streams
/// the dump through `Printer`. Returns an error for invalid arguments or
/// unreadable input.
fn run() -> Result<()> {
    let opt = Opt::parse();
    // Standalone actions that bypass the normal dump pipeline.
    if opt.print_color_table {
        return print_color_table().map_err(|e| anyhow!(e));
    }
    if let Some(sh) = opt.completion {
        let mut cmd = Opt::command();
        let name = cmd.get_name().to_string();
        generate(sh, &mut cmd, name, &mut io::stdout());
        return Ok(());
    }
    let stdin = io::stdin();
    let mut reader = match &opt.file {
        Some(filename) => {
            if filename.is_dir() {
                bail!("'{}' is a directory.", filename.to_string_lossy());
            }
            let file = File::open(filename)?;
            Input::File(file)
        }
        None => Input::Stdin(stdin.lock()),
    };
    // NOTE(review): a hexadecimal `--block-size` is validated for positivity
    // and then `run` returns early without producing any dump output —
    // confirm this early return is intended.
    if let Some(hex_number) = try_parse_as_hex_number(&opt.block_size) {
        return hex_number
            .map_err(|e| anyhow!(e))
            .and_then(|x| {
                PositiveI64::new(x).ok_or_else(|| anyhow!("block size argument must be positive"))
            })
            .map(|_| ());
    }
    let (num, unit) = extract_num_and_unit_from(&opt.block_size)?;
    // Defining the block size in units of blocks would be circular.
    if let Unit::Block { custom_size: _ } = unit {
        return Err(anyhow!(
            "can not use 'block(s)' as a unit to specify block size"
        ));
    }
    let block_size = num
        .checked_mul(unit.get_multiplier())
        .ok_or_else(|| anyhow!(ByteOffsetParseError::UnitMultiplicationOverflow))
        .and_then(|x| {
            PositiveI64::new(x).ok_or_else(|| anyhow!("block size argument must be positive"))
        })?;
    let skip_arg = opt
        .skip
        .as_ref()
        .map(|s| {
            parse_byte_offset(s, block_size).context(anyhow!(
                "failed to parse `--skip` arg {:?} as byte count",
                s
            ))
        })
        .transpose()?;
    // Apply `--skip`: negative values seek backwards from the end of the
    // input; the resulting absolute position feeds into the displayed offset.
    let skip_offset = if let Some(ByteOffset { kind, value }) = skip_arg {
        let value = value.into_inner();
        reader
            .seek(match kind {
                ByteOffsetKind::ForwardFromBeginning | ByteOffsetKind::ForwardFromLastOffset => {
                    SeekFrom::Current(value)
                }
                ByteOffsetKind::BackwardFromEnd => SeekFrom::End(value.checked_neg().unwrap()),
            })
            .map_err(|_| {
                anyhow!(
                    "Failed to jump to the desired input position. \
                    This could be caused by a negative offset that is too large or by \
                    an input that is not seek-able (e.g. if the input comes from a pipe)."
                )
            })?
    } else {
        0
    };
    // Parses a CLI value that must be a non-negative forward byte count.
    let parse_byte_count = |s| -> Result<u64> {
        Ok(parse_byte_offset(s, block_size)?
            .assume_forward_offset_from_start()?
            .into())
    };
    // Apply `--length` by capping the reader with `Read::take`.
    let mut reader = if let Some(ref length) = opt.length {
        let length = parse_byte_count(length).context(anyhow!(
            "failed to parse `--length` arg {:?} as byte count",
            length
        ))?;
        Box::new(reader.take(length))
    } else {
        reader.into_inner()
    };
    // Color resolution: the NO_COLOR environment variable wins over
    // `--color=always`/`auto`, but not over `--color=force`.
    let no_color = std::env::var_os("NO_COLOR").is_some();
    let show_color = match opt.color {
        ColorWhen::Never => false,
        ColorWhen::Always => !no_color,
        ColorWhen::Force => true,
        ColorWhen::Auto => {
            if no_color {
                false
            } else {
                supports_color::on(supports_color::Stream::Stdout)
                    .map(|level| level.has_basic)
                    .unwrap_or(false)
            }
        }
    };
    let border_style = opt.border;
    // Squeezing (eliding repeated lines) is on unless `-v` was given.
    let squeeze = !opt.no_squeezing;
    let show_char_panel = !opt.no_characters && !opt.plain;
    let show_position_panel = !opt.no_position && !opt.plain;
    let display_offset: u64 = parse_byte_count(&opt.display_offset).context(anyhow!(
        "failed to parse `--display-offset` arg {:?} as byte count",
        opt.display_offset
    ))?;
    // How many hex panels fit into `terminal_width` columns, given the digit
    // width of the chosen base and the byte grouping; always at least one.
    let max_panels_fn = |terminal_width: u64, base_digits: u64, group_size: u64| {
        let offset = if show_position_panel { 10 } else { 1 };
        let col_width = if show_char_panel {
            ((8 / group_size) * (base_digits * group_size + 1)) + 2 + 8
        } else {
            ((8 / group_size) * (base_digits * group_size + 1)) + 2
        };
        if (terminal_width.saturating_sub(offset)) / col_width < 1 {
            1
        } else {
            (terminal_width - offset) / col_width
        }
    };
    // `--base` accepts both numeric (2/8/10/16) and named ("hex", …) forms.
    let base = if let Ok(base_num) = opt.base.parse::<u8>() {
        match base_num {
            2 => Ok(Base::Binary),
            8 => Ok(Base::Octal),
            10 => Ok(Base::Decimal),
            16 => Ok(Base::Hexadecimal),
            _ => Err(anyhow!(
                "The number provided is not a valid base. Valid bases are 2, 8, 10, and 16."
            )),
        }
    } else {
        match opt.base.as_str() {
            "b" | "bin" | "binary" => Ok(Base::Binary),
            "o" | "oct" | "octal" => Ok(Base::Octal),
            "d" | "dec" | "decimal" => Ok(Base::Decimal),
            "x" | "hex" | "hexadecimal" => Ok(Base::Hexadecimal),
            _ => Err(anyhow!(
                "The base provided is not valid. Valid bases are \"b\", \"o\", \"d\", and \"x\"."
            )),
        }
    }?;
    // Number of digits needed to render one byte in the chosen base.
    let base_digits = match base {
        Base::Binary => 8,
        Base::Octal => 3,
        Base::Decimal => 3,
        Base::Hexadecimal => 2,
    };
    let group_size = u8::from(opt.group_size);
    let terminal_width = terminal_size().map(|s| s.0 .0 as u64).unwrap_or(80);
    let panels = if opt.panels.as_deref() == Some("auto") {
        max_panels_fn(terminal_width, base_digits, group_size.into())
    } else if let Some(panels) = opt.panels {
        panels
            .parse::<NonZeroU64>()
            .map(u64::from)
            .context(anyhow!(
                "failed to parse `--panels` arg {:?} as unsigned nonzero integer",
                panels
            ))?
    } else if let Some(terminal_width) = opt.terminal_width {
        max_panels_fn(terminal_width.into(), base_digits, group_size.into())
    } else {
        // Default: two panels, or fewer if the terminal is too narrow.
        std::cmp::min(
            2,
            max_panels_fn(terminal_width, base_digits, group_size.into()),
        )
    };
    // `-e` is a hidden alias for `--endianness=little`.
    let endianness = if opt.little_endian_format {
        Endianness::Little
    } else {
        opt.endianness
    };
    let character_table = opt.character_table;
    let color_scheme = opt.color_scheme;
    let mut stdout = BufWriter::new(io::stdout().lock());
    // For `--include`, derive the identifier from the input file name,
    // falling back to "file" for non-UTF-8 names; STDIN input gets its own
    // mode. (Rewritten from a `match` on bool with `is_some()`+`unwrap()`.)
    let include_mode = if opt.include_mode {
        if let Some(file) = &opt.file {
            IncludeMode::File(
                file.file_name()
                    .and_then(|n| n.to_str())
                    .unwrap_or("file")
                    .to_string(),
            )
        } else {
            IncludeMode::Stdin
        }
    } else {
        IncludeMode::Off
    };
    let mut printer = PrinterBuilder::new(&mut stdout)
        .show_color(show_color)
        .show_char_panel(show_char_panel)
        .show_position_panel(show_position_panel)
        .with_border_style(border_style)
        .enable_squeezing(squeeze)
        .num_panels(panels)
        .group_size(group_size)
        .with_base(base)
        .endianness(endianness)
        .character_table(character_table)
        .include_mode(include_mode)
        .color_scheme(color_scheme)
        .build();
    // Displayed positions account for both `--skip` and `--display-offset`.
    printer.display_offset(skip_offset + display_offset);
    printer.print_all(&mut reader).map_err(|e| anyhow!(e))?;
    Ok(())
}
fn main() {
    // Run the CLI and translate the outcome into a process exit code.
    if let Err(err) = run() {
        // A broken pipe (e.g. `hexyl file | head`) is a normal way for the
        // consumer to stop reading — exit quietly and successfully.
        let is_broken_pipe = err
            .downcast_ref::<io::Error>()
            .is_some_and(|io_err| io_err.kind() == io::ErrorKind::BrokenPipe);
        if is_broken_pipe {
            std::process::exit(0);
        }
        eprintln!("Error: {err:?}");
        std::process::exit(1);
    }
}
/// An `i64` that is guaranteed to be zero or greater.
#[derive(Clone, Copy, Debug, Default, Hash, Eq, Ord, PartialEq, PartialOrd)]
pub struct NonNegativeI64(i64);
impl NonNegativeI64 {
    /// Wraps `x`, refusing negative values.
    pub fn new(x: i64) -> Option<Self> {
        (x >= 0).then_some(Self(x))
    }
    /// Returns the wrapped value.
    pub fn into_inner(self) -> i64 {
        self.0
    }
}
impl From<NonNegativeI64> for u64 {
    fn from(x: NonNegativeI64) -> u64 {
        u64::try_from(x.0)
            .expect("invariant broken: NonNegativeI64 should contain a non-negative i64 value")
    }
}
/// Prints a legend mapping each byte category to its configured color
/// (implements `--print-color-table`).
fn print_color_table() -> io::Result<()> {
    // One row per byte category: ANSI color prefix + sample glyph/description.
    let rows: [(&str, &str); 5] = [
        (COLOR_NULL.as_str(), "⋄ NULL bytes (0x00)"),
        (
            COLOR_ASCII_PRINTABLE.as_str(),
            "a ASCII printable characters (0x20 - 0x7E)",
        ),
        (
            COLOR_ASCII_WHITESPACE.as_str(),
            "_ ASCII whitespace (0x09 - 0x0D, 0x20)",
        ),
        (
            COLOR_ASCII_OTHER.as_str(),
            "• ASCII control characters (except NULL and whitespace)",
        ),
        (COLOR_NONASCII.as_str(), "× Non-ASCII bytes (0x80 - 0xFF)"),
    ];
    let mut stdout = BufWriter::new(io::stdout().lock());
    writeln!(stdout, "hexyl color reference:\n")?;
    for (color, description) in rows {
        stdout.write_all(color.as_bytes())?;
        writeln!(stdout, "{description}")?;
        stdout.write_all(COLOR_RESET.as_bytes())?;
    }
    Ok(())
}
/// An `i64` that is guaranteed to be strictly greater than zero.
#[derive(Clone, Copy, Debug, Default, Hash, Eq, Ord, PartialEq, PartialOrd)]
pub struct PositiveI64(i64);
impl PositiveI64 {
    /// Wraps `x`, refusing zero and negative values.
    pub fn new(x: i64) -> Option<Self> {
        (x >= 1).then_some(Self(x))
    }
    /// Returns the wrapped value.
    pub fn into_inner(self) -> i64 {
        self.0
    }
}
impl From<PositiveI64> for u64 {
    fn from(x: PositiveI64) -> u64 {
        u64::try_from(x.0)
            .expect("invariant broken: PositiveI64 should contain a positive i64 value")
    }
}
/// A size-multiplier suffix accepted by `--length`, `--skip`, etc.
/// Decimal prefixes (kB, MB, …) are powers of 1000; binary prefixes
/// (KiB, MiB, …) are powers of 1024.
#[derive(Debug, PartialEq)]
enum Unit {
    Byte,
    Kilobyte,
    Megabyte,
    Gigabyte,
    Terabyte,
    Kibibyte,
    Mebibyte,
    Gibibyte,
    Tebibyte,
    /// a customizable amount of bytes
    Block {
        // `None` means "use the configured `--block-size`" (filled in later).
        custom_size: Option<NonZeroI64>,
    },
}
impl Unit {
    /// Number of bytes one instance of this unit stands for.
    /// A `Block` without an explicit size falls back to `DEFAULT_BLOCK_SIZE`.
    const fn get_multiplier(self) -> i64 {
        match self {
            Self::Byte => 1,
            Self::Kilobyte => 1000,
            Self::Megabyte => 1_000_000,
            Self::Gigabyte => 1_000_000_000,
            Self::Terabyte => 1_000_000_000_000,
            Self::Kibibyte => 1 << 10,
            Self::Mebibyte => 1 << 20,
            Self::Gibibyte => 1 << 30,
            Self::Tebibyte => 1 << 40,
            Self::Block { custom_size } => match custom_size {
                Some(size) => size.get(),
                None => DEFAULT_BLOCK_SIZE,
            },
        }
    }
}
/// Prefix that marks a numeric argument as hexadecimal (e.g. "0x1f").
const HEX_PREFIX: &str = "0x";
/// How a parsed byte-count argument is anchored, as determined by its sign.
#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
enum ByteOffsetKind {
    // No sign: counted forward from the beginning of the input.
    ForwardFromBeginning,
    // Explicit `+`: counted forward from the current position.
    ForwardFromLastOffset,
    // Explicit `-`: counted backwards from the end of the input.
    BackwardFromEnd,
}
/// A parsed offset argument: a non-negative magnitude plus a direction.
#[derive(Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
struct ByteOffset {
    // Magnitude only; the direction is carried by `kind`.
    value: NonNegativeI64,
    kind: ByteOffsetKind,
}
/// Error returned when a backwards-from-end offset is supplied where only a
/// plain forward count makes sense (e.g. `--length`, `--display-offset`).
#[derive(Clone, Debug, ThisError)]
#[error(
    "negative offset specified, but only positive offsets (counts) are accepted in this context"
)]
struct NegativeOffsetSpecifiedError;
impl ByteOffset {
    /// Interprets this offset as a forward count from the start of the input,
    /// rejecting offsets that point backwards from the end.
    fn assume_forward_offset_from_start(
        &self,
    ) -> Result<NonNegativeI64, NegativeOffsetSpecifiedError> {
        if matches!(self.kind, ByteOffsetKind::BackwardFromEnd) {
            Err(NegativeOffsetSpecifiedError)
        } else {
            Ok(self.value)
        }
    }
}
/// Everything that can go wrong while parsing a byte-count/offset argument;
/// the `#[error]` strings are the user-facing messages.
#[derive(Clone, Debug, Eq, PartialEq, ThisError)]
enum ByteOffsetParseError {
    #[error("no character data found, did you forget to write it?")]
    Empty,
    #[error("no digits found after sign, did you forget to write them?")]
    EmptyAfterSign,
    #[error(
        "found {0:?} sign after hex prefix ({:?}); signs should go before it",
        HEX_PREFIX
    )]
    SignFoundAfterHexPrefix(char),
    #[error("{0:?} is not of the expected form <pos-integer>[<unit>]")]
    InvalidNumAndUnit(String),
    #[error("{0:?} is a valid unit, but an integer should come before it")]
    EmptyWithUnit(String),
    #[error("invalid unit {0:?}")]
    InvalidUnit(String),
    #[error("failed to parse integer part")]
    ParseNum(#[source] std::num::ParseIntError),
    #[error("count multiplied by the unit overflowed a signed 64-bit integer; are you sure it should be that big?")]
    UnitMultiplicationOverflow,
}
/// Parses a CLI byte-offset argument: an optional sign, then either a hex
/// number ("0x…") or a decimal number with an optional unit suffix.
/// `block_size` is substituted when the "block(s)" unit is used.
fn parse_byte_offset(n: &str, block_size: PositiveI64) -> Result<ByteOffset, ByteOffsetParseError> {
    use ByteOffsetParseError::*;
    let (n, kind) = process_sign_of(n)?;
    let into_byte_offset = |value| {
        Ok(ByteOffset {
            // The sign was stripped above and hex parsing rejects signs, so
            // `value` should never be negative here.
            value: NonNegativeI64::new(value).unwrap(),
            kind,
        })
    };
    if let Some(hex_number) = try_parse_as_hex_number(n) {
        return hex_number.map(into_byte_offset)?;
    }
    let (num, mut unit) = extract_num_and_unit_from(n)?;
    // Resolve the placeholder "block" unit to the configured block size.
    if let Unit::Block { custom_size: None } = unit {
        unit = Unit::Block {
            custom_size: Some(
                NonZeroI64::new(block_size.into_inner()).expect("PositiveI64 was zero"),
            ),
        };
    }
    num.checked_mul(unit.get_multiplier())
        .ok_or(UnitMultiplicationOverflow)
        .and_then(into_byte_offset)
}
/// Takes a string containing a base-10 number and an optional unit, and returns them with their proper types.
/// The unit must directly follow the number (no whitespace in between).
/// When no unit is given, [Unit::Byte] is assumed; [Unit::Block] is returned
/// without a custom size. No normalization is performed, that is "1024" is
/// extracted to (1024, Byte), not (1, Kibibyte).
fn extract_num_and_unit_from(n: &str) -> Result<(i64, Unit), ByteOffsetParseError> {
    use ByteOffsetParseError::*;
    if n.is_empty() {
        return Err(Empty);
    }
    // Index of the first non-digit character. All preceding characters are
    // ASCII digits (one byte each), so this char index is also a valid byte
    // index for `split_at`.
    let Some(unit_begin_idx) = n.chars().position(|c| !c.is_ascii_digit()) else {
        // Digits only: a plain byte count.
        return n.parse::<i64>().map(|num| (num, Unit::Byte)).map_err(ParseNum);
    };
    let (digits, raw_unit) = n.split_at(unit_begin_idx);
    let unit = match raw_unit.to_lowercase().as_str() {
        "" => Unit::Byte, // no "b" => Byte to allow hex nums with units
        "kb" => Unit::Kilobyte,
        "mb" => Unit::Megabyte,
        "gb" => Unit::Gigabyte,
        "tb" => Unit::Terabyte,
        "kib" => Unit::Kibibyte,
        "mib" => Unit::Mebibyte,
        "gib" => Unit::Gibibyte,
        "tib" => Unit::Tebibyte,
        "block" | "blocks" => Unit::Block { custom_size: None },
        _ if digits.is_empty() => return Err(InvalidNumAndUnit(raw_unit.to_string())),
        _ => return Err(InvalidUnit(raw_unit.to_string())),
    };
    let num = digits.parse::<i64>().map_err(|e| {
        if digits.is_empty() {
            // e.g. "kb" on its own: a valid unit with no count before it.
            EmptyWithUnit(raw_unit.to_owned())
        } else {
            ParseNum(e)
        }
    })?;
    Ok((num, unit))
}
/// Extracts a [ByteOffsetKind] based on the sign at the beginning of the given
/// string, returning the input without the sign (or unchanged if there was
/// no sign). A lone sign with nothing after it is rejected.
fn process_sign_of(n: &str) -> Result<(&str, ByteOffsetKind), ByteOffsetParseError> {
    use ByteOffsetParseError::*;
    if n.is_empty() {
        return Err(Empty);
    }
    if let Some(rest) = n.strip_prefix('+') {
        if rest.is_empty() {
            return Err(EmptyAfterSign);
        }
        return Ok((rest, ByteOffsetKind::ForwardFromLastOffset));
    }
    if let Some(rest) = n.strip_prefix('-') {
        if rest.is_empty() {
            return Err(EmptyAfterSign);
        }
        return Ok((rest, ByteOffsetKind::BackwardFromEnd));
    }
    Ok((n, ByteOffsetKind::ForwardFromBeginning))
}
/// If `n` starts with the hex prefix ("0x"), the remainder is parsed as a
/// hexadecimal number and returned as `Some(result)`; without the prefix,
/// `None` is returned.
fn try_parse_as_hex_number(n: &str) -> Option<Result<i64, ByteOffsetParseError>> {
    use ByteOffsetParseError::*;
    let digits = n.strip_prefix(HEX_PREFIX)?;
    let parsed = match digits.chars().next() {
        // A sign directly after "0x" is never valid; distinguish a lone sign
        // (nothing after it) to give the more specific error message.
        Some(sign @ ('+' | '-')) => {
            if digits.len() == 1 {
                Err(EmptyAfterSign)
            } else {
                Err(SignFoundAfterHexPrefix(sign))
            }
        }
        _ => i64::from_str_radix(digits, 16).map_err(ParseNum),
    };
    Some(parsed)
}
| rust | Apache-2.0 | 2e2643782d6ced9b5ac75596169a79127d8e535a | 2026-01-04T15:43:36.733781Z | false |
sharkdp/hexyl | https://github.com/sharkdp/hexyl/blob/2e2643782d6ced9b5ac75596169a79127d8e535a/src/colors.rs | src/colors.rs | use owo_colors::{colors, AnsiColors, Color, DynColors, OwoColorize};
use std::str::FromStr;
use std::sync::LazyLock;
// ANSI escape prefixes for each byte category, lazily initialized on first
// use. Each can be overridden at runtime through a `HEXYL_COLOR_<NAME>`
// environment variable (see `init_color` below).
pub static COLOR_NULL: LazyLock<String> =
    LazyLock::new(|| init_color("NULL", AnsiColors::BrightBlack));
pub static COLOR_OFFSET: LazyLock<String> =
    LazyLock::new(|| init_color("OFFSET", AnsiColors::BrightBlack));
pub static COLOR_ASCII_PRINTABLE: LazyLock<String> =
    LazyLock::new(|| init_color("ASCII_PRINTABLE", AnsiColors::Cyan));
pub static COLOR_ASCII_WHITESPACE: LazyLock<String> =
    LazyLock::new(|| init_color("ASCII_WHITESPACE", AnsiColors::Green));
pub static COLOR_ASCII_OTHER: LazyLock<String> =
    LazyLock::new(|| init_color("ASCII_OTHER", AnsiColors::Green));
pub static COLOR_NONASCII: LazyLock<String> =
    LazyLock::new(|| init_color("NONASCII", AnsiColors::Yellow));
// Escape sequence that restores the default foreground color.
pub const COLOR_RESET: &str = colors::Default::ANSI_FG;
/// Resolves the escape sequence for one byte category: an override from the
/// `HEXYL_COLOR_<name>` environment variable when set and parseable,
/// otherwise the given default ANSI color.
fn init_color(name: &str, default_ansi: AnsiColors) -> String {
    let fallback = DynColors::Ansi(default_ansi);
    let color = std::env::var(format!("HEXYL_COLOR_{name}"))
        .ok()
        .and_then(|value| DynColors::from_str(&value).ok())
        .unwrap_or(fallback);
    // owo_colors' API isn't designed to get the terminal codes directly for
    // dynamic colors, so we use this hack to get them from the LHS of some text.
    format!("{}", "|".color(color))
        .split_once('|')
        .unwrap()
        .0
        .to_owned()
}
// Precomputed 24-bit color escape sequences. Each entry is one complete
// 19-byte `ESC[38;2;<r>;<g>;<b>m` sequence produced by `rgb_bytes`; the
// gradient tables are indexed by (offset-adjusted) byte value, with stops
// given as `(r, g, b, position)` and `position` in the range 0.0..=1.0.
pub const COLOR_NULL_RGB: &[u8] = &rgb_bytes(100, 100, 100);
pub const COLOR_DEL: &[u8] = &rgb_bytes(64, 128, 0);
pub const COLOR_GRADIENT_NONASCII: [[u8; 19]; 128] =
    generate_color_gradient(&[(255, 0, 0, 0.0), (255, 255, 0, 0.66), (255, 255, 255, 1.0)]);
pub const COLOR_GRADIENT_ASCII_NONPRINTABLE: [[u8; 19]; 31] =
    generate_color_gradient(&[(255, 0, 255, 0.0), (128, 0, 255, 1.0)]);
pub const COLOR_GRADIENT_ASCII_PRINTABLE: [[u8; 19]; 95] =
    generate_color_gradient(&[(0, 128, 255, 0.0), (0, 255, 128, 1.0)]);
/// Renders `byte` as three zero-padded ASCII decimal digits.
const fn as_dec(byte: u8) -> [u8; 3] {
    let hundreds = byte / 100;
    let tens = (byte / 10) % 10;
    let ones = byte % 10;
    [b'0' + hundreds, b'0' + tens, b'0' + ones]
}
/// Builds the 19-byte 24-bit ANSI foreground escape sequence for an RGB color.
const fn rgb_bytes(r: u8, g: u8, b: u8) -> [u8; 19] {
    // Template: ESC [38;2;rrr;ggg;bbbm — each channel is a fixed-width
    // three-digit decimal field starting at byte offsets 7, 11 and 15.
    let mut buf = *b"\x1b[38;2;rrr;ggg;bbbm";
    let values = [r, g, b];
    let offsets = [7usize, 11, 15];
    let mut i = 0;
    while i < 3 {
        let digits = as_dec(values[i]);
        let offset = offsets[i];
        buf[offset] = digits[0];
        buf[offset + 1] = digits[1];
        buf[offset + 2] = digits[2];
        i += 1;
    }
    buf
}
/// Builds `N` 24-bit color escape sequences by piecewise-linear interpolation
/// between the given `(r, g, b, position)` stops, where `position` runs from
/// 0.0 to 1.0. Evaluated entirely at compile time.
const fn generate_color_gradient<const N: usize>(stops: &[(u8, u8, u8, f64)]) -> [[u8; 19]; N] {
    let mut out = [rgb_bytes(0, 0, 0); N];
    assert!(stops.len() >= 2, "need at least two stops for the gradient");
    let mut byte = 0;
    while byte < N {
        // Position of this entry within the gradient, in [0, 1).
        let relative_byte = byte as f64 / N as f64;
        // Find the first stop at or past this position; clamp to the last
        // stop so positions beyond the final stop still interpolate validly.
        let mut i = 1;
        while i < stops.len() && stops[i].3 < relative_byte {
            i += 1;
        }
        if i >= stops.len() {
            i = stops.len() - 1;
        }
        let prev_stop = stops[i - 1];
        let stop = stops[i];
        // Linear interpolation between the two neighboring stops.
        let diff = stop.3 - prev_stop.3;
        let t = (relative_byte - prev_stop.3) / diff;
        let r = (prev_stop.0 as f64 + (t * (stop.0 as f64 - prev_stop.0 as f64))) as u8;
        let g = (prev_stop.1 as f64 + (t * (stop.1 as f64 - prev_stop.1 as f64))) as u8;
        let b = (prev_stop.2 as f64 + (t * (stop.2 as f64 - prev_stop.2 as f64))) as u8;
        out[byte] = rgb_bytes(r, g, b);
        byte += 1;
    }
    out
}
// Code page 437 character table: one display glyph per byte value
// (index == byte), 16 glyphs per row.
#[rustfmt::skip]
pub const CP437: [char; 256] = [
    // Copyright (c) 2016, Delan Azabani <delan@azabani.com>
    //
    // Permission to use, copy, modify, and/or distribute this software for any
    // purpose with or without fee is hereby granted, provided that the above
    // copyright notice and this permission notice appear in all copies.
    //
    // THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    // WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    // MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    // ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    // WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    // ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    // OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
    //
    // modified to use the ⋄ character instead of ␀
    // use https://en.wikipedia.org/w/index.php?title=Code_page_437&oldid=978947122
    // not ftp://ftp.unicode.org/Public/MAPPINGS/VENDORS/MICSFT/PC/CP437.TXT
    // because we want the graphic versions of 01h–1Fh + 7Fh
    '⋄','☺','☻','♥','♦','♣','♠','•','◘','○','◙','♂','♀','♪','♫','☼',
    '►','◄','↕','‼','¶','§','▬','↨','↑','↓','→','←','∟','↔','▲','▼',
    ' ','!','"','#','$','%','&','\'','(',')','*','+',',','-','.','/',
    '0','1','2','3','4','5','6','7','8','9',':',';','<','=','>','?',
    '@','A','B','C','D','E','F','G','H','I','J','K','L','M','N','O',
    'P','Q','R','S','T','U','V','W','X','Y','Z','[','\\',']','^','_',
    '`','a','b','c','d','e','f','g','h','i','j','k','l','m','n','o',
    'p','q','r','s','t','u','v','w','x','y','z','{','|','}','~','⌂',
    'Ç','ü','é','â','ä','à','å','ç','ê','ë','è','ï','î','ì','Ä','Å',
    'É','æ','Æ','ô','ö','ò','û','ù','ÿ','Ö','Ü','¢','£','¥','₧','ƒ',
    'á','í','ó','ú','ñ','Ñ','ª','º','¿','⌐','¬','½','¼','¡','«','»',
    '░','▒','▓','│','┤','╡','╢','╖','╕','╣','║','╗','╝','╜','╛','┐',
    '└','┴','┬','├','─','┼','╞','╟','╚','╔','╩','╦','╠','═','╬','╧',
    '╨','╤','╥','╙','╘','╒','╓','╫','╪','┘','┌','█','▄','▌','▐','▀',
    'α','ß','Γ','π','Σ','σ','µ','τ','Φ','Θ','Ω','δ','∞','φ','ε','∩',
    '≡','±','≥','≤','⌠','⌡','÷','≈','°','∙','·','√','ⁿ','²','■','ﬀ',
];
// Code page 1047 (EBCDIC) character table: one display glyph per byte value
// (index == byte), 16 glyphs per row; unmapped bytes render as '.'.
#[rustfmt::skip]
pub const CP1047: [char; 256] = [
    //
    // Copyright (c) 2016,2024 IBM Corporation and other Contributors.
    //
    // All rights reserved. This program and the accompanying materials
    // are made available under the terms of the Eclipse Public License v1.0
    // which accompanies this distribution, and is available at
    // http://www.eclipse.org/legal/epl-v10.html
    //
    // Contributors:
    // Mark Taylor - Initial Contribution
    //
    // ref1 https://github.com/ibm-messaging/mq-smf-csv/blob/master/src/smfConv.c
    // ref2 https://web.archive.org/web/20150607033635/http://www-01.ibm.com/software/globalization/cp/cp01047.html
    '.','.','.','.','.','.','.','.','.','.','.','.','.','.','.','.',
    '.','.','.','.','.','.','.','.','.','.','.','.','.','.','.','.',
    '.','.','.','.','.','.','.','.','.','.','.','.','.','.','.','.',
    '.','.','.','.','.','.','.','.','.','.','.','.','.','.','.','.',
    ' ','.','.','.','.','.','.','.','.','.','$','.','<','(','+','|',
    '&','.','.','.','.','.','.','.','.','.','!','$','*',')',';','.',
    '-','/','.','.','.','.','.','.','.','.','.',',','%','_','>','?',
    '.','.','.','.','.','.','.','.','.','.',':','#','@','\'','=','.',
    '.','a','b','c','d','e','f','g','h','i','.','{','.','(','+','.',
    '.','j','k','l','m','n','o','p','q','r','.','}','.',')','.','.',
    '.','~','s','t','u','v','w','x','y','z','.','.','.','.','.','.',
    '.','.','.','.','.','.','.','.','.','.','[',']','.','.','.','-',
    '{','A','B','C','D','E','F','G','H','I','.','.','.','.','.','.',
    '}','J','K','L','M','N','O','P','Q','R','.','.','.','.','.','.',
    '.','.','S','T','U','V','W','X','Y','Z','.','.','.','.','.','.',
    '0','1','2','3','4','5','6','7','8','9','.','.','.','.','.','.'
];
| rust | Apache-2.0 | 2e2643782d6ced9b5ac75596169a79127d8e535a | 2026-01-04T15:43:36.733781Z | false |
sharkdp/hexyl | https://github.com/sharkdp/hexyl/blob/2e2643782d6ced9b5ac75596169a79127d8e535a/src/input.rs | src/input.rs | use std::fs;
use std::io::{self, copy, sink, Read, Seek, SeekFrom};
/// The dump's data source: an opened file, or the process's locked STDIN.
pub enum Input<'a> {
    File(fs::File),
    Stdin(io::StdinLock<'a>),
}
impl Read for Input<'_> {
    /// Delegates the read to whichever source this input wraps.
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        match self {
            Input::File(file) => file.read(buf),
            Input::Stdin(stdin) => stdin.read(buf),
        }
    }
}
impl Seek for Input<'_> {
    // Files seek natively; pipes and STDIN emulate forward relative seeks by
    // reading and discarding bytes.
    fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {
        // Fallback for non-seekable streams: only `SeekFrom::Current(n)` with
        // n >= 0 can be emulated, by draining `n` bytes into a sink.
        fn try_skip<R>(reader: R, pos: SeekFrom, err_desc: &'static str) -> io::Result<u64>
        where
            R: Read,
        {
            let cant_seek_abs_err = || Err(io::Error::other(err_desc));
            let offset = match pos {
                // A negative relative offset cannot be emulated either.
                SeekFrom::Current(o) => u64::try_from(o).or_else(|_e| cant_seek_abs_err())?,
                SeekFrom::Start(_) | SeekFrom::End(_) => cant_seek_abs_err()?,
            };
            copy(&mut reader.take(offset), &mut sink())
        }
        match *self {
            Input::File(ref mut file) => {
                let seek_res = file.seek(pos);
                // ESPIPE means the "file" is actually a pipe/FIFO; fall back
                // to the read-and-discard emulation.
                if let Err(Some(libc::ESPIPE)) = seek_res.as_ref().map_err(|err| err.raw_os_error())
                {
                    try_skip(
                        file,
                        pos,
                        "Pipes only support seeking forward with a relative offset",
                    )
                } else {
                    seek_res
                }
            }
            Input::Stdin(ref mut stdin) => try_skip(
                stdin,
                pos,
                "STDIN only supports seeking forward with a relative offset",
            ),
        }
    }
}
impl<'a> Input<'a> {
pub fn into_inner(self) -> Box<dyn Read + 'a> {
match self {
Input::File(file) => Box::new(file),
Input::Stdin(stdin) => Box::new(stdin),
}
}
}
| rust | Apache-2.0 | 2e2643782d6ced9b5ac75596169a79127d8e535a | 2026-01-04T15:43:36.733781Z | false |
sharkdp/hexyl | https://github.com/sharkdp/hexyl/blob/2e2643782d6ced9b5ac75596169a79127d8e535a/tests/integration_tests.rs | tests/integration_tests.rs | use assert_cmd::Command;
// Builds a `Command` for the compiled `hexyl` binary, with its working
// directory set to the folder holding the fixture files used by these tests.
fn hexyl() -> Command {
    let mut cmd = Command::cargo_bin("hexyl").unwrap();
    cmd.current_dir("tests/examples");
    cmd
}
// Assertion helper that shows a colored diff on mismatch, which is much
// easier to read than the default byte dump for box-drawing output.
trait PrettyAssert<S>
where
    S: AsRef<str>,
{
    fn pretty_stdout(self, other: S);
}
// https://github.com/assert-rs/assert_cmd/issues/121#issuecomment-849937376
//
impl<S> PrettyAssert<S> for assert_cmd::assert::Assert
where
    S: AsRef<str>,
{
    fn pretty_stdout(self, other: S) {
        // Print both lengths up front so truncation is obvious at a glance.
        println!("{}", other.as_ref().len());
        let self_str = String::from_utf8(self.get_output().stdout.clone()).unwrap();
        println!("{}", self_str.len());
        pretty_assertions::assert_eq!(self_str, other.as_ref());
    }
}
// End-to-end smoke tests: run the real binary against tiny fixtures and
// compare the full stdout against golden output.
mod basic {
    use super::hexyl;
    #[test]
    fn can_print_simple_ascii_file() {
        hexyl()
            .arg("ascii")
            .arg("--color=never")
            .assert()
            .success()
            .stdout(
                "┌────────┬─────────────────────────┬─────────────────────────┬────────┬────────┐\n\
                 │00000000│ 30 31 32 33 34 35 36 37 ┊ 38 39 61 62 63 64 65 0a │01234567┊89abcde_│\n\
                 └────────┴─────────────────────────┴─────────────────────────┴────────┴────────┘\n",
            );
    }
    #[test]
    fn can_read_input_from_stdin() {
        hexyl()
            .arg("--color=never")
            .write_stdin("abc")
            .assert()
            .success()
            .stdout(
                "┌────────┬─────────────────────────┬─────────────────────────┬────────┬────────┐\n\
                 │00000000│ 61 62 63 ┊ │abc ┊ │\n\
                 └────────┴─────────────────────────┴─────────────────────────┴────────┴────────┘\n",
            );
    }
    #[test]
    fn fails_on_non_existing_input() {
        hexyl().arg("non-existing").assert().failure();
    }
    #[test]
    fn prints_warning_on_empty_content() {
        hexyl()
            .arg("empty")
            .arg("--color=never")
            .assert()
            .success()
            .stdout(
                "┌────────┬─────────────────────────┬─────────────────────────┬────────┬────────┐\n\
                 │ │ No content │ │ │ │\n\
                 └────────┴─────────────────────────┴─────────────────────────┴────────┴────────┘\n",
            );
    }
}
// Tests for `--length` and its mutually-exclusive aliases (`--bytes`, `-l`).
mod length {
    use super::hexyl;
    #[test]
    fn length_restricts_output_size() {
        hexyl()
            .arg("hello_world_elf64")
            .arg("--color=never")
            .arg("--length=32")
            .assert()
            .success()
            .stdout(
                "┌────────┬─────────────────────────┬─────────────────────────┬────────┬────────┐\n\
                 │00000000│ 7f 45 4c 46 02 01 01 00 ┊ 00 00 00 00 00 00 00 00 │•ELF•••⋄┊⋄⋄⋄⋄⋄⋄⋄⋄│\n\
                 │00000010│ 02 00 3e 00 01 00 00 00 ┊ 00 10 40 00 00 00 00 00 │•⋄>⋄•⋄⋄⋄┊⋄•@⋄⋄⋄⋄⋄│\n\
                 └────────┴─────────────────────────┴─────────────────────────┴────────┴────────┘\n",
            );
    }
    // The aliases conflict with each other; clap must reject the combination.
    #[test]
    fn fail_if_length_and_bytes_options_are_used_simultaneously() {
        hexyl()
            .arg("hello_world_elf64")
            .arg("--length=32")
            .arg("--bytes=10")
            .assert()
            .failure();
    }
    #[test]
    fn fail_if_length_and_count_options_are_used_simultaneously() {
        hexyl()
            .arg("hello_world_elf64")
            .arg("--length=32")
            .arg("-l=10")
            .assert()
            .failure();
    }
}
// `--bytes` is an alias of `--length`, so combining it with `-l` must fail.
mod bytes {
    use super::hexyl;
    #[test]
    fn fail_if_bytes_and_count_options_are_used_simultaneously() {
        hexyl()
            .arg("hello_world_elf64")
            .arg("--bytes=32")
            .arg("-l=10")
            .assert()
            .failure();
    }
}
// Tests for `--skip`: forward offsets, past-the-end behavior, and negative
// (from-the-end) offsets.
mod skip {
    use super::hexyl;
    #[test]
    fn basic() {
        hexyl()
            .arg("ascii")
            .arg("--color=never")
            .arg("--skip=2")
            .arg("--length=4")
            .assert()
            .success()
            .stdout(
                "┌────────┬─────────────────────────┬─────────────────────────┬────────┬────────┐\n\
                 │00000002│ 32 33 34 35 ┊ │2345 ┊ │\n\
                 └────────┴─────────────────────────┴─────────────────────────┴────────┴────────┘\n",
            );
    }
    #[test]
    fn prints_warning_when_skipping_past_the_end() {
        hexyl()
            .arg("ascii")
            .arg("--color=never")
            .arg("--skip=1000")
            .assert()
            .success()
            .stdout(
                "┌────────┬─────────────────────────┬─────────────────────────┬────────┬────────┐\n\
                 │ │ No content │ │ │ │\n\
                 └────────┴─────────────────────────┴─────────────────────────┴────────┴────────┘\n",
            );
    }
    // A negative skip seeks backwards from the end of the file.
    #[test]
    fn negative_offset() {
        hexyl()
            .arg("ascii")
            .arg("--color=never")
            .arg("--skip=-4")
            .arg("--length=3")
            .assert()
            .success()
            .stdout(
                "┌────────┬─────────────────────────┬─────────────────────────┬────────┬────────┐\n\
                 │0000000c│ 63 64 65 ┊ │cde ┊ │\n\
                 └────────┴─────────────────────────┴─────────────────────────┴────────┴────────┘\n",
            );
    }
    #[test]
    fn fails_if_negative_offset_is_too_large() {
        hexyl()
            .arg("ascii")
            .arg("--color=never")
            .arg("--skip=-1MiB")
            .assert()
            .failure()
            .stderr(predicates::str::contains("Failed to jump"));
    }
}
// Tests for `--display-offset`, alone and combined with `--skip` (the two
// offsets are added together in the position panel).
mod display_offset {
    use super::hexyl;
    #[test]
    fn basic() {
        hexyl()
            .arg("ascii")
            .arg("--color=never")
            .arg("--display-offset=0xc0ffee")
            .assert()
            .success()
            .stdout(
                "┌────────┬─────────────────────────┬─────────────────────────┬────────┬────────┐\n\
                 │00c0ffee│ 30 31 32 33 34 35 36 37 ┊ 38 39 61 62 63 64 65 0a │01234567┊89abcde_│\n\
                 └────────┴─────────────────────────┴─────────────────────────┴────────┴────────┘\n",
            );
    }
    #[test]
    fn display_offset_and_skip() {
        hexyl()
            .arg("hello_world_elf64")
            .arg("--color=never")
            .arg("--display-offset=0x20")
            .arg("--skip=0x10")
            .arg("--length=0x10")
            .assert()
            .success()
            .stdout(
                "┌────────┬─────────────────────────┬─────────────────────────┬────────┬────────┐\n\
                 │00000030│ 02 00 3e 00 01 00 00 00 ┊ 00 10 40 00 00 00 00 00 │•⋄>⋄•⋄⋄⋄┊⋄•@⋄⋄⋄⋄⋄│\n\
                 └────────┴─────────────────────────┴─────────────────────────┴────────┴────────┘\n",
            );
    }
}
// `--block-size` must be strictly positive; zero and negative values are
// rejected at argument-parsing time.
mod blocksize {
    use super::hexyl;
    #[test]
    fn fails_for_zero_or_negative_blocksize() {
        hexyl()
            .arg("ascii")
            .arg("--block-size=0")
            .assert()
            .failure();
        hexyl()
            .arg("ascii")
            .arg("--block-size=-16")
            .assert()
            .failure();
    }
}
// Tests for panel-toggling flags: `--plain`, `--no-characters`,
// `--no-position`.
mod display_settings {
    use super::hexyl;
    // `--plain` disables border, colors, position and character panels.
    #[test]
    fn plain() {
        hexyl()
            .arg("ascii")
            .arg("--plain")
            .assert()
            .success()
            .stdout(" 30 31 32 33 34 35 36 37 38 39 61 62 63 64 65 0a \n");
    }
    #[test]
    fn no_chars() {
        hexyl()
            .arg("ascii")
            .arg("--no-characters")
            .arg("--color=never")
            .assert()
            .success()
            .stdout(
                "┌────────┬─────────────────────────┬─────────────────────────┐\n\
                 │00000000│ 30 31 32 33 34 35 36 37 ┊ 38 39 61 62 63 64 65 0a │\n\
                 └────────┴─────────────────────────┴─────────────────────────┘\n",
            );
    }
    #[test]
    fn no_position() {
        hexyl()
            .arg("ascii")
            .arg("--no-position")
            .arg("--color=never")
            .assert()
            .success()
            .stdout(
                "┌─────────────────────────┬─────────────────────────┬────────┬────────┐\n\
                 │ 30 31 32 33 34 35 36 37 ┊ 38 39 61 62 63 64 65 0a │01234567┊89abcde_│\n\
                 └─────────────────────────┴─────────────────────────┴────────┴────────┘\n",
            );
    }
}
/// Tests for `--group-size` (bytes shown per hex group) and `--endianness`
/// (byte order *within* each group), plus the interaction of squeezing
/// ("*" rows for repeated lines) with the various display modes.
/// Little-endian reverses the bytes inside each hex group; the character
/// panel always stays in stream order.
mod group_and_endianness {
    use super::hexyl;
    use super::PrettyAssert;

    /// Two-byte groups, default (big-endian) byte order.
    #[test]
    fn group_2_bytes_be() {
        hexyl()
            .arg("ascii")
            .arg("--color=never")
            .arg("--group-size=2")
            .assert()
            .success()
            .stdout(
                "┌────────┬─────────────────────┬─────────────────────┬────────┬────────┐\n\
                 │00000000│ 3031 3233 3435 3637 ┊ 3839 6162 6364 650a │01234567┊89abcde_│\n\
                 └────────┴─────────────────────┴─────────────────────┴────────┴────────┘\n",
            );
    }

    /// Two-byte groups with the two bytes of each group swapped.
    #[test]
    fn group_2_bytes_le() {
        hexyl()
            .arg("ascii")
            .arg("--color=never")
            .arg("--group-size=2")
            .arg("--endianness=little")
            .assert()
            .success()
            .stdout(
                "┌────────┬─────────────────────┬─────────────────────┬────────┬────────┐\n\
                 │00000000│ 3130 3332 3534 3736 ┊ 3938 6261 6463 0a65 │01234567┊89abcde_│\n\
                 └────────┴─────────────────────┴─────────────────────┴────────┴────────┘\n",
            );
    }

    /// Four-byte groups, big-endian.
    #[test]
    fn group_4_bytes_be() {
        hexyl()
            .arg("ascii")
            .arg("--color=never")
            .arg("--group-size=4")
            .assert()
            .success()
            .stdout(
                "┌────────┬───────────────────┬───────────────────┬────────┬────────┐\n\
                 │00000000│ 30313233 34353637 ┊ 38396162 6364650a │01234567┊89abcde_│\n\
                 └────────┴───────────────────┴───────────────────┴────────┴────────┘\n",
            );
    }

    /// Four-byte groups with the bytes of each group reversed.
    #[test]
    fn group_4_bytes_le() {
        hexyl()
            .arg("ascii")
            .arg("--color=never")
            .arg("--group-size=4")
            .arg("--endianness=little")
            .assert()
            .success()
            .stdout(
                "┌────────┬───────────────────┬───────────────────┬────────┬────────┐\n\
                 │00000000│ 33323130 37363534 ┊ 62613938 0a656463 │01234567┊89abcde_│\n\
                 └────────┴───────────────────┴───────────────────┴────────┴────────┘\n",
            );
    }

    /// Eight-byte groups, big-endian.
    #[test]
    fn group_8_bytes_be() {
        hexyl()
            .arg("ascii")
            .arg("--color=never")
            .arg("--group-size=8")
            .assert()
            .success()
            .stdout(
                "┌────────┬──────────────────┬──────────────────┬────────┬────────┐\n\
                 │00000000│ 3031323334353637 ┊ 383961626364650a │01234567┊89abcde_│\n\
                 └────────┴──────────────────┴──────────────────┴────────┴────────┘\n",
            );
    }

    /// Eight-byte groups with the bytes of each group reversed.
    #[test]
    fn group_8_bytes_le() {
        hexyl()
            .arg("ascii")
            .arg("--color=never")
            .arg("--group-size=8")
            .arg("--endianness=little")
            .assert()
            .success()
            .stdout(
                "┌────────┬──────────────────┬──────────────────┬────────┬────────┐\n\
                 │00000000│ 3736353433323130 ┊ 0a65646362613938 │01234567┊89abcde_│\n\
                 └────────┴──────────────────┴──────────────────┴────────┴────────┘\n",
            );
    }

    /// Grouping also applies to `--plain` output.
    #[test]
    fn group_size_plain() {
        hexyl()
            .arg("ascii")
            .arg("--color=never")
            .arg("--plain")
            .arg("--group-size=2")
            .assert()
            .success()
            .stdout(" 3031 3233 3435 3637 3839 6162 6364 650a \n");
    }

    /// A final, incomplete group ("abc" = 3 bytes with group size 2) is
    /// padded with spaces rather than truncated or zero-filled.
    #[test]
    fn group_size_fill_space() {
        hexyl()
            .arg("--color=never")
            .arg("--group-size=2")
            .write_stdin("abc")
            .assert()
            .success()
            .stdout(
                "┌────────┬─────────────────────┬─────────────────────┬────────┬────────┐\n\
                 │00000000│ 6162 63 ┊ │abc ┊ │\n\
                 └────────┴─────────────────────┴─────────────────────┴────────┴────────┘\n",
            );
    }

    /// 3 is not an accepted `--group-size`; the invocation must fail.
    #[test]
    fn group_size_invalid() {
        hexyl()
            .arg("ascii")
            .arg("--color=never")
            .arg("--plain")
            .arg("--group-size=3")
            .assert()
            .failure();
    }

    /// Squeezed ("*") rows render correctly when the character panel is off.
    #[test]
    fn squeeze_no_chars() {
        hexyl()
            .arg("hello_world_elf64")
            .arg("--color=never")
            .arg("--skip=1024")
            .arg("--length=4096")
            .arg("--no-characters")
            .assert()
            .success()
            .pretty_stdout(
                "\
┌────────┬─────────────────────────┬─────────────────────────┐
│00000400│ 00 00 00 00 00 00 00 00 ┊ 00 00 00 00 00 00 00 00 │
│* │ ┊ │
│00001000│ ba 0e 00 00 00 b9 00 20 ┊ 40 00 bb 01 00 00 00 b8 │
│00001010│ 04 00 00 00 cd 80 b8 01 ┊ 00 00 00 cd 80 00 00 00 │
│00001020│ 00 00 00 00 00 00 00 00 ┊ 00 00 00 00 00 00 00 00 │
│* │ ┊ │
│00001400│ ┊ │
└────────┴─────────────────────────┴─────────────────────────┘
",
            );
    }

    /// Same as above but with a single panel (8 bytes per row).
    #[test]
    fn squeeze_no_chars_one_panel() {
        hexyl()
            .arg("hello_world_elf64")
            .arg("--color=never")
            .arg("--skip=1024")
            .arg("--length=4096")
            .arg("--no-characters")
            .arg("--panels=1")
            .assert()
            .success()
            .pretty_stdout(
                "\
┌────────┬─────────────────────────┐
│00000400│ 00 00 00 00 00 00 00 00 │
│* │ │
│00001000│ ba 0e 00 00 00 b9 00 20 │
│00001008│ 40 00 bb 01 00 00 00 b8 │
│00001010│ 04 00 00 00 cd 80 b8 01 │
│00001018│ 00 00 00 cd 80 00 00 00 │
│00001020│ 00 00 00 00 00 00 00 00 │
│* │ │
│00001400│ │
└────────┴─────────────────────────┘
",
            );
    }

    /// With `--no-position`, squeeze markers move into the byte panel
    /// (there is no offset column to carry them).
    #[test]
    fn squeeze_no_position() {
        hexyl()
            .arg("hello_world_elf64")
            .arg("--color=never")
            .arg("--skip=1024")
            .arg("--length=4096")
            .arg("--no-position")
            .assert()
            .success()
            .pretty_stdout(
                "\
┌─────────────────────────┬─────────────────────────┬────────┬────────┐
│ 00 00 00 00 00 00 00 00 ┊ 00 00 00 00 00 00 00 00 │⋄⋄⋄⋄⋄⋄⋄⋄┊⋄⋄⋄⋄⋄⋄⋄⋄│
│* ┊ │ ┊ │
│ ba 0e 00 00 00 b9 00 20 ┊ 40 00 bb 01 00 00 00 b8 │ו⋄⋄⋄×⋄ ┊@⋄ו⋄⋄⋄×│
│ 04 00 00 00 cd 80 b8 01 ┊ 00 00 00 cd 80 00 00 00 │•⋄⋄⋄××ו┊⋄⋄⋄××⋄⋄⋄│
│ 00 00 00 00 00 00 00 00 ┊ 00 00 00 00 00 00 00 00 │⋄⋄⋄⋄⋄⋄⋄⋄┊⋄⋄⋄⋄⋄⋄⋄⋄│
│* ┊ │ ┊ │
│* ┊ │ ┊ │
└─────────────────────────┴─────────────────────────┴────────┴────────┘
",
            );
    }

    /// `--no-position` together with a single panel.
    #[test]
    fn squeeze_no_position_one_panel() {
        hexyl()
            .arg("hello_world_elf64")
            .arg("--color=never")
            .arg("--skip=1024")
            .arg("--length=4096")
            .arg("--no-position")
            .arg("--panels=1")
            .assert()
            .success()
            .pretty_stdout(
                "\
┌─────────────────────────┬────────┐
│ 00 00 00 00 00 00 00 00 │⋄⋄⋄⋄⋄⋄⋄⋄│
│* │ │
│ ba 0e 00 00 00 b9 00 20 │ו⋄⋄⋄×⋄ │
│ 40 00 bb 01 00 00 00 b8 │@⋄ו⋄⋄⋄×│
│ 04 00 00 00 cd 80 b8 01 │•⋄⋄⋄××ו│
│ 00 00 00 cd 80 00 00 00 │⋄⋄⋄××⋄⋄⋄│
│ 00 00 00 00 00 00 00 00 │⋄⋄⋄⋄⋄⋄⋄⋄│
│* │ │
│* │ │
└─────────────────────────┴────────┘
",
            );
    }

    /// An odd number of panels plus a partial last row (4-byte remainder)
    /// must still squeeze and pad correctly.
    #[test]
    fn squeeze_odd_panels_remainder_bytes() {
        hexyl()
            .arg("hello_world_elf64")
            .arg("--color=never")
            .arg("--skip=1024")
            .arg("--length=4092") // 4 byte remainder
            .arg("--panels=3")
            .assert()
            .success()
            .pretty_stdout(
                "\
┌────────┬─────────────────────────┬─────────────────────────┬─────────────────────────┬────────┬────────┬────────┐
│00000400│ 00 00 00 00 00 00 00 00 ┊ 00 00 00 00 00 00 00 00 ┊ 00 00 00 00 00 00 00 00 │⋄⋄⋄⋄⋄⋄⋄⋄┊⋄⋄⋄⋄⋄⋄⋄⋄┊⋄⋄⋄⋄⋄⋄⋄⋄│
│* │ ┊ ┊ │ ┊ ┊ │
│00001000│ ba 0e 00 00 00 b9 00 20 ┊ 40 00 bb 01 00 00 00 b8 ┊ 04 00 00 00 cd 80 b8 01 │ו⋄⋄⋄×⋄ ┊@⋄ו⋄⋄⋄×┊•⋄⋄⋄××ו│
│00001018│ 00 00 00 cd 80 00 00 00 ┊ 00 00 00 00 00 00 00 00 ┊ 00 00 00 00 00 00 00 00 │⋄⋄⋄××⋄⋄⋄┊⋄⋄⋄⋄⋄⋄⋄⋄┊⋄⋄⋄⋄⋄⋄⋄⋄│
│00001030│ 00 00 00 00 00 00 00 00 ┊ 00 00 00 00 00 00 00 00 ┊ 00 00 00 00 00 00 00 00 │⋄⋄⋄⋄⋄⋄⋄⋄┊⋄⋄⋄⋄⋄⋄⋄⋄┊⋄⋄⋄⋄⋄⋄⋄⋄│
│* │ ┊ ┊ │ ┊ ┊ │
│000013f0│ 00 00 00 00 00 00 00 00 ┊ 00 00 00 00 ┊ │⋄⋄⋄⋄⋄⋄⋄⋄┊⋄⋄⋄⋄ ┊ │
└────────┴─────────────────────────┴─────────────────────────┴─────────────────────────┴────────┴────────┴────────┘
",
            );
    }

    /// Squeezing in `--plain` mode prints bare "*" lines.
    #[test]
    fn squeeze_plain() {
        hexyl()
            .arg("hello_world_elf64")
            .arg("--color=never")
            .arg("--skip=1024")
            .arg("--length=4096")
            .arg("--plain")
            .assert()
            .success()
            .pretty_stdout(
                " \
00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
*
ba 0e 00 00 00 b9 00 20 40 00 bb 01 00 00 00 b8
04 00 00 00 cd 80 b8 01 00 00 00 cd 80 00 00 00
00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
*
*
",
            );
    }

    /// A partial final row must interrupt the squeeze and be printed.
    #[test]
    fn squeeze_plain_remainder() {
        hexyl()
            .arg("hello_world_elf64")
            .arg("--color=never")
            .arg("--skip=1024")
            .arg("--length=4092") // 4 byte remainder
            .arg("--plain")
            .assert()
            .success()
            .pretty_stdout(
                " \
00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
*
ba 0e 00 00 00 b9 00 20 40 00 bb 01 00 00 00 b8
04 00 00 00 cd 80 b8 01 00 00 00 cd 80 00 00 00
00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
*
00 00 00 00 00 00 00 00 00 00 00 00
",
            );
    }
}
/// Tests for `--base`, which selects the numeral system of the byte panel.
mod base {
    use super::hexyl;
    use super::PrettyAssert;

    /// `--base=binary` renders each byte as 8 binary digits, widening the
    /// panel so only 8 bytes fit per row.
    #[test]
    fn base2() {
        hexyl()
            .arg("ascii")
            .arg("--plain")
            .arg("--base=binary")
            .assert()
            .success()
            .pretty_stdout(
                " 00110000 00110001 00110010 00110011 00110100 00110101 00110110 00110111 \n \
                 00111000 00111001 01100001 01100010 01100011 01100100 01100101 00001010 \n",
            );
    }
}
mod character_table {
use super::hexyl;
use super::PrettyAssert;
#[test]
fn ascii() {
hexyl()
.arg("hello_world_elf64")
.arg("--color=never")
.arg("--character-table=ascii")
.assert()
.success()
.pretty_stdout(
"┌────────┬─────────────────────────┬─────────────────────────┬────────┬────────┐
│00000000│ 7f 45 4c 46 02 01 01 00 ┊ 00 00 00 00 00 00 00 00 │.ELF....┊........│
│00000010│ 02 00 3e 00 01 00 00 00 ┊ 00 10 40 00 00 00 00 00 │..>.....┊..@.....│
│00000020│ 40 00 00 00 00 00 00 00 ┊ 28 20 00 00 00 00 00 00 │@.......┊( ......│
│00000030│ 00 00 00 00 40 00 38 00 ┊ 03 00 40 00 04 00 03 00 │....@.8.┊..@.....│
│00000040│ 01 00 00 00 04 00 00 00 ┊ 00 00 00 00 00 00 00 00 │........┊........│
│00000050│ 00 00 40 00 00 00 00 00 ┊ 00 00 40 00 00 00 00 00 │..@.....┊..@.....│
│00000060│ e8 00 00 00 00 00 00 00 ┊ e8 00 00 00 00 00 00 00 │........┊........│
│00000070│ 00 10 00 00 00 00 00 00 ┊ 01 00 00 00 05 00 00 00 │........┊........│
│00000080│ 00 10 00 00 00 00 00 00 ┊ 00 10 40 00 00 00 00 00 │........┊..@.....│
│00000090│ 00 10 40 00 00 00 00 00 ┊ 1d 00 00 00 00 00 00 00 │..@.....┊........│
│000000a0│ 1d 00 00 00 00 00 00 00 ┊ 00 10 00 00 00 00 00 00 │........┊........│
│000000b0│ 01 00 00 00 06 00 00 00 ┊ 00 20 00 00 00 00 00 00 │........┊. ......│
│000000c0│ 00 20 40 00 00 00 00 00 ┊ 00 20 40 00 00 00 00 00 │. @.....┊. @.....│
│000000d0│ 0e 00 00 00 00 00 00 00 ┊ 0e 00 00 00 00 00 00 00 │........┊........│
│000000e0│ 00 10 00 00 00 00 00 00 ┊ 00 00 00 00 00 00 00 00 │........┊........│
│000000f0│ 00 00 00 00 00 00 00 00 ┊ 00 00 00 00 00 00 00 00 │........┊........│
│* │ ┊ │ ┊ │
│00001000│ ba 0e 00 00 00 b9 00 20 ┊ 40 00 bb 01 00 00 00 b8 │....... ┊@.......│
│00001010│ 04 00 00 00 cd 80 b8 01 ┊ 00 00 00 cd 80 00 00 00 │........┊........│
│00001020│ 00 00 00 00 00 00 00 00 ┊ 00 00 00 00 00 00 00 00 │........┊........│
│* │ ┊ │ ┊ │
│00002000│ 48 65 6c 6c 6f 2c 20 77 ┊ 6f 72 6c 64 21 0a 00 2e │Hello, w┊orld!...│
│00002010│ 73 68 73 74 72 74 61 62 ┊ 00 2e 74 65 78 74 00 2e │shstrtab┊..text..│
│00002020│ 64 61 74 61 00 00 00 00 ┊ 00 00 00 00 00 00 00 00 │data....┊........│
│00002030│ 00 00 00 00 00 00 00 00 ┊ 00 00 00 00 00 00 00 00 │........┊........│
│* │ ┊ │ ┊ │
│00002060│ 00 00 00 00 00 00 00 00 ┊ 0b 00 00 00 01 00 00 00 │........┊........│
│00002070│ 06 00 00 00 00 00 00 00 ┊ 00 10 40 00 00 00 00 00 │........┊..@.....│
│00002080│ 00 10 00 00 00 00 00 00 ┊ 1d 00 00 00 00 00 00 00 │........┊........│
│00002090│ 00 00 00 00 00 00 00 00 ┊ 10 00 00 00 00 00 00 00 │........┊........│
│000020a0│ 00 00 00 00 00 00 00 00 ┊ 11 00 00 00 01 00 00 00 │........┊........│
│000020b0│ 03 00 00 00 00 00 00 00 ┊ 00 20 40 00 00 00 00 00 │........┊. @.....│
│000020c0│ 00 20 00 00 00 00 00 00 ┊ 0e 00 00 00 00 00 00 00 │. ......┊........│
│000020d0│ 00 00 00 00 00 00 00 00 ┊ 04 00 00 00 00 00 00 00 │........┊........│
│000020e0│ 00 00 00 00 00 00 00 00 ┊ 01 00 00 00 03 00 00 00 │........┊........│
│000020f0│ 00 00 00 00 00 00 00 00 ┊ 00 00 00 00 00 00 00 00 │........┊........│
│00002100│ 0e 20 00 00 00 00 00 00 ┊ 17 00 00 00 00 00 00 00 │. ......┊........│
│00002110│ 00 00 00 00 00 00 00 00 ┊ 01 00 00 00 00 00 00 00 │........┊........│
│00002120│ 00 00 00 00 00 00 00 00 ┊ │........┊ │
└────────┴─────────────────────────┴─────────────────────────┴────────┴────────┘
",
);
}
#[test]
fn codepage_437() {
hexyl()
.arg("hello_world_elf64")
.arg("--color=never")
.arg("--character-table=codepage-437")
.assert()
.success()
.pretty_stdout(
"┌────────┬─────────────────────────┬─────────────────────────┬────────┬────────┐
│00000000│ 7f 45 4c 46 02 01 01 00 ┊ 00 00 00 00 00 00 00 00 │⌂ELF☻☺☺⋄┊⋄⋄⋄⋄⋄⋄⋄⋄│
│00000010│ 02 00 3e 00 01 00 00 00 ┊ 00 10 40 00 00 00 00 00 │☻⋄>⋄☺⋄⋄⋄┊⋄►@⋄⋄⋄⋄⋄│
│00000020│ 40 00 00 00 00 00 00 00 ┊ 28 20 00 00 00 00 00 00 │@⋄⋄⋄⋄⋄⋄⋄┊( ⋄⋄⋄⋄⋄⋄│
│00000030│ 00 00 00 00 40 00 38 00 ┊ 03 00 40 00 04 00 03 00 │⋄⋄⋄⋄@⋄8⋄┊♥⋄@⋄♦⋄♥⋄│
| rust | Apache-2.0 | 2e2643782d6ced9b5ac75596169a79127d8e535a | 2026-01-04T15:43:36.733781Z | true |
sharkdp/hexyl | https://github.com/sharkdp/hexyl/blob/2e2643782d6ced9b5ac75596169a79127d8e535a/examples/simple.rs | examples/simple.rs | use std::io;
use hexyl::{BorderStyle, PrinterBuilder};
fn main() {
let input = [
0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a, 0x00, 0x00, 0x00, 0x0d, 0x49, 0x48, 0x44,
0x52, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x44, 0x08, 0x02, 0x00, 0x00, 0x00,
];
let stdout = io::stdout();
let mut handle = stdout.lock();
let mut printer = PrinterBuilder::new(&mut handle)
.show_color(true)
.show_char_panel(true)
.show_position_panel(true)
.with_border_style(BorderStyle::Unicode)
.enable_squeezing(false)
.num_panels(2)
.group_size(1)
.build();
printer.print_all(&input[..]).unwrap();
}
| rust | Apache-2.0 | 2e2643782d6ced9b5ac75596169a79127d8e535a | 2026-01-04T15:43:36.733781Z | false |
HigherOrderCO/Bend | https://github.com/HigherOrderCO/Bend/blob/d184863f03e796d1d657958a51dd6dd331ade92d/src/diagnostics.rs | src/diagnostics.rs | use TSPL::ParseError;
use crate::fun::{display::DisplayFn, Name, Source};
use std::{
collections::BTreeMap,
fmt::{Display, Formatter},
ops::Range,
};
pub const ERR_INDENT_SIZE: usize = 2;
/// Accumulates every diagnostic (error or warning) produced while processing
/// a book, grouped by where it originated.
#[derive(Debug, Clone, Default)]
pub struct Diagnostics {
  // BTreeMap gives a stable, deterministic ordering when displaying.
  pub diagnostics: BTreeMap<DiagnosticOrigin, Vec<Diagnostic>>,
  // Controls verbosity and the severity assigned to each warning type.
  pub config: DiagnosticsConfig,
}
/// Diagnostics configuration: verbosity plus the severity (allow / warning /
/// error) applied to each kind of warning.
#[derive(Debug, Clone, Copy)]
pub struct DiagnosticsConfig {
  pub verbose: bool,
  // One configurable severity per `WarningType` variant.
  pub irrefutable_match: Severity,
  pub redundant_match: Severity,
  pub unreachable_match: Severity,
  pub unused_definition: Severity,
  pub repeated_bind: Severity,
  pub recursion_cycle: Severity,
  pub missing_main: Severity,
  pub import_shadow: Severity,
}
/// A single diagnostic: a message, its severity, and the source it refers to
/// (often `Default::default()` when no location is known).
#[derive(Debug, Clone)]
pub struct Diagnostic {
  pub message: String,
  pub severity: Severity,
  pub source: Source,
}
/// The compilation stage or entity a diagnostic is attributed to.
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum DiagnosticOrigin {
  /// An error when parsing source code.
  Parsing,
  /// An error from the relationship between multiple top-level definitions.
  Book,
  /// An error in a function definition.
  Function(Name),
  /// An error in a compiled inet.
  Inet(String),
  /// An error during readback of hvm-core run results.
  Readback,
}
/// How a diagnostic is treated. The derived ordering follows the variant
/// order: `Allow < Warning < Error`.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub enum Severity {
  /// Not displayed at all.
  Allow,
  /// Displayed, but does not make the pass fail.
  Warning,
  /// Displayed and makes `fatal`/`has_errors` report failure.
  Error,
}
/// The classes of warnings the compiler can emit; each maps to a
/// configurable severity in `DiagnosticsConfig::warning_severity`.
#[derive(Debug, Clone, Copy)]
pub enum WarningType {
  IrrefutableMatch,
  RedundantMatch,
  UnreachableMatch,
  UnusedDefinition,
  RepeatedBind,
  RecursionCycle,
  MissingMain,
  ImportShadow,
}
impl Diagnostics {
  /// Creates an empty diagnostics collection with the given configuration.
  pub fn new(config: DiagnosticsConfig) -> Self {
    Self { diagnostics: Default::default(), config }
  }

  /// Records an error produced while parsing source code.
  pub fn add_parsing_error(&mut self, err: impl std::fmt::Display, source: Source) {
    self.add_diagnostic(err, Severity::Error, DiagnosticOrigin::Parsing, source);
  }

  /// Records a book-level error (not tied to a definition or location).
  pub fn add_book_error(&mut self, err: impl std::fmt::Display) {
    self.add_diagnostic(err, Severity::Error, DiagnosticOrigin::Book, Default::default());
  }

  /// Records an error attributed to definition `name`.
  pub fn add_function_error(&mut self, err: impl std::fmt::Display, name: Name, source: Source) {
    self.add_diagnostic(
      err,
      Severity::Error,
      DiagnosticOrigin::Function(name.def_name_from_generated()),
      source,
    );
  }

  /// Records an error attributed to the compiled inet named `def_name`.
  pub fn add_inet_error(&mut self, err: impl std::fmt::Display, def_name: String) {
    self.add_diagnostic(err, Severity::Error, DiagnosticOrigin::Inet(def_name), Default::default());
  }

  /// Records a warning for definition `def_name`; its effective severity
  /// (allow / warning / error) comes from the configuration.
  pub fn add_function_warning(
    &mut self,
    warn: impl std::fmt::Display,
    warn_type: WarningType,
    def_name: Name,
    source: Source,
  ) {
    let severity = self.config.warning_severity(warn_type);
    self.add_diagnostic(
      warn,
      severity,
      DiagnosticOrigin::Function(def_name.def_name_from_generated()),
      source,
    );
  }

  /// Records a book-level warning with its configured severity.
  pub fn add_book_warning(&mut self, warn: impl std::fmt::Display, warn_type: WarningType) {
    let severity = self.config.warning_severity(warn_type);
    self.add_diagnostic(warn, severity, DiagnosticOrigin::Book, Default::default());
  }

  /// Low-level insertion shared by all the `add_*` helpers above.
  pub fn add_diagnostic(
    &mut self,
    msg: impl std::fmt::Display,
    severity: Severity,
    orig: DiagnosticOrigin,
    source: Source,
  ) {
    let diag = Diagnostic { message: msg.to_string(), severity, source };
    self.diagnostics.entry(orig).or_default().push(diag)
  }

  /// Unwraps `result`, turning an `Err` into a recorded function error
  /// (with no source location) and returning `None`.
  pub fn take_rule_err<T, E: std::fmt::Display>(
    &mut self,
    result: Result<T, E>,
    def_name: Name,
  ) -> Option<T> {
    match result {
      Ok(t) => Some(t),
      Err(e) => {
        self.add_function_error(e, def_name, Default::default());
        None
      }
    }
  }

  /// Unwraps `result`, turning an `Err` into a recorded inet error and
  /// returning `None`.
  pub fn take_inet_err<T, E: std::fmt::Display>(
    &mut self,
    result: Result<T, E>,
    def_name: String,
  ) -> Option<T> {
    match result {
      Ok(t) => Some(t),
      Err(e) => {
        self.add_inet_error(e, def_name);
        None
      }
    }
  }

  /// Whether any recorded diagnostic has exactly the given severity.
  pub fn has_severity(&self, severity: Severity) -> bool {
    self.diagnostics.values().any(|errs| errs.iter().any(|e| e.severity == severity))
  }

  /// Whether any recorded diagnostic is an error.
  pub fn has_errors(&self) -> bool {
    self.has_severity(Severity::Error)
  }

  /// Checks if any error was emitted since the start of the pass,
  /// Returning all the current information as a `Err(Info)`, replacing `&mut self` with an empty one.
  /// Otherwise, returns the given arg as an `Ok(T)`.
  pub fn fatal<T>(&mut self, t: T) -> Result<T, Diagnostics> {
    if !self.has_errors() {
      Ok(t)
    } else {
      Err(std::mem::take(self))
    }
  }

  /// Returns a Display that prints the diagnostics with one of the given severities.
  pub fn display_with_severity(&self, severity: Severity) -> impl std::fmt::Display + '_ {
    DisplayFn(move |f| {
      // We want to print diagnostics information somewhat like this:
      // ```
      // In file A :
      //   In definition X :
      //     {error}
      //   In definition Y :
      //     {error}
      //
      // In file B :
      //   In compiled Inet Z :
      //     {error}
      //
      // Other diagnostics:
      //   In {...}
      // ```
      // The problem is, diagnostics data is currently structured as a mapping from something like
      // DiagnosticOrigin to Vec<(DiagnosticMessage, DiagnosticFile)>, and we would need something
      // like a mapping from DiagnosticFile to DiagnosticOrigin to Vec<DiagnosticMessage> in order
      // to print it cleanly. We might want to change it later to have this structure,
      // but meanwhile, we do the transformations below to make the goal possible.

      // Ignore diagnostics without the desired severity.
      let diagnostics = self
        .diagnostics
        .iter()
        .map(|(origin, diags)| (origin, diags.iter().filter(|diag| diag.severity == severity)));

      // Produce the structure described above.
      let groups: BTreeMap<&Option<String>, BTreeMap<&DiagnosticOrigin, Vec<&Diagnostic>>> = diagnostics
        .fold(BTreeMap::new(), |mut file_tree, (origin, diags)| {
          for diag in diags {
            // We need to allow this Clippy warning due to `Name` in `DiagnosticOrigin::Function`.
            // We know how it works, so it shouldn't be a problem.
            #[allow(clippy::mutable_key_type)]
            let file_group_entry = file_tree.entry(&diag.source.file).or_default();
            let origin_group_entry = file_group_entry.entry(origin).or_default();
            origin_group_entry.push(diag);
          }
          file_tree
        });

      // Now, we have a mapping from DiagnosticFile to DiagnosticOrigin to Vec<DiagnosticMessage>.
      // If the last file is `None`, it means we only have diagnostics with unknown source file.
      // In this case, we won't print a special message for them.
      let only_unknown_file_diagnostics = groups.keys().next_back() == Some(&&None);

      // Reverse the group iterator so `None` files go last.
      for (file, origin_to_diagnostics) in groups.iter().rev() {
        if !only_unknown_file_diagnostics {
          match &file {
            Some(name) => writeln!(f, "\x1b[1mIn \x1b[4m{}\x1b[0m\x1b[1m :\x1b[0m", name)?,
            None => writeln!(f, "\x1b[1mOther diagnostics:\x1b[0m")?,
          };
        }
        let mut has_msg = false;
        for (origin, diagnostics) in origin_to_diagnostics {
          let mut diagnostics = diagnostics.iter().peekable();
          if diagnostics.peek().is_some() {
            match origin {
              DiagnosticOrigin::Parsing => {
                for err in diagnostics {
                  writeln!(f, "{err}")?;
                }
              }
              DiagnosticOrigin::Book => {
                for err in diagnostics {
                  writeln!(f, "{err}")?;
                }
              }
              DiagnosticOrigin::Function(nam) => {
                writeln!(f, "\x1b[1mIn definition '\x1b[4m{}\x1b[0m\x1b[1m':\x1b[0m", nam)?;
                for err in diagnostics {
                  writeln!(f, "{:ERR_INDENT_SIZE$}{err}", "")?;
                }
              }
              DiagnosticOrigin::Inet(nam) => {
                writeln!(f, "\x1b[1mIn compiled inet '\x1b[4m{}\x1b[0m\x1b[1m':\x1b[0m", nam)?;
                for err in diagnostics {
                  writeln!(f, "{:ERR_INDENT_SIZE$}{err}", "")?;
                }
              }
              DiagnosticOrigin::Readback => {
                writeln!(f, "\x1b[1mDuring readback:\x1b[0m")?;
                for err in diagnostics {
                  writeln!(f, "{:ERR_INDENT_SIZE$}{err}", "")?;
                }
              }
            }
            has_msg = true;
          }
        }
        if has_msg {
          writeln!(f)?;
        }
      }
      Ok(())
    })
  }

  /// Returns a Display that prints every diagnostic's bare message,
  /// without any grouping headers or ANSI formatting.
  pub fn display_only_messages(&self) -> impl std::fmt::Display + '_ {
    DisplayFn(move |f| {
      for err in self.diagnostics.values().flatten() {
        writeln!(f, "{err}")?;
      }
      Ok(())
    })
  }
}
impl Display for Diagnostics {
  // Warnings are printed first, then errors, each under an underlined,
  // colored section header. Empty sections are skipped entirely.
  fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
    if self.has_severity(Severity::Warning) {
      write!(f, "\x1b[4m\x1b[1m\x1b[33mWarnings:\x1b[0m\n{}", self.display_with_severity(Severity::Warning))?;
    }
    if self.has_severity(Severity::Error) {
      write!(f, "\x1b[4m\x1b[1m\x1b[31mErrors:\x1b[0m\n{}", self.display_with_severity(Severity::Error))?;
    }
    Ok(())
  }
}
impl From<String> for Diagnostics {
fn from(value: String) -> Self {
Self {
diagnostics: BTreeMap::from_iter([(
DiagnosticOrigin::Book,
vec![Diagnostic { message: value, severity: Severity::Error, source: Default::default() }],
)]),
..Default::default()
}
}
}
impl From<ParseError> for Diagnostics {
  /// Transforms a parse error into `Diagnostics`.
  ///
  /// NOTE: `ParseError` does not carry the source code, so the error's
  /// `TextLocation` cannot be computed; the diagnostic is emitted with a
  /// default (empty) source, i.e. no range.
  fn from(value: ParseError) -> Self {
    Self {
      diagnostics: BTreeMap::from_iter([(
        DiagnosticOrigin::Parsing,
        vec![Diagnostic { message: value.into(), severity: Severity::Error, source: Default::default() }],
      )]),
      ..Default::default()
    }
  }
}
impl DiagnosticsConfig {
  /// Builds a config where every warning type shares the same `severity`,
  /// except `missing_main`, which is pinned to `Severity::Error`.
  pub fn new(severity: Severity, verbose: bool) -> Self {
    Self {
      irrefutable_match: severity,
      redundant_match: severity,
      unreachable_match: severity,
      unused_definition: severity,
      repeated_bind: severity,
      recursion_cycle: severity,
      import_shadow: severity,
      // Should only be changed manually, as a missing main is always a error to hvm
      missing_main: Severity::Error,
      verbose,
    }
  }

  /// Maps a warning type to its configured severity.
  pub fn warning_severity(&self, warn: WarningType) -> Severity {
    match warn {
      WarningType::UnusedDefinition => self.unused_definition,
      WarningType::RepeatedBind => self.repeated_bind,
      WarningType::RecursionCycle => self.recursion_cycle,
      WarningType::IrrefutableMatch => self.irrefutable_match,
      WarningType::RedundantMatch => self.redundant_match,
      WarningType::UnreachableMatch => self.unreachable_match,
      WarningType::MissingMain => self.missing_main,
      WarningType::ImportShadow => self.import_shadow,
    }
  }
}
impl Default for DiagnosticsConfig {
fn default() -> Self {
let mut cfg = Self::new(Severity::Warning, false);
cfg.recursion_cycle = Severity::Error;
cfg
}
}
impl Display for Diagnostic {
  // A diagnostic displays as its bare message; severity and origin
  // headers are added by the `Diagnostics`-level rendering.
  fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
    write!(f, "{}", self.message)
  }
}
impl Diagnostic {
  /// Renders this diagnostic prefixed by a header describing `origin`,
  /// using the same ANSI formatting as `Diagnostics::display_with_severity`.
  /// Parsing and book origins print the bare message with no header.
  pub fn display_with_origin<'a>(&'a self, origin: &'a DiagnosticOrigin) -> impl std::fmt::Display + 'a {
    DisplayFn(move |f| {
      match origin {
        DiagnosticOrigin::Parsing => writeln!(f, "{self}")?,
        DiagnosticOrigin::Book => writeln!(f, "{self}")?,
        DiagnosticOrigin::Function(nam) => {
          writeln!(f, "\x1b[1mIn definition '\x1b[4m{}\x1b[0m\x1b[1m':\x1b[0m", nam)?;
          writeln!(f, "{:ERR_INDENT_SIZE$}{self}", "")?;
        }
        DiagnosticOrigin::Inet(nam) => {
          writeln!(f, "\x1b[1mIn compiled inet '\x1b[4m{}\x1b[0m\x1b[1m':\x1b[0m", nam)?;
          writeln!(f, "{:ERR_INDENT_SIZE$}{self}", "")?;
        }
        DiagnosticOrigin::Readback => {
          writeln!(f, "\x1b[1mDuring readback:\x1b[0m")?;
          writeln!(f, "{:ERR_INDENT_SIZE$}{self}", "")?;
        }
      };
      Ok(())
    })
  }
}
#[derive(Debug, Copy, Clone, Hash, PartialEq, PartialOrd, Ord, Eq)]
pub struct TextLocation {
  /// Zero-based line index.
  pub line: usize,
  // Zero-based column; incremented once per byte, so multi-byte UTF-8
  // characters advance it by more than one — TODO confirm this is intended.
  pub char: usize,
}

impl TextLocation {
  pub fn new(line: usize, char: usize) -> Self {
    TextLocation { line, char }
  }

  /// Advances this location over `bytes`, starting a new line at every
  /// `\n`. Shared by `from_byte_loc` and `TextSpan::from_byte_span` so the
  /// line/column counting logic exists in exactly one place.
  fn advance(mut self, bytes: &[u8]) -> Self {
    for &byte in bytes {
      if byte == b'\n' {
        self.line += 1;
        self.char = 0;
      } else {
        self.char += 1;
      }
    }
    self
  }

  /// Transforms a `usize` byte index on `code` into a `TextLocation`.
  ///
  /// Indices past the end of `code` are clamped to the end.
  pub fn from_byte_loc(code: &str, loc: usize) -> Self {
    let end = loc.min(code.len());
    TextLocation::new(0, 0).advance(&code.as_bytes()[..end])
  }
}

#[derive(Debug, Copy, Clone, Hash, PartialEq, PartialOrd, Ord, Eq)]
pub struct TextSpan {
  pub start: TextLocation,
  pub end: TextLocation,
}

impl TextSpan {
  pub fn new(start: TextLocation, end: TextLocation) -> Self {
    TextSpan { start, end }
  }

  /// Transforms a `usize` byte range on `code` into a `TextSpan`.
  ///
  /// # Panics
  /// Panics if `span.start > span.end` (an inverted range would otherwise
  /// make the scan run to the end of `code` for nothing).
  pub fn from_byte_span(code: &str, span: Range<usize>) -> Self {
    assert!(span.start <= span.end);
    let bytes = code.as_bytes();
    // Clamp both endpoints to the code length, like `from_byte_loc` does.
    let lo = span.start.min(bytes.len());
    let hi = span.end.min(bytes.len());
    // Scan once up to the start, then continue the same scan to the end,
    // instead of duplicating the counting loop twice.
    let start = TextLocation::new(0, 0).advance(&bytes[..lo]);
    let end = start.advance(&bytes[lo..hi]);
    TextSpan::new(start, end)
  }
}
| rust | Apache-2.0 | d184863f03e796d1d657958a51dd6dd331ade92d | 2026-01-04T15:41:39.511038Z | false |
HigherOrderCO/Bend | https://github.com/HigherOrderCO/Bend/blob/d184863f03e796d1d657958a51dd6dd331ade92d/src/lib.rs | src/lib.rs | use crate::{
fun::{book_to_hvm, net_to_term::net_to_term, term_to_net::Labels, Book, Ctx, Term},
hvm::{
add_recursive_priority::add_recursive_priority,
check_net_size::{check_net_sizes, MAX_NET_SIZE_CUDA},
eta_reduce::eta_reduce_hvm_net,
hvm_book_show_pretty,
inline::inline_hvm_book,
mutual_recursion,
prune::prune_hvm_book,
},
};
use diagnostics::{Diagnostics, DiagnosticsConfig, ERR_INDENT_SIZE};
use net::hvm_to_net::hvm_to_net;
pub mod diagnostics;
// `Name` triggers this warning, but it's safe because we're not using its internal mutability.
#[allow(clippy::mutable_key_type)]
pub mod fun;
pub mod hvm;
pub mod imp;
pub mod imports;
pub mod net;
mod utils;
pub use fun::load_book::{load_file_to_book, load_to_book};
pub const ENTRY_POINT: &str = "main";
pub const HVM1_ENTRY_POINT: &str = "Main";
pub const HVM_OUTPUT_END_MARKER: &str = "Result: ";
/// Checks `book` for compilation problems without producing any output,
/// returning the diagnostics accumulated along the way.
pub fn check_book(
  book: &mut Book,
  diagnostics_cfg: DiagnosticsConfig,
  compile_opts: CompileOpts,
) -> Result<Diagnostics, Diagnostics> {
  // TODO: Do the checks without having to do full compilation
  let res = compile_book(book, compile_opts, diagnostics_cfg, None)?;
  Ok(res.diagnostics)
}
/// Compiles a `Book` to an HVM book, running the optimization passes
/// enabled in `opts` and collecting diagnostics along the way.
///
/// `args` are optional terms applied to the entry point before compilation.
pub fn compile_book(
  book: &mut Book,
  opts: CompileOpts,
  diagnostics_cfg: DiagnosticsConfig,
  args: Option<Vec<Term>>,
) -> Result<CompileResult, Diagnostics> {
  let mut diagnostics = desugar_book(book, opts.clone(), diagnostics_cfg, args)?;
  let (mut hvm_book, labels) = book_to_hvm(book, &mut diagnostics)?;

  if opts.eta {
    hvm_book.defs.values_mut().for_each(eta_reduce_hvm_net);
  }

  mutual_recursion::check_cycles(&hvm_book, &mut diagnostics)?;

  // NOTE(review): eta-reduction runs both before and after the cycle check,
  // but `check_cycles` only takes `&hvm_book`, so the second pass looks
  // redundant — confirm whether it is intentional.
  if opts.eta {
    hvm_book.defs.values_mut().for_each(eta_reduce_hvm_net);
  }

  if opts.inline {
    if let Err(e) = inline_hvm_book(&mut hvm_book) {
      diagnostics.add_book_error(format!("During inlining:\n{:ERR_INDENT_SIZE$}{}", "", e));
    }
    diagnostics.fatal(())?;
  }

  if opts.prune {
    let prune_entrypoints = vec![book.hvm_entrypoint().to_string()];
    prune_hvm_book(&mut hvm_book, &prune_entrypoints);
  }

  if opts.check_net_size {
    check_net_sizes(&hvm_book, &mut diagnostics, &opts.target_architecture)?;
  }

  add_recursive_priority(&mut hvm_book);

  Ok(CompileResult { hvm_book, labels, diagnostics })
}
/// Runs every desugaring pass over `book`, lowering the surface language to
/// the core form expected by `book_to_hvm`.
///
/// Pass ordering is significant — note the repeated `make_var_names_unique`
/// calls and unbound-variable sanity checks between stages.
pub fn desugar_book(
  book: &mut Book,
  opts: CompileOpts,
  diagnostics_cfg: DiagnosticsConfig,
  args: Option<Vec<Term>>,
) -> Result<Diagnostics, Diagnostics> {
  let mut ctx = Ctx::new(book, diagnostics_cfg);
  ctx.check_shared_names();
  ctx.set_entrypoint();
  ctx.book.encode_adts(opts.adt_encoding);
  ctx.fix_match_defs()?;
  ctx.apply_args(args)?;
  ctx.desugar_open()?;
  ctx.book.encode_builtins();
  ctx.resolve_refs()?;
  ctx.desugar_match_defs()?;
  ctx.fix_match_terms()?;
  ctx.book.lift_local_defs();
  ctx.desugar_bend()?;
  ctx.desugar_fold()?;
  ctx.desugar_with_blocks()?;
  ctx.check_unbound_vars()?;
  // Auto match linearization
  ctx.book.make_var_names_unique();
  ctx.book.desugar_use();
  match opts.linearize_matches {
    OptLevel::Disabled => (),
    OptLevel::Alt => ctx.book.linearize_match_binds(),
    OptLevel::Enabled => ctx.book.linearize_matches(),
  }
  // Manual match linearization
  ctx.book.linearize_match_with();
  if opts.type_check {
    type_check_book(&mut ctx)?;
  }
  ctx.book.encode_matches(opts.adt_encoding);
  // sanity check
  ctx.check_unbound_vars()?;
  ctx.book.make_var_names_unique();
  ctx.book.desugar_use();
  ctx.book.make_var_names_unique();
  ctx.book.linearize_vars();
  // sanity check
  ctx.check_unbound_vars()?;
  if opts.float_combinators {
    ctx.book.float_combinators(MAX_NET_SIZE_CUDA);
  }
  // sanity check
  ctx.check_unbound_refs()?;
  // Optimizing passes
  ctx.prune(opts.prune);
  if opts.merge {
    ctx.book.merge_definitions();
  }
  ctx.book.expand_main();
  ctx.book.make_var_names_unique();
  if !ctx.info.has_errors() {
    Ok(ctx.info)
  } else {
    Err(ctx.info)
  }
}
/// Runs the type-checking passes in order: reject untyped terms, resolve
/// type constructors, then check the book's types.
pub fn type_check_book(ctx: &mut Ctx) -> Result<(), Diagnostics> {
  ctx.check_untyped_terms()?;
  ctx.resolve_type_ctrs()?;
  ctx.type_check()?;
  Ok(())
}
/// Compiles `book` and executes it through the external `hvm` binary,
/// returning the readback `Term`, the run-stats string, and readback
/// diagnostics. `cmd` is the hvm subcommand to invoke.
///
/// As written, a successful run always yields `Some(..)`.
pub fn run_book(
  mut book: Book,
  run_opts: RunOpts,
  compile_opts: CompileOpts,
  diagnostics_cfg: DiagnosticsConfig,
  args: Option<Vec<Term>>,
  cmd: &str,
) -> Result<Option<(Term, String, Diagnostics)>, Diagnostics> {
  let CompileResult { hvm_book: core_book, labels, diagnostics } =
    compile_book(&mut book, compile_opts.clone(), diagnostics_cfg, args)?;
  // TODO: Printing should be taken care by the cli module, but we'd
  // like to print any warnings before running so that the user can
  // cancel the run if a problem is detected.
  eprint!("{diagnostics}");
  let out = run_hvm(&core_book, cmd, &run_opts)?;
  let (net, stats) = parse_hvm_output(&out)?;
  let (term, diags) =
    readback_hvm_net(&net, &book, &labels, run_opts.linear_readback, compile_opts.adt_encoding);
  Ok(Some((term, stats, diags)))
}
/// Converts an hvm-core result net back into a Bend `Term`.
///
/// Expands generated definitions and resugars strings/lists according to
/// `adt_encoding`; any readback problems are reported in the returned
/// `Diagnostics` instead of failing.
pub fn readback_hvm_net(
  net: &::hvm::ast::Net,
  book: &Book,
  labels: &Labels,
  linear: bool,
  adt_encoding: AdtEncoding,
) -> (Term, Diagnostics) {
  let mut diags = Diagnostics::default();
  let net = hvm_to_net(net);
  let mut term = net_to_term(&net, book, labels, linear, &mut diags);
  #[allow(clippy::mutable_key_type)] // Safe to allow, we know how `Name` works.
  let recursive_defs = book.recursive_defs();
  term.expand_generated(book, &recursive_defs);
  term.resugar_strings(adt_encoding);
  term.resugar_lists(adt_encoding);
  (term, diags)
}
/// Runs an HVM book by invoking HVM as a subprocess.
///
/// The book is written to `.out.hvm` in the current working directory,
/// executed with `<hvm_path> <cmd> .out.hvm`, and the temp file is removed
/// afterwards. HVM's stdout is piped through `filter_hvm_output` on a
/// separate thread so user output streams while the result is captured.
// NOTE(review): the temp-file name is fixed, so two concurrent runs in the
// same directory would clobber each other — confirm this is acceptable.
fn run_hvm(book: &::hvm::ast::Book, cmd: &str, run_opts: &RunOpts) -> Result<String, String> {
  let out_path = ".out.hvm";
  std::fs::write(out_path, hvm_book_show_pretty(book)).map_err(|x| x.to_string())?;
  let mut process = std::process::Command::new(run_opts.hvm_path.clone())
    .arg(cmd)
    .arg(out_path)
    .stdout(std::process::Stdio::piped())
    .stderr(std::process::Stdio::inherit())
    .spawn()
    .map_err(|e| format!("Failed to start hvm process.\n{e}"))?;
  let child_out = std::mem::take(&mut process.stdout).expect("Failed to attach to hvm output");
  let thread_out = std::thread::spawn(move || filter_hvm_output(child_out, std::io::stdout()));
  // The exit status is discarded (`let _`); a failed run surfaces instead as
  // a missing result marker inside `filter_hvm_output`.
  let _ = process.wait().expect("Failed to wait on hvm subprocess");
  if let Err(e) = std::fs::remove_file(out_path) {
    eprintln!("Error removing HVM output file. {e}");
  }
  let result = thread_out.join().map_err(|_| "HVM output thread panicked.".to_string())??;
  Ok(result)
}
/// Reads the final output from HVM and separates the extra information.
///
/// The first line is parsed as the result net; everything after the first
/// newline is returned verbatim as the stats string.
fn parse_hvm_output(out: &str) -> Result<(::hvm::ast::Net, String), String> {
  let Some((result, stats)) = out.split_once('\n') else {
    return Err(format!(
      "Failed to parse result from HVM (unterminated result).\nOutput from HVM was:\n{:?}",
      out
    ));
  };
  let mut p = ::hvm::ast::CoreParser::new(result);
  let Ok(net) = p.parse_net() else {
    return Err(format!("Failed to parse result from HVM (invalid net).\nOutput from HVM was:\n{:?}", out));
  };
  Ok((net, stats.to_string()))
}
/// Filters the output from HVM, separating user output from the
/// result, used for readback and displaying stats.
///
/// Everything read before `HVM_OUTPUT_END_MARKER` is forwarded to
/// `output` as the program's own prints; everything after the marker is
/// captured and returned for readback.
///
/// Buffers the output from HVM to try to parse it.
fn filter_hvm_output(
  mut stream: impl std::io::Read + Send,
  mut output: impl std::io::Write + Send,
) -> Result<String, String> {
  let mut capturing = false;
  let mut result = String::new();
  let mut buf = [0u8; 1024];
  loop {
    let num_read = match stream.read(&mut buf) {
      Ok(n) => n,
      Err(e) => {
        // Read error: stop filtering; the "no result" path below reports it.
        eprintln!("{e}");
        break;
      }
    };
    if num_read == 0 {
      // EOF: the HVM process closed its stdout.
      break;
    }
    let new_buf = &buf[..num_read];
    // TODO: Does this lead to broken characters if printing too much at once?
    // NOTE(review): a multi-byte char — or the marker itself — could be split
    // across two 1024-byte reads; a split marker would be missed entirely.
    // Confirm whether that can happen in practice.
    let new_str = String::from_utf8_lossy(new_buf);
    if capturing {
      // Store the result
      result.push_str(&new_str);
    } else if let Some((before, after)) = new_str.split_once(HVM_OUTPUT_END_MARKER) {
      // If result started in the middle of the buffer, print what came before and start capturing.
      if let Err(e) = output.write_all(before.as_bytes()) {
        eprintln!("Error writing HVM output. {e}");
      };
      result.push_str(after);
      capturing = true;
    } else {
      // Otherwise, don't capture anything
      if let Err(e) = output.write_all(new_buf) {
        eprintln!("Error writing HVM output. {e}");
      }
    }
  }
  if capturing {
    Ok(result)
  } else {
    // No marker was seen: flush what was forwarded and report failure.
    output.flush().map_err(|e| format!("Error flushing HVM output. {e}"))?;
    let msg = "HVM output had no result (An error likely occurred)".to_string();
    Err(msg)
  }
}
/// Options controlling how a compiled book is run and read back.
#[derive(Clone, Debug)]
pub struct RunOpts {
  /// Show explicit duplications in the readback term.
  pub linear_readback: bool,
  /// Pretty-print the result term.
  pub pretty: bool,
  /// Command or path used to invoke the HVM binary.
  pub hvm_path: String,
}

impl Default for RunOpts {
  /// Plain readback, compact printing, and `hvm` resolved from `PATH`.
  fn default() -> Self {
    RunOpts { linear_readback: false, pretty: false, hvm_path: "hvm".to_string() }
  }
}
/// Level at which an optional compiler pass runs.
#[derive(Clone, Copy, Debug, Default)]
pub enum OptLevel {
  Disabled,
  #[default]
  Enabled,
  Alt,
}

impl OptLevel {
  /// Whether the pass runs at all (any level other than `Disabled`).
  pub fn enabled(&self) -> bool {
    match self {
      OptLevel::Disabled => false,
      OptLevel::Enabled | OptLevel::Alt => true,
    }
  }

  /// Whether the pass runs at the `Enabled` level specifically.
  pub fn is_extra(&self) -> bool {
    match self {
      OptLevel::Enabled => true,
      OptLevel::Disabled | OptLevel::Alt => false,
    }
  }
}
/// The backend an HVM book is compiled or run for.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum CompilerTarget {
  C,
  Cuda,
  /// No specific target (e.g. checking, desugaring, or the Rust runtime).
  Unknown,
}

/// Options for the compilation from Bend to HVM.
#[derive(Clone, Debug)]
pub struct CompileOpts {
  /// The Compiler target architecture
  pub target_architecture: CompilerTarget,
  /// Enables [hvm::eta_reduce].
  pub eta: bool,
  /// Enables [fun::transform::definition_pruning] and [hvm::prune].
  pub prune: bool,
  /// Enables [fun::transform::linearize_matches].
  pub linearize_matches: OptLevel,
  /// Enables [fun::transform::float_combinators].
  pub float_combinators: bool,
  /// Enables [fun::transform::definition_merge]
  pub merge: bool,
  /// Enables [hvm::inline].
  pub inline: bool,
  /// Enables [hvm::check_net_size].
  pub check_net_size: bool,
  /// Enables [type_check_book].
  pub type_check: bool,
  /// Determines the encoding of constructors and matches.
  pub adt_encoding: AdtEncoding,
}
impl CompileOpts {
  /// Returns a copy of this configuration with every optimizing pass
  /// enabled; target, net-size check and ADT encoding are kept as-is.
  #[must_use]
  pub fn set_all(self) -> Self {
    Self {
      eta: true,
      prune: true,
      float_combinators: true,
      merge: true,
      linearize_matches: OptLevel::Enabled,
      type_check: true,
      inline: true,
      ..self
    }
  }

  /// Returns a copy of this configuration with every optimizing pass
  /// disabled; target, type check, net-size check and ADT encoding are
  /// kept as-is.
  #[must_use]
  pub fn set_no_all(self) -> Self {
    Self {
      eta: false,
      prune: false,
      linearize_matches: OptLevel::Disabled,
      float_combinators: false,
      merge: false,
      inline: false,
      ..self
    }
  }

  /// Warns about option combinations that can make strict-mode programs
  /// expand infinitely.
  pub fn check_for_strict(&self) {
    if !self.float_combinators {
      println!(
        "Warning: Running in strict mode without enabling the float_combinators pass can lead to some functions expanding infinitely."
      );
    }
    if !self.linearize_matches.enabled() {
      println!(
        "Warning: Running in strict mode without enabling the linearize_matches pass can lead to some functions expanding infinitely."
      );
    }
  }
}
impl Default for CompileOpts {
  /// Enables eta, linearize_matches, float_combinators.
  /// Also enables the net-size check and type checking.
  /// Uses num-scott ADT encoding.
  fn default() -> Self {
    Self {
      target_architecture: CompilerTarget::Unknown,
      eta: true,
      prune: false,
      linearize_matches: OptLevel::Enabled,
      float_combinators: true,
      merge: false,
      inline: false,
      check_net_size: true,
      type_check: true,
      adt_encoding: AdtEncoding::NumScott,
    }
  }
}
/// Strategy used to encode algebraic data types and their matches.
#[derive(Clone, Copy, Debug)]
pub enum AdtEncoding {
  Scott,
  NumScott,
}

impl std::fmt::Display for AdtEncoding {
  /// Writes the variant name verbatim.
  fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
    let name = match self {
      AdtEncoding::Scott => "Scott",
      AdtEncoding::NumScott => "NumScott",
    };
    f.write_str(name)
  }
}
/// The result of compiling a Bend book to HVM.
pub struct CompileResult {
  /// Warnings and non-fatal errors produced during compilation.
  pub diagnostics: Diagnostics,
  /// The compiled HVM book.
  pub hvm_book: ::hvm::ast::Book,
  /// Label information produced by compilation, passed to readback.
  pub labels: Labels,
}
/// Runs `f`, growing the stack first if it is close to overflowing.
///
/// Delegates to `stacker::maybe_grow` with a 32 KiB red zone and 1 MiB
/// of additional stack, so recursive traversals don't blow the stack.
fn maybe_grow<R, F>(f: F) -> R
where
  F: FnOnce() -> R,
{
  stacker::maybe_grow(1024 * 32, 1024 * 1024, f)
}
| rust | Apache-2.0 | d184863f03e796d1d657958a51dd6dd331ade92d | 2026-01-04T15:41:39.511038Z | false |
HigherOrderCO/Bend | https://github.com/HigherOrderCO/Bend/blob/d184863f03e796d1d657958a51dd6dd331ade92d/src/utils.rs | src/utils.rs | /// A macro for creating iterators that can have statically known
/// different types. Useful for iterating over tree children, where
/// each tree node variant yields a different iterator type.
#[macro_export]
macro_rules! multi_iterator {
  ($Iter:ident { $($Variant:ident),* $(,)? }) => {
    // One enum variant per possible concrete iterator type; each variant
    // name doubles as the generic parameter naming that iterator's type.
    #[derive(Debug, Clone)]
    enum $Iter<$($Variant),*> {
      $($Variant { iter: $Variant }),*
    }
    impl<$($Variant),*> $Iter<$($Variant),*> {
      // Per-variant constructor taking anything convertible into that
      // variant's iterator type (hence the non_snake_case fn names).
      $(
        #[allow(non_snake_case)]
        fn $Variant(iter: impl IntoIterator<IntoIter = $Variant>) -> Self {
          $Iter::$Variant { iter: iter.into_iter() }
        }
      )*
    }
    // The combined enum is itself an iterator whenever every variant's
    // iterator yields the same item type; calls are dispatched by match.
    impl<T, $($Variant: Iterator<Item = T>),*> Iterator for $Iter<$($Variant),*> {
      type Item = T;
      fn next(&mut self) -> Option<T> {
        match self { $($Iter::$Variant { iter } => iter.next()),* }
      }
      fn size_hint(&self) -> (usize, Option<usize>) {
        match self { $($Iter::$Variant { iter } => iter.size_hint()),* }
      }
    }
    impl<T, $($Variant: DoubleEndedIterator<Item = T>),*> DoubleEndedIterator for $Iter<$($Variant),*> {
      fn next_back(&mut self) -> Option<T> {
        match self { $($Iter::$Variant { iter } => iter.next_back()),* }
      }
    }
  };
}
| rust | Apache-2.0 | d184863f03e796d1d657958a51dd6dd331ade92d | 2026-01-04T15:41:39.511038Z | false |
HigherOrderCO/Bend | https://github.com/HigherOrderCO/Bend/blob/d184863f03e796d1d657958a51dd6dd331ade92d/src/main.rs | src/main.rs | use bend::{
check_book, compile_book, desugar_book,
diagnostics::{Diagnostics, DiagnosticsConfig, Severity},
fun::{Book, Name},
hvm::hvm_book_show_pretty,
imports::DefaultLoader,
load_file_to_book, run_book, AdtEncoding, CompileOpts, CompilerTarget, OptLevel, RunOpts,
};
use clap::{Args, CommandFactory, Parser, Subcommand};
use std::{
path::{Path, PathBuf},
process::ExitCode,
};
// Top-level CLI. NOTE: use `//` comments on clap-derive fields — a `///`
// doc comment would become the field's help text.
#[derive(Parser, Debug)]
#[command(author, version, about, long_about = None)]
struct Cli {
  #[command(subcommand)]
  pub mode: Mode,
  // When set, the loaded book is printed before processing.
  #[arg(short, long, global = true)]
  pub verbose: bool,
  // Overrides both the HVM_BIN env var and the default "hvm".
  #[arg(long, global = true, help = "HVM command or path to HVM binary")]
  pub hvm_bin: Option<String>,
  #[arg(short = 'e', long, global = true, help = "Use other entrypoint rather than main or Main")]
  pub entrypoint: Option<String>,
}
// Subcommands. `RunC` is aliased to plain `run`, making the C runtime the
// default way to execute a program.
#[derive(Subcommand, Clone, Debug)]
enum Mode {
  /// Checks that the program is syntactically and semantically correct.
  Check {
    #[arg(
      short = 'O',
      value_delimiter = ' ',
      action = clap::ArgAction::Append,
      long_help = r#"Enables or disables the given optimizations
float_combinators is enabled by default on strict mode."#,
    )]
    comp_opts: Vec<OptArgs>,
    #[command(flatten)]
    warn_opts: CliWarnOpts,
    #[arg(help = "Path to the input file")]
    path: PathBuf,
  },
  /// Compiles the program and runs it with the Rust HVM implementation.
  RunRs(RunArgs),
  /// Compiles the program and runs it with the C HVM implementation.
  #[command(alias = "run")]
  RunC(RunArgs),
  /// Compiles the program and runs it with the Cuda HVM implementation.
  RunCu(RunArgs),
  /// Compiles the program to hvm and prints to stdout.
  GenHvm(GenArgs),
  /// Compiles the program to standalone C and prints to stdout.
  GenC(GenArgs),
  /// Compiles the program to standalone Cuda and prints to stdout.
  GenCu(GenArgs),
  /// Runs the lambda-term level desugaring passes.
  Desugar {
    #[arg(
      short = 'O',
      value_delimiter = ' ',
      action = clap::ArgAction::Append,
      long_help = r#"Enables or disables the given optimizations
float_combinators is enabled by default on strict mode."#,
    )]
    comp_opts: Vec<OptArgs>,
    #[arg(short = 'p', help = "Debug and normalization pretty printing")]
    pretty: bool,
    #[command(flatten)]
    warn_opts: CliWarnOpts,
    #[arg(help = "Path to the input file")]
    path: PathBuf,
  },
}
// Arguments shared by all run-* subcommands.
#[derive(Args, Clone, Debug)]
struct RunArgs {
  #[arg(short = 'p', help = "Debug and normalization pretty printing")]
  pretty: bool,
  #[command(flatten)]
  run_opts: CliRunOpts,
  #[arg(
    short = 'O',
    value_delimiter = ' ',
    action = clap::ArgAction::Append,
    long_help = r#"Enables or disables the given optimizations
float_combinators is enabled by default on strict mode."#,
  )]
  comp_opts: Vec<OptArgs>,
  #[command(flatten)]
  warn_opts: CliWarnOpts,
  #[arg(help = "Path to the input file")]
  path: PathBuf,
  // Trailing CLI arguments are parsed as Bend terms and applied to main.
  #[arg(value_parser = |arg: &str| bend::fun::parser::FunParser::new(Name::new(""), arg, false).parse_term())]
  arguments: Option<Vec<bend::fun::Term>>,
}
// Arguments shared by all gen-* subcommands.
#[derive(Args, Clone, Debug)]
struct GenArgs {
  #[arg(
    short = 'O',
    value_delimiter = ' ',
    action = clap::ArgAction::Append,
    long_help = r#"Enables or disables the given optimizations
float_combinators is enabled by default on strict mode."#,
  )]
  comp_opts: Vec<OptArgs>,
  #[command(flatten)]
  warn_opts: CliWarnOpts,
  #[arg(help = "Path to the input file")]
  path: PathBuf,
}
// Runtime-only flags, flattened into RunArgs.
#[derive(Args, Clone, Debug)]
struct CliRunOpts {
  #[arg(short = 'l', help = "Linear readback (show explicit dups)")]
  linear: bool,
  #[arg(short = 's', long = "stats", help = "Shows runtime stats and rewrite counts")]
  print_stats: bool,
}
// Warning severity flags (-W/-D/-A). The group allows mixing them; their
// relative order on the command line is recovered later in
// set_warning_cfg_from_cli, since clap's derive output loses it.
#[derive(Args, Debug, Clone)]
#[group(multiple = true)]
struct CliWarnOpts {
  #[arg(
    short = 'W',
    long = "warn",
    value_delimiter = ' ',
    action = clap::ArgAction::Append,
    help = "Show the specified compilation warning",
  )]
  pub warns: Vec<WarningArgs>,
  #[arg(
    short = 'D',
    long = "deny",
    value_delimiter = ' ',
    action = clap::ArgAction::Append,
    help = "Deny the specified compilation warning",
  )]
  pub denies: Vec<WarningArgs>,
  #[arg(
    short = 'A',
    long = "allow",
    value_delimiter = ' ',
    action = clap::ArgAction::Append,
    help = "Allow the specified compilation warning",
  )]
  pub allows: Vec<WarningArgs>,
}
// Values accepted by the -O flag. Each optimization has an enabling and a
// `No`-prefixed disabling variant; `All`/`NoAll` are presets applied in
// order with the rest (see compile_opts_from_cli).
#[derive(clap::ValueEnum, Clone, Debug)]
pub enum OptArgs {
  All,
  NoAll,
  Eta,
  NoEta,
  Prune,
  NoPrune,
  LinearizeMatches,
  LinearizeMatchesAlt,
  NoLinearizeMatches,
  FloatCombinators,
  NoFloatCombinators,
  Merge,
  NoMerge,
  Inline,
  NoInline,
  CheckNetSize,
  NoCheckNetSize,
  AdtScott,
  AdtNumScott,
  TypeCheck,
  NoTypeCheck,
}
/// Builds the compiler options from the CLI `-O` flags.
///
/// Flags are applied in the order given, so later flags override earlier
/// ones, including the `all`/`no-all` presets. Eta reduction is turned
/// off by default for every target other than C.
///
/// Takes a slice rather than `&Vec` (clippy `ptr_arg`); existing callers
/// passing `&Vec<OptArgs>` coerce transparently.
fn compile_opts_from_cli(args: &[OptArgs], compiler_target: CompilerTarget) -> CompileOpts {
  use OptArgs::*;
  let mut opts = CompileOpts { target_architecture: compiler_target, ..CompileOpts::default() };
  if opts.target_architecture != CompilerTarget::C {
    opts.eta = false;
  }
  for arg in args {
    match arg {
      All => opts = opts.set_all(),
      NoAll => opts = opts.set_no_all(),
      Eta => opts.eta = true,
      NoEta => opts.eta = false,
      Prune => opts.prune = true,
      NoPrune => opts.prune = false,
      FloatCombinators => opts.float_combinators = true,
      NoFloatCombinators => opts.float_combinators = false,
      Merge => opts.merge = true,
      NoMerge => opts.merge = false,
      Inline => opts.inline = true,
      NoInline => opts.inline = false,
      CheckNetSize => opts.check_net_size = true,
      NoCheckNetSize => opts.check_net_size = false,
      TypeCheck => opts.type_check = true,
      NoTypeCheck => opts.type_check = false,
      LinearizeMatches => opts.linearize_matches = OptLevel::Enabled,
      LinearizeMatchesAlt => opts.linearize_matches = OptLevel::Alt,
      NoLinearizeMatches => opts.linearize_matches = OptLevel::Disabled,
      AdtScott => opts.adt_encoding = AdtEncoding::Scott,
      AdtNumScott => opts.adt_encoding = AdtEncoding::NumScott,
    }
  }
  opts
}
// Warning names accepted by -W/-D/-A.
#[derive(clap::ValueEnum, Clone, Debug)]
pub enum WarningArgs {
  All,
  IrrefutableMatch,
  RedundantMatch,
  UnreachableMatch,
  UnusedDefinition,
  RepeatedBind,
  RecursionCycle,
  ImportShadow,
  MissingMain,
}
/// Entry point: parses the CLI, runs the selected mode, and turns any
/// failure into a non-zero exit code after printing its diagnostics.
fn main() -> ExitCode {
  #[cfg(not(feature = "cli"))]
  compile_error!("The 'cli' feature is needed for the Bend cli");

  match execute_cli_mode(Cli::parse()) {
    Ok(()) => ExitCode::SUCCESS,
    Err(diagnostics) => {
      eprint!("{diagnostics}");
      ExitCode::FAILURE
    }
  }
}
/// Dispatches the parsed CLI to the requested compiler mode.
///
/// Returns the accumulated diagnostics on failure; warnings of
/// successful runs are printed to stderr along the way.
fn execute_cli_mode(mut cli: Cli) -> Result<(), Diagnostics> {
  let arg_verbose = cli.verbose;
  let entrypoint = cli.entrypoint.take();
  // Shared loader: parses the file (and imports), overrides the entrypoint
  // if one was given, and optionally dumps the parsed book.
  let load_book = |path: &Path, diag: DiagnosticsConfig| -> Result<Book, Diagnostics> {
    let package_loader = DefaultLoader::new(path);
    let mut book = load_file_to_book(path, package_loader, diag)?;
    book.entrypoint = entrypoint.map(Name::new);
    if arg_verbose {
      println!("{book}");
    }
    Ok(book)
  };
  // Path/command for the HVM binary
  // CLI option -> Env var -> Default
  let hvm_bin = if let Some(hvm_bin) = cli.hvm_bin {
    hvm_bin
  } else if let Ok(hvm_bin) = std::env::var("HVM_BIN") {
    hvm_bin
  } else {
    "hvm".to_string()
  };
  // HVM subcommand names corresponding to each of our gen/run modes.
  let gen_cmd = match &cli.mode {
    Mode::GenC(..) => "gen-c",
    Mode::GenCu(..) => "gen-cu",
    _ => "gen",
  };
  let run_cmd = match &cli.mode {
    Mode::RunC(..) => "run-c",
    Mode::RunRs(..) => "run",
    Mode::RunCu(..) => "run-cu",
    _ => "run-c",
  };
  let compiler_target = match &cli.mode {
    Mode::RunC(..) => CompilerTarget::C,
    Mode::GenC(..) => CompilerTarget::C,
    Mode::RunCu(..) => CompilerTarget::Cuda,
    Mode::GenCu(..) => CompilerTarget::Cuda,
    _ => CompilerTarget::Unknown,
  };
  match cli.mode {
    Mode::Check { comp_opts, warn_opts, path } => {
      let diagnostics_cfg = set_warning_cfg_from_cli(DiagnosticsConfig::default(), warn_opts);
      let compile_opts = compile_opts_from_cli(&comp_opts, compiler_target);
      let mut book = load_book(&path, diagnostics_cfg)?;
      let diagnostics = check_book(&mut book, diagnostics_cfg, compile_opts)?;
      eprintln!("{}", diagnostics);
    }
    Mode::GenHvm(GenArgs { comp_opts, warn_opts, path, .. }) => {
      let diagnostics_cfg = set_warning_cfg_from_cli(DiagnosticsConfig::default(), warn_opts);
      let opts = compile_opts_from_cli(&comp_opts, compiler_target);
      let mut book = load_book(&path, diagnostics_cfg)?;
      let compile_res = compile_book(&mut book, opts, diagnostics_cfg, None)?;
      // Diagnostics to stderr, compiled book to stdout so it can be piped.
      eprint!("{}", compile_res.diagnostics);
      println!("{}", hvm_book_show_pretty(&compile_res.hvm_book));
    }
    Mode::RunC(RunArgs { pretty, run_opts, comp_opts, warn_opts, path, arguments })
    | Mode::RunCu(RunArgs { pretty, run_opts, comp_opts, warn_opts, path, arguments })
    | Mode::RunRs(RunArgs { pretty, run_opts, comp_opts, warn_opts, path, arguments }) => {
      let CliRunOpts { linear, print_stats } = run_opts;
      let diagnostics_cfg =
        set_warning_cfg_from_cli(DiagnosticsConfig::new(Severity::Allow, arg_verbose), warn_opts);
      let compile_opts = compile_opts_from_cli(&comp_opts, compiler_target);
      compile_opts.check_for_strict();
      let run_opts = RunOpts { linear_readback: linear, pretty, hvm_path: hvm_bin };
      let book = load_book(&path, diagnostics_cfg)?;
      // `run_book` may return None (no printable result); only print when
      // a term was read back.
      if let Some((term, stats, diags)) =
        run_book(book, run_opts, compile_opts, diagnostics_cfg, arguments, run_cmd)?
      {
        eprint!("{diags}");
        if pretty {
          println!("Result:\n{}", term.display_pretty(0));
        } else {
          println!("Result: {}", term);
        }
        if print_stats {
          println!("{stats}");
        }
      }
    }
    Mode::GenC(GenArgs { comp_opts, warn_opts, path })
    | Mode::GenCu(GenArgs { comp_opts, warn_opts, path }) => {
      let diagnostics_cfg = set_warning_cfg_from_cli(DiagnosticsConfig::default(), warn_opts);
      let opts = compile_opts_from_cli(&comp_opts, compiler_target);
      let mut book = load_book(&path, diagnostics_cfg)?;
      let compile_res = compile_book(&mut book, opts, diagnostics_cfg, None)?;
      // Write the HVM book to a temp file and ask HVM itself to generate
      // the standalone C/Cuda source from it.
      let out_path = ".out.hvm";
      std::fs::write(out_path, hvm_book_show_pretty(&compile_res.hvm_book)).map_err(|x| x.to_string())?;
      let gen_fn = |out_path: &str| {
        let mut process = std::process::Command::new(hvm_bin);
        process.arg(gen_cmd).arg(out_path);
        process.output().map_err(|e| format!("While running hvm: {e}"))
      };
      let std::process::Output { stdout, stderr, status } = gen_fn(out_path)?;
      let out = String::from_utf8_lossy(&stdout);
      let err = String::from_utf8_lossy(&stderr);
      let status = if !status.success() { status.to_string() } else { String::new() };
      if let Err(e) = std::fs::remove_file(out_path) {
        eprintln!("Error removing HVM output file. {e}");
      }
      eprintln!("{err}");
      println!("{out}");
      println!("{status}");
    }
    Mode::Desugar { path, comp_opts, warn_opts, pretty } => {
      let diagnostics_cfg = set_warning_cfg_from_cli(DiagnosticsConfig::default(), warn_opts);
      let opts = compile_opts_from_cli(&comp_opts, compiler_target);
      let mut book = load_book(&path, diagnostics_cfg)?;
      let diagnostics = desugar_book(&mut book, opts, diagnostics_cfg, None)?;
      eprint!("{diagnostics}");
      if pretty {
        println!("{}", book.display_pretty())
      } else {
        println!("{book}");
      }
    }
  };
  Ok(())
}
/// Applies the -W/-D/-A warning flags to a diagnostics configuration.
///
/// Clap's derive API parses the three flag kinds into separate vectors,
/// losing their relative order on the command line — but order matters,
/// since a later flag must override an earlier one for the same warning.
/// To recover it, this re-inspects the raw argument matches and applies
/// each flag in the order it appeared.
fn set_warning_cfg_from_cli(mut cfg: DiagnosticsConfig, warn_opts: CliWarnOpts) -> DiagnosticsConfig {
  // Sets one warning (or all of them) to the given severity.
  fn set(cfg: &mut DiagnosticsConfig, severity: Severity, cli_val: WarningArgs) {
    match cli_val {
      WarningArgs::All => {
        cfg.irrefutable_match = severity;
        cfg.redundant_match = severity;
        cfg.unreachable_match = severity;
        cfg.unused_definition = severity;
        cfg.repeated_bind = severity;
        cfg.recursion_cycle = severity;
        cfg.import_shadow = severity;
      }
      WarningArgs::IrrefutableMatch => cfg.irrefutable_match = severity,
      WarningArgs::RedundantMatch => cfg.redundant_match = severity,
      WarningArgs::UnreachableMatch => cfg.unreachable_match = severity,
      WarningArgs::UnusedDefinition => cfg.unused_definition = severity,
      WarningArgs::RepeatedBind => cfg.repeated_bind = severity,
      WarningArgs::RecursionCycle => cfg.recursion_cycle = severity,
      WarningArgs::ImportShadow => cfg.import_shadow = severity,
      WarningArgs::MissingMain => cfg.missing_main = severity, // TODO: Should `WarningArgs::All` modify this as well?
    }
  }
  // Re-parse argv to get at the ordered list of matched arg ids.
  let cmd = Cli::command();
  let matches = cmd.get_matches();
  let subcmd_name = matches.subcommand_name().expect("To have a subcommand");
  let arg_matches = matches.subcommand_matches(subcmd_name).expect("To have a subcommand");
  if let Some(warn_opts_ids) = arg_matches.get_many::<clap::Id>("CliWarnOpts") {
    // Walk the ids in command-line order, consuming each kind's parsed
    // values in step. NOTE(review): the `unwrap`s assume the id list and
    // the parsed vectors always have matching lengths — confirm clap
    // guarantees this for grouped Append args.
    let mut allows = warn_opts.allows.into_iter();
    let mut warns = warn_opts.warns.into_iter();
    let mut denies = warn_opts.denies.into_iter();
    for id in warn_opts_ids {
      match id.as_ref() {
        "allows" => set(&mut cfg, Severity::Allow, allows.next().unwrap()),
        "denies" => set(&mut cfg, Severity::Error, denies.next().unwrap()),
        "warns" => set(&mut cfg, Severity::Warning, warns.next().unwrap()),
        _ => unreachable!(),
      }
    }
  }
  cfg
}
| rust | Apache-2.0 | d184863f03e796d1d657958a51dd6dd331ade92d | 2026-01-04T15:41:39.511038Z | false |
HigherOrderCO/Bend | https://github.com/HigherOrderCO/Bend/blob/d184863f03e796d1d657958a51dd6dd331ade92d/src/imp/order_kwargs.rs | src/imp/order_kwargs.rs | use crate::{
fun::{parser::ParseBook, Name},
imp::{Definition, Expr, Stmt},
};
use indexmap::IndexMap;
impl Definition {
  /// Traverses the program's definitions and adjusts the order of keyword arguments
  /// in call/constructor expressions to match the order specified in the function or constructor definition.
  ///
  /// Errors are prefixed with the function name for context.
  pub fn order_kwargs(&mut self, book: &ParseBook) -> Result<(), String> {
    // `use_map` tracks in-scope `use x = y` aliases during the traversal.
    let use_map = &mut IndexMap::new();
    self.body.order_kwargs(book, use_map).map_err(|e| format!("In function '{}':\n {}", self.name, e))
  }
}
impl Stmt {
  /// Recursively orders keyword arguments in every expression reachable
  /// from this statement.
  ///
  /// `use_map` maps `use`-aliases to the variable they stand for, so a
  /// call through an alias can still find the callee's parameter order.
  fn order_kwargs(&mut self, book: &ParseBook, use_map: &mut IndexMap<Name, Name>) -> Result<(), String> {
    match self {
      Stmt::LocalDef { def, nxt } => {
        // Nested definitions start a fresh traversal with their own scope.
        def.order_kwargs(book)?;
        nxt.order_kwargs(book, use_map)?;
      }
      Stmt::Assign { val, nxt, .. } => {
        val.order_kwargs(book, use_map)?;
        if let Some(nxt) = nxt {
          nxt.order_kwargs(book, use_map)?;
        }
      }
      Stmt::Ask { val, nxt, .. } => {
        val.order_kwargs(book, use_map)?;
        if let Some(nxt) = nxt {
          nxt.order_kwargs(book, use_map)?;
        }
      }
      Stmt::InPlace { val, nxt, .. } => {
        val.order_kwargs(book, use_map)?;
        nxt.order_kwargs(book, use_map)?;
      }
      Stmt::If { cond, then, otherwise, nxt } => {
        cond.order_kwargs(book, use_map)?;
        then.order_kwargs(book, use_map)?;
        otherwise.order_kwargs(book, use_map)?;
        if let Some(nxt) = nxt {
          nxt.order_kwargs(book, use_map)?;
        }
      }
      Stmt::Match { arg, arms, nxt, .. } => {
        arg.order_kwargs(book, use_map)?;
        for arm in arms {
          arm.rgt.order_kwargs(book, use_map)?;
        }
        if let Some(nxt) = nxt {
          nxt.order_kwargs(book, use_map)?;
        }
      }
      Stmt::Switch { arg, arms, nxt, .. } => {
        arg.order_kwargs(book, use_map)?;
        for arm in arms {
          arm.order_kwargs(book, use_map)?;
        }
        if let Some(nxt) = nxt {
          nxt.order_kwargs(book, use_map)?;
        }
      }
      Stmt::Fold { arg, arms, nxt, .. } => {
        arg.order_kwargs(book, use_map)?;
        for arm in arms {
          arm.rgt.order_kwargs(book, use_map)?;
        }
        if let Some(nxt) = nxt {
          nxt.order_kwargs(book, use_map)?;
        }
      }
      Stmt::Bend { bnd: _, arg, cond, step, base, nxt } => {
        for arg in arg {
          arg.order_kwargs(book, use_map)?;
        }
        cond.order_kwargs(book, use_map)?;
        step.order_kwargs(book, use_map)?;
        base.order_kwargs(book, use_map)?;
        if let Some(nxt) = nxt {
          nxt.order_kwargs(book, use_map)?;
        }
      }
      Stmt::With { typ: _, bod, nxt } => {
        bod.order_kwargs(book, use_map)?;
        if let Some(nxt) = nxt {
          nxt.order_kwargs(book, use_map)?;
        }
      }
      Stmt::Open { typ: _, var: _, nxt } => {
        nxt.order_kwargs(book, use_map)?;
      }
      Stmt::Use { nam, val: bod, nxt } => {
        // Only var-to-var `use` statements become aliases; the alias is
        // scoped to `nxt` and removed afterwards.
        // NOTE(review): if `nam` shadowed an existing alias, `pop` drops
        // the new entry but does not restore the shadowed one — confirm
        // whether shadowing aliases is possible here.
        if let Expr::Var { nam: bod } = bod.as_ref() {
          use_map.insert(nam.clone(), bod.clone());
          nxt.order_kwargs(book, use_map)?;
          use_map.pop();
        } else {
          bod.order_kwargs(book, use_map)?;
          nxt.order_kwargs(book, use_map)?;
        }
      }
      Stmt::Return { term } => term.order_kwargs(book, use_map)?,
      Stmt::Err => {}
    }
    Ok(())
  }
}
impl Expr {
  /// Recursively orders keyword arguments in this expression and all of
  /// its subexpressions.
  fn order_kwargs(&mut self, book: &ParseBook, use_map: &mut IndexMap<Name, Name>) -> Result<(), String> {
    match self {
      // Named arguments are only allowed when directly calling a named function.
      Expr::Call { fun, args, kwargs } => {
        if !kwargs.is_empty() {
          if let Expr::Var { nam } = fun.as_ref() {
            if let Some(names) = get_args_def_or_ctr(nam, book, use_map) {
              go_order_kwargs(&names, args, kwargs)?;
            } else {
              return Err(format!(
                "Named args are only allowed when calling a named function, not when calling variable '{nam}'."
              ));
            }
          } else {
            // TODO: Print expression
            return Err(
              "Named args are only allowed when calling a named function, not when calling an expression."
                .to_string(),
            );
          }
        }
        // Recurse into the callee and all argument expressions (kwargs may
        // still hold entries when ordering was skipped above).
        fun.order_kwargs(book, use_map)?;
        for arg in args {
          arg.order_kwargs(book, use_map)?;
        }
        for (_, arg) in kwargs {
          arg.order_kwargs(book, use_map)?;
        }
      }
      Expr::Lam { bod, .. } => bod.order_kwargs(book, use_map)?,
      Expr::Opr { lhs, rhs, .. } => {
        lhs.order_kwargs(book, use_map)?;
        rhs.order_kwargs(book, use_map)?;
      }
      Expr::Lst { els } | Expr::Tup { els } | Expr::Sup { els } => {
        for el in els {
          el.order_kwargs(book, use_map)?;
        }
      }
      Expr::LstMap { term, iter, cond, .. } => {
        term.order_kwargs(book, use_map)?;
        iter.order_kwargs(book, use_map)?;
        if let Some(cond) = cond {
          cond.order_kwargs(book, use_map)?;
        }
      }
      // Constructor literals always order their kwargs; an unknown
      // constructor name is an error.
      Expr::Ctr { name, args, kwargs } => match get_args_def_or_ctr(name, book, use_map) {
        Some(names) => {
          go_order_kwargs(&names, args, kwargs)?;
          for arg in args {
            arg.order_kwargs(book, use_map)?;
          }
        }
        _ => return Err(format!("Constructor '{name}' not found.")),
      },
      Expr::Map { entries } => {
        for entry in entries {
          entry.1.order_kwargs(book, use_map)?;
        }
      }
      Expr::MapGet { nam: _, key } => {
        key.order_kwargs(book, use_map)?;
      }
      Expr::TreeNode { left, right } => {
        left.order_kwargs(book, use_map)?;
        right.order_kwargs(book, use_map)?;
      }
      Expr::TreeLeaf { val } => {
        val.order_kwargs(book, use_map)?;
      }
      // Leaves: nothing to reorder.
      Expr::Era | Expr::Var { .. } | Expr::Chn { .. } | Expr::Num { .. } | Expr::Str { .. } => {}
    }
    Ok(())
  }
}
fn go_order_kwargs(
names: &[Name],
args: &mut Vec<Expr>,
kwargs: &mut Vec<(Name, Expr)>,
) -> Result<(), String> {
if args.len() + kwargs.len() != names.len() {
return Err(
"Named args are only allowed when calling a function with the exact number of arguments.".to_string(),
);
}
let mut kwargs: IndexMap<Name, Expr> = IndexMap::from_iter(kwargs.drain(..));
let remaining_names = &names[args.len()..];
for name in remaining_names {
if let Some(arg) = kwargs.shift_remove(name) {
args.push(arg);
} else {
return Err(format!("Named arg '{name}' is missing."));
}
}
if let Some(name) = kwargs.keys().next() {
return Err(format!("Unexpected named arg in function call {}.", name));
}
Ok(())
}
/// Looks up the ordered parameter names of a constructor or function,
/// resolving a `use` alias first if one applies.
///
/// Returns `None` when `name` is neither a known constructor nor a
/// function definition.
fn get_args_def_or_ctr(name: &Name, book: &ParseBook, use_map: &IndexMap<Name, Name>) -> Option<Vec<Name>> {
  let name = use_map.get(name).unwrap_or(name);
  #[allow(clippy::manual_map)]
  if let Some(adt_nam) = book.ctrs.get(name) {
    // Constructor: field names, in declaration order.
    Some(book.adts[adt_nam].ctrs[name].fields.iter().map(|f| f.nam.clone()).collect())
  } else if let Some(def) = book.fun_defs.get(name) {
    // Function: names bound by the first rule's patterns, in order.
    Some(def.rules[0].pats.iter().flat_map(|p| p.binds().flatten().cloned()).collect())
  } else {
    None
  }
}
| rust | Apache-2.0 | d184863f03e796d1d657958a51dd6dd331ade92d | 2026-01-04T15:41:39.511038Z | false |
HigherOrderCO/Bend | https://github.com/HigherOrderCO/Bend/blob/d184863f03e796d1d657958a51dd6dd331ade92d/src/imp/parser.rs | src/imp/parser.rs | use crate::{
fun::{
parser::{is_num_char, make_ctr_type, make_fn_type, Indent, ParseResult, ParserCommons},
Adt, AdtCtr, CtrField, HvmDefinition, Name, Num, Op, Source, SourceKind, Type, STRINGS,
},
imp::{AssignPattern, Definition, Expr, InPlaceOp, MatchArm, Stmt},
maybe_grow,
};
use TSPL::Parser;
/// Parser for the imperative ("imp") syntax of Bend.
pub struct ImpParser<'i> {
  /// Name of the file being parsed, used when building source spans.
  pub file: Name,
  /// The full input text.
  pub input: &'i str,
  /// Current byte offset into `input`.
  pub index: usize,
  /// Whether the definitions being parsed are compiler builtins.
  pub builtin: bool,
}
impl<'a> ImpParser<'a> {
  /// Creates a parser over `input`, positioned at the beginning.
  pub fn new(file: Name, input: &'a str, builtin: bool) -> Self {
    Self { file, input, index: 0, builtin }
  }
  /// Parses a `def` function definition and returns it together with the
  /// indentation level following it.
  pub fn parse_function_def(&mut self, indent: Indent) -> ParseResult<(Definition, Indent)> {
    // def name(arg1: type1, arg2: type2, ...) -> type:
    //   body
    if indent != Indent::Val(0) {
      let msg = "Indentation error. Functions defined with 'def' must be at the start of the line.";
      let idx = *self.index();
      return self.with_ctx(Err(msg), idx..idx + 1);
    }
    // TODO: checked vs unchecked functions
    let (mut def, nxt_indent) = self.parse_def_aux(indent)?;
    // Tag the definition's origin (builtin vs user-written).
    def.source.kind = if self.builtin { SourceKind::Builtin } else { SourceKind::User };
    Ok((def, nxt_indent))
  }
  /// Parses a `type` datatype definition:
  /// `type Name(vars...):` followed by one indented constructor per line.
  pub fn parse_type_def(&mut self, mut indent: Indent) -> ParseResult<(Adt, Indent)> {
    if indent != Indent::Val(0) {
      let msg = "Indentation error. Types defined with 'type' must be at the start of the line.";
      let idx = *self.index();
      return self.with_ctx(Err(msg), idx..idx + 1);
    }
    let ini_idx = *self.index();
    self.parse_keyword("type")?;
    self.skip_trivia_inline()?;
    let type_name = self.parse_restricted_name("datatype")?;
    self.skip_trivia_inline()?;
    // Optional type-variable list: `type Name(a, b):`.
    let type_vars = if self.try_consume_exactly("(") {
      self.list_like(|p| p.parse_var_name(), "", ")", ",", true, 0)?
    } else {
      vec![]
    };
    self.skip_trivia_inline()?;
    self.consume_exactly(":")?;
    self.consume_new_line()?;
    // Constructors live one indentation level deeper than the header.
    indent.enter_level();
    self.consume_indent_exactly(indent)?;
    let mut ctrs = Vec::new();
    let mut nxt_indent = indent;
    // Keep reading variants while lines stay at the constructor level.
    while nxt_indent == indent {
      ctrs.push(self.parse_type_def_variant(&type_name, &type_vars)?);
      if !self.is_eof() {
        self.consume_new_line()?;
      }
      nxt_indent = self.consume_indent_at_most(indent)?;
    }
    indent.exit_level();
    let ctrs = ctrs.into_iter().map(|ctr| (ctr.name.clone(), ctr)).collect();
    let source = Source::from_file_span(&self.file, self.input, ini_idx..self.index, self.builtin);
    let adt = Adt { name: type_name, vars: type_vars, ctrs, source };
    Ok((adt, nxt_indent))
  }
  /// Parses an `object` definition — a datatype with a single constructor
  /// of the same name.
  pub fn parse_object(&mut self, indent: Indent) -> ParseResult<(Adt, Indent)> {
    // object Pair(a, b) { fst: a, snd: b }
    if indent != Indent::Val(0) {
      let msg = "Indentation error. Types defined with 'object' must be at the start of the line.";
      let idx = *self.index();
      return self.with_ctx(Err(msg), idx..idx + 1);
    }
    let ini_idx = *self.index();
    self.parse_keyword("object")?;
    self.skip_trivia_inline()?;
    let name = self.parse_top_level_name()?;
    self.skip_trivia_inline()?;
    // Optional type-variable list.
    let type_vars = if self.starts_with("(") {
      self.list_like(|p| p.parse_var_name(), "(", ")", ",", true, 0)?
    } else {
      vec![]
    };
    self.skip_trivia_inline()?;
    // Optional field list; an object with no braces has no fields.
    let fields = if self.starts_with("{") {
      self.list_like(|p| p.parse_variant_field(), "{", "}", ",", true, 0)?
    } else {
      vec![]
    };
    let field_types = fields.iter().map(|f| f.typ.clone()).collect::<Vec<_>>();
    let end_idx = *self.index();
    self.check_repeated_ctr_fields(&fields, &name, ini_idx..end_idx)?;
    if !self.is_eof() {
      self.consume_new_line()?;
    }
    let nxt_indent = self.advance_newlines()?;
    // The object type and its single constructor share the same name.
    let typ = make_ctr_type(name.clone(), &field_types, &type_vars);
    let ctr = AdtCtr { name: name.clone(), typ, fields };
    let ctrs = [(name.clone(), ctr)].into_iter().collect();
    let source = Source::from_file_span(&self.file, self.input, ini_idx..end_idx, self.builtin);
    let adt = Adt { name, vars: type_vars, ctrs, source };
    Ok((adt, nxt_indent))
  }
  /// Parses an `hvm` definition — a definition whose body is a raw HVM
  /// net, handed off to HVM's own net parser.
  pub fn parse_hvm(&mut self) -> ParseResult<(HvmDefinition, Indent)> {
    let ini_idx = *self.index();
    self.parse_keyword("hvm")?;
    self.skip_trivia_inline()?;
    let name = self.parse_var_name()?;
    self.skip_trivia_inline()?;
    // Optional `-> type` annotation; untyped hvm defs get `Any`.
    let typ = self.parse_return_type()?.unwrap_or(Type::Any);
    let typ = make_fn_type(vec![], typ);
    self.skip_trivia_inline()?;
    self.consume_exactly(":")?;
    self.consume_new_line()?;
    // TODO: This will have the wrong index
    let net_idx = *self.index();
    // Delegate the body to HVM's net parser, then advance our cursor past
    // however much input it consumed.
    let mut p = hvm::ast::CoreParser::new(&self.input[net_idx..]);
    let body = p.parse_net()?;
    *self.index() = net_idx + *p.index();
    let source = Source::from_file_span(&self.file, self.input, ini_idx..self.index, self.builtin);
    let def = HvmDefinition { name: name.clone(), typ, body, source };
    let nxt_indent = self.advance_newlines()?;
    Ok((def, nxt_indent))
  }
  /// Parses one constructor line of a `type` definition.
  ///
  /// The constructor is namespaced under its type: `Type/Ctr`.
  fn parse_type_def_variant(&mut self, type_name: &Name, type_vars: &[Name]) -> ParseResult<AdtCtr> {
    let ini_idx = *self.index();
    let name = self.parse_top_level_name()?;
    let name = Name::new(format!("{type_name}/{name}"));
    self.skip_trivia_inline()?;
    // Optional `{ field: type, ... }` list; a bare name has no fields.
    let fields = if self.try_consume_exactly("{") {
      self.list_like(|p| p.parse_variant_field(), "", "}", ",", true, 0)?
    } else {
      vec![]
    };
    let field_types = fields.iter().map(|f| f.typ.clone()).collect::<Vec<_>>();
    let end_idx = *self.index();
    self.check_repeated_ctr_fields(&fields, &name, ini_idx..end_idx)?;
    let typ = make_ctr_type(type_name.clone(), &field_types, type_vars);
    Ok(AdtCtr { name, typ, fields })
  }
  /// Parses one constructor field: `[~]name[: type]`.
  ///
  /// A leading `~` marks the field as recursive; a missing type
  /// annotation defaults to `Any`.
  fn parse_variant_field(&mut self) -> ParseResult<CtrField> {
    let rec = self.try_consume_exactly("~");
    self.skip_trivia_inline()?;
    let nam = self.parse_var_name()?;
    self.skip_trivia_inline()?;
    let typ = if self.try_consume_exactly(":") { self.parse_type_expr()? } else { Type::Any };
    Ok(CtrField { nam, typ, rec })
  }
  /// Parses a primary (non-compound) expression: lambdas, parenthesised
  /// expressions, map/sup/list/tree literals, numbers, strings, chars,
  /// unscoped vars, era, and plain variables.
  ///
  /// `inline` controls whether trivia-skipping may cross newlines.
  fn parse_primary_expr(&mut self, inline: bool) -> ParseResult<Expr> {
    if inline {
      self.skip_trivia_inline()?;
    } else {
      self.skip_trivia();
    }
    // NOTE(review): `|` (non-lazy or) evaluates both sides, so the `λ`
    // consume is attempted even after `lambda` matched. That only differs
    // from `||` on inputs like "lambdaλ…" — confirm this is intentional.
    if self.try_parse_keyword("lambda") | self.try_consume_exactly("λ") {
      // A lambda binder; a `$` prefix marks an unscoped variable.
      fn parse_lam_var(p: &mut ImpParser) -> ParseResult<(Name, bool)> {
        if p.starts_with("$") {
          p.advance_one();
          Ok((p.parse_var_name()?, true))
        } else {
          Ok((p.parse_var_name()?, false))
        }
      }
      let names = self.list_like(|p| parse_lam_var(p), "", ":", ",", false, 1)?;
      let bod = self.parse_expr(inline, false)?;
      Ok(Expr::Lam { names, bod: Box::new(bod) })
    } else if self.starts_with("(") {
      self.advance_one();
      let expr = self.parse_expr(inline, true)?;
      self.consume(")")?;
      Ok(expr)
    } else if self.starts_with("{") {
      // Map or Sup
      self.parse_map_or_sup()
    } else if self.starts_with("[") {
      // List or Comprehension
      self.parse_list_or_comprehension()
    } else if self.starts_with("![") {
      // Tree Node
      self.parse_tree_node()
    } else if self.starts_with("!") {
      // Tree Leaf
      self.parse_tree_leaf(inline)
    } else if self.starts_with("`") {
      // Symbol
      Ok(Expr::Num { val: Num::U24(self.parse_quoted_symbol()?) })
    } else if self.starts_with("\"") {
      // String
      Ok(Expr::Str { val: STRINGS.get(self.parse_quoted_string()?) })
    } else if self.starts_with("'") {
      // Char, truncated to the 24-bit numeric range.
      Ok(Expr::Num { val: Num::U24(self.parse_quoted_char()? as u32 & 0x00ff_ffff) })
    } else if self.starts_with("$") {
      // Unscoped var
      self.advance_one();
      Ok(Expr::Chn { nam: self.parse_var_name()? })
    } else if self.starts_with("*") {
      // Era
      self.advance_one();
      Ok(Expr::Era)
    } else if let Some(c) = self.peek_one() {
      if is_num_char(c) {
        // Number
        Ok(Expr::Num { val: self.parse_number()? })
      } else {
        // Var
        let nam = self.labelled(|p| p.parse_var_name(), "expression")?;
        Ok(Expr::Var { nam })
      }
    } else {
      self.expected("expression")?
    }
  }
/// Parses a primary expression followed by an optional postfix:
/// a call `(args)`, a map access `[key]`, or a constructor literal `{fields}`.
fn call_or_postfix(&mut self, inline: bool) -> ParseResult<Expr> {
  let ini_idx = *self.index();
  let base = self.parse_primary_expr(inline)?;
  if inline {
    self.skip_trivia_inline()?;
  } else {
    self.skip_trivia();
  }
  // call
  if self.starts_with("(") {
    self.advance_one();
    let mut args = Vec::new();
    let mut kwargs = Vec::new();
    // Once a named argument appears, all following arguments must be named.
    let mut must_be_named = false;
    while !self.starts_with(")") {
      let ini_idx = *self.index();
      let (bnd, arg) = self.parse_named_arg()?;
      let end_idx = *self.index();
      if let Some(bnd) = bnd {
        must_be_named = true;
        kwargs.push((bnd, arg));
      } else if must_be_named {
        let msg = "Positional arguments are not allowed to go after named arguments.".to_string();
        return self.with_ctx(Err(msg), ini_idx..end_idx);
      } else {
        args.push(arg);
      }
      if self.starts_with(",") {
        self.consume(",")?;
      } else {
        break;
      }
    }
    self.consume(")")?;
    // `f()` with no arguments is just the callee itself.
    if args.is_empty() && kwargs.is_empty() {
      return Ok(base);
    } else {
      return Ok(Expr::Call { fun: Box::new(base), args, kwargs });
    }
  }
  // map get — only valid directly on a variable name.
  if self.starts_with("[") {
    if let Expr::Var { nam } = base {
      self.advance_one();
      let key = self.parse_expr(inline, false)?;
      self.consume("]")?;
      return Ok(Expr::MapGet { nam, key: Box::new(key) });
    } else {
      let end_idx = *self.index();
      return self.expected_spanned("Map variable name", ini_idx..end_idx);
    }
  }
  // ctr — `Name { field: val, ... }`, only valid directly on a variable name.
  if self.starts_with("{") {
    if let Expr::Var { nam } = base {
      let kwargs = self.list_like(|p| p.data_kwarg(), "{", "}", ",", true, 0)?;
      return Ok(Expr::Ctr { name: nam, args: Vec::new(), kwargs });
    } else {
      let end_idx = *self.index();
      return self.expected_spanned("Constructor name", ini_idx..end_idx);
    }
  }
  // no postfix
  Ok(base)
}
/// Disambiguates a `{`-literal: a map (`{k: v, ...}`) vs a superposition
/// (`{a, b, ...}`). Called with the cursor on the opening `{`.
fn parse_map_or_sup(&mut self) -> ParseResult<Expr> {
  // Consume the '{' the caller peeked at.
  self.advance_one();
  // Empty map
  if self.try_consume("}") {
    return Ok(Expr::Map { entries: vec![] });
  }
  // The token after the first element decides: ',' → sup, ':' → map.
  let head = self.parse_expr(false, false)?;
  self.skip_trivia();
  if self.try_consume(",") {
    self.parse_sup(head)
  } else if self.try_consume(":") {
    self.parse_map_init(head)
  } else {
    self.expected("',' or ':'")
  }
}
/// Parses the rest of a map literal after `head` and its ':' were consumed:
/// the first value, then the remaining `key: value` entries up to '}'.
fn parse_map_init(&mut self, head: Expr) -> ParseResult<Expr> {
  let mut entries = Vec::new();
  let val = self.parse_expr(false, false)?;
  entries.push((head, val));
  self.skip_trivia();
  // A separating comma is required unless the map closes right here.
  if !self.starts_with("}") {
    self.consume(",")?;
  }
  let tail = self.list_like(|p| p.parse_map_entry(), "", "}", ",", true, 0)?;
  entries.extend(tail);
  Ok(Expr::Map { entries })
}
/// Parses the rest of a superposition after `head` and its ',' were consumed:
/// at least one more element, up to the closing '}'.
fn parse_sup(&mut self, head: Expr) -> ParseResult<Expr> {
  let rest = self.list_like(|p| p.parse_expr(false, false), "", "}", ",", true, 1)?;
  let els: Vec<Expr> = std::iter::once(head).chain(rest).collect();
  Ok(Expr::Sup { els })
}
/// Parses a tree node `![left, right]`. Called with the cursor on the `![`.
fn parse_tree_node(&mut self) -> ParseResult<Expr> {
  // Two single-char advances consume the "![" prefix.
  self.advance_one();
  self.advance_one();
  let left = self.parse_expr(false, false)?;
  self.consume(",")?;
  let right = self.parse_expr(false, false)?;
  self.consume("]")?;
  Ok(Expr::TreeNode { left: Box::new(left), right: Box::new(right) })
}
/// Parses a tree leaf `!<expr>`. Called with the cursor on the `!`.
fn parse_tree_leaf(&mut self, inline: bool) -> ParseResult<Expr> {
  // Consume the '!' the caller peeked at.
  self.advance_one();
  let val = self.parse_expr(inline, false)?;
  Ok(Expr::TreeLeaf { val: Box::new(val) })
}
/// Parses one named constructor field: `<name> ":" <expr>`.
fn data_kwarg(&mut self) -> ParseResult<(Name, Expr)> {
  self.skip_trivia();
  let field = self.parse_var_name()?;
  self.consume(":")?;
  let value = self.parse_expr(false, false)?;
  Ok((field, value))
}
/// Parses a single map entry: `<key> ":" <value>`.
fn parse_map_entry(&mut self) -> ParseResult<(Expr, Expr)> {
  let entry_key = self.parse_expr(false, false)?;
  self.consume(":")?;
  let entry_val = self.parse_expr(false, false)?;
  Ok((entry_key, entry_val))
}
/// Disambiguates a `[`-literal: a plain list (`[a, b, ...]`) vs a list
/// comprehension (`[term for x in iter (if cond)?]`).
fn parse_list_or_comprehension(&mut self) -> ParseResult<Expr> {
  self.consume_exactly("[")?;
  // Empty list
  self.skip_trivia();
  if self.try_consume_exactly("]") {
    return Ok(Expr::Lst { els: vec![] });
  }
  // The token after the first element decides: `for` → comprehension.
  let head = self.parse_expr(false, false)?;
  self.skip_trivia();
  if self.try_parse_keyword("for") {
    // Comprehension
    self.skip_trivia();
    let bind = self.parse_var_name()?;
    self.skip_trivia();
    self.parse_keyword("in")?;
    let iter = self.parse_expr(false, false)?;
    // Optional filter clause.
    let mut cond = None;
    self.skip_trivia();
    if self.try_parse_keyword("if") {
      cond = Some(Box::new(self.parse_expr(false, false)?));
    }
    self.consume("]")?;
    Ok(Expr::LstMap { term: Box::new(head), bind, iter: Box::new(iter), cond })
  } else {
    // List
    let mut head = vec![head];
    self.skip_trivia();
    // A separating comma is required unless the list closes right here.
    if !self.starts_with("]") {
      self.consume(",")?;
    }
    let tail = self.list_like(|p| p.parse_expr(false, false), "", "]", ",", true, 0)?;
    head.extend(tail);
    Ok(Expr::Lst { els: head })
  }
}
/// "λ" (<name> ","?)+ ":" <expr>
/// | "open" <type> ":" <var>
/// | <infix>
/// Parses a full expression. When `tup` is true, a trailing comma-separated
/// sequence is collected into an `Expr::Tup`; otherwise commas end the parse.
fn parse_expr(&mut self, inline: bool, tup: bool) -> ParseResult<Expr> {
  if inline {
    self.skip_trivia_inline()?;
  } else {
    self.skip_trivia();
  }
  let base = self.parse_infix_expr(0, inline)?;
  if !tup {
    return Ok(base);
  }
  if inline {
    self.skip_trivia_inline()?;
  } else {
    self.skip_trivia();
  }
  if self.starts_with(",") {
    // Gather the comma-separated elements of a tuple expression.
    let mut els = vec![base];
    loop {
      if self.starts_with(",") {
        self.advance_one();
        els.push(self.parse_expr(inline, false)?);
        // A newline terminates the tuple element list.
        if self.starts_with("\n") {
          break;
        }
        if inline {
          self.skip_trivia_inline()?;
        } else {
          self.skip_trivia();
        }
      } else {
        break;
      }
    }
    Ok(Expr::Tup { els })
  } else {
    Ok(base)
  }
}
/// Named argument of a function call.
/// Named argument of a function call: `<expr>` or `<name> "=" <expr>`.
/// Returns `(Some(name), value)` for named arguments, `(None, value)` otherwise.
fn parse_named_arg(&mut self) -> ParseResult<(Option<Name>, Expr)> {
  let arg = self.parse_expr(false, false)?;
  if self.try_consume("=") {
    // `name = value` is only valid when the left side is a bare variable.
    if let Expr::Var { nam } = arg {
      let bind = Some(nam);
      let arg = self.parse_expr(false, false)?;
      Ok((bind, arg))
    } else {
      let msg = "Unexpected '=' in unnamed argument.".to_string();
      let idx = *self.index();
      self.with_ctx(Err(msg), idx..idx + 1)
    }
  } else {
    Ok((None, arg))
  }
}
/// Infix expression.
/// <simple> (<infix_op> <infix>)?
/// Precedence-climbing infix parser: at precedence `prec`, parses operands at
/// `prec + 1` and left-folds any operators whose precedence equals `prec`.
fn parse_infix_expr(&mut self, prec: usize, inline: bool) -> ParseResult<Expr> {
  // maybe_grow guards against stack overflow on deeply nested expressions.
  maybe_grow(|| {
    if inline {
      self.skip_trivia_inline()?;
    } else {
      self.skip_trivia();
    }
    // Past the highest precedence level, fall through to atoms/postfix.
    if prec > Op::max_precedence() {
      return self.call_or_postfix(inline);
    }
    let mut lhs = self.parse_infix_expr(prec + 1, inline)?;
    if inline {
      self.skip_trivia_inline()?;
    } else {
      self.skip_trivia();
    }
    while let Some(op) = self.peek_oper() {
      if op.precedence() == prec {
        // peek_oper already matched, so consuming the operator cannot fail.
        self.try_parse_oper().unwrap();
        let rhs = self.parse_infix_expr(prec + 1, inline)?;
        lhs = Expr::Opr { op, lhs: Box::new(lhs), rhs: Box::new(rhs) };
        // NOTE(review): this skips inline trivia even when `inline` is false,
        // unlike the branches above — confirm whether multi-line operator
        // chains are intentionally not continued here.
        self.skip_trivia_inline()?;
      } else {
        break;
      }
    }
    Ok(lhs)
  })
}
/// Skips newlines and accepts the next line's indentation only if it is at
/// most `expected` (or EOF). Returns the indentation actually found.
fn consume_indent_at_most(&mut self, expected: Indent) -> ParseResult<Indent> {
  let got = self.advance_newlines()?;
  match (expected, got) {
    // EOF is always acceptable, regardless of the expected indent.
    (_, Indent::Eof) => Ok(Indent::Eof),
    (Indent::Val(expected), Indent::Val(got)) if got <= expected => Ok(Indent::Val(got)),
    (expected, got) => self.expected_indent(expected, got),
  }
}
/// Skips newlines and requires the next line's indentation to match
/// `expected` exactly (EOF only matches an expected EOF).
fn consume_indent_exactly(&mut self, expected: Indent) -> ParseResult<()> {
  let got = self.advance_newlines()?;
  match (expected, got) {
    (Indent::Eof, Indent::Eof) => Ok(()),
    (Indent::Val(expected), Indent::Val(got)) if got == expected => Ok(()),
    (expected, got) => self.expected_indent(expected, got),
  }
}
/// Parses a statement and returns the indentation of the next statement.
/// Parses one statement, dispatching on its leading keyword, and returns it
/// together with the indentation of the statement that follows.
/// Anything without a recognized keyword falls through to assignment parsing.
fn parse_statement(&mut self, indent: &mut Indent) -> ParseResult<(Stmt, Indent)> {
  // maybe_grow guards against stack overflow on deeply nested statements.
  maybe_grow(|| {
    if self.starts_with_keyword("return") {
      self.parse_return()
    } else if self.starts_with_keyword("def") {
      self.parse_local_def(indent)
    } else if self.starts_with_keyword("if") {
      self.parse_if(indent)
    } else if self.starts_with_keyword("match") {
      self.parse_match(indent)
    } else if self.starts_with_keyword("switch") {
      self.parse_switch(indent)
    } else if self.starts_with_keyword("fold") {
      self.parse_fold(indent)
    } else if self.starts_with_keyword("bend") {
      self.parse_bend(indent)
    } else if self.starts_with_keyword("with") {
      self.parse_with(indent)
    } else if self.starts_with_keyword("open") {
      self.parse_open(indent)
    } else if self.starts_with_keyword("use") {
      self.parse_use(indent)
    } else {
      self.parse_assign(indent)
    }
  })
}
/// Assignments, monadic bind operations and in-place operations.
/// <assign_pattern> "=" <expr> ";"?
/// | <assign_pattern> "<-" <expr> ";"?
///
/// Parses an assignment (`pat = expr`), a monadic bind (`pat <- expr`), or an
/// in-place operation (`var += expr`, `map[k] @= f`, ...), each followed by an
/// optional ';' and the next statement at the same indent.
fn parse_assign(&mut self, indent: &mut Indent) -> ParseResult<(Stmt, Indent)> {
  let ini_idx = *self.index();
  let pat = self.parse_assign_pattern()?;
  let end_idx = *self.index();
  self.skip_trivia_inline()?;
  // Assignment
  if self.starts_with("=") {
    self.advance_one();
    let val = self.parse_expr(true, true)?;
    self.skip_trivia_inline()?;
    self.try_consume_exactly(";");
    if !self.is_eof() {
      self.consume_new_line()?;
    }
    let nxt_indent = self.advance_newlines()?;
    // A statement at the same indent becomes this assignment's continuation.
    if nxt_indent == *indent {
      let (nxt, nxt_indent) = self.parse_statement(indent)?;
      let stmt = Stmt::Assign { pat, val: Box::new(val), nxt: Some(Box::new(nxt)) };
      return Ok((stmt, nxt_indent));
    } else {
      let stmt = Stmt::Assign { pat, val: Box::new(val), nxt: None };
      return Ok((stmt, nxt_indent));
    }
  }
  // Ask
  // NOTE(review): unlike the '=' branch, this one does not require a newline
  // after the expression — confirm whether that asymmetry is intentional.
  if self.starts_with("<-") {
    self.consume("<-")?;
    let val = self.parse_expr(true, true)?;
    self.skip_trivia_inline()?;
    self.try_consume_exactly(";");
    let nxt_indent = self.advance_newlines()?;
    if nxt_indent == *indent {
      let (nxt, nxt_indent) = self.parse_statement(indent)?;
      let stmt = Stmt::Ask { pat, val: Box::new(val), nxt: Some(Box::new(nxt)) };
      return Ok((stmt, nxt_indent));
    } else {
      let stmt = Stmt::Ask { pat, val: Box::new(val), nxt: None };
      return Ok((stmt, nxt_indent));
    }
  }
  // In-place — only plain variables and map accessors may be targets.
  match &pat {
    AssignPattern::Var(..) => {}
    AssignPattern::MapSet(..) => {}
    _ => self.expected_spanned("Var or Map accessor", ini_idx..end_idx)?,
  }
  if let Some(op) = self.parse_in_place_op()? {
    let val = self.parse_expr(true, false)?;
    self.skip_trivia_inline()?;
    self.try_consume_exactly(";");
    // In-place ops require a continuation statement at the same indent.
    self.consume_indent_exactly(*indent)?;
    let (nxt, nxt_indent) = self.parse_statement(indent)?;
    let stmt = Stmt::InPlace { op, pat: Box::new(pat), val: Box::new(val), nxt: Box::new(nxt) };
    return Ok((stmt, nxt_indent));
  }
  self.expected_spanned("statement", ini_idx..end_idx)
}
/// Tries to parse an in-place operator token (`+=`, `-=`, `*=`, `/=`, `&=`,
/// `|=`, `^=`, `@=`). Returns `None` without consuming when none is present.
fn parse_in_place_op(&mut self) -> ParseResult<Option<InPlaceOp>> {
  self.skip_trivia_inline()?;
  // Token → operator table; tokens are mutually exclusive so order is free.
  const OPS: [(&str, InPlaceOp); 8] = [
    ("+=", InPlaceOp::Add),
    ("-=", InPlaceOp::Sub),
    ("*=", InPlaceOp::Mul),
    ("/=", InPlaceOp::Div),
    ("&=", InPlaceOp::And),
    ("|=", InPlaceOp::Or),
    ("^=", InPlaceOp::Xor),
    ("@=", InPlaceOp::Map),
  ];
  for (token, op) in OPS {
    if self.starts_with(token) {
      self.consume(token)?;
      return Ok(Some(op));
    }
  }
  Ok(None)
}
/// Parses `return <expr> ";"?` and returns the indentation that follows.
fn parse_return(&mut self) -> ParseResult<(Stmt, Indent)> {
  self.parse_keyword("return")?;
  let term = self.parse_expr(true, true)?;
  self.skip_trivia_inline()?;
  self.try_consume_exactly(";");
  // A newline must end the statement unless the file ends here.
  if !self.is_eof() {
    self.consume_new_line()?;
  }
  let indent = self.advance_newlines()?;
  Ok((Stmt::Return { term: Box::new(term) }, indent))
}
/// Parses `if <cond>: <then> ("elif" <cond>: <then>)* "else": <otherwise>`.
/// The elif chain is desugared into nested `Stmt::If`s, folded right-to-left
/// under the final else branch.
fn parse_if(&mut self, indent: &mut Indent) -> ParseResult<(Stmt, Indent)> {
  self.parse_keyword("if")?;
  self.skip_trivia_inline()?;
  let cond = self.parse_expr(true, true)?;
  self.skip_trivia_inline()?;
  self.consume_exactly(":")?;
  indent.enter_level();
  self.consume_indent_exactly(*indent)?;
  let (then, nxt_indent) = self.parse_statement(indent)?;
  indent.exit_level();
  // After the then-body we must be back at the `if`'s indent level, where an
  // `elif` or `else` is mandatory.
  if nxt_indent != *indent {
    return self
      .expected_indent(*indent, nxt_indent)
      .or(self.expected_spanned("'else' or 'elif'", self.index..self.index + 1));
  }
  let mut elifs = Vec::new();
  while self.try_parse_keyword("elif") {
    let cond = self.parse_expr(true, false)?;
    self.skip_trivia_inline()?;
    self.consume_exactly(":")?;
    indent.enter_level();
    self.consume_indent_exactly(*indent)?;
    let (then, nxt_indent) = self.parse_statement(indent)?;
    indent.exit_level();
    if nxt_indent != *indent {
      return self
        .expected_indent(*indent, nxt_indent)
        .or(self.expected_spanned("'else' or 'elif'", self.index..self.index + 1));
    }
    elifs.push((cond, then));
  }
  self.parse_keyword("else")?;
  self.skip_trivia_inline()?;
  self.consume_exactly(":")?;
  indent.enter_level();
  self.consume_indent_exactly(*indent)?;
  let (otherwise, nxt_indent) = self.parse_statement(indent)?;
  // Desugar: each elif becomes an inner If whose else is the accumulator.
  let otherwise = elifs.into_iter().rfold(otherwise, |acc, (cond, then)| Stmt::If {
    cond: Box::new(cond),
    then: Box::new(then),
    otherwise: Box::new(acc),
    nxt: None,
  });
  indent.exit_level();
  // A statement at the same indent continues after the whole if-chain.
  if nxt_indent == *indent {
    let (nxt, nxt_indent) = self.parse_statement(indent)?;
    let stmt = Stmt::If {
      cond: Box::new(cond),
      then: Box::new(then),
      otherwise: Box::new(otherwise),
      nxt: Some(Box::new(nxt)),
    };
    Ok((stmt, nxt_indent))
  } else {
    let stmt =
      Stmt::If { cond: Box::new(cond), then: Box::new(then), otherwise: Box::new(otherwise), nxt: None };
    Ok((stmt, nxt_indent))
  }
}
/// Parses `match <arg> ("with" ...)? ":"` followed by one or more indented
/// `case` arms, then an optional continuation at the original indent.
fn parse_match(&mut self, indent: &mut Indent) -> ParseResult<(Stmt, Indent)> {
  self.parse_keyword("match")?;
  self.skip_trivia_inline()?;
  let (bnd, arg) = self.parse_match_arg()?;
  self.skip_trivia_inline()?;
  let (with_bnd, with_arg) = self.parse_with_clause()?;
  self.consume_new_line()?;
  indent.enter_level();
  // At least one case is required at the deeper indent level.
  self.consume_indent_exactly(*indent).or(self.expected_spanned("'case'", self.index..self.index + 1))?;
  let (case, mut nxt_indent) = self.parse_match_case(indent)?;
  let mut arms = vec![case];
  // Further cases continue as long as they sit at the same (deeper) indent.
  while nxt_indent == *indent {
    let (case, nxt_indent_) = self.parse_match_case(indent)?;
    nxt_indent = nxt_indent_;
    arms.push(case);
  }
  indent.exit_level();
  if nxt_indent == *indent {
    let (nxt, nxt_indent) = self.parse_statement(indent)?;
    let stmt = Stmt::Match { arg: Box::new(arg), bnd, with_bnd, with_arg, arms, nxt: Some(Box::new(nxt)) };
    Ok((stmt, nxt_indent))
  } else {
    let stmt = Stmt::Match { arg: Box::new(arg), bnd, with_bnd, with_arg, arms, nxt: None };
    Ok((stmt, nxt_indent))
  }
}
/// Parses the scrutinee of a match/switch/fold/bend: either `<name> "=" <expr>`
/// (explicit bind), a bare variable (bound to its own name), or any other
/// expression (bound to the internal name "%arg").
fn parse_match_arg(&mut self) -> ParseResult<(Option<Name>, Expr)> {
  let ini_idx = *self.index();
  let arg = self.parse_expr(true, false)?;
  let end_idx = *self.index();
  self.skip_trivia_inline()?;
  match (arg, self.starts_with("=")) {
    (Expr::Var { nam }, true) => {
      self.advance_one();
      Ok((Some(nam), self.parse_expr(true, false)?))
    }
    // '=' after a non-variable is always an error here.
    (_, true) => self.expected_spanned("argument name", ini_idx..end_idx),
    (Expr::Var { nam }, false) => Ok((Some(nam.clone()), Expr::Var { nam })),
    (arg, false) => Ok((Some(Name::new("%arg")), arg)),
  }
}
/// Parses an optional `with <arg>, ... :` clause; without the `with` keyword,
/// just consumes the ':' and returns empty bind/arg lists.
fn parse_with_clause(&mut self) -> ParseResult<(Vec<Option<Name>>, Vec<Expr>)> {
  self.skip_trivia_inline()?;
  let res = if self.try_parse_keyword("with") {
    // At least one with-argument is required; the list ends at ':'.
    self.list_like(|p| p.parse_with_arg(), "", ":", ",", true, 1)?.into_iter().unzip()
  } else {
    self.consume_exactly(":")?;
    (vec![], vec![])
  };
  Ok(res)
}
/// Parses one with-clause argument: `<name>` (bound to itself) or
/// `<name> "=" <expr>`.
fn parse_with_arg(&mut self) -> ParseResult<(Option<Name>, Expr)> {
  let bind = self.parse_var_name()?;
  self.skip_trivia_inline()?;
  let arg = if self.try_consume("=") {
    self.parse_expr(false, false)?
  } else {
    // Shorthand `with x` means `with x = x`.
    Expr::Var { nam: bind.clone() }
  };
  Ok((Some(bind), arg))
}
/// Parses one match arm: `case (<name> | "_") ":"` followed by an indented
/// body. Returns the arm and the indentation that follows it.
fn parse_match_case(&mut self, indent: &mut Indent) -> ParseResult<(MatchArm, Indent)> {
  self.parse_keyword("case")?;
  self.skip_trivia_inline()?;
  // '_' is the catch-all pattern; anything else must be a constructor name.
  let pat = if self.try_consume_exactly("_") {
    None
  } else {
    let nam = self.labelled(|p| p.parse_var_name(), "name or '_'")?;
    Some(nam)
  };
  self.skip_trivia_inline()?;
  self.consume_exactly(":")?;
  self.consume_new_line()?;
  indent.enter_level();
  self.consume_indent_exactly(*indent)?;
  let (body, nxt_indent) = self.parse_statement(indent)?;
  indent.exit_level();
  let stmt = MatchArm { lft: pat, rgt: body };
  Ok((stmt, nxt_indent))
}
/// Parses a `switch` statement: numeric cases must start at 0 and be
/// consecutive, terminated by a final `case _` arm.
fn parse_switch(&mut self, indent: &mut Indent) -> ParseResult<(Stmt, Indent)> {
  self.parse_keyword("switch")?;
  self.skip_trivia_inline()?;
  let (bnd, arg) = self.parse_match_arg()?;
  self.skip_trivia_inline()?;
  let (with_bnd, with_arg) = self.parse_with_clause()?;
  indent.enter_level();
  self.consume_indent_exactly(*indent)?;
  let ini_idx = *self.index();
  let (fst_case, fst_stmt, mut nxt_indent) = self.parse_switch_case(indent)?;
  let end_idx = *self.index();
  // The first case must be exactly `case 0`.
  if fst_case != Some(0) {
    return self.expected_spanned("case 0", ini_idx..end_idx);
  }
  let mut arms = vec![fst_stmt];
  // NOTE(review): always true at this point (fst_case == Some(0) was just
  // checked above), so this is effectively `let mut should_continue = true`.
  let mut should_continue = fst_case == Some(0);
  let mut expected_num = 1;
  while should_continue {
    if nxt_indent != *indent {
      return self
        .expected_indent(*indent, nxt_indent)
        .or(self.expected_spanned("'case'", self.index..self.index + 1));
    }
    let (case, stmt, nxt_indent_) = self.parse_switch_case(indent)?;
    nxt_indent = nxt_indent_;
    if let Some(case) = case {
      // Numeric cases must be consecutive: 0, 1, 2, ...
      if case != expected_num {
        return self.expected(&format!("case {}", expected_num));
      }
      should_continue = true;
      arms.push(stmt);
      expected_num += 1;
    } else {
      // `case _` ends the arm list.
      should_continue = false;
      arms.push(stmt);
    }
  }
  indent.exit_level();
  if nxt_indent == *indent {
    let (nxt, nxt_indent) = self.parse_statement(indent)?;
    let stmt = Stmt::Switch { arg: Box::new(arg), bnd, with_bnd, with_arg, arms, nxt: Some(Box::new(nxt)) };
    Ok((stmt, nxt_indent))
  } else {
    let stmt = Stmt::Switch { arg: Box::new(arg), bnd, with_bnd, with_arg, arms, nxt: None };
    Ok((stmt, nxt_indent))
  }
}
/// Parses one switch arm: `case (<number> | "_") ":"` plus an indented body.
/// Returns `None` for the number when the arm is the catch-all `_`.
fn parse_switch_case(&mut self, indent: &mut Indent) -> ParseResult<(Option<u32>, Stmt, Indent)> {
  self.parse_keyword("case")?;
  self.skip_trivia_inline()?;
  let case = if let Some(c) = self.peek_one() {
    match c {
      '_' => {
        self.advance_one();
        None
      }
      c if c.is_ascii_digit() => Some(self.parse_u32()?),
      _ => return self.expected("number or '_'"),
    }
  } else {
    return self.expected("number or '_'")?;
  };
  self.skip_trivia_inline()?;
  self.consume_exactly(":")?;
  self.consume_new_line()?;
  indent.enter_level();
  self.consume_indent_exactly(*indent)?;
  let (stmt, nxt_indent) = self.parse_statement(indent)?;
  indent.exit_level();
  Ok((case, stmt, nxt_indent))
}
/// "fold" <bind> ("=" <arg>)? ":"
/// "case" <ctr> ":"
/// <case>
/// ...
/// Parses a `fold` statement. Syntactically identical to `match` (scrutinee,
/// optional with-clause, `case` arms) but produces `Stmt::Fold`.
fn parse_fold(&mut self, indent: &mut Indent) -> ParseResult<(Stmt, Indent)> {
  self.parse_keyword("fold")?;
  self.skip_trivia_inline()?;
  // Actually identical to match, except the return
  let (bind, arg) = self.parse_match_arg()?;
  self.skip_trivia_inline()?;
  let (with_bnd, with_arg) = self.parse_with_clause()?;
  self.consume_new_line()?;
  indent.enter_level();
  // At least one case is required at the deeper indent level.
  self.consume_indent_exactly(*indent).or(self.expected_spanned("'case'", self.index..self.index + 1))?;
  let (case, mut nxt_indent) = self.parse_match_case(indent)?;
  let mut arms = vec![case];
  while nxt_indent == *indent {
    let (case, nxt_indent_) = self.parse_match_case(indent)?;
    nxt_indent = nxt_indent_;
    arms.push(case);
  }
  indent.exit_level();
  if nxt_indent == *indent {
    let (nxt, nxt_indent) = self.parse_statement(indent)?;
    let stmt =
      Stmt::Fold { arg: Box::new(arg), bnd: bind, arms, with_bnd, with_arg, nxt: Some(Box::new(nxt)) };
    Ok((stmt, nxt_indent))
  } else {
    let stmt = Stmt::Fold { arg: Box::new(arg), bnd: bind, arms, with_bnd, with_arg, nxt: None };
    Ok((stmt, nxt_indent))
  }
}
/// "bend" (<bind> "=" <init> ","?)* ":"
/// "when" <cond> ":"
/// <step>
/// "else" ":"
/// <base>
/// Parses a `bend` statement: initial bindings, a mandatory `when` branch
/// (recursive step) and a mandatory `else` branch (base case), followed by an
/// optional continuation at the original indent.
fn parse_bend(&mut self, indent: &mut Indent) -> ParseResult<(Stmt, Indent)> {
  self.parse_keyword("bend")?;
  self.skip_trivia_inline()?;
  // One or more `name = init` bindings, ended by ':'.
  let args = self.list_like(|p| p.parse_match_arg(), "", ":", ",", true, 1)?;
  let (bind, init) = args.into_iter().unzip();
  self.consume_new_line()?;
  indent.enter_level();
  self.consume_indent_exactly(*indent).or(self.expected_spanned("'when'", self.index..self.index + 1))?;
  self.parse_keyword("when")?;
  self.skip_trivia_inline()?;
  let cond = self.parse_expr(true, true)?;
  self.skip_trivia_inline()?;
  self.consume_exactly(":")?;
  self.consume_new_line()?;
  indent.enter_level();
  self.consume_indent_exactly(*indent)?;
  let (step, nxt_indent) = self.parse_statement(indent)?;
  indent.exit_level();
  // `else` must follow at the same level as `when`.
  if nxt_indent != *indent {
    return self
      .expected_indent(*indent, nxt_indent)
      .or(self.expected_spanned("'else'", self.index..self.index + 1));
  }
  self.parse_keyword("else")?;
  self.skip_trivia_inline()?;
  self.consume_exactly(":")?;
  self.consume_new_line()?;
  indent.enter_level();
  self.consume_indent_exactly(*indent)?;
  let (base, nxt_indent) = self.parse_statement(indent)?;
  indent.exit_level();
  // Second exit leaves the `when`/`else` level back to the `bend`'s own.
  indent.exit_level();
  if nxt_indent == *indent {
    let (nxt, nxt_indent) = self.parse_statement(indent)?;
    let stmt = Stmt::Bend {
      bnd: bind,
      arg: init,
      cond: Box::new(cond),
      step: Box::new(step),
      base: Box::new(base),
      nxt: Some(Box::new(nxt)),
    };
    Ok((stmt, nxt_indent))
  } else {
    let stmt = Stmt::Bend {
      bnd: bind,
      arg: init,
      cond: Box::new(cond),
      step: Box::new(step),
      base: Box::new(base),
      nxt: None,
    };
    Ok((stmt, nxt_indent))
  }
}
/// "with" <typ> ":"
/// <bod>
/// <nxt>?
fn parse_with(&mut self, indent: &mut Indent) -> ParseResult<(Stmt, Indent)> {
self.parse_keyword("with")?;
self.skip_trivia_inline()?;
let typ = self.parse_var_name()?;
self.skip_trivia_inline()?;
self.consume_exactly(":")?;
self.consume_new_line()?;
indent.enter_level();
self.consume_indent_exactly(*indent)?;
let (bod, nxt_indent) = self.parse_statement(indent)?;
indent.exit_level();
if nxt_indent == *indent {
let (nxt, nxt_indent) = self.parse_statement(indent)?;
| rust | Apache-2.0 | d184863f03e796d1d657958a51dd6dd331ade92d | 2026-01-04T15:41:39.511038Z | true |
HigherOrderCO/Bend | https://github.com/HigherOrderCO/Bend/blob/d184863f03e796d1d657958a51dd6dd331ade92d/src/imp/gen_map_get.rs | src/imp/gen_map_get.rs | use crate::fun::Name;
use super::{AssignPattern, Definition, Expr, Stmt};
impl Definition {
  /// Replaces `Expr::MapGet` expressions in this definition's body with plain
  /// variable accesses, introducing fresh variables as necessary to hold the
  /// intermediate results of `Map/get` calls.
  pub fn gen_map_get(&mut self) {
    // The counter seeds the fresh `map/get%N` variable names at 0.
    self.body.gen_map_get(&mut 0);
  }
}
impl Stmt {
  /// Recursively eliminates `Expr::MapGet`s in this statement tree. Child
  /// statements are rewritten first; then any map gets found in this
  /// statement's own expressions are hoisted into `Map/get` assignments that
  /// wrap `self` in place (via `gen_get`). `id` numbers the fresh variables.
  fn gen_map_get(&mut self, id: &mut usize) {
    match self {
      Stmt::LocalDef { def, nxt } => {
        nxt.gen_map_get(id);
        def.gen_map_get()
      }
      Stmt::Assign { pat, val, nxt } => {
        // A `map[key] = val` target may itself contain map gets in the key.
        let key_substitutions =
          if let AssignPattern::MapSet(_, key) = pat { key.substitute_map_gets(id) } else { Vec::new() };
        if let Some(nxt) = nxt {
          nxt.gen_map_get(id);
        }
        let substitutions = val.substitute_map_gets(id);
        if !substitutions.is_empty() {
          *self = gen_get(self, substitutions);
        }
        if !key_substitutions.is_empty() {
          *self = gen_get(self, key_substitutions);
        }
      }
      Stmt::Ask { pat: _, val, nxt } => {
        if let Some(nxt) = nxt {
          nxt.gen_map_get(id);
        }
        let substitutions = val.substitute_map_gets(id);
        if !substitutions.is_empty() {
          *self = gen_get(self, substitutions);
        }
      }
      Stmt::InPlace { op: _, pat, val, nxt } => {
        let key_substitutions = if let AssignPattern::MapSet(_, key) = &mut **pat {
          key.substitute_map_gets(id)
        } else {
          Vec::new()
        };
        nxt.gen_map_get(id);
        let substitutions = val.substitute_map_gets(id);
        if !substitutions.is_empty() {
          *self = gen_get(self, substitutions);
        }
        if !key_substitutions.is_empty() {
          *self = gen_get(self, key_substitutions);
        }
      }
      Stmt::If { cond, then, otherwise, nxt } => {
        then.gen_map_get(id);
        otherwise.gen_map_get(id);
        if let Some(nxt) = nxt {
          nxt.gen_map_get(id);
        }
        let substitutions = cond.substitute_map_gets(id);
        if !substitutions.is_empty() {
          *self = gen_get(self, substitutions);
        }
      }
      // Match and Fold share the same shape: rewrite each arm body, then
      // hoist gets from the scrutinee and the with-clause arguments.
      Stmt::Match { bnd: _, arg, with_bnd: _, with_arg, arms, nxt }
      | Stmt::Fold { bnd: _, arg, arms, with_bnd: _, with_arg, nxt } => {
        for arm in arms.iter_mut() {
          arm.rgt.gen_map_get(id);
        }
        if let Some(nxt) = nxt {
          nxt.gen_map_get(id);
        }
        let mut substitutions = arg.substitute_map_gets(id);
        for arg in with_arg {
          substitutions.extend(arg.substitute_map_gets(id));
        }
        if !substitutions.is_empty() {
          *self = gen_get(self, substitutions);
        }
      }
      Stmt::Switch { bnd: _, arg, with_bnd: _, with_arg, arms, nxt } => {
        for arm in arms.iter_mut() {
          arm.gen_map_get(id);
        }
        if let Some(nxt) = nxt {
          nxt.gen_map_get(id);
        }
        let mut substitutions = arg.substitute_map_gets(id);
        for arg in with_arg {
          substitutions.extend(arg.substitute_map_gets(id));
        }
        if !substitutions.is_empty() {
          *self = gen_get(self, substitutions);
        }
      }
      Stmt::Bend { bnd: _, arg: init, cond, step, base, nxt } => {
        step.gen_map_get(id);
        base.gen_map_get(id);
        if let Some(nxt) = nxt {
          nxt.gen_map_get(id);
        }
        let mut substitutions = cond.substitute_map_gets(id);
        for init in init {
          substitutions.extend(init.substitute_map_gets(id));
        }
        if !substitutions.is_empty() {
          *self = gen_get(self, substitutions);
        }
      }
      Stmt::With { typ: _, bod, nxt } => {
        bod.gen_map_get(id);
        if let Some(nxt) = nxt {
          nxt.gen_map_get(id);
        }
      }
      Stmt::Return { term } => {
        let substitutions = term.substitute_map_gets(id);
        if !substitutions.is_empty() {
          *self = gen_get(self, substitutions);
        }
      }
      Stmt::Open { typ: _, var: _, nxt } => {
        nxt.gen_map_get(id);
      }
      Stmt::Use { nam: _, val: bod, nxt } => {
        nxt.gen_map_get(id);
        let substitutions = bod.substitute_map_gets(id);
        if !substitutions.is_empty() {
          *self = gen_get(self, substitutions);
        }
      }
      Stmt::Err => {}
    }
  }
}
type Substitutions = Vec<(Name, Name, Box<Expr>)>;
impl Expr {
  /// Walks this expression, replacing every `Expr::MapGet` with a fresh
  /// variable and recording `(fresh var, map var, key)` for each replacement.
  /// Keys are processed first, so nested gets inside keys are hoisted too.
  fn substitute_map_gets(&mut self, id: &mut usize) -> Substitutions {
    fn go(e: &mut Expr, substitutions: &mut Substitutions, id: &mut usize) {
      match e {
        Expr::MapGet { nam, key } => {
          // Hoist gets nested inside the key before replacing this one.
          go(key, substitutions, id);
          let new_var = gen_map_var(id);
          substitutions.push((new_var.clone(), nam.clone(), key.clone()));
          *e = Expr::Var { nam: new_var };
        }
        Expr::Call { fun, args, kwargs } => {
          go(fun, substitutions, id);
          for arg in args {
            go(arg, substitutions, id);
          }
          for (_, arg) in kwargs {
            go(arg, substitutions, id);
          }
        }
        Expr::Lam { bod, .. } => {
          go(bod, substitutions, id);
        }
        Expr::Opr { lhs, rhs, .. } => {
          go(lhs, substitutions, id);
          go(rhs, substitutions, id);
        }
        Expr::Lst { els } | Expr::Tup { els } | Expr::Sup { els } => {
          for el in els {
            go(el, substitutions, id);
          }
        }
        Expr::Ctr { kwargs, .. } => {
          for (_, arg) in kwargs.iter_mut() {
            go(arg, substitutions, id);
          }
        }
        Expr::LstMap { term, iter, cond, .. } => {
          go(term, substitutions, id);
          go(iter, substitutions, id);
          if let Some(cond) = cond {
            go(cond, substitutions, id);
          }
        }
        Expr::Map { entries } => {
          // NOTE(review): only entry values are visited here, not keys —
          // confirm whether map-literal keys may contain map gets.
          for (_, entry) in entries {
            go(entry, substitutions, id);
          }
        }
        Expr::TreeNode { left, right } => {
          go(left, substitutions, id);
          go(right, substitutions, id);
        }
        Expr::TreeLeaf { val } => {
          go(val, substitutions, id);
        }
        // Leaves: nothing to rewrite.
        Expr::Era | Expr::Str { .. } | Expr::Var { .. } | Expr::Chn { .. } | Expr::Num { .. } => {}
      }
    }
    let mut substitutions = Substitutions::new();
    go(self, &mut substitutions, id);
    substitutions
  }
}
/// Wraps `current` in one `(var, map) = Map/get(map, key)` assignment per
/// substitution. Folding right-to-left makes the first recorded substitution
/// the outermost assignment, so earlier gets are evaluated first.
fn gen_get(current: &mut Stmt, substitutions: Substitutions) -> Stmt {
  // mem::take moves `current` out, leaving the Stmt's Default value behind
  // (presumably Stmt::Err — confirm); the caller immediately overwrites it.
  substitutions.into_iter().rfold(std::mem::take(current), |acc, next| {
    let (var, map_var, key) = next;
    let map_get_call = Expr::Var { nam: Name::new("Map/get") };
    let map_get_call = Expr::Call {
      fun: Box::new(map_get_call),
      args: vec![Expr::Var { nam: map_var.clone() }, *key],
      kwargs: Vec::new(),
    };
    // Map/get returns (value, map), so both the fresh var and the map rebind.
    let pat = AssignPattern::Tup(vec![AssignPattern::Var(var), AssignPattern::Var(map_var)]);
    Stmt::Assign { pat, val: Box::new(map_get_call), nxt: Some(Box::new(acc)) }
  })
}
/// Creates a fresh `map/get%N` variable name and bumps the counter.
fn gen_map_var(id: &mut usize) -> Name {
  let fresh = Name::new(format!("map/get%{id}"));
  *id += 1;
  fresh
}
| rust | Apache-2.0 | d184863f03e796d1d657958a51dd6dd331ade92d | 2026-01-04T15:41:39.511038Z | false |
HigherOrderCO/Bend | https://github.com/HigherOrderCO/Bend/blob/d184863f03e796d1d657958a51dd6dd331ade92d/src/imp/to_fun.rs | src/imp/to_fun.rs | use super::{AssignPattern, Definition, Expr, InPlaceOp, Stmt};
use crate::{
diagnostics::Diagnostics,
fun::{
self,
builtins::{LCONS, LNIL},
parser::ParseBook,
Book, Name,
},
};
impl ParseBook {
  // TODO: Change all functions to return diagnostics
  /// Lowers every imperative definition (kwarg ordering, map-get elimination,
  /// then conversion to the functional AST) and assembles the final `Book`.
  pub fn to_fun(mut self) -> Result<Book, Diagnostics> {
    for (name, mut def) in std::mem::take(&mut self.imp_defs) {
      def.order_kwargs(&self)?;
      def.gen_map_get();
      // Name clashes are a parser invariant; hitting this here is a bug.
      if self.fun_defs.contains_key(&name) {
        panic!("Def names collision should be checked at parse time")
      }
      self.fun_defs.insert(name, def.to_fun()?);
    }
    let ParseBook { fun_defs: defs, hvm_defs, adts, ctrs, import_ctx, .. } = self;
    Ok(Book { defs, hvm_defs, adts, ctrs, entrypoint: None, imports: import_ctx.to_imports() })
  }
}
impl Definition {
  /// Converts this imperative definition into a functional `fun::Definition`
  /// with a single rule. Errors if the body does not end in a `return`.
  pub fn to_fun(self) -> Result<fun::Definition, Diagnostics> {
    let body = self.body.into_fun().map_err(|e| {
      // Attach the definition's name and source span to the lowering error.
      let mut diags = Diagnostics::default();
      diags.add_function_error(e, self.name.clone(), self.source.clone());
      diags
    })?;
    let body = match body {
      StmtToFun::Return(term) => term,
      // A body that ends in an assignment has no value to return.
      StmtToFun::Assign(..) => {
        let mut diags = Diagnostics::default();
        diags.add_function_error(
          "Function doesn't end with a return statement",
          self.name,
          self.source.clone(),
        );
        return Err(diags);
      }
    };
    // Each parameter becomes a simple variable pattern of the single rule.
    let rule =
      fun::Rule { pats: self.args.into_iter().map(|param| fun::Pattern::Var(Some(param))).collect(), body };
    let def = fun::Definition {
      name: self.name,
      typ: self.typ,
      check: self.check,
      rules: vec![rule],
      source: self.source,
    };
    Ok(def)
  }
}
impl AssignPattern {
  /// Converts an imperative assignment pattern into a functional pattern.
  /// `MapSet` never reaches this point: statement lowering rewrites map-set
  /// targets into `Map/set` calls before patterns are converted.
  pub fn into_fun(self) -> fun::Pattern {
    match self {
      AssignPattern::Eraser => fun::Pattern::Var(None),
      AssignPattern::Var(name) => fun::Pattern::Var(Some(name)),
      AssignPattern::Chn(name) => fun::Pattern::Chn(name),
      // Tuples are static tuple fans; sups are auto-tagged dup fans.
      AssignPattern::Tup(names) => fun::Pattern::Fan(
        fun::FanKind::Tup,
        fun::Tag::Static,
        names.into_iter().map(Self::into_fun).collect(),
      ),
      AssignPattern::Sup(names) => {
        fun::Pattern::Fan(fun::FanKind::Dup, fun::Tag::Auto, names.into_iter().map(Self::into_fun).collect())
      }
      AssignPattern::MapSet(..) => unreachable!(),
    }
  }
}
/// Result of lowering a statement branch into the functional AST.
#[derive(Debug)]
enum StmtToFun {
  // The branch ends by returning this term.
  Return(fun::Term),
  // The branch ends with a pending assignment: (is-ask, pattern, value).
  Assign(bool, fun::Pattern, fun::Term),
}
/// Lowers a statement and flattens the result into
/// `(is-ask, optional pattern, term)`; the pattern is `None` for returns.
fn take(t: Stmt) -> Result<(bool, Option<fun::Pattern>, fun::Term), String> {
  let flattened = match t.into_fun()? {
    StmtToFun::Return(term) => (false, None, term),
    StmtToFun::Assign(ask, pat, val) => (ask, Some(pat), val),
  };
  Ok(flattened)
}
/// Inverse of `take`: rebuilds a `StmtToFun` from an optional pattern —
/// `Assign` when a pattern is pending, `Return` otherwise.
fn wrap(nxt: Option<fun::Pattern>, term: fun::Term, ask: bool) -> StmtToFun {
  match nxt {
    Some(pat) => StmtToFun::Assign(ask, pat, term),
    None => StmtToFun::Return(term),
  }
}
impl Stmt {
  /// Lowers this imperative statement into functional form: either a tail
  /// `StmtToFun::Return(term)` or a `StmtToFun::Assign(ask, pat, term)`
  /// that the enclosing statement binds with a `let` (or monadic `ask`).
  fn into_fun(self) -> Result<StmtToFun, String> {
    // TODO: Refactor this to not repeat everything.
    // TODO: When we have an error with an assignment, we should show the offending assignment (eg. "{pat} = ...").
    let stmt_to_fun = match self {
      // `map[key] = val; nxt` becomes `let map = (Map/set map key val); nxt`.
      Stmt::Assign { pat: AssignPattern::MapSet(map, key), val, nxt: Some(nxt) } => {
        let (ask, nxt_pat, nxt) = take(*nxt)?;
        let term = fun::Term::Let {
          pat: Box::new(fun::Pattern::Var(Some(map.clone()))),
          val: Box::new(fun::Term::call(
            fun::Term::Ref { nam: fun::Name::new("Map/set") },
            [fun::Term::Var { nam: map }, key.to_fun(), val.to_fun()],
          )),
          nxt: Box::new(nxt),
        };
        wrap(nxt_pat, term, ask)
      }
      Stmt::Assign { pat: AssignPattern::MapSet(..), val: _, nxt: None } => {
        return Err("Branch ends with map assignment.".to_string())?;
      }
      Stmt::Assign { pat, val, nxt: Some(nxt) } => {
        let pat = pat.into_fun();
        let val = val.to_fun();
        let (ask, nxt_pat, nxt) = take(*nxt)?;
        let term = fun::Term::Let { pat: Box::new(pat), val: Box::new(val), nxt: Box::new(nxt) };
        wrap(nxt_pat, term, ask)
      }
      // A trailing assignment is passed up to be bound by the parent statement.
      Stmt::Assign { pat, val, nxt: None } => {
        let pat = pat.into_fun();
        let val = val.to_fun();
        StmtToFun::Assign(false, pat, val)
      }
      Stmt::InPlace { op, pat, val, nxt } => {
        let (ask, nxt_pat, nxt) = take(*nxt)?;
        // if it is a mapper operation
        if let InPlaceOp::Map = op {
          let term = match &*pat {
            AssignPattern::MapSet(map, key) => {
              let rhs = fun::Term::call(
                fun::Term::r#ref("Map/map"),
                [fun::Term::Var { nam: map.clone() }, key.clone().to_fun(), val.clone().to_fun()],
              );
              fun::Term::Let {
                pat: Box::new(fun::Pattern::Var(Some(map.clone()))),
                val: Box::new(rhs),
                nxt: Box::new(nxt),
              }
            }
            // `pat @= f` rebinds the pattern to `f(pat)`.
            _ => {
              let rhs = fun::Term::call(val.to_fun(), [pat.clone().into_fun().to_term()]);
              fun::Term::Let { pat: Box::new(pat.into_fun()), val: Box::new(rhs), nxt: Box::new(nxt) }
            }
          };
          return Ok(wrap(nxt_pat, term, ask));
        }
        // otherwise
        match *pat {
          AssignPattern::Var(var) => {
            let term = fun::Term::Let {
              pat: Box::new(fun::Pattern::Var(Some(var.clone()))),
              val: Box::new(fun::Term::Oper {
                opr: op.to_lang_op(),
                fst: Box::new(fun::Term::Var { nam: var }),
                snd: Box::new(val.to_fun()),
              }),
              nxt: Box::new(nxt),
            };
            wrap(nxt_pat, term, ask)
          }
          // `map[key] op= val` maps `λ%0 (%0 op val)` over the entry.
          AssignPattern::MapSet(map, key) => {
            let temp = Name::new("%0");
            let partial =
              Expr::Opr { op: op.to_lang_op(), lhs: Box::new(Expr::Var { nam: temp.clone() }), rhs: val };
            let map_fn = Expr::Lam { names: vec![(temp, false)], bod: Box::new(partial) };
            let map_term = fun::Term::call(
              fun::Term::r#ref("Map/map"),
              [fun::Term::Var { nam: map.clone() }, key.to_fun(), map_fn.to_fun()],
            );
            let term = fun::Term::Let {
              pat: Box::new(fun::Pattern::Var(Some(map))),
              val: Box::new(map_term),
              nxt: Box::new(nxt),
            };
            wrap(nxt_pat, term, ask)
          }
          _ => unreachable!(),
        }
      }
      Stmt::If { cond, then, otherwise, nxt } => {
        // Both branches must agree on how they end (both return or both
        // assign the same pattern).
        let (ask, pat, then, else_) = match (then.into_fun()?, otherwise.into_fun()?) {
          (StmtToFun::Return(t), StmtToFun::Return(e)) => (false, None, t, e),
          (StmtToFun::Assign(ask, tp, t), StmtToFun::Assign(ask_, ep, e)) if tp == ep => {
            (ask && ask_, Some(tp), t, e)
          }
          (StmtToFun::Assign(..), StmtToFun::Assign(..)) => {
            return Err("'if' branches end with different assignments.".to_string())?;
          }
          (StmtToFun::Return(..), StmtToFun::Assign(..)) => {
            return Err(
              "Expected 'else' branch from 'if' to return, but it ends with assignment.".to_string(),
            )?;
          }
          (StmtToFun::Assign(..), StmtToFun::Return(..)) => {
            return Err(
              "Expected 'else' branch from 'if' to end with assignment, but it returns.".to_string(),
            )?;
          }
        };
        // Numeric switch on the condition: arm 0 (cond == 0) is 'else',
        // the successor arm is 'then'.
        let arms = vec![else_, then];
        let term = fun::Term::Swt {
          arg: Box::new(cond.to_fun()),
          bnd: Some(Name::new("%pred")),
          with_bnd: vec![],
          with_arg: vec![],
          pred: Some(Name::new("%pred-1")),
          arms,
        };
        wrap_nxt_assign_stmt(term, nxt, pat, ask)?
      }
      Stmt::Match { arg, bnd, with_bnd, with_arg, arms, nxt } => {
        let arg = arg.to_fun();
        let mut fun_arms = vec![];
        let mut arms = arms.into_iter();
        // The first arm fixes how every arm must end; the rest are checked
        // against it.
        let fst = arms.next().unwrap();
        let (fst_ask, fst_pat, fst_rgt) = take(fst.rgt)?;
        let with_arg = with_arg.into_iter().map(Expr::to_fun).collect();
        fun_arms.push((fst.lft, vec![], fst_rgt));
        for arm in arms {
          let (arm_ask, arm_pat, arm_rgt) = take(arm.rgt)?;
          match (&arm_pat, &fst_pat) {
            (Some(arm_pat), Some(fst_pat)) if arm_pat != fst_pat || arm_ask != fst_ask => {
              return Err("'match' arms end with different assignments.".to_string())?;
            }
            (Some(_), None) => {
              return Err("Expected 'match' arms to end with assignment, but it returns.".to_string())?;
            }
            (None, Some(_)) => {
              return Err("Expected 'match' arms to return, but it ends with assignment.".to_string())?;
            }
            (Some(_), Some(_)) => fun_arms.push((arm.lft, vec![], arm_rgt)),
            (None, None) => fun_arms.push((arm.lft, vec![], arm_rgt)),
          }
        }
        let term = fun::Term::Mat { arg: Box::new(arg), bnd, with_bnd, with_arg, arms: fun_arms };
        wrap_nxt_assign_stmt(term, nxt, fst_pat, fst_ask)?
      }
      Stmt::Switch { arg, bnd, with_bnd, with_arg, arms, nxt } => {
        let arg = arg.to_fun();
        let mut fun_arms = vec![];
        let mut arms = arms.into_iter();
        let fst = arms.next().unwrap();
        let (fst_ask, fst_pat, fst) = take(fst)?;
        let with_arg = with_arg.into_iter().map(Expr::to_fun).collect();
        fun_arms.push(fst);
        for arm in arms {
          let (arm_ask, arm_pat, arm) = take(arm)?;
          match (&arm_pat, &fst_pat) {
            (Some(arm_pat), Some(fst_pat)) if arm_pat != fst_pat || arm_ask != fst_ask => {
              return Err("'switch' arms end with different assignments.".to_string())?;
            }
            (Some(_), None) => {
              return Err("Expected 'switch' arms to end with assignment, but it returns.".to_string())?;
            }
            (None, Some(_)) => {
              return Err("Expected 'switch' arms to return, but it ends with assignment.".to_string())?;
            }
            (Some(_), Some(_)) => fun_arms.push(arm),
            (None, None) => fun_arms.push(arm),
          }
        }
        // Predecessor variable bound in the last ("_") arm: "{bnd}-{n-1}".
        let pred = Some(Name::new(format!("{}-{}", bnd.clone().unwrap(), fun_arms.len() - 1)));
        let term = fun::Term::Swt { arg: Box::new(arg), bnd, with_bnd, with_arg, pred, arms: fun_arms };
        wrap_nxt_assign_stmt(term, nxt, fst_pat, fst_ask)?
      }
      Stmt::Fold { arg, bnd, with_bnd, with_arg, arms, nxt } => {
        let arg = arg.to_fun();
        let mut fun_arms = vec![];
        let mut arms = arms.into_iter();
        let fst = arms.next().unwrap();
        let (fst_ask, fst_pat, fst_rgt) = take(fst.rgt)?;
        fun_arms.push((fst.lft, vec![], fst_rgt));
        let with_arg = with_arg.into_iter().map(Expr::to_fun).collect();
        for arm in arms {
          let (arm_ask, arm_pat, arm_rgt) = take(arm.rgt)?;
          match (&arm_pat, &fst_pat) {
            (Some(arm_pat), Some(fst_pat)) if arm_pat != fst_pat || arm_ask != fst_ask => {
              return Err("'fold' arms end with different assignments.".to_string())?;
            }
            (Some(_), None) => {
              return Err("Expected 'fold' arms to end with assignment, but it returns.".to_string())?;
            }
            (None, Some(_)) => {
              return Err("Expected 'fold' arms to return, but it ends with assignment.".to_string())?;
            }
            (Some(_), Some(_)) => fun_arms.push((arm.lft, vec![], arm_rgt)),
            (None, None) => fun_arms.push((arm.lft, vec![], arm_rgt)),
          }
        }
        let term = fun::Term::Fold { arg: Box::new(arg), bnd, with_bnd, with_arg, arms: fun_arms };
        wrap_nxt_assign_stmt(term, nxt, fst_pat, fst_ask)?
      }
      Stmt::Bend { bnd, arg, cond, step, base, nxt } => {
        let arg = arg.into_iter().map(Expr::to_fun).collect();
        let cond = cond.to_fun();
        // Like `if`: 'when' and 'else' bodies must end the same way.
        let (ask, pat, step, base) = match (step.into_fun()?, base.into_fun()?) {
          (StmtToFun::Return(s), StmtToFun::Return(b)) => (false, None, s, b),
          (StmtToFun::Assign(aa, sp, s), StmtToFun::Assign(ba, bp, b)) if sp == bp => {
            (aa && ba, Some(sp), s, b)
          }
          (StmtToFun::Assign(..), StmtToFun::Assign(..)) => {
            return Err("'bend' branches end with different assignments.".to_string())?;
          }
          (StmtToFun::Return(..), StmtToFun::Assign(..)) => {
            return Err(
              "Expected 'else' branch from 'bend' to return, but it ends with assignment.".to_string(),
            )?;
          }
          (StmtToFun::Assign(..), StmtToFun::Return(..)) => {
            return Err(
              "Expected 'else' branch from 'bend' to end with assignment, but it returns.".to_string(),
            )?;
          }
        };
        let term =
          fun::Term::Bend { bnd, arg, cond: Box::new(cond), step: Box::new(step), base: Box::new(base) };
        wrap_nxt_assign_stmt(term, nxt, pat, ask)?
      }
      Stmt::With { typ, bod, nxt } => {
        let (ask, pat, bod) = take(*bod)?;
        let term = fun::Term::With { typ, bod: Box::new(bod) };
        wrap_nxt_assign_stmt(term, nxt, pat, ask)?
      }
      Stmt::Ask { pat, val, nxt: Some(nxt) } => {
        let (ask, nxt_pat, nxt) = take(*nxt)?;
        let term =
          fun::Term::Ask { pat: Box::new(pat.into_fun()), val: Box::new(val.to_fun()), nxt: Box::new(nxt) };
        wrap(nxt_pat, term, ask)
      }
      // A trailing `<-` is passed up as an assignment with `ask = true`.
      Stmt::Ask { pat, val, nxt: None } => {
        let pat = pat.into_fun();
        let val = val.to_fun();
        StmtToFun::Assign(true, pat, val)
      }
      Stmt::Open { typ, var, nxt } => {
        let (ask, nxt_pat, nxt) = take(*nxt)?;
        let term = fun::Term::Open { typ, var, bod: Box::new(nxt) };
        wrap(nxt_pat, term, ask)
      }
      Stmt::Use { nam, val, nxt } => {
        let (ask, nxt_pat, nxt) = take(*nxt)?;
        let term = fun::Term::Use { nam: Some(nam), val: Box::new(val.to_fun()), nxt: Box::new(nxt) };
        wrap(nxt_pat, term, ask)
      }
      Stmt::Return { term } => StmtToFun::Return(term.to_fun()),
      Stmt::LocalDef { def, nxt } => {
        let (ask, nxt_pat, nxt) = take(*nxt)?;
        let def = def.to_fun().map_err(|e| e.display_only_messages().to_string())?;
        let term = fun::Term::Def { def, nxt: Box::new(nxt) };
        wrap(nxt_pat, term, ask)
      }
      Stmt::Err => unreachable!(),
    };
    Ok(stmt_to_fun)
  }
}
impl Expr {
  /// Lowers this imperative expression into a functional `fun::Term`.
  ///
  /// By this point keyword arguments must already have been ordered into
  /// positional ones (the `kwargs` vectors are asserted empty).
  pub fn to_fun(self) -> fun::Term {
    match self {
      Expr::Era => fun::Term::Era,
      Expr::Var { nam } => fun::Term::Var { nam },
      Expr::Chn { nam } => fun::Term::Link { nam },
      Expr::Num { val } => fun::Term::Num { val },
      Expr::Call { fun, args, kwargs } => {
        assert!(kwargs.is_empty());
        let args = args.into_iter().map(Self::to_fun);
        fun::Term::call(fun.to_fun(), args)
      }
      // Multi-name lambdas become nested single-argument lambdas; the bool
      // flag selects an unscoped (`$`-linked) binder.
      Expr::Lam { names, bod } => names.into_iter().rfold(bod.to_fun(), |acc, (name, link)| fun::Term::Lam {
        tag: fun::Tag::Static,
        pat: Box::new(if link { fun::Pattern::Chn(name) } else { fun::Pattern::Var(Some(name)) }),
        bod: Box::new(acc),
      }),
      Expr::Opr { op, lhs, rhs } => {
        fun::Term::Oper { opr: op, fst: Box::new(lhs.to_fun()), snd: Box::new(rhs.to_fun()) }
      }
      Expr::Str { val } => fun::Term::Str { val },
      Expr::Lst { els } => fun::Term::List { els: els.into_iter().map(Self::to_fun).collect() },
      Expr::Tup { els } => fun::Term::Fan {
        fan: fun::FanKind::Tup,
        tag: fun::Tag::Static,
        els: els.into_iter().map(Self::to_fun).collect(),
      },
      Expr::Sup { els } => fun::Term::Fan {
        fan: fun::FanKind::Dup,
        tag: fun::Tag::Auto,
        els: els.into_iter().map(Self::to_fun).collect(),
      },
      Expr::Ctr { name, args, kwargs } => {
        assert!(kwargs.is_empty());
        let args = args.into_iter().map(Self::to_fun);
        fun::Term::call(fun::Term::Var { nam: name }, args)
      }
      // List comprehension: desugars to a fold over the list constructors,
      // rebuilding the list with `term` applied to each head (optionally
      // filtered by `cond`).
      Expr::LstMap { term, bind, iter, cond } => {
        const ITER_TAIL: &str = "%iter.tail";
        const ITER_HEAD: &str = "%iter.head";
        let cons_branch = fun::Term::call(
          fun::Term::r#ref(LCONS),
          [term.to_fun(), fun::Term::Var { nam: Name::new(ITER_TAIL) }],
        );
        // When filtered, a failing condition (== 0) just yields the tail.
        let cons_branch = if let Some(cond) = cond {
          fun::Term::Swt {
            arg: Box::new(cond.to_fun()),
            bnd: Some(Name::new("%comprehension")),
            with_bnd: vec![],
            with_arg: vec![],
            pred: Some(Name::new("%comprehension-1")),
            arms: vec![fun::Term::Var { nam: Name::new(ITER_TAIL) }, cons_branch],
          }
        } else {
          cons_branch
        };
        let cons_branch = fun::Term::Let {
          pat: Box::new(fun::Pattern::Var(Some(bind))),
          val: Box::new(fun::Term::Var { nam: Name::new(ITER_HEAD) }),
          nxt: Box::new(cons_branch),
        };
        fun::Term::Fold {
          bnd: Some(Name::new("%iter")),
          arg: Box::new(iter.to_fun()),
          with_bnd: vec![],
          with_arg: vec![],
          arms: vec![
            (Some(Name::new(LNIL)), vec![], fun::Term::r#ref(LNIL)),
            (Some(Name::new(LCONS)), vec![], cons_branch),
          ],
        }
      }
      Expr::Map { entries } => map_init(entries),
      // Map accesses are desugared away before lowering (presumably by the
      // `gen_map_get` pass — confirm), so this is unreachable here.
      Expr::MapGet { .. } => unreachable!(),
      Expr::TreeNode { left, right } => {
        let left = left.to_fun();
        let right = right.to_fun();
        fun::Term::call(fun::Term::r#ref("Tree/Node"), [left, right])
      }
      Expr::TreeLeaf { val } => {
        let val = val.to_fun();
        fun::Term::app(fun::Term::r#ref("Tree/Leaf"), val)
      }
    }
  }
}
/// Builds a map literal as successive `Map/set` calls threaded over `Map/empty`.
fn map_init(entries: Vec<(Expr, Expr)>) -> fun::Term {
  entries.into_iter().fold(fun::Term::Ref { nam: fun::Name::new("Map/empty") }, |acc, (key, value)| {
    fun::Term::call(fun::Term::Ref { nam: fun::Name::new("Map/set") }, [acc, key.to_fun(), value.to_fun()])
  })
}
/// Combines a compiled statement (`term`) with its optional continuation (`nxt`).
///
/// If the statement ended with an assignment (`pat` is `Some`), `term` is bound
/// to the pattern in front of the continuation, via `ask` when the monadic flag
/// is set, via `let` otherwise. If it ended with a return, a continuation is an
/// error, since the returned value would never be used.
fn wrap_nxt_assign_stmt(
  term: fun::Term,
  nxt: Option<Box<Stmt>>,
  pat: Option<fun::Pattern>,
  ask: bool,
) -> Result<StmtToFun, String> {
  if let Some(nxt) = nxt {
    if let Some(pat) = pat {
      let (ask_nxt, nxt_pat, nxt) = take(*nxt)?;
      // `ask` selects between a monadic bind and a plain `let`.
      let term = if ask {
        fun::Term::Ask { pat: Box::new(pat), val: Box::new(term), nxt: Box::new(nxt) }
      } else {
        fun::Term::Let { pat: Box::new(pat), val: Box::new(term), nxt: Box::new(nxt) }
      };
      Ok(wrap(nxt_pat, term, ask_nxt))
    } else {
      Err("Statement ends with return but is not at end of function.".to_string())?
    }
  } else if let Some(pat) = pat {
    Ok(StmtToFun::Assign(ask, pat, term))
  } else {
    Ok(StmtToFun::Return(term))
  }
}
| rust | Apache-2.0 | d184863f03e796d1d657958a51dd6dd331ade92d | 2026-01-04T15:41:39.511038Z | false |
HigherOrderCO/Bend | https://github.com/HigherOrderCO/Bend/blob/d184863f03e796d1d657958a51dd6dd331ade92d/src/imp/mod.rs | src/imp/mod.rs | pub mod gen_map_get;
mod order_kwargs;
pub mod parser;
pub mod to_fun;
use crate::fun::{Name, Num, Op, Source, Type};
use interner::global::GlobalString;
/// An expression of the imperative ("imp") surface syntax.
/// Each variant's comment sketches the concrete syntax it is parsed from.
#[derive(Clone, Debug)]
pub enum Expr {
  // "*"
  Era,
  // [a-zA-Z_]+
  Var { nam: Name },
  // "$" [a-zA-Z_]+
  Chn { nam: Name },
  // [0-9_]+
  Num { val: Num },
  // {fun}({args},{kwargs},)
  Call { fun: Box<Expr>, args: Vec<Expr>, kwargs: Vec<(Name, Expr)> },
  // "lambda" {names}* ":" {bod}
  Lam { names: Vec<(Name, bool)>, bod: Box<Expr> },
  // {lhs} {op} {rhs}
  Opr { op: Op, lhs: Box<Expr>, rhs: Box<Expr> },
  // "\"" ... "\""
  Str { val: GlobalString },
  // "[" ... "]"
  Lst { els: Vec<Expr> },
  // "(" ... ")"
  Tup { els: Vec<Expr> },
  // "{" {els} "}"
  Sup { els: Vec<Expr> },
  // {name} "{" {kwargs} "}"
  Ctr { name: Name, args: Vec<Expr>, kwargs: Vec<(Name, Expr)> },
  // "[" {term} "for" {bind} "in" {iter} ("if" {cond})? "]"
  LstMap { term: Box<Expr>, bind: Name, iter: Box<Expr>, cond: Option<Box<Expr>> },
  // "{" {entries} "}"
  Map { entries: Vec<(Expr, Expr)> },
  // {map} "[" {key} "]"
  MapGet { nam: Name, key: Box<Expr> },
  // "![" {left} "," {right} "]"
  TreeNode { left: Box<Expr>, right: Box<Expr> },
  // "!" {val}
  TreeLeaf { val: Box<Expr> },
}
/// One `case {lft}: {rgt}` arm of a `match`/`fold` statement.
#[derive(Clone, Debug)]
pub struct MatchArm {
  // Matched constructor name (`None` presumably the wildcard case — confirm).
  pub lft: Option<Name>,
  // Arm body.
  pub rgt: Stmt,
}
/// Left-hand-side patterns allowed in assignments and `ask` bindings.
#[derive(Clone, Debug, Default)]
pub enum AssignPattern {
  // "*"
  #[default]
  Eraser,
  // [a-zA-Z_]+
  Var(Name),
  // "$" [a-zA-Z_]+
  Chn(Name),
  // "(" ... ")"
  Tup(Vec<AssignPattern>),
  // "{" ... "}"
  Sup(Vec<AssignPattern>),
  // {name} "[" {expr} "]"
  MapSet(Name, Expr),
}
/// Operators usable in compound in-place assignments (e.g. `x += v`).
/// `Map` applies a function over a map entry instead of a numeric op.
#[derive(Clone, Debug)]
pub enum InPlaceOp {
  Add,
  Sub,
  Mul,
  Div,
  And,
  Or,
  Xor,
  Map,
}
/// A statement of the imperative ("imp") syntax.
/// Each variant's comment sketches the grammar it is parsed from; `nxt` is the
/// statement that follows it in the block, when one is allowed.
#[derive(Clone, Debug, Default)]
pub enum Stmt {
  // {pat} = {val} ";"? {nxt}
  Assign {
    pat: AssignPattern,
    val: Box<Expr>,
    nxt: Option<Box<Stmt>>,
  },
  // {var} += {val} ";"? {nxt}
  InPlace {
    op: InPlaceOp,
    pat: Box<AssignPattern>,
    val: Box<Expr>,
    nxt: Box<Stmt>,
  },
  // "if" {cond} ":"
  //  {then}
  // "else" ":"
  //  {otherwise}
  // {nxt}?
  If {
    cond: Box<Expr>,
    then: Box<Stmt>,
    otherwise: Box<Stmt>,
    nxt: Option<Box<Stmt>>,
  },
  // "match" ({bind} "=")? {arg} ("with" (({bind}) | ({bind} "=" {arg}) ","?)*)? ":"
  //   "case" {lft} ":"
  //     {rgt}
  //   ...
  // <nxt>?
  Match {
    arg: Box<Expr>,
    bnd: Option<Name>,
    with_bnd: Vec<Option<Name>>,
    with_arg: Vec<Expr>,
    arms: Vec<MatchArm>,
    nxt: Option<Box<Stmt>>,
  },
  // "switch" ({bind} "=")? {arg}("with" (({bind}) | ({bind} "=" {arg}) ","?)*)? ":"
  //   "case" 0 ":"
  //     {stmt}
  //   ...
  //   "case" _ ":"
  //     {stmt}
  // <nxt>?
  Switch {
    arg: Box<Expr>,
    bnd: Option<Name>,
    with_bnd: Vec<Option<Name>>,
    with_arg: Vec<Expr>,
    arms: Vec<Stmt>,
    nxt: Option<Box<Stmt>>,
  },
  // "bend" ({bind} ("=" {init})? ","?)*
  // "when" {cond} ":"
  //   {step}
  // "else" ":"
  //   {base}
  // {nxt}}?
  Bend {
    bnd: Vec<Option<Name>>,
    arg: Vec<Expr>,
    cond: Box<Expr>,
    step: Box<Stmt>,
    base: Box<Stmt>,
    nxt: Option<Box<Stmt>>,
  },
  // "fold" ({bind} "=")? {arg} ("with" (({bind}) | ({bind} "=" {arg}) ","?)*)? ":"
  //   case {lft} ":"
  //     {rgt}
  //   ...
  // {nxt}?
  Fold {
    arg: Box<Expr>,
    bnd: Option<Name>,
    with_bnd: Vec<Option<Name>>,
    with_arg: Vec<Expr>,
    arms: Vec<MatchArm>,
    nxt: Option<Box<Stmt>>,
  },
  // "with" {typ} ":"
  //   "ask" {id} = {expr} ";"?
  //   ...
  // <nxt>?
  With {
    typ: Name,
    bod: Box<Stmt>,
    nxt: Option<Box<Stmt>>,
  },
  // {pat} <- {val} ";"? {nxt}
  Ask {
    pat: AssignPattern,
    val: Box<Expr>,
    nxt: Option<Box<Stmt>>,
  },
  // "return" {expr} ";"?
  Return {
    term: Box<Expr>,
  },
  // "open" {typ} ":" {var} ";"? {nxt}
  Open {
    typ: Name,
    var: Name,
    nxt: Box<Stmt>,
  },
  // "use" {name} "=" {expr} ";"? {nxt}
  Use {
    nam: Name,
    val: Box<Expr>,
    nxt: Box<Stmt>,
  },
  // {def} {nxt}
  LocalDef {
    def: Box<Definition>,
    nxt: Box<Stmt>,
  },
  // Placeholder produced on parse errors.
  #[default]
  Err,
}
// "def" {name} "(" {params} ")" ":" {body}
/// A function definition in the imperative syntax.
#[derive(Clone, Debug)]
pub struct Definition {
  pub name: Name,
  pub typ: Type,
  // NOTE(review): presumably whether this def should be type-checked —
  // confirm at use sites.
  pub check: bool,
  pub args: Vec<Name>,
  pub body: Stmt,
  pub source: Source,
}
impl InPlaceOp {
  /// Maps this compound-assignment operator to the corresponding numeric `Op`.
  ///
  /// `Map` has no numeric counterpart — map-update assignments are desugared
  /// separately (see `Stmt::into_fun`) — so reaching it here is a bug.
  pub fn to_lang_op(self) -> Op {
    match self {
      InPlaceOp::Add => Op::ADD,
      InPlaceOp::Sub => Op::SUB,
      InPlaceOp::Mul => Op::MUL,
      InPlaceOp::Div => Op::DIV,
      InPlaceOp::And => Op::AND,
      InPlaceOp::Or => Op::OR,
      InPlaceOp::Xor => Op::XOR,
      InPlaceOp::Map => unreachable!(),
    }
  }
}
| rust | Apache-2.0 | d184863f03e796d1d657958a51dd6dd331ade92d | 2026-01-04T15:41:39.511038Z | false |
HigherOrderCO/Bend | https://github.com/HigherOrderCO/Bend/blob/d184863f03e796d1d657958a51dd6dd331ade92d/src/hvm/prune.rs | src/hvm/prune.rs | use super::{net_trees, tree_children};
use crate::maybe_grow;
use hvm::ast::{Book, Tree};
use std::collections::HashSet;
/// Removes from `book` every definition that is not reachable from the given
/// entrypoint definitions.
pub fn prune_hvm_book(book: &mut Book, entrypoints: &[String]) {
  // Start with every def marked unvisited, then walk from the entrypoints.
  let mut state = PruneState { book, unvisited: book.defs.keys().map(|x| x.to_owned()).collect() };
  for name in entrypoints {
    state.visit_def(name);
  }
  // Moving `unvisited` out drops `state` (and its shared borrow of `book`),
  // allowing the mutable removals below.
  let unvisited = state.unvisited;
  for name in unvisited {
    book.defs.remove(&name);
  }
}
/// Reachability-walk state: the book being scanned and the defs not yet seen.
struct PruneState<'a> {
  book: &'a Book,
  unvisited: HashSet<String>,
}
impl PruneState<'_> {
  /// Marks `name` as visited and recursively visits everything it references.
  /// No-op if `name` was already visited (or is not a known def).
  fn visit_def(&mut self, name: &str) {
    if self.unvisited.remove(name) {
      for tree in net_trees(&self.book.defs[name]) {
        self.visit_tree(tree);
      }
    }
  }
  /// Follows `Ref` leaves into their definitions, otherwise recurses into the
  /// children; `maybe_grow` guards the recursion (presumably growing the
  /// stack for very deep trees).
  fn visit_tree(&mut self, tree: &Tree) {
    maybe_grow(|| {
      if let Tree::Ref { nam, .. } = tree {
        self.visit_def(nam);
      } else {
        tree_children(tree).for_each(|t| self.visit_tree(t));
      }
    })
  }
}
| rust | Apache-2.0 | d184863f03e796d1d657958a51dd6dd331ade92d | 2026-01-04T15:41:39.511038Z | false |
HigherOrderCO/Bend | https://github.com/HigherOrderCO/Bend/blob/d184863f03e796d1d657958a51dd6dd331ade92d/src/hvm/eta_reduce.rs | src/hvm/eta_reduce.rs | //! Carries out simple eta-reduction, to reduce the amount of rewrites at
//! runtime.
//!
//! ### Eta-equivalence
//!
//! In interaction combinators, there are some nets that are equivalent and
//! have no observable difference
//!
//! 
//!
//! This module implements the eta-equivalence rule at the top-left of the image
//! above
//!
//! ```txt
//! /|-, ,-|\ eta_reduce
//! ---| | X | |-- ~~~~~~~~~~~~> -------------
//! \|-' '-|/
//! ```
//!
//! In hvm-core's AST representation, this reduction looks like this
//!
//! ```txt
//! {lab x y} ... {lab x y} ~~~~~~~~> x ..... x
//! ```
//!
//! Essentially, both occurrences of the same constructor are replaced by a
//! variable.
//!
//! ### The algorithm
//!
//! The code uses a two-pass O(n) algorithm, where `n` is the amount of nodes
//! in the AST
//!
//! In the first pass, a node-list is built out of an ordered traversal of the
//! AST. Crucially, the node list stores variable offsets instead of the
//! variable's names Since the AST's order is consistent, the ordering of nodes
//! in the node list can be reproduced with a traversal.
//!
//! This means that each occurrence of a variable is encoded with the offset in
//! the node-list to the _other_ occurrence of the variable.
//!
//! For example, if we start with the net: `[(x y) (x y)]`
//!
//! The resulting node list will look like this:
//!
//! `[Ctr(1), Ctr(0), Var(3), Var(3), Ctr(0), Var(-3), Var(-3)]`
//!
//! The second pass uses the node list to find repeated constructors. If a
//! constructor's children are both variables with the same offset, then we
//! lookup that offset relative to the constructor. If it is equal to the first
//! constructor, it means both of them are equal and they can be replaced with a
//! variable.
//!
//! The pass also reduces subnets such as `(* *) -> *`
use crate::hvm::net_trees_mut;
use super::{tree_children, tree_children_mut};
use core::ops::RangeFrom;
use hvm::ast::{Net, Tree};
use std::collections::HashMap;
/// Carries out simple eta-reduction
///
/// Pass 1 records a `NodeType` per AST node in traversal order; pass 2
/// re-walks the same order and rewrites eta-redexes in place (see the module
/// docs for the encoding).
pub fn eta_reduce_hvm_net(net: &mut Net) {
  let mut phase1 = Phase1::default();
  for tree in net_trees_mut(net) {
    phase1.walk_tree(tree);
  }
  let mut phase2 = Phase2 { nodes: phase1.nodes, index: 0.. };
  for tree in net_trees_mut(net) {
    phase2.reduce_tree(tree);
  }
}
/// Summary of one AST node for the purposes of eta-reduction.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum NodeType {
  /// A constructor; the label distinguishes `Con` (0) from `Dup` (1).
  Ctr(u16),
  /// A variable occurrence: signed node-list offset to its twin occurrence.
  Var(isize),
  Era,
  Other,
  /// First occurrence of a variable, patched to `Var` once the twin is seen.
  Hole,
}
/// Pass 1: builds the node list described in the module docs.
#[derive(Default, Debug)]
struct Phase1<'a> {
  /// Maps a variable name to the node-list index of its first occurrence.
  vars: HashMap<&'a str, usize>,
  /// One entry per AST node, in traversal order.
  nodes: Vec<NodeType>,
}
impl<'a> Phase1<'a> {
  /// Appends this tree's nodes to the node list in pre-order.
  fn walk_tree(&mut self, tree: &'a Tree) {
    match tree {
      Tree::Con { fst, snd } => {
        self.nodes.push(NodeType::Ctr(0));
        self.walk_tree(fst);
        self.walk_tree(snd);
      }
      Tree::Dup { fst, snd } => {
        self.nodes.push(NodeType::Ctr(1));
        self.walk_tree(fst);
        self.walk_tree(snd);
      }
      Tree::Var { nam } => {
        if let Some(i) = self.vars.get(&**nam) {
          // Second occurrence: record the signed offset to the twin in both
          // entries, replacing the first occurrence's `Hole`.
          let j = self.nodes.len() as isize;
          self.nodes.push(NodeType::Var(*i as isize - j));
          self.nodes[*i] = NodeType::Var(j - *i as isize);
        } else {
          // First occurrence: leave a hole, patched when the twin shows up.
          self.vars.insert(nam, self.nodes.len());
          self.nodes.push(NodeType::Hole);
        }
      }
      Tree::Era => self.nodes.push(NodeType::Era),
      _ => {
        self.nodes.push(NodeType::Other);
        for i in tree_children(tree) {
          self.walk_tree(i);
        }
      }
    }
  }
}
/// Pass 2: consumes the node list in the same traversal order as pass 1,
/// rewriting eta-reducible subnets in place.
struct Phase2 {
  nodes: Vec<NodeType>,
  /// Next node-list index; advances one step per visited node.
  index: RangeFrom<usize>,
}
impl Phase2 {
  /// Reduces a `Con`/`Dup` node at node-list index `idx`, returning the
  /// (possibly rewritten) node's type.
  fn reduce_ctr(&mut self, tree: &mut Tree, idx: usize) -> NodeType {
    if let Tree::Con { fst, snd } | Tree::Dup { fst, snd } = tree {
      let fst_typ = self.reduce_tree(fst);
      let snd_typ = self.reduce_tree(snd);
      // If both children are variables with the same offset, and their parent is a ctr of the same label,
      // then they are eta-reducible and we replace the current node with the first variable.
      match (fst_typ, snd_typ) {
        (NodeType::Var(off_lft), NodeType::Var(off_rgt)) => {
          if off_lft == off_rgt && self.nodes[idx] == self.nodes[(idx as isize + off_lft) as usize] {
            let Tree::Var { nam } = fst.as_mut() else { unreachable!() };
            *tree = Tree::Var { nam: std::mem::take(nam) };
            return NodeType::Var(off_lft);
          }
        }
        // `(* *)` collapses to a single eraser.
        (NodeType::Era, NodeType::Era) => {
          *tree = Tree::Era;
          return NodeType::Era;
        }
        _ => {}
      }
      self.nodes[idx]
    } else {
      unreachable!()
    }
  }
  /// Walks the tree in the same order as phase 1, consuming one node-list
  /// index per node and reducing constructor pairs along the way.
  fn reduce_tree(&mut self, tree: &mut Tree) -> NodeType {
    let idx = self.index.next().unwrap();
    match tree {
      Tree::Con { .. } | Tree::Dup { .. } => self.reduce_ctr(tree, idx),
      _ => {
        for child in tree_children_mut(tree) {
          self.reduce_tree(child);
        }
        self.nodes[idx]
      }
    }
  }
}
| rust | Apache-2.0 | d184863f03e796d1d657958a51dd6dd331ade92d | 2026-01-04T15:41:39.511038Z | false |
HigherOrderCO/Bend | https://github.com/HigherOrderCO/Bend/blob/d184863f03e796d1d657958a51dd6dd331ade92d/src/hvm/add_recursive_priority.rs | src/hvm/add_recursive_priority.rs | use super::tree_children;
use crate::maybe_grow;
use hvm::ast::{Book, Net, Tree};
use std::collections::{HashMap, HashSet};
/// Flags redexes that feed recursive cycles as high-priority.
///
/// For every cycle of mutually recursive definitions, each definition whose
/// redex bag references the next definition in the cycle more than once gets
/// those redexes marked (see `add_priority_next_in_cycle`).
pub fn add_recursive_priority(book: &mut Book) {
  // Direct dependencies
  let deps = book.defs.iter().map(|(nam, net)| (nam.clone(), dependencies(net))).collect::<HashMap<_, _>>();
  // Recursive cycles
  let cycles = cycles(&deps);
  for cycle in cycles {
    // For each function in the cycle, if there are redexes with the
    // next ref in the cycle, add a priority to one of those redexes.
    for i in 0..cycle.len() {
      let cur = book.defs.get_mut(&cycle[i]).unwrap();
      let nxt = &cycle[(i + 1) % cycle.len()];
      add_priority_next_in_cycle(cur, nxt);
    }
  }
}
fn add_priority_next_in_cycle(net: &mut Net, nxt: &String) {
let mut count = 0;
// Count the number of recursive refs
for (_, a, b) in net.rbag.iter() {
if let Tree::Ref { nam } = a {
if nam == nxt {
count += 1;
}
}
if let Tree::Ref { nam } = b {
if nam == nxt {
count += 1;
}
}
}
// If there are more than one recursive ref, add a priority to them.
if count > 1 {
for (pri, a, b) in net.rbag.iter_mut().rev() {
if let Tree::Ref { nam } = a {
if nam == nxt {
*pri = true;
}
}
if let Tree::Ref { nam } = b {
if nam == nxt {
*pri = true;
}
}
}
}
}
// Direct-dependency graph between definition names.
type DepGraph = HashMap<String, HashSet<String>>;
// Each cycle is the list of definition names along the cycle path.
type Cycles = Vec<Vec<String>>;
/// Find all cycles in the dependency graph.
pub fn cycles(deps: &DepGraph) -> Cycles {
  let mut cycles = vec![];
  let mut stack = vec![];
  let mut visited = HashSet::new();
  for nam in deps.keys() {
    if !visited.contains(nam) {
      find_cycles(deps, nam, &mut visited, &mut stack, &mut cycles);
    }
  }
  cycles
}
/// DFS helper for [`cycles`]: `stack` tracks the current path, and a cycle is
/// recorded when the current name already appears on it.
fn find_cycles(
  deps: &DepGraph,
  nam: &String,
  visited: &mut HashSet<String>,
  stack: &mut Vec<String>,
  cycles: &mut Cycles,
) {
  maybe_grow(|| {
    // Check if the current ref is already in the stack, which indicates a cycle.
    if let Some(cycle_start) = stack.iter().position(|n| n == nam) {
      // If found, add the cycle to the cycles vector.
      cycles.push(stack[cycle_start..].to_vec());
      return;
    }
    // If the ref has not been visited yet, mark it as visited.
    if visited.insert(nam.clone()) {
      // Add the current ref to the stack to keep track of the path.
      stack.push(nam.clone());
      // Get the dependencies of the current ref.
      if let Some(dependencies) = deps.get(nam) {
        // Search for cycles from each dependency.
        for dep in dependencies {
          find_cycles(deps, dep, visited, stack, cycles);
        }
      }
      stack.pop();
    }
  })
}
/// Gather the set of net that this net directly depends on (has a ref in the net).
/// Scans the root tree and both sides of every redex.
fn dependencies(net: &Net) -> HashSet<String> {
  let mut deps = HashSet::new();
  dependencies_tree(&net.root, &mut deps);
  for (_, a, b) in &net.rbag {
    dependencies_tree(a, &mut deps);
    dependencies_tree(b, &mut deps);
  }
  deps
}
/// Records `tree`'s referenced name if it is a `Ref`, otherwise scans its
/// children recursively.
fn dependencies_tree(tree: &Tree, deps: &mut HashSet<String>) {
  match tree {
    Tree::Ref { nam, .. } => {
      deps.insert(nam.clone());
    }
    _ => tree_children(tree).for_each(|subtree| dependencies_tree(subtree, deps)),
  }
}
| rust | Apache-2.0 | d184863f03e796d1d657958a51dd6dd331ade92d | 2026-01-04T15:41:39.511038Z | false |
HigherOrderCO/Bend | https://github.com/HigherOrderCO/Bend/blob/d184863f03e796d1d657958a51dd6dd331ade92d/src/hvm/inline.rs | src/hvm/inline.rs | use super::{net_trees_mut, tree_children, tree_children_mut};
use crate::maybe_grow;
use core::ops::BitOr;
use hvm::ast::{Book, Net, Tree};
use std::collections::{HashMap, HashSet};
/// Inlines trivially-inlinable definitions (no redexes, leaf root) into every
/// definition of the book.
///
/// Returns the set of definition names that changed, or an error when an
/// infinite reference cycle is detected.
pub fn inline_hvm_book(book: &mut Book) -> Result<HashSet<String>, String> {
  let mut state = InlineState::default();
  state.populate_inlinees(book)?;
  let mut all_changed = HashSet::new();
  for (name, net) in &mut book.defs {
    let mut inlined = false;
    for tree in net_trees_mut(net) {
      inlined |= state.inline_into(tree);
    }
    if inlined {
      all_changed.insert(name.to_owned());
    }
  }
  Ok(all_changed)
}
/// Maps each inlinable definition name to the (fully resolved) tree that
/// replaces references to it.
#[derive(Debug, Default)]
struct InlineState {
  inlinees: HashMap<String, Tree>,
}
impl InlineState {
  /// For every inlinable def, resolves chains of refs (`@a = @b`, `@b = @c`, …)
  /// to their final target tree, using Floyd's tortoise-and-hare to detect
  /// infinite ref cycles.
  fn populate_inlinees(&mut self, book: &Book) -> Result<(), String> {
    for (name, net) in &book.defs {
      if should_inline(net) {
        // Detect cycles with tortoise and hare algorithm
        let mut hare = &net.root;
        let mut tortoise = &net.root;
        // Whether or not the tortoise should take a step
        let mut parity = false;
        while let Tree::Ref { nam, .. } = hare {
          let Some(net) = &book.defs.get(nam) else { break };
          if should_inline(net) {
            hare = &net.root;
          } else {
            break;
          }
          // The tortoise advances every other iteration; if it ever catches
          // the hare's current ref, the chain loops forever.
          if parity {
            let Tree::Ref { nam: tortoise_nam, .. } = tortoise else { unreachable!() };
            if tortoise_nam == nam {
              Err(format!("infinite reference cycle in `@{nam}`"))?;
            }
            tortoise = &book.defs[tortoise_nam].root;
          }
          parity = !parity;
        }
        self.inlinees.insert(name.to_owned(), hare.clone());
      }
    }
    Ok(())
  }
  /// Replaces refs to inlinees by their resolved trees; returns whether
  /// anything in `tree` was changed.
  fn inline_into(&self, tree: &mut Tree) -> bool {
    maybe_grow(|| {
      let Tree::Ref { nam, .. } = &*tree else {
        return tree_children_mut(tree).map(|t| self.inline_into(t)).fold(false, bool::bitor);
      };
      if let Some(inlined) = self.inlinees.get(nam) {
        *tree = inlined.clone();
        true
      } else {
        false
      }
    })
  }
}
}
/// A def can be inlined when it has no redexes and its root is a leaf node
/// (i.e. a node with no children).
fn should_inline(net: &Net) -> bool {
  net.rbag.is_empty() && tree_children(&net.root).next().is_none()
}
| rust | Apache-2.0 | d184863f03e796d1d657958a51dd6dd331ade92d | 2026-01-04T15:41:39.511038Z | false |
HigherOrderCO/Bend | https://github.com/HigherOrderCO/Bend/blob/d184863f03e796d1d657958a51dd6dd331ade92d/src/hvm/mutual_recursion.rs | src/hvm/mutual_recursion.rs | use super::tree_children;
use crate::{
diagnostics::{Diagnostics, WarningType, ERR_INDENT_SIZE},
fun::transform::definition_merge::MERGE_SEPARATOR,
maybe_grow,
};
use hvm::ast::{Book, Tree};
use indexmap::{IndexMap, IndexSet};
use std::fmt::Debug;
type Ref = String;
type Stack<T> = Vec<T>;
type RefSet = IndexSet<Ref>;
/// Reference graph between definitions: maps a def name to the set of names it
/// actively references. `IndexMap`/`IndexSet` keep iteration deterministic.
#[derive(Default)]
pub struct Graph(IndexMap<Ref, RefSet>);
/// Builds the active-reference graph of `book` and emits a
/// `WarningType::RecursionCycle` book warning listing any cycles found.
pub fn check_cycles(book: &Book, diagnostics: &mut Diagnostics) -> Result<(), Diagnostics> {
  let graph = Graph::from(book);
  let cycles = graph.cycles();
  if !cycles.is_empty() {
    let msg = format!(include_str!("mutual_recursion.message"), cycles = show_cycles(cycles));
    diagnostics.add_book_warning(msg.as_str(), WarningType::RecursionCycle);
  }
  diagnostics.fatal(())
}
/// Renders up to 5 cycles as indented `a -> b -> a` lines, expanding merged
/// definition names and hiding compiler-generated `__C` combinators.
fn show_cycles(mut cycles: Vec<Vec<Ref>>) -> String {
  // NOTE(review): the "and N other cycles" tail uses the count *before* the
  // merge expansion below, while the listed cycles are post-expansion —
  // confirm this is intended.
  let tail = if cycles.len() > 5 {
    format!("\n{:ERR_INDENT_SIZE$}and {} other cycles...", "", cycles.len() - 5)
  } else {
    String::new()
  };
  cycles = cycles.into_iter().flat_map(combinations_from_merges).collect::<Vec<_>>();
  let mut cycles = cycles
    .iter()
    .take(5)
    .map(|cycle| {
      // Append the first name again so the printed path visibly closes the loop.
      let cycle_str = cycle
        .iter()
        .filter(|nam| !nam.contains("__C"))
        .chain(cycle.first())
        .cloned()
        .collect::<Vec<_>>()
        .join(" -> ");
      format!("{:ERR_INDENT_SIZE$}* {}", "", cycle_str)
    })
    .collect::<Vec<String>>()
    .join("\n");
  cycles.push_str(&tail);
  cycles
}
impl Graph {
  /// Finds all reference cycles in the graph via depth-first search.
  pub fn cycles(&self) -> Vec<Vec<Ref>> {
    let mut cycles = Vec::new();
    let mut stack = Stack::new();
    let mut visited = RefSet::new();
    for r#ref in self.0.keys() {
      if !visited.contains(r#ref) {
        self.find_cycles(r#ref, &mut visited, &mut stack, &mut cycles);
      }
    }
    cycles
  }
  /// DFS helper: `stack` holds the current path; a cycle is recorded whenever
  /// the current ref already appears on it.
  fn find_cycles(
    &self,
    r#ref: &Ref,
    visited: &mut RefSet,
    stack: &mut Stack<Ref>,
    cycles: &mut Vec<Vec<Ref>>,
  ) {
    // Check if the current ref is already in the stack, which indicates a cycle.
    if let Some(cycle_start) = stack.iter().position(|n| n == r#ref) {
      // If found, add the cycle to the cycles vector.
      cycles.push(stack[cycle_start..].to_vec());
      return;
    }
    // If the ref has not been visited yet, mark it as visited.
    if visited.insert(r#ref.clone()) {
      // Add the current ref to the stack to keep track of the path.
      stack.push(r#ref.clone());
      // Get the dependencies of the current ref.
      if let Some(dependencies) = self.get(r#ref) {
        // Search for cycles from each dependency.
        for dep in dependencies {
          self.find_cycles(dep, visited, stack, cycles);
        }
      }
      stack.pop();
    }
  }
}
/// Collect active refs from the tree.
///
/// For `Con` nodes only the second port (`snd`) is scanned — refs behind a
/// constructor's first port are skipped, i.e. not considered "active".
fn collect_refs(current: Ref, tree: &Tree, graph: &mut Graph) {
  maybe_grow(|| match tree {
    Tree::Ref { nam, .. } => graph.add(current, nam.clone()),
    Tree::Con { fst: _, snd } => collect_refs(current.clone(), snd, graph),
    tree => {
      for subtree in tree_children(tree) {
        collect_refs(current.clone(), subtree, graph);
      }
    }
  });
}
impl From<&Book> for Graph {
  /// Builds the active-reference graph of a compiled book: root trees plus
  /// direct refs on either side of every redex.
  fn from(book: &Book) -> Self {
    let mut graph = Self::new();
    for (r#ref, net) in book.defs.iter() {
      // Collect active refs from the root.
      collect_refs(r#ref.clone(), &net.root, &mut graph);
      // Collect active refs from redexes.
      for (_, left, right) in net.rbag.iter() {
        if let Tree::Ref { nam, .. } = left {
          graph.add(r#ref.clone(), nam.clone());
        }
        if let Tree::Ref { nam, .. } = right {
          graph.add(r#ref.clone(), nam.clone());
        }
      }
    }
    graph
  }
}
impl Graph {
  pub fn new() -> Self {
    Self::default()
  }
  /// Registers the edge `ref -> dependency`, also ensuring `dependency`
  /// itself exists as a graph key (so leaf defs show up when iterating).
  pub fn add(&mut self, r#ref: Ref, dependency: Ref) {
    self.0.entry(r#ref).or_default().insert(dependency.clone());
    self.0.entry(dependency).or_default();
  }
  /// The set of names directly referenced by `ref`, if `ref` is in the graph.
  pub fn get(&self, r#ref: &Ref) -> Option<&RefSet> {
    self.0.get(r#ref)
  }
}
impl Debug for Graph {
  // Prints as `Graph{...}`, delegating to the inner map's `Debug`.
  fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
    write!(f, "Graph{:?}", self.0)
  }
}
/// Merged definitions carry names glued with `MERGE_SEPARATOR`; a cycle through
/// such a name really stands for a cycle through either original name. Expands
/// one cycle into every combination of the un-merged names.
///
/// NOTE(review): only the first separator occurrence is split, so a name merged
/// from 3+ definitions keeps its tail fused — confirm that is acceptable.
fn combinations_from_merges(cycle: Vec<Ref>) -> Vec<Vec<Ref>> {
  let mut combinations: Vec<Vec<Ref>> = vec![vec![]];
  for r#ref in cycle {
    if let Some(index) = r#ref.find(MERGE_SEPARATOR) {
      let (left, right) = r#ref.split_at(index);
      let right = &right[MERGE_SEPARATOR.len()..]; // skip merge separator
      // Fork every partial combination into a `left` copy and a `right` copy.
      let mut new_combinations = Vec::new();
      for combination in &combinations {
        let mut left_comb = combination.clone();
        left_comb.push(left.to_string());
        new_combinations.push(left_comb);
        let mut right_comb = combination.clone();
        right_comb.push(right.to_string());
        new_combinations.push(right_comb);
      }
      combinations = new_combinations;
    } else {
      for combination in &mut combinations {
        combination.push(r#ref.clone());
      }
    }
  }
  combinations
}
| rust | Apache-2.0 | d184863f03e796d1d657958a51dd6dd331ade92d | 2026-01-04T15:41:39.511038Z | false |
HigherOrderCO/Bend | https://github.com/HigherOrderCO/Bend/blob/d184863f03e796d1d657958a51dd6dd331ade92d/src/hvm/mod.rs | src/hvm/mod.rs | use crate::multi_iterator;
use hvm::ast::{Net, Tree};
pub mod add_recursive_priority;
pub mod check_net_size;
pub mod eta_reduce;
pub mod inline;
pub mod mutual_recursion;
pub mod prune;
/// Iterator over a tree's immediate children (empty for leaf nodes).
pub fn tree_children(tree: &Tree) -> impl DoubleEndedIterator<Item = &Tree> + Clone {
  multi_iterator!(ChildrenIter { Zero, Two });
  match tree {
    Tree::Var { .. } | Tree::Ref { .. } | Tree::Era | Tree::Num { .. } => ChildrenIter::Zero([]),
    Tree::Con { fst, snd } | Tree::Dup { fst, snd } | Tree::Opr { fst, snd } | Tree::Swi { fst, snd } => {
      ChildrenIter::Two([fst.as_ref(), snd.as_ref()])
    }
  }
}
/// Mutable iterator over a tree's immediate children (empty for leaf nodes).
pub fn tree_children_mut(tree: &mut Tree) -> impl DoubleEndedIterator<Item = &mut Tree> {
  multi_iterator!(ChildrenIter { Zero, Two });
  match tree {
    Tree::Var { .. } | Tree::Ref { .. } | Tree::Era | Tree::Num { .. } => ChildrenIter::Zero([]),
    Tree::Con { fst, snd } | Tree::Dup { fst, snd } | Tree::Opr { fst, snd } | Tree::Swi { fst, snd } => {
      ChildrenIter::Two([fst.as_mut(), snd.as_mut()])
    }
  }
}
/// All trees of a net: the root first, then both sides of every redex.
pub fn net_trees(net: &Net) -> impl DoubleEndedIterator<Item = &Tree> + Clone {
  [&net.root].into_iter().chain(net.rbag.iter().flat_map(|(_, fst, snd)| [fst, snd]))
}
/// Mutable version of [`net_trees`]: the root, then both sides of every redex.
pub fn net_trees_mut(net: &mut Net) -> impl DoubleEndedIterator<Item = &mut Tree> {
  [&mut net.root].into_iter().chain(net.rbag.iter_mut().flat_map(|(_, fst, snd)| [fst, snd]))
}
/// Pretty-prints an HVM book: one `@name = root` line per definition, followed
/// by its redex bag (`& a ~ b` for normal redexes, `&!a ~ b` for prioritized
/// ones) and a blank line.
pub fn hvm_book_show_pretty(book: &hvm::ast::Book) -> String {
  // Formatting into a `String` is infallible, so the `fmt::Result`s are ignored.
  use std::fmt::Write;
  let mut s = String::new();
  for (nam, def) in book.defs.iter() {
    let _ = writeln!(s, "@{} = {}", nam, def.root.show());
    for (pri, a, b) in def.rbag.iter() {
      // '!' marks a prioritized redex.
      let mark = if *pri { '!' } else { ' ' };
      let _ = writeln!(s, "  &{}{} ~ {}", mark, a.show(), b.show());
    }
    s.push('\n');
  }
  s
}
| rust | Apache-2.0 | d184863f03e796d1d657958a51dd6dd331ade92d | 2026-01-04T15:41:39.511038Z | false |
HigherOrderCO/Bend | https://github.com/HigherOrderCO/Bend/blob/d184863f03e796d1d657958a51dd6dd331ade92d/src/hvm/check_net_size.rs | src/hvm/check_net_size.rs | use super::tree_children;
use crate::{diagnostics::Diagnostics, fun::Name, CompilerTarget};
use hvm::ast::{Book, Net, Tree};
/// Maximum number of nodes per definition net accepted for the HVM C target.
pub const MAX_NET_SIZE_C: usize = 4095;
/// Maximum number of nodes per definition net accepted for the HVM CUDA target.
pub const MAX_NET_SIZE_CUDA: usize = 64;
/// Checks that every definition's net fits within the node limit of the
/// chosen HVM target, reporting an error for each definition that is too
/// large and failing if any error was recorded.
pub fn check_net_sizes(
  book: &Book,
  diagnostics: &mut Diagnostics,
  target: &CompilerTarget,
) -> Result<(), Diagnostics> {
  // Pick the node bound and target name for the error message.
  let (max_size, lang) = if let CompilerTarget::Cuda = target {
    (MAX_NET_SIZE_CUDA, "Cuda")
  } else {
    (MAX_NET_SIZE_C, "C")
  };
  for (def_name, def_net) in book.defs.iter() {
    let size = count_nodes(def_net);
    if size > max_size {
      let msg = format!(
        "Definition is too large for HVM {lang} (size={size}, max size={max_size}). Please break it into smaller pieces."
      );
      diagnostics.add_function_error(msg, Name::new(def_name), Default::default());
    }
  }
  diagnostics.fatal(())
}
/// Utility function to count the amount of nodes in an hvm-core AST net
///
/// Only trees with children count as nodes; nullary trees (vars, refs,
/// erasers, numbers) do not.
pub fn count_nodes(net: &Net) -> usize {
  let mut visit: Vec<&Tree> = vec![&net.root];
  let mut count = 0usize;
  for (_, l, r) in &net.rbag {
    visit.push(l);
    visit.push(r);
  }
  while let Some(tree) = visit.pop() {
    // Queue the children while checking arity, so the children iterator is
    // traversed only once per node (the original iterated it twice).
    let mut has_children = false;
    for subtree in tree_children(tree) {
      has_children = true;
      visit.push(subtree);
    }
    // If it is not 0-ary, then we'll count it as a node.
    if has_children {
      count += 1;
    }
  }
  count
}
| rust | Apache-2.0 | d184863f03e796d1d657958a51dd6dd331ade92d | 2026-01-04T15:41:39.511038Z | false |
HigherOrderCO/Bend | https://github.com/HigherOrderCO/Bend/blob/d184863f03e796d1d657958a51dd6dd331ade92d/src/net/hvm_to_net.rs | src/net/hvm_to_net.rs | use super::{INet, INode, INodes, NodeId, NodeKind::*, Port, SlotId, ROOT};
use crate::{
fun::Name,
net::{CtrKind, NodeKind},
};
use hvm::ast::{Net, Tree};
/// Converts an hvm-core AST net into the intermediate `INet` representation,
/// going through the flat, name-linked `INodes` form.
pub fn hvm_to_net(net: &Net) -> INet {
  inodes_to_inet(&hvm_to_inodes(net))
}
/// Converts an hvm net into the flat `INode` representation, flattening the
/// root tree and the two trees of every active pair.
fn hvm_to_inodes(net: &Net) -> INodes {
  let mut inodes = vec![];
  let mut n_vars = 0;
  // If the root is a variable, its name stands for the net root everywhere else.
  let net_root = if let Tree::Var { nam } = &net.root { nam } else { "" };
  // If we have a tree attached to the net root, convert that first
  if !matches!(&net.root, Tree::Var { .. }) {
    let mut root = tree_to_inodes(&net.root, "_".to_string(), net_root, &mut n_vars);
    inodes.append(&mut root);
  }
  // Convert all the trees forming active pairs.
  for (i, (_, tree1, tree2)) in net.rbag.iter().enumerate() {
    // This name cannot appear anywhere in the original net
    let tree_root = format!("%a{i}");
    // Both trees of the pair share the same root wire name, linking them.
    let mut tree1 = tree_to_inodes(tree1, tree_root.clone(), net_root, &mut n_vars);
    inodes.append(&mut tree1);
    let mut tree2 = tree_to_inodes(tree2, tree_root, net_root, &mut n_vars);
    inodes.append(&mut tree2);
  }
  inodes
}
/// Generates a fresh wire name of the form `%x<n>` and bumps the counter.
/// Names of this shape cannot appear anywhere in the original net.
fn new_var(n_vars: &mut NodeId) -> String {
  let fresh = format!("%x{}", *n_vars);
  *n_vars += 1;
  fresh
}
/// Flattens a single hvm AST tree into `INode`s, using `tree_root` as the
/// wire name of the tree's root and replacing references to the net-root
/// variable with the special name `"_"`.
fn tree_to_inodes(tree: &Tree, tree_root: String, net_root: &str, n_vars: &mut NodeId) -> INodes {
  // Returns the wire name for a child: variables are used directly (the net
  // root becomes `"_"`), any other subtree gets a fresh wire and is queued.
  fn process_node_subtree<'a>(
    subtree: &'a Tree,
    net_root: &str,
    subtrees: &mut Vec<(String, &'a Tree)>,
    n_vars: &mut NodeId,
  ) -> String {
    if let Tree::Var { nam } = subtree {
      if nam == net_root {
        "_".to_string()
      } else {
        nam.clone()
      }
    } else {
      let var = new_var(n_vars);
      subtrees.push((var.clone(), subtree));
      var
    }
  }
  let mut inodes = vec![];
  let mut subtrees = vec![(tree_root, tree)];
  while let Some((subtree_root, subtree)) = subtrees.pop() {
    match subtree {
      // For nullary nodes, the two aux ports share one fresh wire name,
      // which wires them to each other.
      Tree::Era => {
        let var = new_var(n_vars);
        inodes.push(INode { kind: Era, ports: [subtree_root, var.clone(), var] });
      }
      Tree::Con { fst, snd } => {
        let kind = NodeKind::Ctr(CtrKind::Con(None));
        let fst = process_node_subtree(fst, net_root, &mut subtrees, n_vars);
        let snd = process_node_subtree(snd, net_root, &mut subtrees, n_vars);
        inodes.push(INode { kind, ports: [subtree_root, fst, snd] });
      }
      Tree::Dup { fst, snd } => {
        let kind = NodeKind::Ctr(CtrKind::Dup(0));
        let fst = process_node_subtree(fst, net_root, &mut subtrees, n_vars);
        let snd = process_node_subtree(snd, net_root, &mut subtrees, n_vars);
        inodes.push(INode { kind, ports: [subtree_root, fst, snd] });
      }
      // Bare variables are handled by `process_node_subtree` and never queued.
      Tree::Var { .. } => unreachable!(),
      Tree::Ref { nam } => {
        let kind = Ref { def_name: Name::new(nam) };
        let var = new_var(n_vars);
        inodes.push(INode { kind, ports: [subtree_root, var.clone(), var] });
      }
      Tree::Num { val } => {
        let kind = Num { val: val.0 };
        let var = new_var(n_vars);
        inodes.push(INode { kind, ports: [subtree_root, var.clone(), var] });
      }
      Tree::Opr { fst, snd } => {
        let kind = NodeKind::Opr;
        let fst = process_node_subtree(fst, net_root, &mut subtrees, n_vars);
        let snd = process_node_subtree(snd, net_root, &mut subtrees, n_vars);
        inodes.push(INode { kind, ports: [subtree_root, fst, snd] });
      }
      Tree::Swi { fst, snd } => {
        let kind = NodeKind::Swi;
        let fst = process_node_subtree(fst, net_root, &mut subtrees, n_vars);
        let snd = process_node_subtree(snd, net_root, &mut subtrees, n_vars);
        inodes.push(INode { kind, ports: [subtree_root, fst, snd] });
      }
    }
  }
  inodes
}
// Converts INodes to an INet by linking ports based on names.
//
// Each wire name occurs on exactly two ports; the first occurrence is stored
// until the second shows up, then the pair is linked. The special name `"_"`
// wires a port to the net's root.
fn inodes_to_inet(inodes: &INodes) -> INet {
  let mut inet = INet::new();
  // Maps named inode ports to numeric inet ports.
  let mut name_map = std::collections::HashMap::new();
  for inode in inodes {
    let node = inet.new_node(inode.kind.clone());
    for (j, name) in inode.ports.iter().enumerate() {
      let p = Port(node, j as SlotId);
      if name == "_" {
        inet.link(p, ROOT);
      } else if let Some(q) = name_map.remove(name) {
        // Second occurrence of this name: link the two ports it connects.
        // `remove` does the lookup and removal in a single hash access
        // (the original did `get` followed by `remove`).
        inet.link(p, q);
      } else {
        // First occurrence: remember the port until its pair shows up.
        name_map.insert(name.clone(), p);
      }
    }
  }
  inet
}
| rust | Apache-2.0 | d184863f03e796d1d657958a51dd6dd331ade92d | 2026-01-04T15:41:39.511038Z | false |
HigherOrderCO/Bend | https://github.com/HigherOrderCO/Bend/blob/d184863f03e796d1d657958a51dd6dd331ade92d/src/net/mod.rs | src/net/mod.rs | pub mod hvm_to_net;
use crate::fun::Name;
pub type BendLab = u16;
use NodeKind::*;
#[derive(Debug, Clone)]
/// Net representation used only as an intermediate for converting to hvm-core format
pub struct INet {
  // Node 0 is the root node; `Port`s index into this vector by node id.
  nodes: Vec<Node>,
}
#[derive(Debug, Clone)]
pub struct Node {
  /// Principal port (slot 0).
  pub main: Port,
  /// First auxiliary port (slot 1).
  pub aux1: Port,
  /// Second auxiliary port (slot 2).
  pub aux2: Port,
  pub kind: NodeKind,
}
/// A port is identified by the node it belongs to and its slot
/// (0 = main, 1 = aux1, 2 = aux2).
#[derive(Debug, Clone, Copy, Hash, PartialEq, Eq, PartialOrd, Ord, Default)]
pub struct Port(pub NodeId, pub SlotId);
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum NodeKind {
  /// Root node
  Rot,
  /// Erasure nodes
  Era,
  /// Binary combinators
  Ctr(CtrKind),
  /// Reference to function definitions
  Ref { def_name: Name },
  /// Numbers
  Num { val: u32 },
  /// Numeric operations
  Opr,
  /// Pattern matching on numbers
  Swi,
}
/// The kind of a binary-combinator node, with its (optional) label.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum CtrKind {
  /// Lambda/application constructors, with an optional tag label.
  Con(Option<BendLab>),
  /// Tuple constructors, with an optional tag label.
  Tup(Option<BendLab>),
  /// Duplication/superposition constructors with a label.
  Dup(BendLab),
}
impl CtrKind {
  /// Converts this constructor kind to the numeric hvm-core node label:
  /// untagged cons/tups map to 0, label-0 dups to 1. Tagged variants are
  /// not representable in hvm32 yet and hit a `todo!`.
  pub fn to_lab(self) -> BendLab {
    #[allow(clippy::identity_op)]
    match self {
      CtrKind::Con(None) => 0,
      CtrKind::Con(Some(_)) => todo!("Tagged lambdas/applications not implemented for hvm32"),
      CtrKind::Tup(None) => 0,
      CtrKind::Tup(Some(_)) => todo!("Tagged tuples not implemented for hvm32"),
      CtrKind::Dup(0) => 1,
      CtrKind::Dup(_) => todo!("Tagged dups/sups not implemented for hvm32"),
    }
  }
}
pub type NodeId = u64;
pub type SlotId = u64;
/// The ROOT port is on the deadlocked root node at address 0.
pub const ROOT: Port = Port(0, 1);
/// Number of bits reserved for the tag field in a 64-bit value.
pub const TAG_WIDTH: u32 = 4;
/// Bit position where the tag field starts; the label occupies the bits below it.
pub const TAG: u32 = u64::BITS - TAG_WIDTH;
/// Mask selecting the label (low) bits of a 64-bit value.
pub const LABEL_MASK: u64 = (1 << TAG) - 1;
/// Mask selecting the tag (high) bits of a 64-bit value.
pub const TAG_MASK: u64 = !LABEL_MASK;
impl INet {
  /// Create a new net, with a deadlocked root node.
  pub fn new() -> Self {
    Self::default()
  }
  /// Allocates a new node with its ports disconnected
  /// (each port initially points at its own slot).
  pub fn new_node(&mut self, kind: NodeKind) -> NodeId {
    let idx = self.nodes.len() as NodeId;
    let node = Node::new(Port(idx, 0), Port(idx, 1), Port(idx, 2), kind);
    // `push` instead of the original `extend([node])`: a single element is appended.
    self.nodes.push(node);
    idx
  }
  /// Returns a reference to a node.
  pub fn node(&self, node: NodeId) -> &Node {
    &self.nodes[node as usize]
  }
  /// Returns the value stored at a port, the port on the other side of the given one.
  pub fn enter_port(&self, port: Port) -> Port {
    self.node(port.node_id()).port(port.slot())
  }
  /// Links two ports, making each point at the other.
  pub fn link(&mut self, a: Port, b: Port) {
    self.set(a, b);
    self.set(b, a);
  }
  /// Sets a port to point to another port (one direction only).
  pub fn set(&mut self, src: Port, dst: Port) {
    *self.nodes[src.node_id() as usize].port_mut(src.slot()) = dst;
  }
}
impl Default for INet {
  fn default() -> Self {
    // The root node is "deadlocked": its main port (p0) and aux2 (p2) point at
    // each other, while aux1 (the ROOT port) is later linked to the net itself.
    INet {
      nodes: vec![Node::new(Port(0, 2), Port(0, 1), Port(0, 0), Rot)], // p2 points to p0, p1 points to net
    }
  }
}
impl Node {
  /// Creates a node from its three ports and kind.
  pub fn new(main: Port, aux1: Port, aux2: Port, kind: NodeKind) -> Self {
    Node { main, aux1, aux2, kind }
  }
  /// Returns the port stored at `slot` (0 = main, 1 = aux1, 2 = aux2).
  /// Panics on any other slot id.
  pub fn port(&self, slot: SlotId) -> Port {
    match slot {
      0 => self.main,
      1 => self.aux1,
      2 => self.aux2,
      _ => unreachable!(),
    }
  }
  /// Mutable access to the port stored at `slot` (0 = main, 1 = aux1, 2 = aux2).
  /// Panics on any other slot id.
  pub fn port_mut(&mut self, slot: SlotId) -> &mut Port {
    match slot {
      0 => &mut self.main,
      1 => &mut self.aux1,
      2 => &mut self.aux2,
      _ => unreachable!(),
    }
  }
}
impl Port {
/// Returns the node address of a port.
pub fn node_id(self) -> NodeId {
self.0
}
/// Returns the slot of a port.
pub fn slot(self) -> SlotId {
self.1
}
}
/* INodes representation: */
/// A flat inet representation where links are represented by shared wire names.
// TODO: Find a better name
pub type INodes = Vec<INode>;
#[derive(Debug)]
pub struct INode {
  pub kind: NodeKind,
  /// Wire names attached to the main, aux1 and aux2 ports. A name appearing
  /// on two ports links them; the special name `"_"` marks the net root.
  pub ports: [String; 3],
}
| rust | Apache-2.0 | d184863f03e796d1d657958a51dd6dd331ade92d | 2026-01-04T15:41:39.511038Z | false |
HigherOrderCO/Bend | https://github.com/HigherOrderCO/Bend/blob/d184863f03e796d1d657958a51dd6dd331ade92d/src/fun/parser.rs | src/fun/parser.rs | use crate::{
fun::{
display::DisplayFn, Adt, AdtCtr, Adts, Constructors, CtrField, FanKind, HvmDefinition, HvmDefinitions,
MatchRule, Name, Num, Op, Pattern, Rule, Source, SourceKind, Tag, Term, Type, STRINGS,
},
imp::parser::ImpParser,
imports::{Import, ImportCtx, ImportType},
maybe_grow,
};
use highlight_error::highlight_error;
use indexmap::IndexMap;
use itertools::Itertools;
use std::ops::Range;
use TSPL::{ParseError, Parser};
type FunDefinition = super::Definition;
type ImpDefinition = crate::imp::Definition;
/// Intermediate representation of a program.
#[derive(Debug, Clone, Default)]
pub struct ParseBook {
  /// The `functional` function definitions.
  pub fun_defs: IndexMap<Name, FunDefinition>,
  /// The `imperative` function definitions.
  pub imp_defs: IndexMap<Name, ImpDefinition>,
  /// HVM native function definitions.
  pub hvm_defs: HvmDefinitions,
  /// The algebraic datatypes defined by the program
  pub adts: Adts,
  /// The type that each constructor belongs to.
  pub ctrs: Constructors,
  /// Imported packages to be loaded in the program
  pub import_ctx: ImportCtx,
  /// File path that the book was loaded from.
  pub source: Name,
}
impl ParseBook {
pub fn contains_def(&self, name: &Name) -> bool {
self.fun_defs.contains_key(name) || self.imp_defs.contains_key(name) || self.hvm_defs.contains_key(name)
}
pub fn contains_builtin_def(&self, name: &Name) -> Option<bool> {
self
.fun_defs
.get(name)
.map(|d| d.is_builtin())
.or_else(|| self.imp_defs.get(name).map(|d| d.source.is_builtin()))
.or_else(|| self.hvm_defs.get(name).map(|d| d.source.is_builtin()))
}
}
pub type ParseResult<T> = std::result::Result<T, ParseError>;
/// Parser for the fun-syntax part of a Bend source file.
pub struct FunParser<'i> {
  /// Name of the file being parsed (used when recording definition sources).
  file: Name,
  /// The full source text.
  input: &'i str,
  /// Current position in `input`.
  index: usize,
  /// Whether parsed definitions are marked as builtin.
  builtin: bool,
}
impl<'a> FunParser<'a> {
  /// Creates a parser over `input`, starting at the beginning of the text.
  pub fn new(file: Name, input: &'a str, builtin: bool) -> Self {
    Self { file, input, index: 0, builtin }
  }
/* AST parsing functions */
  /// Parses a whole source file into `default_book`, dispatching each
  /// top-level item (type/object declarations, `def`/`hvm` definitions,
  /// imports, fun rules) to the matching sub-parser.
  ///
  /// Imperative items are delegated to an `ImpParser` sharing the same input;
  /// the cursor (`self.index`) is synced back after each delegation.
  pub fn parse_book(&mut self, default_book: ParseBook) -> ParseResult<ParseBook> {
    let mut book = default_book;
    let mut indent = self.advance_newlines()?;
    while !self.is_eof() {
      // Record type definition
      if self.starts_with_keyword("object") {
        let ini_idx = *self.index();
        let mut prs = ImpParser {
          file: self.file.clone(),
          input: self.input,
          index: *self.index(),
          builtin: self.builtin,
        };
        let (adt, nxt_indent) = prs.parse_object(indent)?;
        self.index = prs.index;
        let end_idx = *self.index();
        self.add_type_def(adt, &mut book, ini_idx..end_idx)?;
        indent = nxt_indent;
        continue;
      }
      // Imp function definition
      if self.starts_with_keyword("def") {
        let ini_idx = *self.index();
        let mut prs =
          ImpParser { file: self.file.clone(), input: self.input, index: ini_idx, builtin: self.builtin };
        let (def, nxt_indent) = prs.parse_function_def(indent)?;
        self.index = prs.index;
        let end_idx = *self.index();
        self.add_imp_def(def, &mut book, ini_idx..end_idx)?;
        indent = nxt_indent;
        continue;
      }
      // Fun/Imp type definition
      if self.starts_with_keyword("type") {
        // Lookahead: an imp-style type has `:` or `(` right after its name.
        fn starts_with_imp_type(p: &mut FunParser) -> ParseResult<()> {
          p.parse_keyword("type")?;
          p.skip_trivia_inline()?;
          p.parse_top_level_name()?;
          p.skip_trivia_inline()?;
          if p.starts_with(":") || p.starts_with("(") {
            Ok(())
          } else {
            Err(ParseError::new((0, 0), ""))
          }
        }
        let ini_idx = *self.index();
        let is_imp = starts_with_imp_type(self).is_ok();
        // Backtrack after the lookahead before parsing for real.
        self.index = ini_idx;
        if is_imp {
          // Imp type definition
          let mut prs = ImpParser {
            file: self.file.clone(),
            input: self.input,
            index: *self.index(),
            builtin: self.builtin,
          };
          let (adt, nxt_indent) = prs.parse_type_def(indent)?;
          self.index = prs.index;
          let end_idx = *self.index();
          self.add_type_def(adt, &mut book, ini_idx..end_idx)?;
          indent = nxt_indent;
          continue;
        } else {
          // Fun type definition
          let adt = self.parse_type_def()?;
          let end_idx = *self.index();
          self.add_type_def(adt, &mut book, ini_idx..end_idx)?;
          indent = self.advance_newlines()?;
          continue;
        }
      }
      // HVM native function definition
      if self.starts_with_keyword("hvm") {
        let ini_idx = self.index;
        let mut prs =
          ImpParser { file: self.file.clone(), input: self.input, index: self.index, builtin: self.builtin };
        let (def, nxt_indent) = prs.parse_hvm()?;
        *self.index() = prs.index;
        let end_idx = *self.index();
        self.add_hvm(def, &mut book, ini_idx..end_idx)?;
        indent = nxt_indent;
        continue;
      }
      // Import declaration
      if self.starts_with_keyword("from") {
        let import = self.parse_from_import()?;
        book.import_ctx.add_import(import);
        indent = self.advance_newlines()?;
        continue;
      }
      if self.starts_with_keyword("import") {
        let imports = self.parse_import()?;
        for imp in imports {
          book.import_ctx.add_import(imp);
        }
        indent = self.advance_newlines()?;
        continue;
      }
      // Fun function definition
      let ini_idx = *self.index();
      let def = self.parse_fun_def()?;
      let end_idx = *self.index();
      self.add_fun_def(def, &mut book, ini_idx..end_idx)?;
      indent = self.advance_newlines()?;
    }
    Ok(book)
  }
  /// Parses a fun-style datatype declaration:
  /// `type (name var1 ... varN) = ctr (| ctr)*`, with the parens optional.
  fn parse_type_def(&mut self) -> ParseResult<Adt> {
    // type (name var1 ... varN) = ctr (| ctr)*
    let ini_idx = self.index;
    self.parse_keyword("type")?;
    self.skip_trivia();
    let name;
    let vars;
    if self.try_consume("(") {
      // parens around name and vars
      self.skip_trivia();
      name = self.parse_restricted_name("Datatype")?;
      vars = self
        .labelled(|p| p.list_like(|p| p.parse_var_name(), "", ")", "", false, 0), "Type variable or ')'")?;
      self.consume("=")?;
    } else {
      // no parens
      name = self.parse_restricted_name("Datatype")?;
      vars = self
        .labelled(|p| p.list_like(|p| p.parse_var_name(), "", "=", "", false, 0), "Type variable or '='")?;
    }
    let mut ctrs = vec![self.parse_type_ctr(&name, &vars)?];
    while self.try_consume("|") {
      ctrs.push(self.parse_type_ctr(&name, &vars)?);
    }
    // Index the constructors by name, keeping declaration order.
    let ctrs = ctrs.into_iter().map(|ctr| (ctr.name.clone(), ctr)).collect::<IndexMap<_, _>>();
    let end_idx = *self.index();
    let source = Source::from_file_span(&self.file, self.input, ini_idx..end_idx, self.builtin);
    let adt = Adt { name, vars, ctrs, source };
    Ok(adt)
  }
  /// Parses one datatype constructor, prefixing its name with the type's name
  /// (`TypeName/CtrName`) and building its type from the field types.
  fn parse_type_ctr(&mut self, type_name: &Name, type_vars: &[Name]) -> ParseResult<AdtCtr> {
    // '(' name (( '~'? field) | ('~'? '('field (':' type)? ')') )* ')'
    // name
    self.skip_trivia();
    let ini_idx = *self.index();
    if self.try_consume("(") {
      // name and optionally fields
      self.skip_trivia();
      let ctr_name = self.parse_top_level_name()?;
      let ctr_name = Name::new(format!("{type_name}/{ctr_name}"));
      let fields = self.list_like(|p| p.parse_type_ctr_field(), "", ")", "", false, 0)?;
      let field_types = fields.iter().map(|f| f.typ.clone()).collect::<Vec<_>>();
      let end_idx = *self.index();
      self.check_repeated_ctr_fields(&fields, &ctr_name, ini_idx..end_idx)?;
      let typ = make_ctr_type(type_name.clone(), &field_types, type_vars);
      let ctr = AdtCtr { name: ctr_name, typ, fields };
      Ok(ctr)
    } else {
      // just name
      let name = self.parse_restricted_name("Datatype constructor")?;
      let name = Name::new(format!("{type_name}/{name}"));
      let typ = make_ctr_type(type_name.clone(), &[], type_vars);
      let ctr = AdtCtr { name, typ, fields: vec![] };
      Ok(ctr)
    }
  }
fn parse_type_ctr_field(&mut self) -> ParseResult<CtrField> {
let rec = self.try_consume("~");
let nam;
let typ;
if self.try_consume("(") {
nam = self.parse_var_name()?;
if self.try_consume(":") {
typ = self.parse_type_term()?;
} else {
typ = Type::Any;
}
self.consume(")")?;
} else {
nam = self.parse_var_name()?;
typ = Type::Any;
}
Ok(CtrField { nam, typ, rec })
}
  /// Parses a fun-style function definition: either a type signature followed
  /// by `= body` or by separate rules, or (when no signature parses) just a
  /// sequence of rules with the type defaulting to `Any`.
  fn parse_fun_def(&mut self) -> ParseResult<FunDefinition> {
    let ini_idx = *self.index();
    // Try to parse signature
    if let Ok((name, args, check, typ)) = self.parse_def_sig() {
      if self.try_consume("=") {
        // Single rule with signature
        let body = self.parse_term()?;
        // The signature's argument names become the rule's patterns.
        let pats = args.into_iter().map(|nam| Pattern::Var(Some(nam))).collect();
        let rules = vec![Rule { pats, body }];
        let end_idx = *self.index();
        let source = Source::from_file_span(&self.file, self.input, ini_idx..end_idx, self.builtin);
        let def = FunDefinition { name, typ, check, rules, source };
        Ok(def)
      } else {
        // Multiple rules with signature
        let mut rules = vec![];
        let (_, rule) = self.parse_rule()?;
        rules.push(rule);
        while self.starts_with_rule(&name) {
          let (_, rule) = self.parse_rule()?;
          rules.push(rule);
        }
        let end_idx = *self.index();
        let source = Source::from_file_span(&self.file, self.input, ini_idx..end_idx, self.builtin);
        let def = FunDefinition { name, typ, check, rules, source };
        Ok(def)
      }
    } else {
      // Was not a signature, backtrack and read the name from the first rule
      self.index = ini_idx;
      // No signature, don't check by default
      let check = self.parse_checked(false);
      let mut rules = vec![];
      let (name, rule) = self.parse_rule()?;
      rules.push(rule);
      while self.starts_with_rule(&name) {
        let (_, rule) = self.parse_rule()?;
        rules.push(rule);
      }
      let end_idx = *self.index();
      let source = Source::from_file_span(&self.file, self.input, ini_idx..end_idx, self.builtin);
      let def = FunDefinition { name, typ: Type::Any, check, rules, source };
      Ok(def)
    }
  }
  /// Parses a function definition signature.
  /// Returns the name, the argument names, whether the definition should be
  /// type-checked, and the function's (arrow) type.
  fn parse_def_sig(&mut self) -> ParseResult<(Name, Vec<Name>, bool, Type)> {
    // '(' name ((arg | '(' arg (':' type)? ')'))* ')' ':' type
    // name ((arg | '(' arg (':' type)? ')'))* ':' type
    // Signature, check by default
    let check = self.parse_checked(true);
    let (name, args, typ) = if self.try_consume("(") {
      let name = self.parse_top_level_name()?;
      let args = self.list_like(|p| p.parse_def_sig_arg(), "", ")", "", false, 0)?;
      self.consume(":")?;
      let typ = self.parse_type_term()?;
      (name, args, typ)
    } else {
      let name = self.parse_top_level_name()?;
      let args = self.list_like(|p| p.parse_def_sig_arg(), "", ":", "", false, 0)?;
      let typ = self.parse_type_term()?;
      (name, args, typ)
    };
    let (args, arg_types): (Vec<_>, Vec<_>) = args.into_iter().unzip();
    // Fold the argument types and the result type into a function type.
    let typ = make_fn_type(arg_types, typ);
    Ok((name, args, check, typ))
  }
fn parse_def_sig_arg(&mut self) -> ParseResult<(Name, Type)> {
// name
// '(' name ')'
// '(' name ':' type ')'
if self.try_consume("(") {
let name = self.parse_var_name()?;
let typ = if self.try_consume(":") { self.parse_type_term()? } else { Type::Any };
self.consume(")")?;
Ok((name, typ))
} else {
let name = self.parse_var_name()?;
Ok((name, Type::Any))
}
}
fn parse_checked(&mut self, default: bool) -> bool {
if self.try_parse_keyword("checked") {
true
} else if self.try_parse_keyword("unchecked") {
false
} else {
default
}
}
  /// Parses a `from <path> import <what>` declaration, where `<what>` is a
  /// single (optionally aliased) name, a parenthesized list, or `*`.
  fn parse_from_import(&mut self) -> ParseResult<Import> {
    // from path import package
    // from path import (a, b)
    // from path import *
    self.parse_keyword("from")?;
    self.skip_trivia_inline()?;
    let path = self.parse_restricted_name("Path")?;
    self.skip_trivia_inline()?;
    self.consume("import")?;
    self.skip_trivia_inline()?;
    // Paths starting with `./` or `../` are relative imports.
    let relative = path.starts_with("./") | path.starts_with("../");
    if self.try_consume("*") {
      return Ok(Import::new(path, ImportType::Glob, relative));
    }
    if self.try_consume("(") {
      let sub = self.list_like(|p| p.parse_name_maybe_alias("Name"), "", ")", ",", false, 1)?;
      return Ok(Import::new(path, ImportType::List(sub), relative));
    }
    let (import, alias) = self.parse_name_maybe_alias("Import")?;
    Ok(Import::new(path, ImportType::Single(import, alias), relative))
  }
  /// Parses an `import <path>` declaration with a single path or a
  /// parenthesized list of paths, each optionally aliased.
  fn parse_import(&mut self) -> ParseResult<Vec<Import>> {
    // import path
    // import (path/a, path/b)
    self.parse_keyword("import")?;
    self.skip_trivia_inline()?;
    // Splits `a/b/c` into the path `a/b` and the imported name `c`.
    let new_import = |import: Name, alias: Option<Name>, relative: bool| -> Import {
      let (path, import) = match import.rsplit_once('/') {
        Some((start, end)) => (Name::new(start), Name::new(end)),
        None => (Name::default(), import),
      };
      Import::new(path, ImportType::Single(import, alias), relative)
    };
    if self.try_consume("(") {
      let list = self.list_like(|p| p.parse_import_name("Name"), "", ")", ",", false, 1)?;
      let imports = list.into_iter().map(|(a, b, c)| new_import(a, b, c)).collect_vec();
      return Ok(imports);
    }
    let (import, alias, relative) = self.parse_import_name("Import")?;
    let import = new_import(import, alias, relative);
    Ok(vec![import])
  }
  /// Parses the left-hand side of a rule: the function name and its patterns,
  /// either parenthesized or bare (patterns read until the `=`).
  fn parse_rule_lhs(&mut self) -> ParseResult<(Name, Vec<Pattern>)> {
    if self.try_consume_exactly("(") {
      self.skip_trivia();
      let name = self.parse_restricted_name("Function")?;
      let pats = self.list_like(|p| p.parse_pattern(false), "", ")", "", false, 0)?;
      Ok((name, pats))
    } else {
      // Rule without parens
      // Here we use a different label for the error because this is
      // the last alternative case for top-level definitions.
      let name = self.labelled(|p| p.parse_top_level_name(), "top-level definition")?;
      let mut pats = vec![];
      self.skip_trivia();
      while !self.starts_with("=") {
        pats.push(self.parse_pattern(false)?);
        self.skip_trivia();
      }
      Ok((name, pats))
    }
  }
fn parse_rule(&mut self) -> ParseResult<(Name, Rule)> {
self.skip_trivia();
let (name, pats) = self.parse_rule_lhs()?;
self.consume("=")?;
let body = self.parse_term()?;
let rule = Rule { pats, body };
Ok((name, rule))
}
fn starts_with_rule(&mut self, expected_name: &Name) -> bool {
let ini_idx = *self.index();
self.skip_trivia();
let res = self.parse_rule_lhs();
if !self.try_consume("=") {
self.index = ini_idx;
return false;
}
self.index = ini_idx;
if let Ok((name, _)) = res {
if &name == expected_name {
// Found rule with the expected name
true
} else {
// Found rule with a different name
false
}
} else {
// Not a rule
false
}
}
  /// Parses a pattern. When `simple` is true, only binder-style patterns are
  /// accepted: parenthesized patterns must be tuples, and list and string
  /// patterns are rejected.
  fn parse_pattern(&mut self, simple: bool) -> ParseResult<Pattern> {
    maybe_grow(|| {
      // `unexpected_tag` reports an error if a tag was parsed where none is allowed.
      let (tag, unexpected_tag) = self.parse_tag()?;
      self.skip_trivia();
      // Ctr or Tup
      if self.starts_with("(") {
        self.advance_one();
        let head_ini_idx = *self.index();
        let head = self.parse_pattern(simple)?;
        let head_end_idx = *self.index();
        // Tup
        self.skip_trivia();
        if self.starts_with(",") || simple {
          self.consume(",")?;
          let mut els = self.list_like(|p| p.parse_pattern(simple), "", ")", ",", true, 1)?;
          els.insert(0, head);
          return Ok(Pattern::Fan(FanKind::Tup, tag.unwrap_or(Tag::Static), els));
        }
        // Ctr
        unexpected_tag(self)?;
        // The head of a constructor pattern must be a plain name.
        let Pattern::Var(Some(name)) = head else {
          return self.expected_spanned("constructor name", head_ini_idx..head_end_idx);
        };
        let els = self.list_like(|p| p.parse_pattern(simple), "", ")", "", false, 0)?;
        return Ok(Pattern::Ctr(name, els));
      }
      // Dup
      if self.starts_with("{") {
        let els = self.list_like(|p| p.parse_pattern(simple), "{", "}", ",", false, 0)?;
        return Ok(Pattern::Fan(FanKind::Dup, tag.unwrap_or(Tag::Auto), els));
      }
      // List
      if self.starts_with("[") && !simple {
        unexpected_tag(self)?;
        let els = self.list_like(|p| p.parse_pattern(simple), "[", "]", ",", false, 0)?;
        return Ok(Pattern::Lst(els));
      }
      // String
      if self.starts_with("\"") && !simple {
        unexpected_tag(self)?;
        let str = self.parse_quoted_string()?;
        return Ok(Pattern::Str(STRINGS.get(str)));
      }
      // Char
      if self.starts_with("'") {
        unexpected_tag(self)?;
        let char = self.parse_quoted_char()?;
        return Ok(Pattern::Num(char as u32));
      }
      // Number
      if self.peek_one().is_some_and(|c| c.is_ascii_digit()) {
        unexpected_tag(self)?;
        let num = self.parse_u32()?;
        return Ok(Pattern::Num(num));
      }
      // Channel
      if self.starts_with("$") {
        unexpected_tag(self)?;
        self.advance_one();
        self.skip_trivia();
        let name = self.parse_var_name()?;
        return Ok(Pattern::Chn(name));
      }
      // Var
      if self.starts_with("*")
        || self
          .peek_one()
          .is_some_and(|c| c.is_ascii_alphanumeric() || c == '_' || c == '.' || c == '-' || c == '/')
      {
        unexpected_tag(self)?;
        let nam = self.parse_name_or_era()?;
        return Ok(Pattern::Var(nam));
      }
      // Nothing matched: skip to the next `=` so the error spans the bad input.
      let ini_idx = *self.index();
      while !(self.is_eof() || self.starts_with("=")) {
        self.advance_one();
      }
      let cur_idx = *self.index();
      self.expected_spanned("pattern or '='", ini_idx..cur_idx)
    })
  }
pub fn parse_term(&mut self) -> ParseResult<Term> {
maybe_grow(|| {
let (tag, unexpected_tag) = self.parse_tag()?;
self.skip_trivia();
// Lambda, unscoped lambda
if self.starts_with("λ") || self.starts_with("@") {
self.advance_one();
let tag = tag.unwrap_or(Tag::Static);
let pat = self.parse_pattern(true)?;
let bod = self.parse_term()?;
return Ok(Term::Lam { tag, pat: Box::new(pat), bod: Box::new(bod) });
}
// App, Tup, Num Op
if self.starts_with("(") {
self.advance_one();
self.skip_trivia();
// Opr but maybe something else
// ( +/-n , -> Tup with Int/Float
// ( +/-n ) -> Int/Float
// ( +/-n term -> App with Int/Float
// ( * , -> Tup with Era
// ( * ) -> Era
// ( opr -> Num Op
if let Some(opr) = self.try_parse_oper() {
if (opr == Op::ADD || opr == Op::SUB) && self.peek_one().is_some_and(|c| "0123456789".contains(c)) {
unexpected_tag(self)?;
*self.index() -= 1;
let num = self.parse_number()?;
let head = Term::Num { val: num };
self.skip_trivia();
if self.starts_with(",") {
self.consume_exactly(",")?;
let tail = self.list_like(|p| p.parse_term(), "", ")", ",", true, 1)?;
let els = [head].into_iter().chain(tail).collect();
return Ok(Term::Fan { fan: FanKind::Tup, tag: tag.unwrap_or(Tag::Static), els });
}
if self.starts_with(")") {
self.consume_exactly(")")?;
return Ok(head);
}
let els = self.list_like(|p| p.parse_term(), "", ")", "", false, 0)?;
let term = els.into_iter().fold(head, |fun, arg| Term::App {
tag: tag.clone().unwrap_or(Tag::Static),
fun: Box::new(fun),
arg: Box::new(arg),
});
return Ok(term);
}
self.skip_trivia();
if opr == Op::MUL && self.starts_with(",") {
self.consume_exactly(",")?;
let tail = self.list_like(|p| p.parse_term(), "", ")", ",", true, 1)?;
let els = [Term::Era].into_iter().chain(tail).collect();
return Ok(Term::Fan { fan: FanKind::Tup, tag: tag.unwrap_or(Tag::Static), els });
}
if opr == Op::MUL && self.starts_with(")") {
self.consume_exactly(")")?;
return Ok(Term::Era);
}
// Opr
unexpected_tag(self)?;
let fst = self.parse_term()?;
let snd = self.parse_term()?;
self.consume(")")?;
return Ok(Term::Oper { opr, fst: Box::new(fst), snd: Box::new(snd) });
}
// Tup or App
let head = self.parse_term()?;
// Tup
self.skip_trivia();
if self.starts_with(",") {
let mut els = vec![head];
while self.try_consume(",") {
els.push(self.parse_term()?);
}
self.consume(")")?;
return Ok(Term::Fan { fan: FanKind::Tup, tag: tag.unwrap_or(Tag::Static), els });
}
// App
let els = self.list_like(|p| p.parse_term(), "", ")", "", false, 0)?;
let term = els.into_iter().fold(head, |fun, arg| Term::App {
tag: tag.clone().unwrap_or(Tag::Static),
fun: Box::new(fun),
arg: Box::new(arg),
});
return Ok(term);
}
// List
if self.starts_with("[") {
unexpected_tag(self)?;
let els = self.list_like(|p| p.parse_term(), "[", "]", ",", false, 0)?;
return Ok(Term::List { els });
}
// Tree Node
if self.starts_with("![") {
self.advance_one();
self.advance_one();
unexpected_tag(self)?;
let lft = self.parse_term()?;
self.try_consume(",");
let rgt = self.parse_term()?;
self.labelled(|p| p.consume("]"), "Only two children in a Tree/Node")?;
return Ok(Term::call(Term::r#ref("Tree/Node"), [lft, rgt]));
}
// Tree Leaf
if self.starts_with("!") {
self.advance_one();
unexpected_tag(self)?;
let val = self.parse_term()?;
return Ok(Term::app(Term::r#ref("Tree/Leaf"), val));
}
// Sup
if self.starts_with("{") {
let els = self.list_like(|p| p.parse_term(), "{", "}", ",", false, 2)?;
return Ok(Term::Fan { fan: FanKind::Dup, tag: tag.unwrap_or(Tag::Auto), els });
}
// Unscoped var
if self.starts_with("$") {
self.advance_one();
unexpected_tag(self)?;
self.skip_trivia();
let nam = self.parse_var_name()?;
return Ok(Term::Link { nam });
}
// Era
if self.starts_with("*") {
self.advance_one();
unexpected_tag(self)?;
return Ok(Term::Era);
}
// Nat
if self.starts_with("#") {
self.advance_one();
unexpected_tag(self)?;
let val = self.parse_u32()?;
return Ok(Term::Nat { val });
}
// String
if self.starts_with("\"") {
unexpected_tag(self)?;
let str = self.parse_quoted_string()?;
return Ok(Term::Str { val: STRINGS.get(str) });
}
// Char
if self.starts_with("'") {
unexpected_tag(self)?;
let char = self.parse_quoted_char()?;
return Ok(Term::Num { val: Num::U24(char as u32 & 0x00ff_ffff) });
}
// Symbol
if self.starts_with("`") {
unexpected_tag(self)?;
let val = self.parse_quoted_symbol()?;
return Ok(Term::Num { val: Num::U24(val) });
}
// Native Number
if self.peek_one().is_some_and(is_num_char) {
unexpected_tag(self)?;
let num = self.parse_number()?;
return Ok(Term::Num { val: num });
}
// Use
if self.try_parse_keyword("use") {
unexpected_tag(self)?;
self.skip_trivia();
let nam = self.parse_var_name()?;
self.consume("=")?;
let val = self.parse_term()?;
self.try_consume(";");
let nxt = self.parse_term()?;
return Ok(Term::Use { nam: Some(nam), val: Box::new(val), nxt: Box::new(nxt) });
}
// Let
if self.try_parse_keyword("let") {
unexpected_tag(self)?;
let pat = self.parse_pattern(true)?;
self.consume("=")?;
let val = self.parse_term()?;
self.try_consume(";");
let nxt = self.parse_term()?;
return Ok(Term::Let { pat: Box::new(pat), val: Box::new(val), nxt: Box::new(nxt) });
}
// Ask (monadic operation)
if self.try_parse_keyword("ask") {
unexpected_tag(self)?;
let pat = self.parse_pattern(true)?;
self.consume("=")?;
let val = self.parse_term()?;
self.try_consume(";");
let nxt = self.parse_term()?;
return Ok(Term::Ask { pat: Box::new(pat), val: Box::new(val), nxt: Box::new(nxt) });
}
// Def
if self.try_parse_keyword("def") {
self.skip_trivia();
let mut def = self.parse_fun_def()?;
def.source.kind = SourceKind::Generated;
let nxt = self.parse_term()?;
return Ok(Term::Def { def, nxt: Box::new(nxt) });
}
// If
if self.try_parse_keyword("if") {
let mut chain = Vec::new();
let cnd = self.parse_term()?;
self.consume("{")?;
let thn = self.parse_term()?;
self.consume("}")?;
chain.push((cnd, thn));
self.skip_trivia_inline()?;
while self.try_parse_keyword("elif") {
let cnd = self.parse_term()?;
self.consume("{")?;
let thn = self.parse_term()?;
self.consume("}")?;
self.skip_trivia_inline()?;
chain.push((cnd, thn));
}
self.consume("else")?;
self.consume("{")?;
let els = self.parse_term()?;
self.consume("}")?;
let els = chain.into_iter().rfold(els, |acc, (cnd, thn)| Term::Swt {
bnd: Some(Name::new("%cond")),
arg: Box::new(cnd),
with_bnd: Vec::new(),
with_arg: Vec::new(),
pred: Some(Name::new("%cond-1")),
arms: vec![acc, thn],
});
return Ok(els);
}
// Match
if self.try_parse_keyword("match") {
unexpected_tag(self)?;
let (bnd, arg) = self.parse_match_arg()?;
let (with_bnd, with_arg) = self.parse_with_clause()?;
let arms = self.list_like(|p| p.parse_match_arm(), "", "}", ";", false, 1)?;
return Ok(Term::Mat { arg: Box::new(arg), bnd, with_bnd, with_arg, arms });
}
// Switch
if self.try_parse_keyword("switch") {
unexpected_tag(self)?;
let (bnd, arg) = self.parse_match_arg()?;
let (with_bnd, with_arg) = self.parse_with_clause()?;
self.try_consume("|");
self.consume("0")?;
self.consume(":")?;
let zero = self.parse_term()?;
self.try_consume(";");
let mut arms = vec![zero];
let mut expected_num = 1;
loop {
self.try_consume("|");
// case _
if self.try_consume("_") {
self.consume(":")?;
arms.push(self.parse_term()?);
self.try_consume(";");
self.consume("}")?;
break;
}
// case num
let val = self.parse_u32()?;
if val != expected_num {
return self.expected(&format!("'{}'", &expected_num.to_string()));
}
expected_num += 1;
self.consume(":")?;
arms.push(self.parse_term()?);
self.try_consume(";");
}
let pred = Some(Name::new(format!("{}-{}", bnd.as_ref().unwrap(), arms.len() - 1)));
return Ok(Term::Swt { arg: Box::new(arg), bnd, with_bnd, with_arg, pred, arms });
}
// With (monadic block)
if self.try_parse_keyword("with") {
unexpected_tag(self)?;
let typ = self.parse_name()?;
self.consume("{")?;
let bod = self.parse_term()?;
self.consume("}")?;
return Ok(Term::With { typ: Name::new(typ), bod: Box::new(bod) });
}
// Fold
if self.try_parse_keyword("fold") {
unexpected_tag(self)?;
let (bnd, arg) = self.parse_match_arg()?;
let (with_bnd, with_arg) = self.parse_with_clause()?;
let arms = self.list_like(|p| p.parse_match_arm(), "", "}", ";", false, 1)?;
return Ok(Term::Fold { arg: Box::new(arg), bnd, with_bnd, with_arg, arms });
}
// Bend
if self.try_parse_keyword("bend") {
unexpected_tag(self)?;
let args = self.list_like(
|p| {
let bind = p.parse_var_name()?;
let init = if p.try_consume("=") { p.parse_term()? } else { Term::Var { nam: bind.clone() } };
Ok((bind, init))
},
"",
"{",
",",
false,
0,
)?;
let (bind, init): (Vec<_>, Vec<_>) = args.into_iter().unzip();
let bind = bind.into_iter().map(Some).collect::<Vec<_>>();
self.skip_trivia();
self.parse_keyword("when")?;
let cond = self.parse_term()?;
self.consume(":")?;
let step = self.parse_term()?;
self.skip_trivia();
self.parse_keyword("else")?;
self.consume(":")?;
let base = self.parse_term()?;
self.consume("}")?;
return Ok(Term::Bend {
bnd: bind,
arg: init,
cond: Box::new(cond),
step: Box::new(step),
base: Box::new(base),
});
}
// Open
if self.try_parse_keyword("open") {
unexpected_tag(self)?;
self.skip_trivia();
let typ = self.parse_top_level_name()?;
self.skip_trivia();
let var = self.parse_var_name()?;
self.try_consume(";");
let bod = self.parse_term()?;
return Ok(Term::Open { typ, var, bod: Box::new(bod) });
}
// Var
unexpected_tag(self)?;
let nam = self.labelled(|p| p.parse_var_name(), "term")?;
Ok(Term::Var { nam })
})
}
/// Parses either a variable name or the eraser symbol `*`.
///
/// Returns `None` for `*` and `Some(name)` for a parsed variable name.
/// On failure the error is labelled as expecting "name or '*'".
fn parse_name_or_era(&mut self) -> ParseResult<Option<Name>> {
  self.labelled(
    |p| {
      if p.try_consume_exactly("*") {
        return Ok(None);
      }
      p.parse_var_name().map(Some)
    },
    "name or '*'",
  )
}
/// Parses a tag where it may or may not be valid.
///
/// If it is not valid, the returned callback can be used to issue an error.
///
/// In this hvm32 backend tags are rejected outright: a `#` that is not
/// followed by an ASCII digit (presumably a digit means the `#` starts a
/// numeric literal instead — TODO confirm) produces an immediate
/// "not supported" error. Otherwise no tag is parsed.
fn parse_tag(&mut self) -> ParseResult<(Option<Tag>, impl FnOnce(&mut Self) -> ParseResult<()>)> {
  // Remember where the (potential) tag starts so errors can be spanned.
  let index = self.index;
  self.skip_trivia();
  let tag = if self.peek_one() == Some('#')
    && !self.peek_many(2).is_some_and(|x| x.chars().nth(1).unwrap().is_ascii_digit())
  {
    let msg = "Tagged terms not supported for hvm32.".to_string();
    return self.err_msg_spanned(&msg, index..index + 1);
  } else {
    None
  };
  let end_index = self.index;
  // The callback reports "unexpected tag" if a tag was captured. With the
  // early return above, `tag` is always `None` here, so the callback is
  // currently a no-op kept for interface compatibility.
  Ok((tag.clone(), move |slf: &mut Self| {
    if let Some(tag) = tag {
      let msg = format!("Unexpected tag '{tag}'");
      slf.err_msg_spanned(&msg, index..end_index)
    } else {
      Ok(())
    }
  }))
}
// A named arg with optional name.
//
// Accepts `name = term`, a bare variable (used as both name and argument),
// or any other term (bound to the default name `%arg`).
fn parse_match_arg(&mut self) -> ParseResult<(Option<Name>, Term)> {
  let ini_idx = *self.index();
  let mut arg = self.parse_term()?;
  let end_idx = *self.index();
  self.skip_trivia();
  match (&mut arg, self.starts_with("=")) {
    // `name = term`: explicit binding name with a separate argument term.
    (Term::Var { nam }, true) => {
      self.consume("=")?;
      Ok((Some(std::mem::take(nam)), self.parse_term()?))
    }
    // A bare variable: it is both the binding name and the argument.
    (Term::Var { nam }, false) => Ok((Some(nam.clone()), Term::Var { nam: std::mem::take(nam) })),
    // `<non-var> = ...` is malformed: only a variable can name the argument.
    (_, true) => self.expected_spanned("argument name", ini_idx..end_idx),
    // Unnamed non-var argument: bind it to the default `%arg`.
    (arg, false) => Ok((Some(Name::new("%arg")), std::mem::take(arg))),
  }
}
/// A named arg with non-optional name.
fn parse_named_arg(&mut self) -> ParseResult<(Option<Name>, Term)> {
let nam = self.parse_var_name()?;
self.skip_trivia();
if self.starts_with("=") {
self.advance_one();
let arg = self.parse_term()?;
Ok((Some(nam), arg))
| rust | Apache-2.0 | d184863f03e796d1d657958a51dd6dd331ade92d | 2026-01-04T15:41:39.511038Z | true |
HigherOrderCO/Bend | https://github.com/HigherOrderCO/Bend/blob/d184863f03e796d1d657958a51dd6dd331ade92d/src/fun/term_to_net.rs | src/fun/term_to_net.rs | use crate::{
diagnostics::Diagnostics,
fun::{num_to_name, Book, FanKind, Name, Op, Pattern, Term},
hvm::{net_trees, tree_children},
maybe_grow,
net::CtrKind::{self, *},
};
use hvm::ast::{Net, Tree};
use loaned::LoanedMut;
use std::{
collections::{hash_map::Entry, HashMap},
ops::{Index, IndexMut},
};
/// Error for a term that compiles to an interaction net containing a
/// vicious cycle (nodes created during encoding that are unreachable
/// from the net's trees).
#[derive(Debug, Clone)]
pub struct ViciousCycleErr;
/// Compiles a whole `Book` of definitions into an HVM book plus the tag
/// labels generated along the way.
///
/// Per-definition compilation errors are accumulated into `diags`; the
/// result is only returned when `diags.fatal` decides no fatal diagnostic
/// was produced.
pub fn book_to_hvm(book: &Book, diags: &mut Diagnostics) -> Result<(hvm::ast::Book, Labels), Diagnostics> {
  let mut hvm_book = hvm::ast::Book { defs: Default::default() };
  let mut labels = Labels::default();
  let main = book.entrypoint.as_ref();
  for def in book.defs.values() {
    for rule in def.rules.iter() {
      let net = term_to_hvm(&rule.body, &mut labels);
      // The entrypoint def is stored under the book's HVM entrypoint name.
      let name = if main.is_some_and(|m| &def.name == m) {
        book.hvm_entrypoint().to_string()
      } else {
        def.name.0.to_string()
      };
      match net {
        Ok(net) => {
          hvm_book.defs.insert(name, net);
        }
        Err(err) => diags.add_inet_error(err, name),
      }
    }
  }
  // Native HVM definitions are copied over verbatim.
  // TODO: native hvm nets ignore labels
  for def in book.hvm_defs.values() {
    hvm_book.defs.insert(def.name.to_string(), def.body.clone());
  }
  labels.con.finish();
  labels.dup.finish();
  diags.fatal((hvm_book, labels))
}
/// Converts an LC term into an IC net.
///
/// Returns `Err` if the resulting net contains a vicious cycle, detected by
/// comparing the number of nodes created while encoding against the number
/// of nodes actually reachable from the net's trees.
pub fn term_to_hvm(term: &Term, labels: &mut Labels) -> Result<Net, String> {
  let mut net = Net { root: Tree::Era, rbag: Default::default() };
  let mut state = EncodeTermState {
    lets: Default::default(),
    vars: Default::default(),
    wires: Default::default(),
    redexes: Default::default(),
    name_idx: 0,
    created_nodes: 0,
    labels,
  };
  state.encode_term(term, Place::Hole(&mut net.root));
  // Move the accumulated redexes into the net's redex bag.
  LoanedMut::from(std::mem::take(&mut state.redexes)).place(&mut net.rbag);
  let EncodeTermState { created_nodes, .. } = { state };
  // Nodes that were created but are not reachable from the net's trees
  // indicate a vicious cycle.
  let found_nodes = net_trees(&net).map(count_nodes).sum::<usize>();
  if created_nodes != found_nodes {
    return Err("Found term that compiles into an inet with a vicious cycle".into());
  }
  Ok(net)
}
/// Mutable state threaded through the term-to-net encoding.
#[derive(Debug)]
struct EncodeTermState<'t, 'l> {
  // Pending `let` bindings, encoded after the current term is done.
  lets: Vec<(&'t Pattern, &'t Term)>,
  // Maps (is_global, name) to the one other place a variable links to.
  vars: HashMap<(bool, Name), Place<'t>>,
  // Auxiliary wires used to connect `let` values to their patterns.
  wires: Vec<Option<Place<'t>>>,
  // Redexes (active pairs) accumulated during encoding.
  redexes: Vec<LoanedMut<'t, (bool, Tree, Tree)>>,
  // Counter used to mint fresh variable names.
  name_idx: u64,
  // Number of nodes created; used for vicious-cycle detection.
  created_nodes: usize,
  labels: &'l mut Labels,
}
/// Counts the nodes in a tree: a tree with children contributes one node
/// plus the node count of each of its subtrees; a childless tree is a leaf
/// and contributes none.
fn count_nodes(tree: &Tree) -> usize {
  maybe_grow(|| {
    let mut children = tree_children(tree);
    match children.next() {
      // A leaf contributes no nodes.
      None => 0,
      // An inner node counts itself plus all nodes in its subtrees.
      Some(first) => 1 + count_nodes(first) + children.map(count_nodes).sum::<usize>(),
    }
  })
}
/// A "place" a subterm can be connected to while encoding.
#[derive(Debug)]
enum Place<'t> {
  // An owned tree, to be placed into its final location later.
  Tree(LoanedMut<'t, Tree>),
  // A hole in an existing tree to write into.
  Hole(&'t mut Tree),
  // An auxiliary wire, identified by its index in `EncodeTermState::wires`.
  Wire(usize),
}
impl<'t> EncodeTermState<'t, '_> {
  /// Adds a subterm connected to `up` to the `inet`.
  /// `scope` has the current variable scope.
  /// `vars` has the information of which ports the variables are declared and used in.
  /// `global_vars` has the same information for global lambdas. Must be linked outside this function.
  /// Expects variables to be linear, refs to be stored as Refs and all names to be bound.
  fn encode_term(&mut self, term: &'t Term, up: Place<'t>) {
    maybe_grow(|| {
      match term {
        Term::Era => self.link(up, Place::Tree(LoanedMut::new(Tree::Era))),
        Term::Var { nam } => self.link_var(false, nam, up),
        Term::Link { nam } => self.link_var(true, nam, up),
        Term::Ref { nam } => self.link(up, Place::Tree(LoanedMut::new(Tree::Ref { nam: nam.to_string() }))),
        Term::Num { val } => {
          let val = hvm::ast::Numb(val.to_bits());
          self.link(up, Place::Tree(LoanedMut::new(Tree::Num { val })))
        }
        // A lambda becomes to a con node. Ports:
        // - 0: points to where the lambda occurs.
        // - 1: points to the lambda variable.
        // - 2: points to the lambda body.
        // core: (var_use bod)
        Term::Lam { tag, pat, bod } => {
          let kind = Con(self.labels.con.generate(tag));
          let node = self.new_ctr(kind);
          self.link(up, node.0);
          self.encode_pat(pat, node.1);
          self.encode_term(bod, node.2);
        }
        // An application becomes to a con node too. Ports:
        // - 0: points to the function being applied.
        // - 1: points to the function's argument.
        // - 2: points to where the application occurs.
        // core: & fun ~ (arg ret) (fun not necessarily main port)
        Term::App { tag, fun, arg } => {
          let kind = Con(self.labels.con.generate(tag));
          let node = self.new_ctr(kind);
          self.encode_term(fun, node.0);
          self.encode_term(arg, node.1);
          self.link(up, node.2);
        }
        // core: & arg ~ ?<(zero succ) ret>
        Term::Swt { arg, bnd, with_bnd, with_arg, pred, arms, } => {
          // At this point should be only num matches of 0 and succ.
          assert!(bnd.is_none());
          assert!(with_bnd.is_empty());
          assert!(with_arg.is_empty());
          assert!(pred.is_none());
          assert!(arms.len() == 2);
          // One Swi node plus the Con holding the (zero, succ) arms.
          self.created_nodes += 2;
          let loaned = Tree::Swi { fst: Box::new(Tree::Con{fst: Box::new(Tree::Era), snd: Box::new(Tree::Era)}), snd: Box::new(Tree::Era)};
          // Loan out mutable holes for both arms and the output port.
          let ((zero, succ, out), node) =
            LoanedMut::loan_with(loaned, |t, l| {
              let Tree::Swi { fst, snd: out } = t else { unreachable!() };
              let Tree::Con { fst:zero, snd: succ } = fst.as_mut() else { unreachable!() };
              (l.loan_mut(zero), l.loan_mut(succ), l.loan_mut(out))
            });
          self.encode_term(arg, Place::Tree(node));
          self.encode_term(&arms[0], Place::Hole(zero));
          self.encode_term(&arms[1], Place::Hole(succ));
          self.link(up, Place::Hole(out));
        }
        Term::Let { pat, val, nxt } => {
          // Dups/tup eliminators are not actually scoped like other terms.
          // They are depended on
          self.lets.push((pat, val));
          self.encode_term(nxt, up);
        }
        Term::Fan { fan, tag, els } => {
          let kind = self.fan_kind(fan, tag);
          self.make_node_list(kind, up, els.iter().map(|el| |slf: &mut Self, up| slf.encode_term(el, up)));
        }
        // core: & [opr] ~ $(fst $(snd ret))
        Term::Oper { opr, fst, snd } => {
          match (fst.as_ref(), snd.as_ref()) {
            // Partially apply with fst
            (Term::Num { val }, snd) => {
              // Embed the operator tag into the literal's low 5 bits.
              let val = val.to_bits();
              let val = hvm::ast::Numb((val & !0x1F) | opr.to_native_tag() as u32);
              let fst = Place::Tree(LoanedMut::new(Tree::Num { val }));
              let node = self.new_opr();
              self.link(fst, node.0);
              self.encode_term(snd, node.1);
              self.encode_le_ge_opers(opr, up, node.2);
            }
            // Partially apply with snd, flip
            (fst, Term::Num { val }) => {
              if let Op::POW = opr {
                // POW shares tags with AND, so don't flip or results will be wrong
                let opr_val = hvm::ast::Numb(hvm::hvm::Numb::new_sym(opr.to_native_tag()).0);
                let oper = Place::Tree(LoanedMut::new(Tree::Num { val: opr_val }));
                let node1 = self.new_opr();
                self.encode_term(fst, node1.0);
                self.link(oper, node1.1);
                let node2 = self.new_opr();
                self.link(node1.2, node2.0);
                self.encode_term(snd, node2.1);
                self.encode_le_ge_opers(opr, up, node2.2);
              } else {
                // flip
                let val = val.to_bits();
                let val = hvm::ast::Numb((val & !0x1F) | flip_sym(opr.to_native_tag()) as u32);
                let snd = Place::Tree(LoanedMut::new(Tree::Num { val }));
                let node = self.new_opr();
                self.encode_term(fst, node.0);
                self.link(snd, node.1);
                self.encode_le_ge_opers(opr, up, node.2);
              }
            }
            // Don't partially apply
            (fst, snd) => {
              let opr_val = hvm::ast::Numb(hvm::hvm::Numb::new_sym(opr.to_native_tag()).0);
              let oper = Place::Tree(LoanedMut::new(Tree::Num { val: opr_val }));
              let node1 = self.new_opr();
              self.encode_term(fst, node1.0);
              self.link(oper, node1.1);
              let node2 = self.new_opr();
              self.link(node1.2, node2.0);
              self.encode_term(snd, node2.1);
              self.encode_le_ge_opers(opr, up, node2.2);
            }
          }
        }
        Term::Use { .. } // Removed in earlier pass
        | Term::With { .. } // Removed in earlier pass
        | Term::Ask { .. } // Removed in earlier pass
        | Term::Mat { .. } // Removed in earlier pass
        | Term::Bend { .. } // Removed in desugar_bend
        | Term::Fold { .. } // Removed in desugar_fold
        | Term::Open { .. } // Removed in desugar_open
        | Term::Nat { .. } // Removed in encode_nat
        | Term::Str { .. } // Removed in encode_str
        | Term::List { .. } // Removed in encode_list
        | Term::Def { .. } // Removed in earlier pass
        | Term::Err => unreachable!(),
      }
      // Encode all `let` bindings queued above, connecting each value to its
      // pattern through a fresh auxiliary wire.
      while let Some((pat, val)) = self.lets.pop() {
        let wire = self.new_wire();
        self.encode_term(val, Place::Wire(wire));
        self.encode_pat(pat, Place::Wire(wire));
      }
    })
  }

  /// For LE/GE (which have no native tag of their own) post-composes the
  /// underlying GT/LT node with an extra EQ node (computing `(a > b) == 0`
  /// resp. `(a < b) == 0`); for every other operator just links `node` to `up`.
  fn encode_le_ge_opers(&mut self, opr: &Op, up: Place<'t>, node: Place<'t>) {
    match opr {
      Op::LE | Op::GE => {
        let node_eq = self.new_opr();
        // Partially-applied `== 0`: EQ tag with zero value bits.
        let eq_val =
          Place::Tree(LoanedMut::new(Tree::Num { val: hvm::ast::Numb(Op::EQ.to_native_tag() as u32) }));
        self.link(eq_val, node_eq.0);
        self.link(node_eq.1, node);
        self.link(up, node_eq.2);
      }
      _ => self.link(up, node),
    }
  }

  /// Encodes a binding pattern at `up`: variables link to their one other
  /// occurrence, unused bindings become erasers, fans become ctr nodes.
  fn encode_pat(&mut self, pat: &Pattern, up: Place<'t>) {
    maybe_grow(|| match pat {
      Pattern::Var(None) => self.link(up, Place::Tree(LoanedMut::new(Tree::Era))),
      Pattern::Var(Some(name)) => self.link_var(false, name, up),
      Pattern::Chn(name) => self.link_var(true, name, up),
      Pattern::Fan(fan, tag, els) => {
        let kind = self.fan_kind(fan, tag);
        self.make_node_list(kind, up, els.iter().map(|el| |slf: &mut Self, up| slf.encode_pat(el, up)));
      }
      // Compound patterns are flattened away by earlier passes.
      Pattern::Ctr(_, _) | Pattern::Num(_) | Pattern::Lst(_) | Pattern::Str(_) => unreachable!(),
    })
  }

  /// Connects two places together.
  fn link(&mut self, a: Place<'t>, b: Place<'t>) {
    match (a, b) {
      // Two full trees meeting form a new redex.
      (Place::Tree(a), Place::Tree(b)) => {
        self.redexes.push(LoanedMut::merge((false, Tree::Era, Tree::Era), |r, m| {
          m.place(b, &mut r.1);
          m.place(a, &mut r.2);
        }))
      }
      // A tree meeting a hole is written directly into the hole.
      (Place::Tree(t), Place::Hole(h)) | (Place::Hole(h), Place::Tree(t)) => {
        t.place(h);
      }
      // Two holes are connected through a fresh named var pair.
      (Place::Hole(a), Place::Hole(b)) => {
        let var = Tree::Var { nam: num_to_name(self.name_idx) };
        self.name_idx += 1;
        *a = var.clone();
        *b = var;
      }
      // A wire buffers its first endpoint; the second endpoint links to it.
      (Place::Wire(v), p) | (p, Place::Wire(v)) => {
        let v = &mut self.wires[v];
        match v.take() {
          Some(q) => self.link(p, q),
          None => *v = Some(p),
        }
      }
    }
  }

  /// Creates a binary constructor node of the given kind and returns its
  /// (main, aux1, aux2) ports as places.
  fn new_ctr(&mut self, kind: CtrKind) -> (Place<'t>, Place<'t>, Place<'t>) {
    self.created_nodes += 1;
    // Only untagged CON/TUP and label-0 DUP nodes are representable here.
    let node = match kind {
      CtrKind::Con(None) => Tree::Con { fst: Box::new(Tree::Era), snd: Box::new(Tree::Era) },
      CtrKind::Dup(0) => Tree::Dup { fst: Box::new(Tree::Era), snd: Box::new(Tree::Era) },
      CtrKind::Tup(None) => Tree::Con { fst: Box::new(Tree::Era), snd: Box::new(Tree::Era) },
      _ => unreachable!(),
    };
    let ((a, b), node) = LoanedMut::loan_with(node, |t, l| match t {
      Tree::Con { fst, snd } => (l.loan_mut(fst), l.loan_mut(snd)),
      Tree::Dup { fst, snd } => (l.loan_mut(fst), l.loan_mut(snd)),
      _ => unreachable!(),
    });
    (Place::Tree(node), Place::Hole(a), Place::Hole(b))
  }

  /// Creates a numeric-operation node and returns its (main, aux1, aux2) ports.
  fn new_opr(&mut self) -> (Place<'t>, Place<'t>, Place<'t>) {
    self.created_nodes += 1;
    let ((fst, snd), node) =
      LoanedMut::loan_with(Tree::Opr { fst: Box::new(Tree::Era), snd: Box::new(Tree::Era) }, |t, l| {
        let Tree::Opr { fst, snd } = t else { unreachable!() };
        (l.loan_mut(fst), l.loan_mut(snd))
      });
    (Place::Tree(node), Place::Hole(fst), Place::Hole(snd))
  }

  /// Adds a list-like tree of nodes of the same kind to the inet.
  /// `els` must be non-empty (the last element is placed without a new node).
  fn make_node_list(
    &mut self,
    kind: CtrKind,
    mut up: Place<'t>,
    mut els: impl DoubleEndedIterator<Item = impl FnOnce(&mut Self, Place<'t>)>,
  ) {
    let last = els.next_back().unwrap();
    for item in els {
      let node = self.new_ctr(kind);
      self.link(up, node.0);
      item(self, node.1);
      up = node.2;
    }
    last(self, up);
  }

  /// Allocates a fresh auxiliary wire and returns its index.
  fn new_wire(&mut self) -> usize {
    let i = self.wires.len();
    self.wires.push(None);
    i
  }

  /// Resolves a fan + tag into a node kind. TUP keeps an optional label;
  /// DUP requires one (generated labels for DUP are always `Some`).
  fn fan_kind(&mut self, fan: &FanKind, tag: &crate::fun::Tag) -> CtrKind {
    let lab = self.labels[*fan].generate(tag);
    if *fan == FanKind::Tup {
      Tup(lab)
    } else {
      Dup(lab.unwrap())
    }
  }

  /// Links a variable occurrence. The first occurrence is stored; the second
  /// links against it (variables are linear, so there are exactly two).
  fn link_var(&mut self, global: bool, name: &Name, place: Place<'t>) {
    match self.vars.entry((global, name.clone())) {
      Entry::Occupied(e) => {
        let other = e.remove();
        self.link(place, other);
      }
      Entry::Vacant(e) => {
        e.insert(place);
      }
    }
  }
}
/// Label generators for each node family (CON, DUP and TUP).
#[derive(Debug, Default, Clone)]
pub struct Labels {
  pub con: LabelGenerator,
  pub dup: LabelGenerator,
  pub tup: LabelGenerator,
}
/// Generates numeric labels for term tags and remembers the name↔label
/// mapping so labels can be read back into tags.
#[derive(Debug, Default, Clone)]
pub struct LabelGenerator {
  // Label counter; set to `u16::MAX` by `finish()`.
  pub next: u16,
  pub name_to_label: HashMap<Name, u16>,
  pub label_to_name: HashMap<u16, Name>,
}
impl Index<FanKind> for Labels {
  type Output = LabelGenerator;

  /// Selects the label generator for the given fan kind (`tup` or `dup`).
  fn index(&self, fan: FanKind) -> &Self::Output {
    match fan {
      FanKind::Tup => &self.tup,
      FanKind::Dup => &self.dup,
    }
  }
}
impl IndexMut<FanKind> for Labels {
  /// Mutable counterpart of `Index<FanKind>`: `tup` or `dup` generator.
  fn index_mut(&mut self, fan: FanKind) -> &mut Self::Output {
    match fan {
      FanKind::Tup => &mut self.tup,
      FanKind::Dup => &mut self.dup,
    }
  }
}
impl LabelGenerator {
  // If some tag and new generate a new label, otherwise return the generated label.
  // If none use the implicit label counter.
  //
  // hvm32 restrictions: named tags are unimplemented (todo!), auto tags all
  // share label 0, and static tags have no label.
  fn generate(&mut self, tag: &crate::fun::Tag) -> Option<u16> {
    use crate::fun::Tag;
    match tag {
      Tag::Named(_name) => {
        todo!("Named tags not implemented for hvm32");
        /* match self.name_to_label.entry(name.clone()) {
          Entry::Occupied(e) => Some(*e.get()),
          Entry::Vacant(e) => {
            let lab = unique();
            self.label_to_name.insert(lab, name.clone());
            Some(*e.insert(lab))
          }
        } */
      }
      Tag::Numeric(lab) => Some(*lab),
      Tag::Auto => Some(0),
      Tag::Static => None,
    }
  }

  /// Converts a label back into a surface tag (inverse of `generate`):
  /// a remembered name yields `Named`, 0 yields `Auto`, any other label
  /// yields `Numeric`, and `None` yields `Static`.
  pub fn to_tag(&self, label: Option<u16>) -> crate::fun::Tag {
    use crate::fun::Tag;
    match label {
      Some(label) => match self.label_to_name.get(&label) {
        Some(name) => Tag::Named(name.clone()),
        None => {
          if label == 0 {
            Tag::Auto
          } else {
            Tag::Numeric(label)
          }
        }
      },
      None => Tag::Static,
    }
  }

  /// Ends label generation.
  /// `label_to_name` is deliberately kept — `to_tag` still reads it during readback.
  fn finish(&mut self) {
    self.next = u16::MAX;
    self.name_to_label.clear();
  }
}
impl Op {
  /// Maps a surface-language operator to its native HVM operation tag.
  fn to_native_tag(self) -> hvm::hvm::Tag {
    match self {
      Op::ADD => hvm::hvm::OP_ADD,
      Op::SUB => hvm::hvm::OP_SUB,
      Op::MUL => hvm::hvm::OP_MUL,
      Op::DIV => hvm::hvm::OP_DIV,
      Op::REM => hvm::hvm::OP_REM,
      Op::EQ => hvm::hvm::OP_EQ,
      Op::NEQ => hvm::hvm::OP_NEQ,
      Op::LT => hvm::hvm::OP_LT,
      Op::GT => hvm::hvm::OP_GT,
      Op::AND => hvm::hvm::OP_AND,
      Op::OR => hvm::hvm::OP_OR,
      Op::XOR => hvm::hvm::OP_XOR,
      Op::SHL => hvm::hvm::OP_SHL,
      Op::SHR => hvm::hvm::OP_SHR,
      // POW has no native tag of its own: it reuses OP_XOR and is
      // disambiguated by the F24 number type during readback.
      Op::POW => hvm::hvm::OP_XOR,
      // LE/GE are intentionally mapped to their negations; they are
      // post-composed with an extra `== 0` node in `encode_le_ge_opers`.
      Op::LE => hvm::hvm::OP_GT,
      Op::GE => hvm::hvm::OP_LT,
    }
  }
}
/// Swaps an operation tag for its argument-flipped counterpart.
///
/// Non-commutative ops come in a normal/flipped pair (e.g. `OP_SUB`/`FP_SUB`),
/// and `<`/`>` flip into each other. Tags with no flipped form (commutative
/// ops and symmetric comparisons) are returned unchanged.
fn flip_sym(tag: hvm::hvm::Tag) -> hvm::hvm::Tag {
  use hvm::hvm::{FP_DIV, FP_REM, FP_SHL, FP_SHR, FP_SUB, OP_DIV, OP_GT, OP_LT, OP_REM, OP_SHL, OP_SHR, OP_SUB};
  let pairs = [
    (OP_SUB, FP_SUB),
    (OP_DIV, FP_DIV),
    (OP_REM, FP_REM),
    (OP_LT, OP_GT),
    (OP_SHL, FP_SHL),
    (OP_SHR, FP_SHR),
  ];
  // Each pair flips in both directions.
  for (a, b) in pairs {
    if tag == a {
      return b;
    }
    if tag == b {
      return a;
    }
  }
  tag
}
| rust | Apache-2.0 | d184863f03e796d1d657958a51dd6dd331ade92d | 2026-01-04T15:41:39.511038Z | false |
HigherOrderCO/Bend | https://github.com/HigherOrderCO/Bend/blob/d184863f03e796d1d657958a51dd6dd331ade92d/src/fun/net_to_term.rs | src/fun/net_to_term.rs | use crate::{
diagnostics::{DiagnosticOrigin, Diagnostics, Severity},
fun::{term_to_net::Labels, Book, FanKind, Name, Num, Op, Pattern, Tag, Term},
maybe_grow,
net::{BendLab, CtrKind, INet, NodeId, NodeKind, Port, SlotId, ROOT},
};
use hvm::hvm::Numb;
use std::collections::{BTreeSet, HashMap, HashSet};
/// Converts an Interaction-INet to a Lambda Calculus term.
///
/// In `linear` mode dup/sup pairing is disabled (`dup_paths` is `None`).
/// Readback errors are accumulated into `diagnostics` rather than aborting.
pub fn net_to_term(
  net: &INet,
  book: &Book,
  labels: &Labels,
  linear: bool,
  diagnostics: &mut Diagnostics,
) -> Term {
  let mut reader = Reader {
    net,
    labels,
    book,
    recursive_defs: &book.recursive_defs(),
    dup_paths: if linear { None } else { Some(Default::default()) },
    scope: Default::default(),
    seen_fans: Default::default(),
    namegen: Default::default(),
    seen: Default::default(),
    errors: Default::default(),
  };
  let mut term = reader.read_term(net.enter_port(ROOT));
  // Read back every floating dup/tup-elimination node collected during the
  // main pass and splice each into the term as a split.
  while let Some(node) = reader.scope.pop_first() {
    let val = reader.read_term(reader.net.enter_port(Port(node, 0)));
    let fst = reader.namegen.decl_name(net, Port(node, 1));
    let snd = reader.namegen.decl_name(net, Port(node, 2));
    let (fan, tag) = match reader.net.node(node).kind {
      NodeKind::Ctr(CtrKind::Tup(lab)) => (FanKind::Tup, reader.labels.tup.to_tag(lab)),
      NodeKind::Ctr(CtrKind::Dup(lab)) => (FanKind::Dup, reader.labels.dup.to_tag(Some(lab))),
      _ => unreachable!(),
    };
    let split = &mut Split { fan, tag, fst, snd, val };
    // Two passes over `insert_split`: the first (with usize::MAX) returns the
    // number of uses, the second inserts at the depth that covers them all.
    let uses = term.insert_split(split, usize::MAX).unwrap();
    let result = term.insert_split(split, uses);
    debug_assert_eq!(result, None);
  }
  reader.report_errors(diagnostics);
  // Resolve unscoped (global-lambda) variable bindings over the whole term.
  let mut unscoped = HashSet::new();
  let mut scope = Vec::new();
  term.collect_unscoped(&mut unscoped, &mut scope);
  term.apply_unscoped(&unscoped);
  term
}
// BTreeSet for consistent readback of dups
type Scope = BTreeSet<NodeId>;

/// State for reading an interaction net back into a term.
pub struct Reader<'a> {
  pub book: &'a Book,
  pub namegen: NameGen,
  net: &'a INet,
  labels: &'a Labels,
  // Per-label stack of dup slots (1 or 2) currently being traversed, so a
  // matching Sup can be resolved to the right branch. `None` in linear mode.
  dup_paths: Option<HashMap<u16, Vec<SlotId>>>,
  /// Store for floating/unscoped terms, like dups and let tups.
  scope: Scope,
  // To avoid reinserting things in the scope.
  seen_fans: Scope,
  // Ports already visited; revisiting one (outside dup-path mode) means a cycle.
  seen: HashSet<Port>,
  errors: Vec<ReadbackError>,
  recursive_defs: &'a BTreeSet<Name>,
}
impl Reader<'_> {
  /// Reads back the term reachable through `next`, dispatching on node kind.
  fn read_term(&mut self, next: Port) -> Term {
    use CtrKind::*;
    maybe_grow(|| {
      // Revisiting a port outside of dup-path mode means the net is cyclic.
      if !self.seen.insert(next) && self.dup_paths.is_none() {
        self.error(ReadbackError::Cyclic);
        return Term::Var { nam: Name::new("...") };
      }
      let node = next.node_id();
      match &self.net.node(node).kind {
        NodeKind::Era => Term::Era,
        NodeKind::Ctr(CtrKind::Con(lab)) => self.read_con(next, *lab),
        NodeKind::Swi => self.read_swi(next),
        NodeKind::Ref { def_name } => Term::Ref { nam: def_name.clone() },
        NodeKind::Ctr(kind @ (Dup(_) | Tup(_))) => self.read_fan(next, *kind),
        // NOTE(review): `val` carries its own type tag, so it is passed as
        // both the bits and the type source — confirm against
        // `num_from_bits_with_type`.
        NodeKind::Num { val } => num_from_bits_with_type(*val, *val),
        NodeKind::Opr => self.read_opr(next),
        NodeKind::Rot => {
          // Reaching the root node from inside the net is invalid.
          self.error(ReadbackError::ReachedRoot);
          Term::Err
        }
      }
    })
  }
  /// Reads a term from a CON node.
  /// Could be a lambda, an application, a CON tuple or a CON tuple elimination.
  /// The slot we entered through decides which: port 0 is a constructor
  /// (tuple or lambda), port 1 a bound variable, port 2 an application.
  fn read_con(&mut self, next: Port, label: Option<BendLab>) -> Term {
    let node = next.node_id();
    match next.slot() {
      // If we're visiting a port 0, then it is a tuple or a lambda.
      0 => {
        if self.is_tup(node) {
          // A tuple
          let lft = self.read_term(self.net.enter_port(Port(node, 1)));
          let rgt = self.read_term(self.net.enter_port(Port(node, 2)));
          Term::Fan { fan: FanKind::Tup, tag: self.labels.con.to_tag(label), els: vec![lft, rgt] }
        } else {
          // A lambda
          let nam = self.namegen.decl_name(self.net, Port(node, 1));
          let bod = self.read_term(self.net.enter_port(Port(node, 2)));
          Term::Lam {
            tag: self.labels.con.to_tag(label),
            pat: Box::new(Pattern::Var(nam)),
            bod: Box::new(bod),
          }
        }
      }
      // If we're visiting a port 1, then it is a variable.
      1 => Term::Var { nam: self.namegen.var_name(next) },
      // If we're visiting a port 2, then it is an application.
      2 => {
        let fun = self.read_term(self.net.enter_port(Port(node, 0)));
        let arg = self.read_term(self.net.enter_port(Port(node, 1)));
        Term::App { tag: self.labels.con.to_tag(label), fun: Box::new(fun), arg: Box::new(arg) }
      }
      _ => unreachable!(),
    }
  }
  /// Reads a fan term from a DUP node.
  /// Could be a superposition, a duplication, a DUP tuple or a DUP tuple elimination.
  fn read_fan(&mut self, next: Port, kind: CtrKind) -> Term {
    let node = next.node_id();
    let (fan, lab) = match kind {
      CtrKind::Tup(lab) => (FanKind::Tup, lab),
      CtrKind::Dup(lab) => (FanKind::Dup, Some(lab)),
      _ => unreachable!(),
    };
    match next.slot() {
      // If we're visiting a port 0, then it is a pair.
      0 => {
        // If this superposition is in a readback path with a paired Dup,
        // we resolve it by splitting the two sup values into the two Dup variables.
        // If we find that it's not paired with a Dup, we just keep the Sup as a term.
        // The latter are all the early returns.
        if fan != FanKind::Dup {
          return self.decay_or_get_ports(node).unwrap_or_else(|(fst, snd)| Term::Fan {
            fan,
            tag: self.labels[fan].to_tag(lab),
            els: vec![fst, snd],
          });
        }
        // No dup-path tracking (linear mode): keep the Sup.
        let Some(dup_paths) = &mut self.dup_paths else {
          return self.decay_or_get_ports(node).unwrap_or_else(|(fst, snd)| Term::Fan {
            fan,
            tag: self.labels[fan].to_tag(lab),
            els: vec![fst, snd],
          });
        };
        // No pending Dup slot for this label: keep the Sup.
        let stack = dup_paths.entry(lab.unwrap()).or_default();
        let Some(slot) = stack.pop() else {
          return self.decay_or_get_ports(node).unwrap_or_else(|(fst, snd)| Term::Fan {
            fan,
            tag: self.labels[fan].to_tag(lab),
            els: vec![fst, snd],
          });
        };
        // Found a paired Dup, so we "decay" the superposition according to the original direction we came from the Dup.
        let term = self.read_term(self.net.enter_port(Port(node, slot)));
        // Restore the popped slot so sibling paths see the same state.
        self.dup_paths.as_mut().unwrap().get_mut(&lab.unwrap()).unwrap().push(slot);
        term
      }
      // If we're visiting a port 1 or 2, then it is a variable.
      // Also, that means we found a dup, so we store it to read later.
      1 | 2 => {
        // If doing non-linear readback, we also store dup paths to try to resolve them later.
        if let Some(dup_paths) = &mut self.dup_paths {
          if fan == FanKind::Dup {
            dup_paths.entry(lab.unwrap()).or_default().push(next.slot());
            let term = self.read_term(self.net.enter_port(Port(node, 0)));
            self.dup_paths.as_mut().unwrap().entry(lab.unwrap()).or_default().pop().unwrap();
            return term;
          }
        }
        // Otherwise, just store the new dup/let tup and return the variable.
        if self.seen_fans.insert(node) {
          self.scope.insert(node);
        }
        Term::Var { nam: self.namegen.var_name(next) }
      }
      _ => unreachable!(),
    }
  }
  /// Reads an Opr term from an OPR node.
  ///
  /// HVM operations appear in one of two shapes: a single OPR node where one
  /// port holds a partially-applied number (operand fused with an operator
  /// tag), or a pair of chained OPR nodes whose three free ports carry the
  /// two operands and a symbol number for the operator.
  fn read_opr(&mut self, next: Port) -> Term {
    /// Read one of the argument ports of an operation.
    /// Classifies the port's contents into `args` (operands, as raw bits or
    /// readback terms), `types` (number type tags) and `ops` (operator tags).
    fn add_arg(
      reader: &mut Reader,
      port: Port,
      args: &mut Vec<Result<hvm::hvm::Val, Term>>,
      types: &mut Vec<hvm::hvm::Tag>,
      ops: &mut Vec<hvm::hvm::Tag>,
    ) {
      if let NodeKind::Num { val } = reader.net.node(port.node_id()).kind {
        match hvm::hvm::Numb::get_typ(&Numb(val)) {
          // Contains an operation
          hvm::hvm::TY_SYM => {
            ops.push(hvm::hvm::Numb(val).get_sym());
          }
          // Contains a number with a type
          typ @ hvm::hvm::TY_U24..=hvm::hvm::TY_F24 => {
            types.push(typ);
            args.push(Ok(val));
          }
          // Contains a partially applied number with operation and no type
          op @ hvm::hvm::OP_ADD.. => {
            ops.push(op);
            args.push(Ok(val));
          }
        }
      } else {
        // Some other non-number argument
        let term = reader.read_term(port);
        args.push(Err(term));
      }
    }

    /// Creates an Opr term from the arguments of the subnet of an OPR node.
    /// Expects exactly two operands, one operator and at most one type tag;
    /// anything else yields `Term::Err`.
    fn opr_term_from_hvm_args(
      args: &mut Vec<Result<hvm::hvm::Val, Term>>,
      types: &mut Vec<hvm::hvm::Tag>,
      ops: &mut Vec<hvm::hvm::Tag>,
      is_flipped: bool,
    ) -> Term {
      let typ = match types.as_slice() {
        [typ] => *typ,
        // Use U24 as default number type
        [] => hvm::hvm::TY_U24,
        _ => {
          // Too many types
          return Term::Err;
        }
      };
      match (args.as_slice(), ops.as_slice()) {
        ([arg1, arg2], [op]) => {
          // Correct number of arguments
          let arg1 = match arg1 {
            Ok(val) => num_from_bits_with_type(*val, typ as u32),
            Err(val) => val.clone(),
          };
          let arg2 = match arg2 {
            Ok(val) => num_from_bits_with_type(*val, typ as u32),
            Err(val) => val.clone(),
          };
          // A pre-flipped call and a flipped operator tag cancel out.
          let (arg1, arg2) = if is_flipped ^ op_is_flipped(*op) { (arg2, arg1) } else { (arg1, arg2) };
          let Some(op) = op_from_native_tag(*op, typ) else {
            // Invalid operator
            return Term::Err;
          };
          Term::Oper { opr: op, fst: Box::new(arg1), snd: Box::new(arg2) }
        }
        _ => {
          // Invalid number of arguments/types/operators
          Term::Err
        }
      }
    }

    /// Whether the tag is one of the flipped (`FP_*`) operator variants.
    fn op_is_flipped(op: hvm::hvm::Tag) -> bool {
      [hvm::hvm::FP_DIV, hvm::hvm::FP_REM, hvm::hvm::FP_SHL, hvm::hvm::FP_SHR, hvm::hvm::FP_SUB].contains(&op)
    }

    /// Maps a native operator tag back to a surface `Op` (inverse of
    /// `Op::to_native_tag`); F24 AND/OR/XOR carry special meanings.
    fn op_from_native_tag(val: hvm::hvm::Tag, typ: hvm::hvm::Tag) -> Option<Op> {
      let op = match val {
        hvm::hvm::OP_ADD => Op::ADD,
        hvm::hvm::OP_SUB => Op::SUB,
        hvm::hvm::FP_SUB => Op::SUB,
        hvm::hvm::OP_MUL => Op::MUL,
        hvm::hvm::OP_DIV => Op::DIV,
        hvm::hvm::FP_DIV => Op::DIV,
        hvm::hvm::OP_REM => Op::REM,
        hvm::hvm::FP_REM => Op::REM,
        hvm::hvm::OP_EQ => Op::EQ,
        hvm::hvm::OP_NEQ => Op::NEQ,
        hvm::hvm::OP_LT => Op::LT,
        hvm::hvm::OP_GT => Op::GT,
        hvm::hvm::OP_AND => {
          if typ == hvm::hvm::TY_F24 {
            todo!("Implement readback of atan2")
          } else {
            Op::AND
          }
        }
        hvm::hvm::OP_OR => {
          if typ == hvm::hvm::TY_F24 {
            todo!("Implement readback of log")
          } else {
            Op::OR
          }
        }
        hvm::hvm::OP_XOR => {
          // XOR on floats is how POW is encoded (see `Op::to_native_tag`).
          if typ == hvm::hvm::TY_F24 {
            Op::POW
          } else {
            Op::XOR
          }
        }
        hvm::hvm::OP_SHL => Op::SHL,
        hvm::hvm::FP_SHL => Op::SHL,
        hvm::hvm::OP_SHR => Op::SHR,
        hvm::hvm::FP_SHR => Op::SHR,
        _ => return None,
      };
      Some(op)
    }

    let node = next.node_id();
    match next.slot() {
      2 => {
        // If port1 has a partially applied number, the operation has 1 node.
        // Port0 has arg1 and port1 has arg2.
        // The operation is interpreted as being pre-flipped (if its a FP_, they cancel and don't flip).
        let port1_kind = self.net.node(self.net.enter_port(Port(node, 1)).node_id()).kind.clone();
        if let NodeKind::Num { val } = port1_kind {
          match hvm::hvm::Numb::get_typ(&Numb(val)) {
            hvm::hvm::OP_ADD.. => {
              let x1_port = self.net.enter_port(Port(node, 0));
              let x2_port = self.net.enter_port(Port(node, 1));
              let mut args = vec![];
              let mut types = vec![];
              let mut ops = vec![];
              add_arg(self, x1_port, &mut args, &mut types, &mut ops);
              add_arg(self, x2_port, &mut args, &mut types, &mut ops);
              let term = opr_term_from_hvm_args(&mut args, &mut types, &mut ops, true);
              if let Term::Err = term {
                // Since that function doesn't have access to the reader, add the error here.
                self.error(ReadbackError::InvalidNumericOp);
              }
              return term;
            }
            _ => {
              // Not a partially applied number, handle it in the next case
            }
          }
        }
        // If port0 has a partially applied number, it also has 1 node.
        // The operation is interpreted as not pre-flipped.
        let port0_kind = self.net.node(self.net.enter_port(Port(node, 0)).node_id()).kind.clone();
        if let NodeKind::Num { val } = port0_kind {
          match hvm::hvm::Numb::get_typ(&Numb(val)) {
            hvm::hvm::OP_ADD.. => {
              let x1_port = self.net.enter_port(Port(node, 0));
              let x2_port = self.net.enter_port(Port(node, 1));
              let mut args = vec![];
              let mut types = vec![];
              let mut ops = vec![];
              add_arg(self, x1_port, &mut args, &mut types, &mut ops);
              add_arg(self, x2_port, &mut args, &mut types, &mut ops);
              let term = opr_term_from_hvm_args(&mut args, &mut types, &mut ops, false);
              if let Term::Err = term {
                // Since that function doesn't have access to the reader, add the error here.
                self.error(ReadbackError::InvalidNumericOp);
              }
              return term;
            }
            _ => {
              // Not a partially applied number, handle it in the next case
            }
          }
        }
        // Otherwise, the operation has 2 nodes.
        // Read the top node port0 and 1, bottom node port1.
        // Args are in that order, skipping the operation.
        let bottom_id = node;
        let top_id = self.net.enter_port(Port(bottom_id, 0)).node_id();
        if let NodeKind::Opr = self.net.node(top_id).kind {
          let x1_port = self.net.enter_port(Port(top_id, 0));
          let x2_port = self.net.enter_port(Port(top_id, 1));
          let x3_port = self.net.enter_port(Port(bottom_id, 1));
          let mut args = vec![];
          let mut types = vec![];
          let mut ops = vec![];
          add_arg(self, x1_port, &mut args, &mut types, &mut ops);
          add_arg(self, x2_port, &mut args, &mut types, &mut ops);
          add_arg(self, x3_port, &mut args, &mut types, &mut ops);
          let term = opr_term_from_hvm_args(&mut args, &mut types, &mut ops, false);
          if let Term::Err = term {
            self.error(ReadbackError::InvalidNumericOp);
          }
          term
        } else {
          // Port 0 was not an OPR node, invalid.
          self.error(ReadbackError::InvalidNumericOp);
          Term::Err
        }
      }
      _ => {
        // Entered from a port other than 2, invalid.
        self.error(ReadbackError::InvalidNumericOp);
        Term::Err
      }
    }
  }
  /// Reads a switch term from a SWI node.
  ///
  /// Rebuilds a `switch` with a zero arm and a successor arm; the successor
  /// arm's lambda variable is renamed to the conventional `<bnd>-1`
  /// predecessor name.
  fn read_swi(&mut self, next: Port) -> Term {
    let node = next.node_id();
    match next.slot() {
      2 => {
        // Read the matched expression
        let arg = self.read_term(self.net.enter_port(Port(node, 0)));
        let bnd = if let Term::Var { nam } = &arg { nam.clone() } else { self.namegen.unique() };
        // Read the pattern matching node
        let sel_node = self.net.enter_port(Port(node, 1)).node_id();
        // We expect the pattern matching node to be a CON
        let sel_kind = &self.net.node(sel_node).kind;
        if sel_kind != &NodeKind::Ctr(CtrKind::Con(None)) {
          // TODO: Is there any case where we expect a different node type here on readback?
          self.error(ReadbackError::InvalidNumericMatch);
          return Term::Err;
        }
        let zero = self.read_term(self.net.enter_port(Port(sel_node, 1)));
        let mut succ = self.read_term(self.net.enter_port(Port(sel_node, 2)));
        // Call expand_generated in case of succ_term be a lifted term
        succ.expand_generated(self.book, self.recursive_defs);
        // Succ term should be a lambda
        let succ = match &mut succ {
          Term::Lam { pat, bod, .. } => {
            if let Pattern::Var(nam) = pat.as_ref() {
              let mut bod = std::mem::take(bod.as_mut());
              // Substitute the lambda variable with the predecessor name.
              if let Some(nam) = nam {
                bod.subst(nam, &Term::Var { nam: Name::new(format!("{bnd}-1")) });
              }
              bod
            } else {
              // Readback should never generate non-var patterns for lambdas.
              self.error(ReadbackError::InvalidNumericMatch);
              succ
            }
          }
          _ => {
            self.error(ReadbackError::InvalidNumericMatch);
            succ
          }
        };
        Term::Swt {
          arg: Box::new(arg),
          bnd: Some(bnd),
          with_arg: vec![],
          with_bnd: vec![],
          pred: None,
          arms: vec![zero, succ],
        }
      }
      _ => {
        // A SWI node may only be entered through its output port (2).
        self.error(ReadbackError::InvalidNumericMatch);
        Term::Err
      }
    }
  }
/// Enters both ports 1 and 2 of a node. Returns a Term if it is
/// possible to simplify the net, or the Terms on the two ports of the node.
/// The two possible outcomes are always equivalent.
///
/// If:
/// - The node Kind is CON/TUP/DUP
/// - Both ports 1 and 2 are connected to the same node on slots 1 and 2 respectively
/// - That node Kind is the same as the given node Kind
///
/// Then:
/// Reads the port 0 of the connected node, and returns that term.
///
/// Otherwise:
/// Returns the terms on ports 1 and 2 of the given node.
///
/// # Example
///
/// ```hvm
/// // λa let (a, b) = a; (a, b)
/// ([a b] [a b])
///
/// // The node `(a, b)` is just a reconstruction of the destructuring of `a`,
/// // So we can skip both steps and just return the "value" unchanged:
///
/// // λa a
/// (a a)
/// ```
///
fn decay_or_get_ports(&mut self, node: NodeId) -> Result<Term, (Term, Term)> {
  let fst_port = self.net.enter_port(Port(node, 1));
  let snd_port = self.net.enter_port(Port(node, 2));
  let node_kind = &self.net.node(node).kind;
  // Eta-reduce the readback inet.
  // This is not valid for all kinds of nodes, only CON/TUP/DUP, due to their interaction rules.
  if matches!(node_kind, NodeKind::Ctr(_)) {
    match (fst_port, snd_port) {
      // Both aux ports land on the matching slots of a single other node.
      (Port(fst_node, 1), Port(snd_node, 2)) if fst_node == snd_node => {
        if self.net.node(fst_node).kind == *node_kind {
          // Drop the mirrored node from the scope and read through it.
          self.scope.remove(&fst_node);
          let port_zero = self.net.enter_port(Port(fst_node, 0));
          let term = self.read_term(port_zero);
          return Ok(term);
        }
      }
      _ => {}
    }
  }
  let fst = self.read_term(fst_port);
  let snd = self.read_term(snd_port);
  Err((fst, snd))
}
/// Records a readback error to be reported later via `report_errors`.
pub fn error(&mut self, error: ReadbackError) {
  self.errors.push(error);
}
pub fn report_errors(&mut self, diagnostics: &mut Diagnostics) {
let mut err_counts = std::collections::HashMap::new();
for err in &self.errors {
*err_counts.entry(*err).or_insert(0) += 1;
}
for (err, count) in err_counts {
let count_msg = if count > 1 { format!(" ({count} occurrences)") } else { "".to_string() };
let msg = format!("{}{}", err, count_msg);
diagnostics.add_diagnostic(
msg.as_str(),
Severity::Warning,
DiagnosticOrigin::Readback,
Default::default(),
);
}
}
/// Returns whether the given port represents a tuple or some other
/// term (usually a lambda).
///
/// Used heuristic: a con node is a tuple if port 1 is a closed tree and not an ERA.
fn is_tup(&self, node: NodeId) -> bool {
  // Only CON nodes can represent tuples.
  if !matches!(self.net.node(node).kind, NodeKind::Ctr(CtrKind::Con(_))) {
    return false;
  }
  // An eraser on port 1 rules out the tuple interpretation.
  if self.net.node(self.net.enter_port(Port(node, 1)).node_id()).kind == NodeKind::Era {
    return false;
  }
  // Walk the tree hanging off port 1, pairing up both ends of every wire.
  let mut wires = HashSet::new();
  let mut to_check = vec![self.net.enter_port(Port(node, 1))];
  while let Some(port) = to_check.pop() {
    match port.slot() {
      0 => {
        // Interior node: descend into both aux ports.
        let node = port.node_id();
        let lft = self.net.enter_port(Port(node, 1));
        let rgt = self.net.enter_port(Port(node, 2));
        to_check.push(lft);
        to_check.push(rgt);
      }
      1 | 2 => {
        // Mark as a wire. If already present, mark as visited by removing it.
        if !(wires.insert(port) && wires.insert(self.net.enter_port(port))) {
          wires.remove(&port);
          wires.remove(&self.net.enter_port(port));
        }
      }
      _ => unreachable!(),
    }
  }
  // No hanging wires = a combinator = a tuple
  wires.is_empty()
}
}
/* Utils for numbers and numeric operations */
/// From an hvm number carrying the value and another carrying the type, return a Num term.
fn num_from_bits_with_type(val: u32, typ: u32) -> Term {
  let value = Numb(val);
  match hvm::hvm::Numb::get_typ(&Numb(typ)) {
    // An untyped number (symbol tag) is interpreted as a u24 by default,
    // exactly like an explicit u24 tag.
    hvm::hvm::TY_SYM | hvm::hvm::TY_U24 => Term::Num { val: Num::U24(Numb::get_u24(&value)) },
    hvm::hvm::TY_I24 => Term::Num { val: Num::I24(Numb::get_i24(&value)) },
    hvm::hvm::TY_F24 => Term::Num { val: Num::F24(Numb::get_f24(&value)) },
    // Any other numeric tag cannot be read back into a literal.
    _ => Term::Err,
  }
}
/* Insertion of dups in the middle of the term */
/// Represents `let #tag(fst, snd) = val` / `let #tag{fst snd} = val`
struct Split {
  // Whether this is a tuple or a duplication split.
  fan: FanKind,
  tag: Tag,
  // First binder (None = erased).
  fst: Option<Name>,
  // Second binder (None = erased).
  snd: Option<Name>,
  // The value being destructured.
  val: Term,
}
impl Default for Split {
fn default() -> Self {
Self {
fan: FanKind::Dup,
tag: Default::default(),
fst: Default::default(),
snd: Default::default(),
val: Default::default(),
}
}
}
impl Term {
  /// Calculates the number of times `fst` and `snd` appear in this term. If
  /// that is `>= threshold`, it inserts the split at this term, and returns
  /// `None`. Otherwise, returns `Some(uses)`.
  ///
  /// This is only really useful when called in two passes – first, with
  /// `threshold = usize::MAX`, to count the number of uses, and then with
  /// `threshold = uses`.
  ///
  /// This has the effect of inserting the split at the lowest common ancestor
  /// of all of the uses of `fst` and `snd`.
  fn insert_split(&mut self, split: &mut Split, threshold: usize) -> Option<usize> {
    maybe_grow(|| {
      // Direct uses at this node (0 or 1).
      let mut n = match self {
        Term::Var { nam } => usize::from(split.fst == *nam || split.snd == *nam),
        _ => 0,
      };
      // Accumulate uses from children; `?` stops early once a child inserted the split.
      for child in self.children_mut() {
        n += child.insert_split(split, threshold)?;
      }
      if n >= threshold {
        // All uses are below this term: wrap it in the destructuring `let`.
        let Split { fan, tag, fst, snd, val } = std::mem::take(split);
        let nxt = Box::new(std::mem::take(self));
        *self = Term::Let {
          pat: Box::new(Pattern::Fan(fan, tag, vec![Pattern::Var(fst), Pattern::Var(snd)])),
          val: Box::new(val),
          nxt,
        };
        None
      } else {
        Some(n)
      }
    })
  }
}
/* Variable name generation */
/// Generates sequential numeric ids that are turned into variable names
/// during readback.
#[derive(Default)]
pub struct NameGen {
  // Maps a variable port to the id of the name already assigned to it.
  pub var_port_to_id: HashMap<Port, u64>,
  // Next fresh id to hand out.
  pub id_counter: u64,
}
impl NameGen {
  /// Hands out the next fresh numeric id.
  fn next_id(&mut self) -> u64 {
    let id = self.id_counter;
    self.id_counter += 1;
    id
  }

  /// Given a port, returns its name, or assigns one if it wasn't named yet.
  fn var_name(&mut self, var_port: Port) -> Name {
    if let Some(id) = self.var_port_to_id.get(&var_port) {
      return Name::from(*id);
    }
    let id = self.next_id();
    self.var_port_to_id.insert(var_port, id);
    Name::from(id)
  }

  /// Names the port, unless it is wired to an eraser, in which case the
  /// variable is unused and `None` is returned.
  fn decl_name(&mut self, net: &INet, var_port: Port) -> Option<Name> {
    let var_use = net.enter_port(var_port);
    if net.node(var_use.node_id()).kind == NodeKind::Era {
      None
    } else {
      Some(self.var_name(var_port))
    }
  }

  /// Generates a fresh name not tied to any port.
  pub fn unique(&mut self) -> Name {
    Name::from(self.next_id())
  }
}
/* Readback errors */
/// Problems found while converting an HVM net back into a Bend `Term`.
#[derive(Debug, Clone, Copy)]
pub enum ReadbackError {
  /// A switch structure did not have the expected SWI/CON shape.
  InvalidNumericMatch,
  /// A numeric-operation structure did not have the expected OPR shape.
  InvalidNumericOp,
  /// Readback unexpectedly walked back up to the root.
  ReachedRoot,
  /// The term structure contained a cycle.
  Cyclic,
}
// Equality and hashing are by variant only (via `mem::discriminant`), so
// errors of the same kind compare equal and collapse together when grouped.
impl PartialEq for ReadbackError {
  fn eq(&self, other: &Self) -> bool {
    core::mem::discriminant(self) == core::mem::discriminant(other)
  }
}

impl Eq for ReadbackError {}

impl std::hash::Hash for ReadbackError {
  fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
    core::mem::discriminant(self).hash(state);
  }
}
impl std::fmt::Display for ReadbackError {
  /// User-facing message for each readback error kind.
  fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
    // Select the fixed message, then emit it with a single write.
    let msg = match self {
      ReadbackError::InvalidNumericMatch => "Encountered an invalid 'switch'.",
      ReadbackError::InvalidNumericOp => "Encountered an invalid numeric operation.",
      ReadbackError::ReachedRoot => {
        "Unable to interpret the HVM result as a valid Bend term. (Reached Root)"
      }
      ReadbackError::Cyclic => {
        "Unable to interpret the HVM result as a valid Bend term. (Cyclic Term)"
      }
    };
    f.write_str(msg)
  }
}
/* Recover unscoped vars */
impl Term {
  /// Collects into `unscoped` the variables that are used while not bound in
  /// the current `scope`.
  pub fn collect_unscoped(&self, unscoped: &mut HashSet<Name>, scope: &mut Vec<Name>) {
    maybe_grow(|| match self {
      Term::Var { nam } if !scope.contains(nam) => _ = unscoped.insert(nam.clone()),
      Term::Swt { arg, bnd, with_bnd: _, with_arg, pred: _, arms } => {
        arg.collect_unscoped(unscoped, scope);
        for arg in with_arg {
          arg.collect_unscoped(unscoped, scope);
        }
        arms[0].collect_unscoped(unscoped, scope);
        // The successor arm implicitly binds the `<bnd>-1` predecessor variable.
        if let Some(bnd) = bnd {
          scope.push(Name::new(format!("{bnd}-1")));
        }
        arms[1].collect_unscoped(unscoped, scope);
        if bnd.is_some() {
          scope.pop();
        }
      }
      _ => {
        for (child, binds) in self.children_with_binds() {
          let binds: Vec<_> = binds.collect();
          // Push the child's binders, recurse into it, then pop them again.
          for bind in binds.iter().copied().flatten() {
            scope.push(bind.clone());
          }
          child.collect_unscoped(unscoped, scope);
          for _bind in binds.into_iter().flatten() {
            scope.pop();
          }
        }
      }
    })
  }

  /// Transform the variables that we previously found were unscoped into their unscoped variants.
  pub fn apply_unscoped(&mut self, unscoped: &HashSet<Name>) {
    maybe_grow(|| {
      // Scoped var uses become unscoped links.
      if let Term::Var { nam } = self {
        if unscoped.contains(nam) {
          *self = Term::Link { nam: std::mem::take(nam) }
        }
      }
      // Binders are rewritten in the pattern, then children are visited.
      if let Some(pat) = self.pattern_mut() {
        pat.apply_unscoped(unscoped);
      }
      for child in self.children_mut() {
        child.apply_unscoped(unscoped);
      }
    })
  }
}
impl Pattern {
  /// Rewrites `Var` binders whose names were found to be unscoped into `Chn`
  /// (unscoped) binders, recursing through all sub-patterns.
  fn apply_unscoped(&mut self, unscoped: &HashSet<Name>) {
    maybe_grow(|| {
      match self {
        Pattern::Var(Some(nam)) if unscoped.contains(nam) => {
          *self = Pattern::Chn(std::mem::take(nam));
        }
        _ => {}
      }
      for child in self.children_mut() {
        child.apply_unscoped(unscoped);
      }
    })
  }
}
| rust | Apache-2.0 | d184863f03e796d1d657958a51dd6dd331ade92d | 2026-01-04T15:41:39.511038Z | false |
HigherOrderCO/Bend | https://github.com/HigherOrderCO/Bend/blob/d184863f03e796d1d657958a51dd6dd331ade92d/src/fun/display.rs | src/fun/display.rs | use super::{Book, Definition, FanKind, Name, Num, Op, Pattern, Rule, Tag, Term, Type};
use crate::maybe_grow;
use std::{fmt, ops::Deref, sync::atomic::AtomicU64};
/* Some aux structures for things that are not so simple to display */
/// Adapter that turns any formatting closure into a `Display` value.
pub struct DisplayFn<F: Fn(&mut fmt::Formatter) -> fmt::Result>(pub F);

impl<F: Fn(&mut fmt::Formatter) -> fmt::Result> fmt::Display for DisplayFn<F> {
  fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
    // Destructure to make the delegation to the wrapped closure explicit.
    let DisplayFn(fun) = self;
    fun(f)
  }
}
/// Displays the items produced by the closure `self.0`, separated by `self.1`.
pub struct DisplayJoin<F, S>(pub F, pub S);

impl<F, I, S> fmt::Display for DisplayJoin<F, S>
where
  F: (Fn() -> I),
  I: IntoIterator,
  I::Item: fmt::Display,
  S: fmt::Display,
{
  fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
    // Emit the separator before every item except the first.
    let mut first = true;
    for item in self.0() {
      if !first {
        self.1.fmt(f)?;
      }
      item.fmt(f)?;
      first = false;
    }
    Ok(())
  }
}
/// Builds a `DisplayFn` out of `format!`-style arguments, capturing the
/// surrounding environment by move.
macro_rules! display {
  ($($x:tt)*) => {
    DisplayFn(move |f| write!(f, $($x)*))
  };
}
/* The actual display implementations */
// Global counter used to make up fresh names for anonymous fan patterns
// while printing (see `gen_fan_pat_name` / `namegen_reset`).
static NAMEGEN: AtomicU64 = AtomicU64::new(0);

/// Generates a fresh `pat%…` name for displaying a lambda over a fan pattern.
fn gen_fan_pat_name() -> Name {
  let n = NAMEGEN.fetch_add(1, std::sync::atomic::Ordering::SeqCst);
  Name::new(format!("pat%{}", super::num_to_name(n)))
}

/// Restarts the fan-pattern name counter (done once per printed definition).
fn namegen_reset() {
  NAMEGEN.store(0, std::sync::atomic::Ordering::SeqCst);
}
impl fmt::Display for Term {
  /// Single-line rendering of a term in Bend surface syntax.
  fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
    // `maybe_grow` guards against stack overflow on deeply nested terms.
    maybe_grow(|| match self {
      // A lambda over a fan pattern has no direct surface syntax, so it is
      // printed as a lambda over a fresh name immediately destructured by a `let`.
      Term::Lam { tag, pat, bod } => match &**pat {
        Pattern::Fan(_, _, _) => {
          let name = gen_fan_pat_name();
          write!(f, "{}λ{name} let {} = {name}; {}", tag.display_padded(), pat, bod)
        }
        _ => write!(f, "{}λ{} {}", tag.display_padded(), pat, bod),
      },
      Term::Var { nam } => write!(f, "{nam}"),
      // Unscoped variables are prefixed with `$`.
      Term::Link { nam } => write!(f, "${nam}"),
      Term::Let { pat, val, nxt } => write!(f, "let {} = {}; {}", pat, val, nxt),
      Term::With { typ, bod } => write!(f, "with {typ} {{ {bod} }}"),
      Term::Ask { pat, val, nxt } => write!(f, "ask {pat} = {val}; {nxt}"),
      Term::Use { nam, val, nxt } => {
        // An anonymous `use` binder is not expected to reach printing.
        let Some(nam) = nam else { unreachable!() };
        write!(f, "use {} = {}; {}", nam, val, nxt)
      }
      Term::Ref { nam: def_name } => write!(f, "{def_name}"),
      Term::App { tag, fun, arg } => {
        // `display_app` flattens same-tag application spines.
        write!(f, "{}({} {})", tag.display_padded(), fun.display_app(tag), arg)
      }
      Term::Mat { arg, bnd, with_bnd, with_arg, arms } => {
        write!(f, "match ")?;
        if let Some(bnd) = bnd {
          write!(f, "{} = ", bnd)?;
        }
        write!(f, "{} ", arg)?;
        if !with_bnd.is_empty() {
          write!(f, "with ")?;
          for (bnd, arg) in with_bnd.iter().zip(with_arg.iter()) {
            write!(f, "{} = {}, ", var_as_str(bnd), arg)?;
          }
        }
        write!(f, "{{ ")?;
        for arm in arms {
          // Arm: constructor name, field binders, then the body.
          write!(f, "{}", var_as_str(&arm.0))?;
          for var in &arm.1 {
            write!(f, " {}", var_as_str(var))?;
          }
          write!(f, ": {}; ", arm.2)?;
        }
        write!(f, "}}")
      }
      Term::Swt { arg, bnd, with_bnd, with_arg, pred, arms } => {
        write!(f, "switch ")?;
        if let Some(bnd) = bnd {
          write!(f, "{bnd} = ")?;
        }
        write!(f, "{arg} ")?;
        if !with_bnd.is_empty() {
          write!(f, "with ")?;
          for (bnd, arg) in with_bnd.iter().zip(with_arg.iter()) {
            write!(f, "{} = {}, ", var_as_str(bnd), arg)?;
          }
        }
        write!(f, "{{ ")?;
        for (i, arm) in arms.iter().enumerate() {
          // The last arm is the default `_` case and may bind the predecessor.
          if i == arms.len() - 1 {
            write!(f, "_")?;
            if let Some(pred) = pred {
              write!(f, " {pred}")?;
            }
          } else {
            write!(f, "{i}")?;
          }
          write!(f, ": {arm}; ")?;
        }
        write!(f, "}}")
      }
      Term::Fold { bnd, arg, with_bnd, with_arg, arms } => {
        write!(f, "fold ")?;
        if let Some(bnd) = bnd {
          write!(f, "{} = ", bnd)?;
        }
        write!(f, "{} ", arg)?;
        if !with_bnd.is_empty() {
          write!(f, "with ")?;
          for (bnd, arg) in with_bnd.iter().zip(with_arg.iter()) {
            write!(f, "{} = {}, ", var_as_str(bnd), arg)?;
          }
        }
        write!(f, "{{ ")?;
        for arm in arms {
          write!(f, "{}", var_as_str(&arm.0))?;
          for var in &arm.1 {
            write!(f, " {}", var_as_str(var))?;
          }
          write!(f, ": {}; ", arm.2)?;
        }
        write!(f, "}}")
      }
      Term::Bend { bnd: bind, arg: init, cond, step, base } => {
        write!(f, "bend ")?;
        for (bind, init) in bind.iter().zip(init) {
          if let Some(bind) = bind {
            write!(f, "{} = ", bind)?;
          }
          write!(f, "{}, ", init)?;
        }
        write!(f, "{{ when {cond}: {step}; else: {base} }}")
      }
      Term::Fan { fan: FanKind::Tup, tag, els } => write!(f, "{}({})", tag, DisplayJoin(|| els.iter(), ", ")),
      Term::Fan { fan: FanKind::Dup, tag, els } => write!(f, "{}{{{}}}", tag, DisplayJoin(|| els, " ")),
      Term::Era => write!(f, "*"),
      Term::Num { val: Num::U24(val) } => write!(f, "{val}"),
      // i24 literals always carry an explicit sign.
      Term::Num { val: Num::I24(val) } => write!(f, "{}{}", if *val < 0 { "-" } else { "+" }, val.abs()),
      Term::Num { val: Num::F24(val) } => write!(f, "{val:.3}"),
      Term::Nat { val } => write!(f, "#{val}"),
      Term::Str { val } => write!(f, "{val:?}"),
      Term::Oper { opr, fst, snd } => {
        write!(f, "({} {} {})", opr, fst, snd)
      }
      Term::List { els } => write!(f, "[{}]", DisplayJoin(|| els.iter(), ", "),),
      Term::Open { typ, var, bod } => write!(f, "open {typ} {var}; {bod}"),
      Term::Def { def, nxt } => {
        write!(f, "def ")?;
        for rule in def.rules.iter() {
          write!(f, "{}", rule.display(&def.name))?;
        }
        write!(f, "{nxt}")
      }
      Term::Err => write!(f, "<Invalid>"),
    })
  }
}
impl fmt::Display for Tag {
  /// Explicit tags print as `#name`/`#num`; implicit tags print as nothing.
  fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
    match self {
      Tag::Named(name) => write!(f, "#{name}"),
      Tag::Numeric(num) => write!(f, "#{num}"),
      // Auto and static tags have no textual representation.
      Tag::Auto | Tag::Static => Ok(()),
    }
  }
}
impl fmt::Display for Pattern {
  /// Renders a pattern in Bend surface syntax.
  fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
    match self {
      // An erased binder prints as `*`.
      Pattern::Var(None) => write!(f, "*"),
      Pattern::Var(Some(nam)) => write!(f, "{nam}"),
      // Unscoped binders are prefixed with `$`.
      Pattern::Chn(nam) => write!(f, "${nam}"),
      Pattern::Ctr(nam, pats) => {
        write!(f, "({}{})", nam, DisplayJoin(|| pats.iter().map(|p| display!(" {p}")), ""))
      }
      Pattern::Num(num) => write!(f, "{num}"),
      // Tuples use parens/commas; duplications use braces/spaces.
      Pattern::Fan(FanKind::Tup, tag, pats) => write!(f, "{}({})", tag, DisplayJoin(|| pats, ", ")),
      Pattern::Fan(FanKind::Dup, tag, pats) => write!(f, "{}{{{}}}", tag, DisplayJoin(|| pats, " ")),
      Pattern::Lst(pats) => write!(f, "[{}]", DisplayJoin(|| pats, ", ")),
      Pattern::Str(str) => write!(f, "\"{str}\""),
    }
  }
}
impl Rule {
  /// Displays the rule as `(<def_name> <pats>) = <body>` on a single line.
  pub fn display<'a>(&'a self, def_name: &'a Name) -> impl fmt::Display + 'a {
    display!(
      "({}{}) = {}",
      def_name,
      DisplayJoin(|| self.pats.iter().map(|x| display!(" {x}")), ""),
      self.body
    )
  }
}
impl fmt::Display for Definition {
  fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
    // Fan-pattern names restart from zero for every printed definition.
    namegen_reset();
    // Signature line (prefixed `unchecked` when the definition is not
    // type-checked), followed by one line per rule.
    writeln!(f, "{}{}: {}", if !self.check { "unchecked " } else { "" }, self.name, self.typ)?;
    write!(f, "{}", DisplayJoin(|| self.rules.iter().map(|x| x.display(&self.name)), "\n"))
  }
}
impl fmt::Display for Book {
  /// Prints all Bend definitions, then the native HVM definitions.
  fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
    write!(f, "{}", DisplayJoin(|| self.defs.values(), "\n\n"))?;
    for def in self.hvm_defs.values() {
      writeln!(f, "hvm {}:\n{}\n", def.name, def.body.show())?;
    }
    Ok(())
  }
}
impl fmt::Display for Name {
  /// Prints the underlying interned string directly.
  fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
    self.0.fmt(f)
  }
}
impl Term {
  /// Displays an application spine, flattening nested `App`s that share the
  /// same tag into one space-separated sequence (the caller adds the parens).
  fn display_app<'a>(&'a self, tag: &'a Tag) -> impl fmt::Display + 'a {
    maybe_grow(|| {
      DisplayFn(move |f| match self {
        Term::App { tag: tag2, fun, arg } if tag2 == tag => {
          write!(f, "{} {}", fun.display_app(tag), arg)
        }
        _ => write!(f, "{}", self),
      })
    })
  }
}
impl fmt::Display for Op {
  /// Prints the operator's source-level symbol.
  fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
    // Each operator maps to a fixed symbol; pick it, then write once.
    let symbol = match self {
      Op::ADD => "+",
      Op::SUB => "-",
      Op::MUL => "*",
      Op::DIV => "/",
      Op::REM => "%",
      Op::EQ => "==",
      Op::NEQ => "!=",
      Op::LT => "<",
      Op::GT => ">",
      Op::AND => "&",
      Op::OR => "|",
      Op::XOR => "^",
      Op::POW => "**",
      Op::SHR => ">>",
      Op::SHL => "<<",
      Op::LE => "<=",
      Op::GE => ">=",
    };
    f.write_str(symbol)
  }
}
impl Tag {
  /// Like `Display`, but explicit tags get a trailing space so they can
  /// directly prefix the following token.
  pub fn display_padded(&self) -> impl fmt::Display + '_ {
    DisplayFn(move |f| match self {
      Tag::Named(name) => write!(f, "#{name} "),
      Tag::Numeric(num) => write!(f, "#{num} "),
      // Implicit tags print as nothing, so no padding either.
      Tag::Auto | Tag::Static => Ok(()),
    })
  }
}
impl fmt::Display for Type {
  /// Renders a type in Bend surface syntax.
  fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
    maybe_grow(|| match self {
      Type::Hole => write!(f, "_"),
      Type::Var(nam) => write!(f, "{nam}"),
      // Chained arrows on the right are kept flat by `display_arrow`.
      Type::Arr(lft, rgt) => write!(f, "({} -> {})", lft, rgt.display_arrow()),
      Type::Ctr(nam, args) => {
        // Nullary constructors print bare; applied ones are parenthesized.
        if args.is_empty() {
          write!(f, "{nam}")
        } else {
          write!(f, "({nam} {})", DisplayJoin(|| args.iter(), " "))
        }
      }
      Type::Number(t) => write!(f, "(Number {t})"),
      Type::Integer(t) => write!(f, "(Integer {t})"),
      Type::U24 => write!(f, "u24"),
      Type::I24 => write!(f, "i24"),
      Type::F24 => write!(f, "f24"),
      Type::Any => write!(f, "Any"),
      Type::None => write!(f, "None"),
      Type::Tup(els) => write!(f, "({})", DisplayJoin(|| els.iter(), ", ")),
    })
  }
}
impl Type {
  /// Displays a chain of arrows without re-parenthesizing the right side
  /// (arrows are right-associative).
  pub fn display_arrow(&self) -> impl fmt::Display + '_ {
    maybe_grow(|| {
      DisplayFn(move |f| match self {
        Type::Arr(lft, rgt) => {
          write!(f, "{} -> {}", lft, rgt.display_arrow())
        }
        _ => write!(f, "{}", self),
      })
    })
  }
}
fn var_as_str(nam: &Option<Name>) -> &str {
nam.as_ref().map_or("*", Name::deref)
}
/* Pretty printing */
impl Book {
  /// Multi-line, indented rendering of the whole program:
  /// Bend definitions first, then the native HVM definitions.
  pub fn display_pretty(&self) -> impl fmt::Display + '_ {
    display!(
      "{}\n{}",
      DisplayJoin(|| self.defs.values().map(|def| def.display_pretty()), "\n\n"),
      DisplayJoin(
        || self.hvm_defs.values().map(|def| display!("hvm {}:\n{}", def.name, def.body.show())),
        "\n"
      )
    )
  }
}
impl Definition {
  /// Multi-line rendering of all rules.
  /// Resets the fan-pattern name counter so names restart per definition.
  pub fn display_pretty(&self) -> impl fmt::Display + '_ {
    namegen_reset();
    display!("{}", DisplayJoin(|| self.rules.iter().map(|x| x.display_pretty(&self.name)), "\n"))
  }
}
impl Rule {
  /// Multi-line rendering of a top-level rule, body on its own indented line.
  pub fn display_pretty<'a>(&'a self, def_name: &'a Name) -> impl fmt::Display + 'a {
    display!(
      "({}{}) =\n {}",
      def_name,
      DisplayJoin(|| self.pats.iter().map(|x| display!(" {x}")), ""),
      self.body.display_pretty(2)
    )
  }

  /// Like `display_pretty`, but indented by `tab` for rules that live inside
  /// a local `def` term.
  pub fn display_def_aux<'a>(&'a self, def_name: &'a Name, tab: usize) -> impl fmt::Display + 'a {
    display!(
      "({}{}) =\n {:tab$}{}",
      def_name,
      DisplayJoin(|| self.pats.iter().map(|x| display!(" {x}")), ""),
      "",
      self.body.display_pretty(tab + 2)
    )
  }
}
impl Term {
  /// Multi-line rendering of a term, indenting nested constructs by two
  /// spaces relative to the current indentation `tab`.
  pub fn display_pretty(&self, tab: usize) -> impl fmt::Display + '_ {
    maybe_grow(|| {
      DisplayFn(move |f| match self {
        // Lambdas over fan patterns are printed via a fresh name + `let`,
        // same as in the single-line `Display` impl.
        Term::Lam { tag, pat, bod } => match &**pat {
          Pattern::Fan(_, _, _) => {
            let name = gen_fan_pat_name();
            write!(
              f,
              "{}λ{name} let {} = {name};\n{:tab$}{}",
              tag.display_padded(),
              pat,
              "",
              bod.display_pretty(tab),
            )
          }
          _ => write!(f, "{}λ{} {}", tag.display_padded(), pat, bod.display_pretty(tab)),
        },
        Term::Var { nam } => write!(f, "{nam}"),
        Term::Link { nam } => write!(f, "${nam}"),
        Term::Let { pat, val, nxt } => {
          write!(f, "let {} = {};\n{:tab$}{}", pat, val.display_pretty(tab), "", nxt.display_pretty(tab))
        }
        Term::With { typ, bod } => {
          writeln!(f, "with {typ} {{")?;
          writeln!(f, "{:tab$}{}", "", bod.display_pretty(tab + 2), tab = tab + 2)?;
          write!(f, "{:tab$}}}", "")
        }
        Term::Ask { pat, val, nxt } => {
          write!(f, "ask {} = {};\n{:tab$}{}", pat, val.display_pretty(tab), "", nxt.display_pretty(tab))
        }
        Term::Use { nam, val, nxt } => {
          write!(
            f,
            "use {} = {};\n{:tab$}{}",
            var_as_str(nam),
            val.display_pretty(tab),
            "",
            nxt.display_pretty(tab)
          )
        }
        Term::App { tag, fun, arg } => {
          write!(
            f,
            "{}({} {})",
            tag.display_padded(),
            fun.display_app_pretty(tag, tab),
            arg.display_pretty(tab)
          )
        }
        Term::Fan { fan: FanKind::Tup, tag, els } => {
          write!(f, "{}({})", tag, DisplayJoin(|| els.iter().map(|e| e.display_pretty(tab)), ", "))
        }
        Term::Fan { fan: FanKind::Dup, tag, els } => {
          write!(
            f,
            "{}{{{}}}",
            tag.display_padded(),
            DisplayJoin(|| els.iter().map(|e| e.display_pretty(tab)), " ")
          )
        }
        Term::List { els } => {
          write!(f, "[{}]", DisplayJoin(|| els.iter().map(|e| e.display_pretty(tab)), " "))
        }
        Term::Oper { opr, fst, snd } => {
          write!(f, "({} {} {})", opr, fst.display_pretty(tab), snd.display_pretty(tab))
        }
        Term::Mat { bnd, arg, with_bnd, with_arg, arms } => {
          write!(f, "match ")?;
          if let Some(bnd) = bnd {
            write!(f, "{} = ", bnd)?;
          }
          write!(f, "{} ", arg.display_pretty(tab))?;
          if !with_bnd.is_empty() {
            write!(f, "with ")?;
            for (bnd, arg) in with_bnd.iter().zip(with_arg.iter()) {
              write!(f, "{} = {}, ", var_as_str(bnd), arg)?;
            }
          }
          write!(f, "{{ ")?;
          // Each arm goes on its own line, indented one level.
          for arm in arms {
            write!(f, "\n{:tab$}{}", "", var_as_str(&arm.0), tab = tab + 2)?;
            for var in &arm.1 {
              write!(f, " {}", var_as_str(var))?;
            }
            write!(f, ": {}; ", arm.2.display_pretty(tab + 4))?;
          }
          write!(f, "\n{:tab$}}}", "")
        }
        Term::Swt { bnd, arg, with_bnd, with_arg, pred, arms } => {
          write!(f, "switch ")?;
          if let Some(bnd) = bnd {
            write!(f, "{bnd} = ")?;
          }
          write!(f, "{} ", arg.display_pretty(tab))?;
          if !with_bnd.is_empty() {
            write!(f, "with ")?;
            for (bnd, arg) in with_bnd.iter().zip(with_arg.iter()) {
              write!(f, "{} = {}, ", var_as_str(bnd), arg)?;
            }
          }
          writeln!(f, "{{")?;
          for (i, arm) in arms.iter().enumerate() {
            // The last arm is the default `_` case and may bind the predecessor.
            if i == arms.len() - 1 {
              write!(f, "{:tab$}_", "", tab = tab + 2)?;
              if let Some(pred) = pred {
                write!(f, " {pred}")?;
              }
            } else {
              write!(f, "{:tab$}{i}", "", tab = tab + 2)?;
            }
            writeln!(f, ": {};", arm.display_pretty(tab + 4))?;
          }
          write!(f, "{:tab$}}}", "")
        }
        Term::Fold { bnd, arg, with_bnd, with_arg, arms } => {
          write!(f, "fold ")?;
          if let Some(bnd) = bnd {
            write!(f, "{} = ", bnd)?;
          }
          write!(f, "{} ", arg.display_pretty(tab))?;
          if !with_bnd.is_empty() {
            write!(f, "with ")?;
            for (bnd, arg) in with_bnd.iter().zip(with_arg.iter()) {
              write!(f, "{} = {}, ", var_as_str(bnd), arg)?;
            }
          }
          write!(f, "{{ ")?;
          for arm in arms {
            write!(f, "\n{:tab$}{}", "", var_as_str(&arm.0), tab = tab + 2)?;
            for var in &arm.1 {
              write!(f, " {}", var_as_str(var))?;
            }
            write!(f, ": {}; ", arm.2.display_pretty(tab + 4))?;
          }
          write!(f, "\n{:tab$}}}", "")
        }
        Term::Bend { bnd: bind, arg: init, cond, step, base } => {
          write!(f, "bend ")?;
          for (bind, init) in bind.iter().zip(init) {
            if let Some(bind) = bind {
              write!(f, "{} = ", bind)?;
            }
            write!(f, "{}, ", init)?;
          }
          writeln!(f, "{{")?;
          writeln!(f, "{:tab$}when {}:", "", cond.display_pretty(tab + 2), tab = tab + 2)?;
          writeln!(f, "{:tab$}{}", "", step.display_pretty(tab + 4), tab = tab + 4)?;
          writeln!(f, "{:tab$}else:", "", tab = tab + 2)?;
          writeln!(f, "{:tab$}{}", "", base.display_pretty(tab + 4), tab = tab + 4)?;
          write!(f, "{:tab$}}}", "")
        }
        Term::Open { typ, var, bod } => {
          write!(f, "open {typ} {var};\n{:tab$}{}", "", bod.display_pretty(tab))
        }
        Term::Nat { val } => write!(f, "#{val}"),
        Term::Num { val: Num::U24(val) } => write!(f, "{val}"),
        // i24 literals always carry an explicit sign.
        Term::Num { val: Num::I24(val) } => write!(f, "{}{}", if *val < 0 { "-" } else { "+" }, val.abs()),
        Term::Num { val: Num::F24(val) } => write!(f, "{val:.3}"),
        Term::Str { val } => write!(f, "{val:?}"),
        Term::Ref { nam } => write!(f, "{nam}"),
        Term::Def { def, nxt } => {
          write!(f, "def ")?;
          // The first rule continues the `def ` line; later rules are indented.
          for (i, rule) in def.rules.iter().enumerate() {
            if i == 0 {
              writeln!(f, "{}", rule.display_def_aux(&def.name, tab + 4))?;
            } else {
              writeln!(f, "{:tab$}{}", "", rule.display_def_aux(&def.name, tab + 4), tab = tab + 4)?;
            }
          }
          write!(f, "{:tab$}{}", "", nxt.display_pretty(tab))
        }
        Term::Era => write!(f, "*"),
        Term::Err => write!(f, "<Error>"),
      })
    })
  }

  /// Pretty counterpart of `display_app`: flattens same-tag application
  /// spines at the current indentation level.
  fn display_app_pretty<'a>(&'a self, tag: &'a Tag, tab: usize) -> impl fmt::Display + 'a {
    maybe_grow(|| {
      DisplayFn(move |f| match self {
        Term::App { tag: tag2, fun, arg } if tag2 == tag => {
          write!(f, "{} {}", fun.display_app_pretty(tag, tab), arg.display_pretty(tab))
        }
        _ => write!(f, "{}", self.display_pretty(tab)),
      })
    })
  }
}
| rust | Apache-2.0 | d184863f03e796d1d657958a51dd6dd331ade92d | 2026-01-04T15:41:39.511038Z | false |
HigherOrderCO/Bend | https://github.com/HigherOrderCO/Bend/blob/d184863f03e796d1d657958a51dd6dd331ade92d/src/fun/builtins.rs | src/fun/builtins.rs | use super::{
parser::{FunParser, ParseBook},
Book, Name, Num, Pattern, Term,
};
use crate::maybe_grow;
/// Source of the builtin definitions, embedded into the binary at build time.
const BUILTINS: &str = include_str!(concat!(env!("CARGO_MANIFEST_DIR"), "/src/fun/builtins.bend"));

// List type: type name, constructors, tags and tag refs.
pub const LIST: &str = "List";
pub const LCONS: &str = "List/Cons";
pub const LNIL: &str = "List/Nil";
pub const LCONS_TAG: u32 = 1;
pub const LNIL_TAG_REF: &str = "List/Nil/tag";
pub const LCONS_TAG_REF: &str = "List/Cons/tag";
// Field names shared by the list-like constructors.
pub const HEAD: &str = "head";
pub const TAIL: &str = "tail";

// String type.
pub const STRING: &str = "String";
pub const SCONS: &str = "String/Cons";
pub const SNIL: &str = "String/Nil";
pub const SCONS_TAG: u32 = 1;
pub const SNIL_TAG_REF: &str = "String/Nil/tag";
pub const SCONS_TAG_REF: &str = "String/Cons/tag";

// Peano natural numbers.
pub const NAT: &str = "Nat";
pub const NAT_SUCC: &str = "Nat/Succ";
pub const NAT_ZERO: &str = "Nat/Zero";
pub const NAT_SUCC_TAG: u32 = 0;

// Tree type.
pub const TREE: &str = "Tree";
pub const TREE_NODE: &str = "Tree/Node";
pub const TREE_LEAF: &str = "Tree/Leaf";

// Map type.
pub const MAP: &str = "Map";
pub const MAP_NODE: &str = "Map/Node";
pub const MAP_LEAF: &str = "Map/Leaf";

// IO type.
pub const IO: &str = "IO";
pub const IO_DONE: &str = "IO/Done";
pub const IO_CALL: &str = "IO/Call";

/// All constructor names belonging to builtin types.
pub const BUILTIN_CTRS: &[&str] =
  &[LCONS, LNIL, SCONS, SNIL, NAT_SUCC, NAT_ZERO, TREE_NODE, TREE_LEAF, MAP_NODE, MAP_LEAF, IO_DONE, IO_CALL];

/// All builtin type names.
pub const BUILTIN_TYPES: &[&str] = &[LIST, STRING, NAT, TREE, MAP, IO];
impl ParseBook {
  /// Parses the embedded `builtins.bend` source into a fresh book.
  ///
  /// Panics if the bundled source fails to parse, since that would be a bug
  /// in the compiler itself rather than in user code.
  pub fn builtins() -> Self {
    let book =
      FunParser::new(Name::new("/src/fun/builtins.bend"), BUILTINS, true).parse_book(Self::default());
    book.unwrap_or_else(|e| panic!("Error parsing builtin file, this should not happen:\n{e}"))
  }
}
impl Book {
pub fn encode_builtins(&mut self) {
for def in self.defs.values_mut() {
for rule in def.rules.iter_mut() {
rule.pats.iter_mut().for_each(Pattern::encode_builtins);
rule.body.encode_builtins();
}
}
}
}
impl Term {
  /// Desugars builtin literals (lists, strings, naturals) in this term into
  /// applications of their constructor functions, recursively.
  fn encode_builtins(&mut self) {
    maybe_grow(|| match self {
      Term::List { els } => *self = Term::encode_list(std::mem::take(els)),
      Term::Str { val } => *self = Term::encode_str(val),
      Term::Nat { val } => *self = Term::encode_nat(*val),
      Term::Def { def, nxt } => {
        // Local definitions carry their own rules, which must be encoded too.
        for rule in def.rules.iter_mut() {
          rule.pats.iter_mut().for_each(Pattern::encode_builtins);
          rule.body.encode_builtins();
        }
        nxt.encode_builtins();
      }
      _ => {
        for child in self.children_mut() {
          child.encode_builtins();
        }
      }
    })
  }

  // `[a, b]` becomes `(List/Cons a (List/Cons b List/Nil))`.
  fn encode_list(elements: Vec<Term>) -> Term {
    elements.into_iter().rfold(Term::r#ref(LNIL), |acc, mut nxt| {
      nxt.encode_builtins();
      Term::call(Term::r#ref(LCONS), [nxt, acc])
    })
  }

  // `"ab"` becomes `(String/Cons 'a' (String/Cons 'b' String/Nil))`.
  pub fn encode_str(val: &str) -> Term {
    val.chars().rfold(Term::r#ref(SNIL), |acc, char| {
      // The mask keeps the code point within u24 range; valid Unicode scalar
      // values (<= 0x10FFFF) are unaffected by it.
      Term::call(Term::r#ref(SCONS), [Term::Num { val: Num::U24(char as u32 & 0x00ff_ffff) }, acc])
    })
  }

  // `#2` becomes `(Nat/Succ (Nat/Succ Nat/Zero))`.
  pub fn encode_nat(val: u32) -> Term {
    (0..val).fold(Term::r#ref(NAT_ZERO), |acc, _| Term::app(Term::r#ref(NAT_SUCC), acc))
  }
}
impl Pattern {
  /// Desugars list and string literal patterns into constructor patterns,
  /// recursively.
  pub fn encode_builtins(&mut self) {
    match self {
      Pattern::Lst(pats) => *self = Self::encode_list(std::mem::take(pats)),
      Pattern::Str(str) => *self = Self::encode_str(str),
      _ => {
        for pat in self.children_mut() {
          pat.encode_builtins();
        }
      }
    }
  }

  // Folds the element patterns into nested `List/Cons`/`List/Nil` patterns.
  fn encode_list(elements: Vec<Pattern>) -> Pattern {
    let lnil = Pattern::Ctr(Name::new(LNIL), vec![]);
    elements.into_iter().rfold(lnil, |acc, mut nxt| {
      nxt.encode_builtins();
      Pattern::Ctr(Name::new(LCONS), vec![nxt, acc])
    })
  }

  // Folds the characters into nested `String/Cons`/`String/Nil` patterns.
  fn encode_str(str: &str) -> Pattern {
    let lnil = Pattern::Ctr(Name::new(SNIL), vec![]);
    str.chars().rfold(lnil, |tail, head| {
      let head = Pattern::Num(head as u32);
      Pattern::Ctr(Name::new(SCONS), vec![head, tail])
    })
  }
}
| rust | Apache-2.0 | d184863f03e796d1d657958a51dd6dd331ade92d | 2026-01-04T15:41:39.511038Z | false |
HigherOrderCO/Bend | https://github.com/HigherOrderCO/Bend/blob/d184863f03e796d1d657958a51dd6dd331ade92d/src/fun/mod.rs | src/fun/mod.rs | use crate::{
diagnostics::{Diagnostics, DiagnosticsConfig, TextSpan},
imports::Import,
maybe_grow, multi_iterator, ENTRY_POINT,
};
use indexmap::{IndexMap, IndexSet};
use interner::global::{GlobalPool, GlobalString};
use itertools::Itertools;
use std::{
borrow::Cow,
hash::Hash,
ops::{Deref, Range},
};
pub mod builtins;
pub mod check;
pub mod display;
pub mod load_book;
pub mod net_to_term;
pub mod parser;
pub mod term_to_net;
pub mod transform;
pub use net_to_term::{net_to_term, ReadbackError};
pub use term_to_net::{book_to_hvm, term_to_hvm};
pub static STRINGS: GlobalPool<String> = GlobalPool::new();
/// Compilation context: the book being processed plus accumulated diagnostics.
#[derive(Debug)]
pub struct Ctx<'book> {
  /// The program being processed, mutated in place.
  pub book: &'book mut Book,
  /// Diagnostics (errors/warnings) gathered while processing `book`.
  pub info: Diagnostics,
}
impl Ctx<'_> {
  /// Creates a context over `book` with a fresh diagnostics accumulator
  /// configured by `diagnostics_cfg`.
  pub fn new(book: &mut Book, diagnostics_cfg: DiagnosticsConfig) -> Ctx {
    let info = Diagnostics::new(diagnostics_cfg);
    Ctx { book, info }
  }
}
/// The representation of a program.
#[derive(Debug, Clone, Default)]
pub struct Book {
  /// Function definitions, keyed by name.
  pub defs: Definitions,
  /// HVM native function definitions, keyed by name.
  pub hvm_defs: HvmDefinitions,
  /// Algebraic datatype definitions, keyed by name.
  pub adts: Adts,
  /// Map of constructor name to the name of the type it belongs to.
  pub ctrs: Constructors,
  /// A custom or default "main" entrypoint.
  pub entrypoint: Option<Name>,
  /// Imports declared in the program.
  pub imports: Vec<Import>,
}
/// Function definitions, keyed by name (insertion-ordered).
pub type Definitions = IndexMap<Name, Definition>;
/// Native HVM definitions, keyed by name (insertion-ordered).
pub type HvmDefinitions = IndexMap<Name, HvmDefinition>;
/// Datatype definitions, keyed by name (insertion-ordered).
pub type Adts = IndexMap<Name, Adt>;
/// Maps each constructor name to the name of its datatype.
pub type Constructors = IndexMap<Name, Name>;
/// A pattern matching function definition.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct Definition {
  pub name: Name,
  /// The type signature of the definition.
  pub typ: Type,
  /// Whether the definition is type-checked
  /// (printed with an `unchecked` prefix when false).
  pub check: bool,
  /// The pattern-matching rules making up the body.
  pub rules: Vec<Rule>,
  /// Where this definition came from.
  pub source: Source,
}
/// Provenance information for a definition.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct Source {
  /// File the item was defined in, when known.
  pub file: Option<String>,
  /// Location span inside that file, when known.
  pub span: Option<TextSpan>,
  /// The origin category (builtin, generated, user, imported, unknown).
  pub kind: SourceKind,
}
/// The origin category of a definition.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub enum SourceKind {
  /// Built into the language.
  Builtin,
  /// Was generated by the compiler.
  Generated,
  /// Source code from a local book.
  User,
  /// Source code from an imported book.
  Imported,
  /// Unknown by the compiler at this stage.
  Unknown,
}
/// An HVM native definition.
#[derive(Debug, Clone)]
pub struct HvmDefinition {
  pub name: Name,
  pub typ: Type,
  /// The body is a raw HVM net rather than a Bend term.
  pub body: hvm::ast::Net,
  pub source: Source,
}
/// The type of a term or definition.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub enum Type {
  /// Matches any type (prints as `Any`).
  Any,
  /// A type yet to be filled in (prints as `_`).
  Hole,
  /// A type variable.
  Var(Name),
  /// A type constructor applied to its arguments (prints as `(Name args)`).
  Ctr(Name, Vec<Type>),
  /// A function type (prints as `(a -> b)`; arrows are right-associative).
  Arr(Box<Type>, Box<Type>),
  /// A tuple type (prints as `(a, b, ...)`).
  Tup(Vec<Type>),
  /// 24-bit unsigned integer.
  U24,
  /// 24-bit float.
  F24,
  /// 24-bit signed integer.
  I24,
  /// The `None` type.
  None,
  /// A number type over `t` (prints as `(Number t)`).
  Number(Box<Type>),
  /// An integer type over `t` (prints as `(Integer t)`).
  Integer(Box<Type>),
}
/// A pattern matching rule of a definition.
#[derive(Debug, Clone, Default, PartialEq, Eq, Hash)]
pub struct Rule {
  /// One pattern per argument of the rule.
  pub pats: Vec<Pattern>,
  /// The rule's right-hand side.
  pub body: Term,
}
/// A term of the Bend functional AST.
#[derive(Debug, Default, PartialEq, Eq, Hash)]
pub enum Term {
  /// A lambda binding `pat` in `bod` (prints as `λpat bod`, possibly tagged).
  Lam {
    tag: Tag,
    pat: Box<Pattern>,
    bod: Box<Term>,
  },
  /// A use of a scoped variable.
  Var {
    nam: Name,
  },
  /// A use of an unscoped variable (prints as `$nam`).
  Link {
    nam: Name,
  },
  /// `let pat = val; nxt`
  Let {
    pat: Box<Pattern>,
    val: Box<Term>,
    nxt: Box<Term>,
  },
  /// A monadic block `with typ { bod }`.
  With {
    typ: Name,
    bod: Box<Term>,
  },
  /// A bind inside a `with` block: `ask pat = val; nxt`.
  Ask {
    pat: Box<Pattern>,
    val: Box<Term>,
    nxt: Box<Term>,
  },
  /// `use nam = val; nxt` (the name is always present when printed).
  Use {
    nam: Option<Name>,
    val: Box<Term>,
    nxt: Box<Term>,
  },
  /// An application `(fun arg)`, possibly tagged.
  App {
    tag: Tag,
    fun: Box<Term>,
    arg: Box<Term>,
  },
  /// Either a tuple or a superposition
  Fan {
    fan: FanKind,
    tag: Tag,
    els: Vec<Term>,
  },
  /// A number literal (u24, i24 or f24).
  Num {
    val: Num,
  },
  /// A natural-number literal `#val`, desugared to `Nat/Succ`/`Nat/Zero`.
  Nat {
    val: u32,
  },
  /// A string literal, desugared to `String/Cons`/`String/Nil`.
  Str {
    val: GlobalString,
  },
  /// A list literal, desugared to `List/Cons`/`List/Nil`.
  List {
    els: Vec<Term>,
  },
  /// A numeric operation between built-in numbers.
  Oper {
    opr: Op,
    fst: Box<Term>,
    snd: Box<Term>,
  },
  /// Pattern matching on an ADT.
  Mat {
    bnd: Option<Name>,
    arg: Box<Term>,
    with_bnd: Vec<Option<Name>>,
    with_arg: Vec<Term>,
    arms: Vec<MatchRule>,
  },
  /// Native pattern matching on numbers
  /// (`arms` covers `0`, `1`, ... with a final `_` arm that may bind `pred`).
  Swt {
    bnd: Option<Name>,
    arg: Box<Term>,
    with_bnd: Vec<Option<Name>>,
    with_arg: Vec<Term>,
    pred: Option<Name>,
    arms: Vec<Term>,
  },
  /// A `fold` over an ADT value (prints like `match` with a `fold` keyword).
  Fold {
    bnd: Option<Name>,
    arg: Box<Term>,
    with_bnd: Vec<Option<Name>>,
    with_arg: Vec<Term>,
    arms: Vec<MatchRule>,
  },
  /// A `bend` construct with initial states, a `when` condition, a `step`
  /// and a `base` case.
  Bend {
    bnd: Vec<Option<Name>>,
    arg: Vec<Term>,
    cond: Box<Term>,
    step: Box<Term>,
    base: Box<Term>,
  },
  /// `open typ var; bod`
  Open {
    typ: Name,
    var: Name,
    bod: Box<Term>,
  },
  /// A reference to a top-level definition.
  Ref {
    nam: Name,
  },
  /// A local `def` followed by the continuation term.
  Def {
    def: Definition,
    nxt: Box<Term>,
  },
  /// An eraser (prints as `*`).
  Era,
  /// Placeholder for a term that could not be built (prints as `<Invalid>`).
  #[default]
  Err,
}
/// A match arm: (constructor name, field binds, arm body).
pub type MatchRule = (Option<Name>, Vec<Option<Name>>, Term);
/// Whether a fan node is a tuple or a duplication/superposition.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum FanKind {
Tup,
Dup,
}
/// A native numeric operator.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum Op {
ADD,
SUB,
MUL,
DIV,
/// Remainder
REM,
EQ,
NEQ,
LT,
GT,
AND,
OR,
XOR,
SHL,
SHR,
// a^b
POW,
/// Less than or equal
LE,
/// Greater than or equal
GE,
}
/// A 24-bit number literal in one of the three native numeric types.
#[derive(Debug, Clone, Copy)]
pub enum Num {
U24(u32),
I24(i32),
F24(f32),
}
/// A pattern for binding or destructuring values.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub enum Pattern {
/// A variable bind; `None` is a wildcard that erases the value.
Var(Option<Name>),
/// An unscoped variable bind (used through `Term::Link`).
Chn(Name),
/// A constructor applied to its field patterns.
Ctr(Name, Vec<Pattern>),
/// A numeric literal pattern.
Num(u32),
/// Either a tuple or a duplication
Fan(FanKind, Tag, Vec<Pattern>),
/// A list literal pattern.
Lst(Vec<Pattern>),
/// A string literal pattern.
Str(GlobalString),
}
/// A label carried by lambdas, applications and fans.
#[derive(Debug, Clone, PartialEq, Eq, Hash, Default)]
pub enum Tag {
/// A named label.
Named(Name),
/// A numeric label.
Numeric(u16),
/// A label to be chosen automatically.
Auto,
#[default]
Static,
}
/// A user defined datatype
#[derive(Debug, Clone)]
pub struct Adt {
/// The type's name.
pub name: Name,
/// The type-level variables the ADT is parameterized by.
pub vars: Vec<Name>,
/// The constructors, in declaration order.
pub ctrs: IndexMap<Name, AdtCtr>,
/// Where the ADT was declared.
pub source: Source,
}
/// A constructor of a user-defined datatype.
#[derive(Debug, Clone)]
pub struct AdtCtr {
pub name: Name,
/// The constructor's type.
pub typ: Type,
pub fields: Vec<CtrField>,
}
/// A single field of an ADT constructor.
#[derive(Debug, Clone)]
pub struct CtrField {
pub nam: Name,
/// Whether the field is recursive — presumably: its type mentions the parent ADT; confirm.
pub rec: bool,
pub typ: Type,
}
/// An identifier, backed by an interned/shared string (see the `STRINGS` table).
#[derive(Debug, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)]
pub struct Name(GlobalString);
/* Implementations */
/// `Name == str`: compares the underlying string data.
impl PartialEq<str> for Name {
  fn eq(&self, other: &str) -> bool {
    **self == *other
  }
}
/// `Name == &str`: defers to the `str` comparison.
impl PartialEq<&str> for Name {
  fn eq(&self, other: &&str) -> bool {
    *self == **other
  }
}
/// `Name == Option<Name>`: equal only when the option holds an equal name.
impl PartialEq<Option<Name>> for Name {
  fn eq(&self, other: &Option<Name>) -> bool {
    match other {
      Some(o) => self == o,
      None => false,
    }
  }
}
/// Symmetric counterpart of `Name == Option<Name>`.
impl PartialEq<Name> for Option<Name> {
  fn eq(&self, other: &Name) -> bool {
    match self {
      Some(n) => n == other,
      None => false,
    }
  }
}
/// `Name == Option<&Name>`: equal only when the option holds an equal name.
impl PartialEq<Option<&Name>> for Name {
  fn eq(&self, other: &Option<&Name>) -> bool {
    match other {
      Some(o) => self == *o,
      None => false,
    }
  }
}
/// Symmetric counterpart of `Name == Option<&Name>`.
impl PartialEq<Name> for Option<&Name> {
  fn eq(&self, other: &Name) -> bool {
    match self {
      Some(n) => *n == other,
      None => false,
    }
  }
}
/// Converts a number into an alphabetic identifier: "a", "b", ..., "z", "ab", "bb", ...
///
/// Digits are emitted least-significant first in base 26, so 0 -> "a",
/// 25 -> "z" and 26 -> "ab". Useful for generating fresh variable names.
pub fn num_to_name(mut num: u64) -> String {
  let mut name = String::new();
  loop {
    name.push(char::from(b'a' + (num % 26) as u8));
    num /= 26;
    if num == 0 {
      return name;
    }
  }
}
impl Tag {
/// Builds a `Named` tag from an ADT's name.
pub fn adt_name(name: &Name) -> Self {
Self::Named(name.clone())
}
}
// Manual `Clone` that recurses through `maybe_grow` so cloning very deep
// terms doesn't overflow the stack. Every arm is a mechanical field-by-field
// clone (`Copy` fields are dereferenced instead of cloned).
impl Clone for Term {
fn clone(&self) -> Self {
maybe_grow(|| match self {
Self::Lam { tag, pat, bod } => Self::Lam { tag: tag.clone(), pat: pat.clone(), bod: bod.clone() },
Self::Var { nam } => Self::Var { nam: nam.clone() },
Self::Link { nam } => Self::Link { nam: nam.clone() },
Self::Let { pat, val, nxt } => Self::Let { pat: pat.clone(), val: val.clone(), nxt: nxt.clone() },
Self::With { typ, bod } => Self::With { typ: typ.clone(), bod: bod.clone() },
Self::Ask { pat, val, nxt } => Self::Ask { pat: pat.clone(), val: val.clone(), nxt: nxt.clone() },
Self::Use { nam, val, nxt } => Self::Use { nam: nam.clone(), val: val.clone(), nxt: nxt.clone() },
Self::App { tag, fun, arg } => Self::App { tag: tag.clone(), fun: fun.clone(), arg: arg.clone() },
Self::Fan { fan, tag, els } => Self::Fan { fan: *fan, tag: tag.clone(), els: els.clone() },
Self::Num { val } => Self::Num { val: *val },
Self::Nat { val } => Self::Nat { val: *val },
Self::Str { val } => Self::Str { val: val.clone() },
Self::List { els } => Self::List { els: els.clone() },
Self::Oper { opr, fst, snd } => Self::Oper { opr: *opr, fst: fst.clone(), snd: snd.clone() },
Self::Mat { arg, bnd, with_bnd, with_arg, arms } => Self::Mat {
arg: arg.clone(),
bnd: bnd.clone(),
with_bnd: with_bnd.clone(),
with_arg: with_arg.clone(),
arms: arms.clone(),
},
Self::Swt { arg, bnd, with_bnd, with_arg, pred, arms } => Self::Swt {
arg: arg.clone(),
bnd: bnd.clone(),
with_bnd: with_bnd.clone(),
with_arg: with_arg.clone(),
pred: pred.clone(),
arms: arms.clone(),
},
Self::Fold { bnd, arg, with_bnd, with_arg, arms } => Self::Fold {
bnd: bnd.clone(),
arg: arg.clone(),
with_bnd: with_bnd.clone(),
with_arg: with_arg.clone(),
arms: arms.clone(),
},
Self::Bend { bnd: bind, arg: init, cond, step, base } => Self::Bend {
bnd: bind.clone(),
arg: init.clone(),
cond: cond.clone(),
step: step.clone(),
base: base.clone(),
},
Self::Open { typ, var, bod: nxt } => {
Self::Open { typ: typ.clone(), var: var.clone(), bod: nxt.clone() }
}
Self::Ref { nam } => Self::Ref { nam: nam.clone() },
Self::Def { def, nxt } => Self::Def { def: def.clone(), nxt: nxt.clone() },
Self::Era => Self::Era,
Self::Err => Self::Err,
})
}
}
// Iterative `Drop` that repeatedly flattens the term tree so that the default
// recursive drop never recurses deeper than one level of "nested" children.
// This prevents stack overflows when dropping very deep terms. Relies on
// `Term::Err` being the `Default` (used by `mem::take` as a placeholder).
impl Drop for Term {
fn drop(&mut self) {
loop {
// Each iteration moves a child with nested nodes to the last child.
// When no nested on the left, we can just drop it and they'll be handled
// by the special cases;
let mut i = self.children_mut().filter(|x| x.children().next().is_some());
// No nested children, just drop everything
let Some(b) = i.next() else { break };
// Only one child with nested nodes, move it up to be the new root.
// Non-nested (height=0) children are dropped recursively.
if { i }.next().is_none() {
*self = std::mem::take(b);
continue;
}
// Rotate the tree right:
// ```text
// a b
// / \ / \
// b e -> c a
// / \ / \
// c d d e
// ```
// `tmp` is a throwaway placeholder so values can be moved out of slots.
let tmp = Term::Err;
let d = std::mem::replace(b.children_mut().next_back().unwrap(), tmp);
let b = std::mem::replace(b, d);
let a = std::mem::replace(self, b);
// `tmp` here is the `Term::Err` placeholder inserted above; forgetting
// it skips re-entering this Drop impl for a leaf with no children.
let tmp = std::mem::replace(self.children_mut().next_back().unwrap(), a);
std::mem::forget(tmp);
}
}
}
impl From<Option<Name>> for Pattern {
fn from(value: Option<Name>) -> Self {
Pattern::Var(value)
}
}
impl Term {
/* Common construction patterns */

/// Lambda with the default (static) tag.
pub fn lam(pat: Pattern, bod: Term) -> Self {
  Term::tagged_lam(Tag::Static, pat, bod)
}

/// Lambda with an explicit tag.
pub fn tagged_lam(tag: Tag, pat: Pattern, bod: Term) -> Self {
  Self::Lam { tag, pat: Box::new(pat), bod: Box::new(bod) }
}

/// Wraps a term in lambdas, so that the outermost lambda binds the first given pattern.
///
/// The lambda equivalent of [`Term::call`].
pub fn rfold_lams(term: Term, pats: impl DoubleEndedIterator<Item = Option<Name>>) -> Self {
  pats.rfold(term, |bod, nam| Term::lam(Pattern::Var(nam), bod))
}

/// A variable when a name is given, otherwise an eraser.
pub fn var_or_era(nam: Option<Name>) -> Self {
  match nam {
    Some(nam) => Term::Var { nam },
    None => Term::Era,
  }
}

/// Application with the default (static) tag.
pub fn app(fun: Term, arg: Term) -> Self {
  Term::tagged_app(Tag::Static, fun, arg)
}

/// Application with an explicit tag.
pub fn tagged_app(tag: Tag, fun: Term, arg: Term) -> Self {
  Self::App { tag, fun: Box::new(fun), arg: Box::new(arg) }
}

/// Make a call term by folding args around a called function term with applications.
pub fn call(called: Term, args: impl IntoIterator<Item = Term>) -> Self {
  args.into_iter().fold(called, |fun, arg| Term::app(fun, arg))
}

/// Like [`Term::call`], but every application carries the given tag.
pub fn tagged_call(tag: Tag, called: Term, args: impl IntoIterator<Item = Term>) -> Self {
  let mut term = called;
  for arg in args {
    term = Term::tagged_app(tag.clone(), term, arg);
  }
  term
}

/// Apply a variable to a term by the var name.
pub fn arg_call(fun: Term, arg: Name) -> Self {
  Self::app(fun, Term::Var { nam: arg })
}

/// A reference to a top-level definition by name.
pub fn r#ref(name: &str) -> Self {
  Self::Ref { nam: Name::new(name) }
}

/// A string literal term, interned through the global string table.
pub fn str(str: &str) -> Self {
  Self::Str { val: STRINGS.get(str) }
}

/// `arg - val`, simplified to just `arg` when `val` is zero.
pub fn sub_num(arg: Term, val: Num) -> Term {
  if val.is_zero() {
    arg
  } else {
    let snd = Box::new(Term::Num { val });
    Term::Oper { opr: Op::SUB, fst: Box::new(arg), snd }
  }
}

/// `arg + val`, simplified to just `arg` when `val` is zero.
pub fn add_num(arg: Term, val: Num) -> Term {
  if val.is_zero() {
    arg
  } else {
    let snd = Box::new(Term::Num { val });
    Term::Oper { opr: Op::ADD, fst: Box::new(arg), snd }
  }
}
/// The binding pattern of this term, if it has one (lambdas and lets).
pub fn pattern(&self) -> Option<&Pattern> {
  if let Term::Lam { pat, .. } | Term::Let { pat, .. } = self { Some(pat) } else { None }
}

/// Mutable access to the binding pattern of this term, if it has one.
pub fn pattern_mut(&mut self) -> Option<&mut Pattern> {
  if let Term::Lam { pat, .. } | Term::Let { pat, .. } = self { Some(pat) } else { None }
}
/* Iterators */
/// Iterates over the direct subterms of this term, left to right.
/// `multi_iterator!` generates an ad-hoc enum so each match arm can return
/// a different concrete iterator type. The iteration order is relied on by
/// traversals (and by the custom `Drop`), so keep it stable.
/// NOTE(review): `Def` yields no children here (its `def` and `nxt` are not
/// visited) — confirm that is intentional.
pub fn children(&self) -> impl DoubleEndedIterator<Item = &Term> + Clone {
multi_iterator!(ChildrenIter { Zero, One, Two, Vec, Mat, Swt, Bend, Fold });
match self {
Term::Mat { arg, bnd: _, with_bnd: _, with_arg, arms } => {
ChildrenIter::Mat([arg.as_ref()].into_iter().chain(with_arg.iter()).chain(arms.iter().map(|r| &r.2)))
}
Term::Swt { arg, bnd: _, with_bnd: _, with_arg, pred: _, arms } => {
ChildrenIter::Swt([arg.as_ref()].into_iter().chain(with_arg.iter()).chain(arms))
}
Term::Bend { bnd: _, arg: init, cond, step, base } => {
ChildrenIter::Bend(init.iter().chain([cond.as_ref(), step.as_ref(), base.as_ref()]))
}
Term::Fold { bnd: _, arg, with_bnd: _, with_arg, arms } => {
ChildrenIter::Fold([arg.as_ref()].into_iter().chain(with_arg.iter()).chain(arms.iter().map(|r| &r.2)))
}
Term::Fan { els, .. } | Term::List { els } => ChildrenIter::Vec(els),
Term::Let { val: fst, nxt: snd, .. }
| Term::Ask { val: fst, nxt: snd, .. }
| Term::Use { val: fst, nxt: snd, .. }
| Term::App { fun: fst, arg: snd, .. }
| Term::Oper { fst, snd, .. } => ChildrenIter::Two([fst.as_ref(), snd.as_ref()]),
Term::Lam { bod, .. } | Term::With { bod, .. } | Term::Open { bod, .. } => {
ChildrenIter::One([bod.as_ref()])
}
Term::Var { .. }
| Term::Link { .. }
| Term::Num { .. }
| Term::Nat { .. }
| Term::Str { .. }
| Term::Ref { .. }
| Term::Def { .. }
| Term::Era
| Term::Err => ChildrenIter::Zero([]),
}
}
/// Mutable version of [`Term::children`]; must visit the same children in
/// the same order.
pub fn children_mut(&mut self) -> impl DoubleEndedIterator<Item = &mut Term> {
multi_iterator!(ChildrenIter { Zero, One, Two, Vec, Mat, Swt, Bend, Fold });
match self {
Term::Mat { arg, bnd: _, with_bnd: _, with_arg, arms } => ChildrenIter::Mat(
[arg.as_mut()].into_iter().chain(with_arg.iter_mut()).chain(arms.iter_mut().map(|r| &mut r.2)),
),
Term::Swt { arg, bnd: _, with_bnd: _, with_arg, pred: _, arms } => {
ChildrenIter::Swt([arg.as_mut()].into_iter().chain(with_arg.iter_mut()).chain(arms))
}
Term::Bend { bnd: _, arg: init, cond, step, base } => {
ChildrenIter::Bend(init.iter_mut().chain([cond.as_mut(), step.as_mut(), base.as_mut()]))
}
Term::Fold { bnd: _, arg, with_bnd: _, with_arg, arms } => ChildrenIter::Fold(
[arg.as_mut()].into_iter().chain(with_arg.iter_mut()).chain(arms.iter_mut().map(|r| &mut r.2)),
),
Term::Fan { els, .. } | Term::List { els } => ChildrenIter::Vec(els),
Term::Let { val: fst, nxt: snd, .. }
| Term::Ask { val: fst, nxt: snd, .. }
| Term::Use { val: fst, nxt: snd, .. }
| Term::App { fun: fst, arg: snd, .. }
| Term::Oper { fst, snd, .. } => ChildrenIter::Two([fst.as_mut(), snd.as_mut()]),
Term::Lam { bod, .. } | Term::With { bod, .. } | Term::Open { bod, .. } => {
ChildrenIter::One([bod.as_mut()])
}
Term::Var { .. }
| Term::Link { .. }
| Term::Num { .. }
| Term::Nat { .. }
| Term::Str { .. }
| Term::Ref { .. }
| Term::Def { .. }
| Term::Era
| Term::Err => ChildrenIter::Zero([]),
}
}
/// An iterator over the subterms with an iterator over the binds
/// introduced by the current term for each subterm.
///
/// Must only be called after fix_matches.
///
/// Example: A lambda introduces 1 bind for it's only subterm,
/// while a let expression introduces 0 binds for the value and
/// many binds for the next term.
pub fn children_with_binds(
&self,
) -> impl DoubleEndedIterator<Item = (&Term, impl DoubleEndedIterator<Item = &Option<Name>> + Clone)> + Clone
{
multi_iterator!(ChildrenIter { Zero, One, Two, Vec, Mat, Swt, Bend });
multi_iterator!(BindsIter { Zero, One, Mat, Pat, SwtNum, SwtSucc, Bend });
match self {
Term::Mat { arg, bnd, with_bnd, with_arg, arms }
| Term::Fold { bnd, arg, with_bnd, with_arg, arms } => {
// The scrutinee and `with` args see no new binds; each arm sees the
// match bind, its own field binds, and the `with` binds.
let arg = [(arg.as_ref(), BindsIter::Zero([]))].into_iter();
let with_arg = with_arg.iter().map(|a| (a, BindsIter::Zero([])));
let arms = arms
.iter()
.map(move |r| (&r.2, BindsIter::Mat([bnd].into_iter().chain(r.1.iter()).chain(with_bnd.iter()))));
ChildrenIter::Mat(arg.chain(with_arg).chain(arms))
}
Term::Swt { arg, bnd, with_bnd, with_arg, pred, arms } => {
// The last arm is the successor case (which additionally binds `pred`);
// earlier arms are the numeric cases. Assumes `arms` is non-empty.
let (succ, nums) = arms.split_last().unwrap();
ChildrenIter::Swt(
[(arg.as_ref(), BindsIter::Zero([]))]
.into_iter()
.chain(with_arg.iter().map(|a| (a, BindsIter::Zero([]))))
.chain(nums.iter().map(move |x| (x, BindsIter::SwtNum([bnd].into_iter().chain(with_bnd.iter())))))
.chain([(succ, BindsIter::SwtSucc([bnd, pred].into_iter().chain(with_bnd.iter())))]),
)
}
Term::Bend { bnd: bind, arg: init, cond, step, base } => {
ChildrenIter::Bend(init.iter().map(|x| (x, BindsIter::Zero([]))).chain([
(cond.as_ref(), BindsIter::Bend(bind.iter())),
(step.as_ref(), BindsIter::Bend(bind.iter())),
(base.as_ref(), BindsIter::Bend(bind.iter())),
]))
}
Term::Fan { els, .. } | Term::List { els } => {
ChildrenIter::Vec(els.iter().map(|el| (el, BindsIter::Zero([]))))
}
Term::Let { pat, val, nxt, .. } | Term::Ask { pat, val, nxt, .. } => {
ChildrenIter::Two([(val.as_ref(), BindsIter::Zero([])), (nxt.as_ref(), BindsIter::Pat(pat.binds()))])
}
Term::Use { nam, val, nxt, .. } => {
ChildrenIter::Two([(val.as_ref(), BindsIter::Zero([])), (nxt.as_ref(), BindsIter::One([nam]))])
}
Term::App { fun: fst, arg: snd, .. } | Term::Oper { fst, snd, .. } => {
ChildrenIter::Two([(fst.as_ref(), BindsIter::Zero([])), (snd.as_ref(), BindsIter::Zero([]))])
}
Term::Lam { pat, bod, .. } => ChildrenIter::One([(bod.as_ref(), BindsIter::Pat(pat.binds()))]),
Term::With { bod, .. } => ChildrenIter::One([(bod.as_ref(), BindsIter::Zero([]))]),
Term::Var { .. }
| Term::Link { .. }
| Term::Num { .. }
| Term::Nat { .. }
| Term::Str { .. }
| Term::Ref { .. }
| Term::Def { .. }
| Term::Era
| Term::Err => ChildrenIter::Zero([]),
Term::Open { .. } => unreachable!("Open should be removed in earlier pass"),
}
}
/// Must only be called after fix_matches.
///
/// Mutable version of [`Term::children_with_binds`]; must visit the same
/// children with the same binds, in the same order.
pub fn children_mut_with_binds(
&mut self,
) -> impl DoubleEndedIterator<Item = (&mut Term, impl DoubleEndedIterator<Item = &Option<Name>> + Clone)>
{
multi_iterator!(ChildrenIter { Zero, One, Two, Vec, Mat, Swt, Bend });
multi_iterator!(BindsIter { Zero, One, Mat, SwtNum, SwtSucc, Pat, Bend });
match self {
Term::Mat { arg, bnd, with_bnd, with_arg, arms }
| Term::Fold { bnd, arg, with_bnd, with_arg, arms } => {
let arg = [(arg.as_mut(), BindsIter::Zero([]))].into_iter();
let with_arg = with_arg.iter_mut().map(|a| (a, BindsIter::Zero([])));
let arms = arms
.iter_mut()
.map(|r| (&mut r.2, BindsIter::Mat([&*bnd].into_iter().chain(r.1.iter()).chain(with_bnd.iter()))));
ChildrenIter::Mat(arg.chain(with_arg).chain(arms))
}
Term::Swt { arg, bnd, with_bnd, with_arg, pred, arms } => {
// Last arm is the successor case (also binds `pred`); assumes non-empty.
let (succ, nums) = arms.split_last_mut().unwrap();
ChildrenIter::Swt(
[(arg.as_mut(), BindsIter::Zero([]))]
.into_iter()
.chain(with_arg.iter_mut().map(|a| (a, BindsIter::Zero([]))))
.chain(
nums.iter_mut().map(|x| (x, BindsIter::SwtNum([&*bnd].into_iter().chain(with_bnd.iter())))),
)
.chain([(succ, BindsIter::SwtSucc([&*bnd, &*pred].into_iter().chain(with_bnd.iter())))]),
)
}
Term::Bend { bnd, arg, cond, step, base } => {
ChildrenIter::Bend(arg.iter_mut().map(|x| (x, BindsIter::Zero([]))).chain([
(cond.as_mut(), BindsIter::Bend(bnd.iter())),
(step.as_mut(), BindsIter::Bend(bnd.iter())),
(base.as_mut(), BindsIter::Bend(bnd.iter())),
]))
}
Term::Fan { els, .. } | Term::List { els } => {
ChildrenIter::Vec(els.iter_mut().map(|el| (el, BindsIter::Zero([]))))
}
Term::Let { pat, val, nxt, .. } | Term::Ask { pat, val, nxt, .. } => {
ChildrenIter::Two([(val.as_mut(), BindsIter::Zero([])), (nxt.as_mut(), BindsIter::Pat(pat.binds()))])
}
Term::Use { nam, val, nxt } => {
ChildrenIter::Two([(val.as_mut(), BindsIter::Zero([])), (nxt.as_mut(), BindsIter::One([&*nam]))])
}
Term::App { fun: fst, arg: snd, .. } | Term::Oper { fst, snd, .. } => {
ChildrenIter::Two([(fst.as_mut(), BindsIter::Zero([])), (snd.as_mut(), BindsIter::Zero([]))])
}
Term::Lam { pat, bod, .. } => ChildrenIter::One([(bod.as_mut(), BindsIter::Pat(pat.binds()))]),
Term::With { bod, .. } => ChildrenIter::One([(bod.as_mut(), BindsIter::Zero([]))]),
Term::Var { .. }
| Term::Link { .. }
| Term::Num { .. }
| Term::Nat { .. }
| Term::Str { .. }
| Term::Ref { .. }
| Term::Def { .. }
| Term::Era
| Term::Err => ChildrenIter::Zero([]),
Term::Open { .. } => unreachable!("Open should be removed in earlier pass"),
}
}
/* Common checks and transformations */

/// Substitute the occurrences of a variable in a term with the given term.
///
/// Caution: can cause invalid shadowing of variables if used incorrectly.
/// Ex: Using subst to beta-reduce `(@a @b a b)` converting it into `@b b`.
///
/// NOTE: Expects var bind information to be properly stored in match expressions,
/// so it must run AFTER `fix_match_terms`.
///
/// NOTE: Since it doesn't (can't) handle `with` clauses in match terms,
/// it must be run only AFTER `with` linearization.
pub fn subst(&mut self, from: &Name, to: &Term) {
  maybe_grow(|| {
    for (child, mut binds) in self.children_mut_with_binds() {
      // Skip children whose scope rebinds `from`: occurrences below them
      // refer to the new binding, not to the substituted variable.
      let shadowed = binds.any(|b| b.as_ref() == Some(from));
      if !shadowed {
        child.subst(from, to);
      }
    }
  });
  if let Term::Var { nam } = self {
    if nam == from {
      *self = to.clone();
    }
  }
}
/// Substitute the occurrences of a constructor name with the given name.
pub fn subst_ctrs(&mut self, from: &Name, to: &Name) {
  // Recurse into the subterms first, then fix up this node.
  maybe_grow(|| {
    for child in self.children_mut() {
      child.subst_ctrs(from, to);
    }
  });
  match self {
    Term::Fold { arms, .. } | Term::Mat { arms, .. } => {
      // Rename the constructor of every arm that matches on `from`.
      for (ctr, _, _) in arms.iter_mut() {
        if let Some(nam) = ctr {
          if nam == from {
            *nam = to.clone();
          }
        }
      }
    }
    Term::Open { typ, .. } if typ == from => *typ = to.clone(),
    _ => (),
  }
}
/// Substitutes the occurrences of a type constructor in the term with the given name.
pub fn subst_type_ctrs(&mut self, from: &Name, to: &Name) {
maybe_grow(|| {
// Type constructors only appear in the types of local definitions and
// in `with` blocks; rename those here, then recurse into the children.
match self {
Term::Def { def, nxt: _ } => {
def.typ.subst_ctr(from, to);
}
Term::With { typ, bod: _ } => {
if typ == from {
*typ = to.clone();
}
}
_ => (),
}
for child in self.children_mut() {
child.subst_type_ctrs(from, to);
}
});
}
/// Substitute the occurrence of an unscoped variable with the given term.
pub fn subst_unscoped(&mut self, from: &Name, to: &Term) {
  maybe_grow(|| {
    // No shadowing check: there can be only one bind of an unscoped var.
    // TODO: potentially there could be some situation where this causes an incorrect program to compile?
    self.children_mut().for_each(|child| child.subst_unscoped(from, to));
  });
  if let Term::Link { nam } = self {
    if nam == from {
      *self = to.clone();
    }
  }
}
/// Collects all the free variables that a term has
/// and the number of times each var is used
pub fn free_vars(&self) -> IndexMap<Name, u64> {
  // Walks bottom-up: each child's free vars go into a fresh map, the names
  // bound for that child are removed, and the rest is merged upwards.
  fn go_term(term: &Term, free_vars: &mut IndexMap<Name, u64>) {
    maybe_grow(|| {
      if let Term::Var { nam } = term {
        *free_vars.entry(nam.clone()).or_default() += 1;
      }
      for (child, binds) in term.children_with_binds() {
        let mut new_scope: IndexMap<Name, u64> = Default::default();
        go_term(child, &mut new_scope);
        for nam in binds.flatten() {
          // `shift_remove` keeps the remaining entries in insertion order.
          new_scope.shift_remove(nam);
        }
        // Sum the use counts. The previous `free_vars.extend(new_scope)`
        // overwrote the count of a var that was also free in an earlier
        // child, undercounting uses.
        for (nam, uses) in new_scope {
          *free_vars.entry(nam).or_default() += uses;
        }
      }
    })
  }
  let mut free_vars = Default::default();
  go_term(self, &mut free_vars);
  free_vars
}
/// Returns the set of declared and the set of used unscoped variables
pub fn unscoped_vars(&self) -> (IndexSet<Name>, IndexSet<Name>) {
// Declarations come from `Pattern::Chn` binds inside patterns.
fn go_pat(pat: &Pattern, decls: &mut IndexSet<Name>) {
maybe_grow(|| {
if let Pattern::Chn(name) = pat {
decls.insert(name.clone());
}
for child in pat.children() {
go_pat(child, decls);
}
})
}
// Uses come from `Term::Link` occurrences inside the term.
fn go_term(term: &Term, decls: &mut IndexSet<Name>, uses: &mut IndexSet<Name>) {
maybe_grow(|| {
if let Term::Link { nam } = term {
uses.insert(nam.clone());
}
if let Some(pat) = term.pattern() {
go_pat(pat, decls)
}
for child in term.children() {
go_term(child, decls, uses);
}
})
}
let mut decls = Default::default();
let mut uses = Default::default();
go_term(self, &mut decls, &mut uses);
(decls, uses)
}
/// Whether this term declares or uses any unscoped variables.
/// NOTE(review): only `Let` patterns are inspected here, not `Lam` patterns,
/// unlike `unscoped_vars` which checks both — confirm that is intentional.
pub fn has_unscoped(&self) -> bool {
  maybe_grow(|| {
    let here = match self {
      Term::Link { .. } => true,
      Term::Let { pat, .. } => pat.has_unscoped(),
      _ => false,
    };
    here || self.children().any(|child| child.has_unscoped())
  })
}
}
impl Num {
/// Whether the literal is zero. `F24` uses float equality, so `-0.0`
/// also counts as zero here.
pub fn is_zero(&self) -> bool {
match self {
Num::U24(val) => *val == 0,
Num::I24(val) => *val == 0,
Num::F24(val) => *val == 0.0,
}
}
/// The HVM 24-bit bit representation of this number (tag + payload).
pub fn to_bits(&self) -> u32 {
match self {
Num::U24(val) => hvm::hvm::Numb::new_u24(*val).0,
Num::I24(val) => hvm::hvm::Numb::new_i24(*val).0,
Num::F24(val) => hvm::hvm::Numb::new_f24(*val).0,
}
}
/// Decodes a number from its HVM bit representation.
/// Panics (via `unreachable!`) if the tag is not one of the three numeric types.
pub fn from_bits(bits: u32) -> Self {
match hvm::hvm::Numb::get_typ(&hvm::hvm::Numb(bits)) {
hvm::hvm::TY_U24 => Num::U24(hvm::hvm::Numb::get_u24(&hvm::hvm::Numb(bits))),
hvm::hvm::TY_I24 => Num::I24(hvm::hvm::Numb::get_i24(&hvm::hvm::Numb(bits))),
hvm::hvm::TY_F24 => Num::F24(hvm::hvm::Numb::get_f24(&hvm::hvm::Numb(bits))),
_ => unreachable!("Invalid Num bits"),
}
}
}
// `Num` is hashed and compared through its bit representation, so `F24`
// follows bitwise semantics rather than IEEE `==` (e.g. a NaN compares
// equal to an identical NaN). This keeps `Hash` and `Eq` consistent.
impl Hash for Num {
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
self.to_bits().hash(state);
}
}
impl PartialEq for Num {
fn eq(&self, other: &Self) -> bool {
self.to_bits() == other.to_bits()
}
}
// Sound because bitwise equality is reflexive, unlike IEEE float equality.
impl Eq for Num {}
impl Pattern {
/// Iterates over every variable bound by this pattern, depth-first and
/// left to right, including `None` wildcards. `Chn` binds are excluded.
pub fn binds(&self) -> impl DoubleEndedIterator<Item = &Option<Name>> + Clone {
self.iter().filter_map(|pat| match pat {
Pattern::Var(nam) => Some(nam),
_ => None,
})
}
/// Mutable version of [`Pattern::binds`].
pub fn binds_mut(&mut self) -> impl DoubleEndedIterator<Item = &mut Option<Name>> {
// Can't have a Pattern::iter_mut() since it has a tree-like structure.
let mut binds = vec![];
let mut to_visit = vec![self];
while let Some(pat) = to_visit.pop() {
match pat {
Pattern::Var(nam) => binds.push(nam),
_ => to_visit.extend(pat.children_mut().rev()),
}
}
binds.into_iter()
}
/// Returns an iterator over each immediate child sub-pattern of `self`.
/// Considers Lists as its own pattern and not a sequence of Cons.
pub fn children(&self) -> impl DoubleEndedIterator<Item = &Pattern> + Clone {
multi_iterator!(ChildrenIter { Zero, Vec });
match self {
Pattern::Ctr(_, els) | Pattern::Fan(.., els) | Pattern::Lst(els) => ChildrenIter::Vec(els.iter()),
Pattern::Var(_) | Pattern::Chn(_) | Pattern::Num(_) | Pattern::Str(_) => ChildrenIter::Zero([]),
}
}
/// Mutable version of [`Pattern::children`].
pub fn children_mut(&mut self) -> impl DoubleEndedIterator<Item = &mut Pattern> {
multi_iterator!(ChildrenIter { Zero, Vec });
match self {
Pattern::Ctr(_, els) | Pattern::Fan(.., els) | Pattern::Lst(els) => ChildrenIter::Vec(els.iter_mut()),
Pattern::Var(_) | Pattern::Chn(_) | Pattern::Num(_) | Pattern::Str(_) => ChildrenIter::Zero([]),
}
}
/// Returns an iterator over each subpattern in depth-first, left to right order.
// TODO: Not lazy.
pub fn iter(&self) -> impl DoubleEndedIterator<Item = &Pattern> + Clone {
let mut to_visit = vec![self];
let mut els = vec![];
while let Some(pat) = to_visit.pop() {
els.push(pat);
to_visit.extend(pat.children().rev());
}
els.into_iter()
}
/// Whether the pattern matches anything (a plain or unscoped variable).
pub fn is_wildcard(&self) -> bool {
matches!(self, Pattern::Var(_) | Pattern::Chn(_))
}
/// Converts the pattern back into the term it would construct.
/// `todo!`s (panics) on list and string patterns.
pub fn to_term(&self) -> Term {
match self {
Pattern::Var(nam) => Term::var_or_era(nam.clone()),
Pattern::Chn(nam) => Term::Link { nam: nam.clone() },
Pattern::Ctr(ctr, args) => {
Term::call(Term::Ref { nam: ctr.clone() }, args.iter().map(|arg| arg.to_term()))
}
Pattern::Num(val) => Term::Num { val: Num::U24(*val) },
Pattern::Fan(fan, tag, args) => {
Term::Fan { fan: *fan, tag: tag.clone(), els: args.iter().map(|p| p.to_term()).collect() }
}
Pattern::Lst(_) | Pattern::Str(_) => todo!(),
}
}
/// Whether the pattern declares any unscoped (`Chn`) variables.
pub fn has_unscoped(&self) -> bool {
match self {
Pattern::Chn(_) => true,
Pattern::Var(_) | Pattern::Str(_) | Pattern::Num(_) => false,
Pattern::Ctr(_, x) | Pattern::Fan(_, _, x) | Pattern::Lst(x) => x.iter().any(|x| x.has_unscoped()),
}
}
/// Whether any immediate child is itself a non-variable (nested) pattern.
pub fn has_nested(&self) -> bool {
for child in self.children() {
if matches!(child, Pattern::Ctr(_, _) | Pattern::Fan(_, _, _) | Pattern::Lst(_)) {
return true;
}
}
false
}
}
impl Rule {
/// Number of arguments this rule matches on.
pub fn arity(&self) -> usize {
self.pats.len()
}
}
impl Definition {
/// Creates a compiler-generated definition, keeping the `Builtin` origin
/// kind when the original source was builtin. The type starts as a hole.
pub fn new_gen(name: Name, rules: Vec<Rule>, source: Source, check: bool) -> Self {
let kind = if source.is_builtin() { SourceKind::Builtin } else { SourceKind::Generated };
let source = Source { kind, ..source };
Self { name, typ: Type::Hole, check, rules, source }
}
pub fn is_builtin(&self) -> bool {
self.source.is_builtin()
}
/// Number of arguments the definition takes.
/// Panics if the definition has no rules.
pub fn arity(&self) -> usize {
self.rules[0].arity()
}
#[track_caller]
pub fn assert_no_pattern_matching_rules(&self) {
assert!(self.rules.len() == 1, "Definition rules should have been removed in earlier pass");
assert!(self.rules[0].pats.is_empty(), "Definition args should have been removed in an earlier pass");
}
/// The single rule of a desugared definition (asserts desugaring happened).
#[track_caller]
pub fn rule(&self) -> &Rule {
self.assert_no_pattern_matching_rules();
&self.rules[0]
}
/// Mutable access to the single rule of a desugared definition.
#[track_caller]
pub fn rule_mut(&mut self) -> &mut Rule {
self.assert_no_pattern_matching_rules();
&mut self.rules[0]
}
}
impl Type {
/// Substitutes the occurrences of a type constructor with the given name.
/// Substitutes both `Var` and `Ctr` types since `Var` could be referring to
/// an unresolved type constructor.
| rust | Apache-2.0 | d184863f03e796d1d657958a51dd6dd331ade92d | 2026-01-04T15:41:39.511038Z | true |
HigherOrderCO/Bend | https://github.com/HigherOrderCO/Bend/blob/d184863f03e796d1d657958a51dd6dd331ade92d/src/fun/load_book.rs | src/fun/load_book.rs | use super::{
parser::{FunParser, ParseBook},
Book, Name, Source, SourceKind,
};
use crate::{
diagnostics::{Diagnostics, DiagnosticsConfig, TextSpan},
imports::PackageLoader,
};
use std::path::Path;
// TODO: Refactor so that we don't mix the two syntaxes here.
/// Reads a file and parses to a definition book.
pub fn load_file_to_book(
  path: &Path,
  package_loader: impl PackageLoader,
  diag: DiagnosticsConfig,
) -> Result<Book, Diagnostics> {
  // Check existence explicitly to report a friendlier message than the
  // raw I/O error from `read_to_string`.
  if !path.try_exists().map_err(|e| e.to_string())? {
    return Err(format!("The file '{}' was not found.", path.display()).into());
  }
  let code = std::fs::read_to_string(path).map_err(|e| e.to_string())?;
  load_to_book(path, &code, package_loader, diag)
}
/// Parses `code` (on top of the builtin book) and resolves its imports.
pub fn load_to_book(
  origin: &Path,
  code: &str,
  package_loader: impl PackageLoader,
  diag: DiagnosticsConfig,
) -> Result<Book, Diagnostics> {
  let parsed = do_parse_book(code, origin, ParseBook::builtins())?;
  parsed.load_imports(package_loader, diag)
}
/// Parses a book from `code`, recording `origin` as its source path.
/// Parse errors are converted into `Diagnostics`, with the error's byte span
/// resolved to a `TextSpan` and attributed to the user's file.
pub fn do_parse_book(code: &str, origin: &Path, mut book: ParseBook) -> Result<ParseBook, Diagnostics> {
book.source = Name::new(origin.to_string_lossy());
FunParser::new(book.source.clone(), code, false).parse_book(book).map_err(|err| {
let mut diagnostics = Diagnostics::default();
let span = TextSpan::from_byte_span(code, err.span.0..err.span.1);
let source =
Source { file: Some(origin.to_string_lossy().into()), span: Some(span), kind: SourceKind::User };
diagnostics.add_parsing_error(err, source);
diagnostics
})
}
| rust | Apache-2.0 | d184863f03e796d1d657958a51dd6dd331ade92d | 2026-01-04T15:41:39.511038Z | false |
HigherOrderCO/Bend | https://github.com/HigherOrderCO/Bend/blob/d184863f03e796d1d657958a51dd6dd331ade92d/src/fun/check/shared_names.rs | src/fun/check/shared_names.rs | use crate::fun::{Ctx, Name};
use indexmap::IndexMap;
use std::fmt::Display;
/// Error for two top-level items that illegally share a name.
#[derive(Debug, Clone)]
pub struct RepeatedTopLevelNameErr {
/// Kind of the first (earlier) item declared with the name.
kind_fst: NameKind,
/// Kind of the later, conflicting item.
kind_snd: NameKind,
name: Name,
}
impl Ctx<'_> {
  /// Checks if there are any repeated top level names. Constructors
  /// and functions can't share names and adts can't share names.
  pub fn check_shared_names(&mut self) {
    let mut names = NameInfo::default();
    // Record every top-level name with its kind, ADTs first.
    let adts = self.book.adts.keys().map(|n| (n, NameKind::Adt));
    let ctrs = self.book.ctrs.keys().map(|n| (n, NameKind::Ctr));
    let defs = self.book.defs.keys().map(|n| (n, NameKind::Def));
    for (name, kind) in adts.chain(ctrs).chain(defs) {
      names.add_name(name, kind);
    }
    for err in names.into_errs() {
      self.info.add_book_error(err);
    }
  }
}
/// The kind of top-level item a name refers to.
#[derive(Debug, Clone, Copy)]
enum NameKind {
Adt,
Def,
Ctr,
}
/// For each name, the kinds of items declared with it, in declaration order.
#[derive(Debug, Default)]
struct NameInfo<'a>(IndexMap<&'a Name, Vec<NameKind>>);
impl<'a> NameInfo<'a> {
/// Records that `name` was declared as an item of the given kind.
fn add_name(&mut self, name: &'a Name, kind: NameKind) {
self.0.entry(name).or_default().push(kind);
}
/// Produces one error per conflicting pair: two ADTs may not share a name,
/// and constructors/functions may not share a name with each other. An ADT
/// sharing a name with a single constructor or function is allowed.
fn into_errs(self) -> Vec<RepeatedTopLevelNameErr> {
let mut errs = vec![];
for (name, kinds) in self.0 {
let mut num_adts = 0;
// First constructor-or-function kind seen for this name.
let mut fst_ctr_def = None;
for kind in kinds {
if let NameKind::Adt = kind {
num_adts += 1;
if num_adts >= 2 {
errs.push(RepeatedTopLevelNameErr {
kind_fst: NameKind::Adt,
kind_snd: NameKind::Adt,
name: name.clone(),
});
}
} else if let Some(fst) = fst_ctr_def {
errs.push(RepeatedTopLevelNameErr { kind_fst: fst, kind_snd: kind, name: name.clone() });
} else {
fst_ctr_def = Some(kind);
}
}
}
errs
}
}
impl Display for NameKind {
  /// Human-readable label for the item kind, used in error messages.
  fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
    let label = match self {
      NameKind::Adt => "data type",
      NameKind::Def => "function",
      NameKind::Ctr => "constructor",
    };
    f.write_str(label)
  }
}
impl std::fmt::Display for RepeatedTopLevelNameErr {
  /// Formats e.g. "Function 'foo' has the same name as a previously defined data type".
  fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
    let mut snd = self.kind_snd.to_string();
    // Capitalize the first letter of the later item's kind. `get_mut`
    // instead of `snd[0..1]` so an empty or non-ASCII-leading label can
    // never panic the formatter.
    if let Some(head) = snd.get_mut(0..1) {
      head.make_ascii_uppercase();
    }
    write!(f, "{} '{}' has the same name as a previously defined {}", snd, self.name, self.kind_fst)
  }
}
| rust | Apache-2.0 | d184863f03e796d1d657958a51dd6dd331ade92d | 2026-01-04T15:41:39.511038Z | false |
HigherOrderCO/Bend | https://github.com/HigherOrderCO/Bend/blob/d184863f03e796d1d657958a51dd6dd331ade92d/src/fun/check/unbound_refs.rs | src/fun/check/unbound_refs.rs | use crate::{
diagnostics::Diagnostics,
fun::{Book, Ctx, Name, Term},
maybe_grow,
};
use std::collections::HashSet;
impl Ctx<'_> {
/// Reports an error for every `Ref` in the book that doesn't point to a
/// known function or native HVM definition. Returns the accumulated
/// diagnostics as a fatal error when any were found.
pub fn check_unbound_refs(&mut self) -> Result<(), Diagnostics> {
for def in self.book.defs.values() {
// A set, so each unknown name is reported once per definition.
let mut unbounds = HashSet::new();
for rule in def.rules.iter() {
rule.body.check_unbound_refs(self.book, &mut unbounds);
}
for unbound in unbounds {
self.info.add_function_error(
format!("Reference to undefined function '{unbound}'"),
def.name.clone(),
def.source.clone(),
);
}
}
self.info.fatal(())
}
}
impl Term {
  /// Walks the term, inserting into `unbounds` every `Ref` name that is
  /// neither a function nor a native HVM definition of `book`.
  pub fn check_unbound_refs(&self, book: &Book, unbounds: &mut HashSet<Name>) {
    maybe_grow(|| {
      if let Term::Ref { nam } = self {
        let known = book.defs.contains_key(nam) || book.hvm_defs.contains_key(nam);
        if !known {
          unbounds.insert(nam.clone());
        }
      }
      self.children().for_each(|child| child.check_unbound_refs(book, unbounds));
    })
  }
}
| rust | Apache-2.0 | d184863f03e796d1d657958a51dd6dd331ade92d | 2026-01-04T15:41:39.511038Z | false |
HigherOrderCO/Bend | https://github.com/HigherOrderCO/Bend/blob/d184863f03e796d1d657958a51dd6dd331ade92d/src/fun/check/type_check.rs | src/fun/check/type_check.rs | //! Optional Hindley-Milner-like type system.
//!
//! Based on https://github.com/developedby/algorithm-w-rs
//! and https://github.com/mgrabmueller/AlgorithmW.
use crate::{
diagnostics::Diagnostics,
fun::{num_to_name, Adt, Book, Ctx, FanKind, MatchRule, Name, Num, Op, Pattern, Tag, Term, Type},
maybe_grow,
};
use std::collections::{BTreeMap, BTreeSet, HashMap};
impl Ctx<'_> {
/// Runs type inference over the whole book and stores the inferred type
/// (instantiated with fresh type variables) back into each definition.
pub fn type_check(&mut self) -> Result<(), Diagnostics> {
let types = infer_book(self.book, &mut self.info)?;
for def in self.book.defs.values_mut() {
def.typ = types[&def.name].instantiate(&mut VarGen::default());
}
Ok(())
}
}
/// Maps each top-level function name to its inferred type scheme.
type ProgramTypes = HashMap<Name, Scheme>;
/// A type scheme, aka a polymorphic type.
/// The `Vec<Name>` holds the scheme's bound (generalized) type variables.
#[derive(Clone, Debug)]
struct Scheme(Vec<Name>, Type);
/// A finite mapping from type variables to types.
#[derive(Clone, Default, Debug)]
struct Subst(BTreeMap<Name, Type>);
/// A mapping from term variables to type schemes.
#[derive(Clone, Default, Debug)]
struct TypeEnv(BTreeMap<Name, Scheme>);
/// Variable generator for type variables.
#[derive(Default)]
struct VarGen(usize);
/// Topologically ordered set of mutually recursive groups of functions.
struct RecGroups(Vec<Vec<Name>>);
/* Implementations */
impl Type {
  /// Returns the set of type variables occurring free in this type.
  fn free_type_vars(&self) -> BTreeSet<Name> {
    maybe_grow(|| match self {
      Type::Var(x) => BTreeSet::from([x.clone()]),
      Type::Ctr(_, ts) | Type::Tup(ts) => ts.iter().flat_map(|t| t.free_type_vars()).collect(),
      Type::Arr(t1, t2) => t1.free_type_vars().union(&t2.free_type_vars()).cloned().collect(),
      Type::Number(t) | Type::Integer(t) => t.free_type_vars(),
      // Leaf types contain no variables.
      Type::U24 | Type::F24 | Type::I24 | Type::None | Type::Any | Type::Hole => BTreeSet::new(),
    })
  }
  /// Applies a substitution, replacing every mapped variable by its image.
  fn subst(&self, subst: &Subst) -> Type {
    maybe_grow(|| match self {
      // Unmapped variables are left untouched.
      Type::Var(nam) => match subst.0.get(nam) {
        Some(new) => new.clone(),
        None => self.clone(),
      },
      Type::Ctr(name, ts) => Type::Ctr(name.clone(), ts.iter().map(|t| t.subst(subst)).collect()),
      Type::Arr(t1, t2) => Type::Arr(Box::new(t1.subst(subst)), Box::new(t2.subst(subst))),
      Type::Tup(els) => Type::Tup(els.iter().map(|t| t.subst(subst)).collect()),
      Type::Number(t) => Type::Number(Box::new(t.subst(subst))),
      Type::Integer(t) => Type::Integer(Box::new(t.subst(subst))),
      t @ (Type::U24 | Type::F24 | Type::I24 | Type::None | Type::Any | Type::Hole) => t.clone(),
    })
  }
  /// Converts a monomorphic type into a type scheme by abstracting
  /// over the type variables free in `t`, but not free in the type
  /// environment.
  fn generalize(&self, env: &TypeEnv) -> Scheme {
    let vars_env = env.free_type_vars();
    let vars_t = self.free_type_vars();
    let vars = vars_t.difference(&vars_env).cloned().collect();
    Scheme(vars, self.clone())
  }
}
impl Scheme {
  /// Free type variables of the scheme: the body's free variables minus
  /// the variables bound by the scheme itself.
  fn free_type_vars(&self) -> BTreeSet<Name> {
    let vars = self.1.free_type_vars();
    let bound_vars = self.0.iter().cloned().collect();
    vars.difference(&bound_vars).cloned().collect()
  }
  /// Applies a substitution to the scheme body, ignoring entries that
  /// would capture the scheme's own bound variables.
  fn subst(&self, subst: &Subst) -> Scheme {
    let mut subst = subst.clone();
    for x in self.0.iter() {
      subst.0.remove(x);
    }
    let t = self.1.subst(&subst);
    Scheme(self.0.clone(), t)
  }
  /// Converts a type scheme into a monomorphic type by assigning
  /// fresh type variables to each variable bound by the scheme.
  fn instantiate(&self, var_gen: &mut VarGen) -> Type {
    let new_vars = self.0.iter().map(|_| var_gen.fresh());
    let subst = Subst(self.0.iter().cloned().zip(new_vars).collect());
    self.1.subst(&subst)
  }
}
impl Subst {
  /// Compose two substitutions.
  ///
  /// Every type in `other` is first rewritten using `self` (against the
  /// state of `self` before any merging), then the rewritten entries are
  /// merged into `self`, overriding on key collision.
  fn compose(mut self, other: Subst) -> Subst {
    let rewritten: Vec<_> = other.0.into_iter().map(|(x, t)| (x, t.subst(&self))).collect();
    for (x, t) in rewritten {
      self.0.insert(x, t);
    }
    self
  }
}
impl TypeEnv {
  /// Returns the type variables occurring free in any scheme of the environment.
  fn free_type_vars(&self) -> BTreeSet<Name> {
    let mut vars = BTreeSet::new();
    for scheme in self.0.values() {
      let scheme_vars = scheme.free_type_vars();
      vars = vars.union(&scheme_vars).cloned().collect();
    }
    vars
  }
  /// Applies a substitution to every scheme in the environment.
  fn subst(&self, subst: &Subst) -> TypeEnv {
    let env = self.0.iter().map(|(x, scheme)| (x.clone(), scheme.subst(subst))).collect();
    TypeEnv(env)
  }
  /// Binds `name` to `scheme`, replacing any previous binding.
  fn insert(&mut self, name: Name, scheme: Scheme) {
    self.0.insert(name, scheme);
  }
  /// Temporarily binds a set of pattern variables (`None` binds are erased
  /// and skipped), returning the shadowed bindings so they can be restored
  /// later with `pop_binds`.
  fn add_binds<'a>(
    &mut self,
    bnd: impl IntoIterator<Item = (&'a Option<Name>, Scheme)>,
  ) -> Vec<(Name, Option<Scheme>)> {
    let mut old_bnd = vec![];
    for (name, scheme) in bnd {
      if let Some(name) = name {
        let old = self.0.insert(name.clone(), scheme);
        old_bnd.push((name.clone(), old));
      }
    }
    old_bnd
  }
  /// Restores the environment to its state before the matching `add_binds`.
  ///
  /// Bindings that shadowed an outer scheme are restored to it; bindings
  /// that were fresh are removed. (Previously fresh binds were left in the
  /// env after their scope ended; such stale schemes contribute their free
  /// type variables to `free_type_vars`, which can wrongly restrict
  /// generalization in `Type::generalize`.) Restoration happens in reverse
  /// insertion order so repeated names within one `add_binds` call unwind
  /// correctly.
  fn pop_binds(&mut self, old_bnd: Vec<(Name, Option<Scheme>)>) {
    for (name, scheme) in old_bnd.into_iter().rev() {
      match scheme {
        Some(scheme) => {
          self.0.insert(name, scheme);
        }
        None => {
          self.0.remove(&name);
        }
      }
    }
  }
}
impl VarGen {
  /// Generates a fresh type variable.
  fn fresh(&mut self) -> Type {
    Type::Var(self.fresh_name())
  }
  /// Generates a fresh, not-yet-used type variable name and advances
  /// the internal counter.
  fn fresh_name(&mut self) -> Name {
    let name = Name::new(num_to_name(self.0 as u64));
    self.0 += 1;
    name
  }
}
impl RecGroups {
  /// Groups the book's checked functions into strongly connected
  /// components of mutual recursion, ordered so that a group is emitted
  /// only after the groups it depends on.
  fn from_book(book: &Book) -> RecGroups {
    type DependencyGraph<'a> = BTreeMap<&'a Name, BTreeSet<&'a Name>>;
    // Collects into `deps` the checked functions referenced by `term`.
    fn collect_dependencies<'a>(
      term: &'a Term,
      book: &'a Book,
      scope: &mut Vec<Name>,
      deps: &mut BTreeSet<&'a Name>,
    ) {
      if let Term::Ref { nam } = term {
        if book.ctrs.contains_key(nam) || book.hvm_defs.contains_key(nam) || !book.defs[nam].check {
          // Don't infer types for constructors or unchecked functions
        } else {
          deps.insert(nam);
        }
      }
      // NOTE(review): `scope` is pushed/popped here but never read in this
      // function — presumably kept for symmetry with other traversals.
      for (child, binds) in term.children_with_binds() {
        scope.extend(binds.clone().flatten().cloned());
        collect_dependencies(child, book, scope, deps);
        scope.truncate(scope.len() - binds.flatten().count());
      }
    }
    /// Tarjan's algorithm for finding strongly connected components.
    fn strong_connect<'a>(
      v: &'a Name,
      deps: &DependencyGraph<'a>,
      index: &mut usize,
      index_map: &mut BTreeMap<&'a Name, usize>,
      low_link: &mut BTreeMap<&'a Name, usize>,
      stack: &mut Vec<&'a Name>,
      components: &mut Vec<BTreeSet<Name>>,
    ) {
      maybe_grow(|| {
        index_map.insert(v, *index);
        low_link.insert(v, *index);
        *index += 1;
        stack.push(v);
        if let Some(neighbors) = deps.get(v) {
          for w in neighbors {
            if !index_map.contains_key(w) {
              // Successor w has not yet been visited, recurse on it.
              strong_connect(w, deps, index, index_map, low_link, stack, components);
              low_link.insert(v, low_link[v].min(low_link[w]));
            } else if stack.contains(w) {
              // Successor w is in stack S and hence in the current SCC.
              // NOTE(review): `stack.contains` is a linear scan; textbook
              // Tarjan keeps an on-stack flag. Acceptable for small graphs.
              low_link.insert(v, low_link[v].min(index_map[w]));
            } else {
              // If w is not on stack, then (v, w) is an edge pointing
              // to an SCC already found and must be ignored.
            }
          }
        }
        // If v is a root node, pop the stack and generate an SCC.
        if low_link[v] == index_map[v] {
          let mut component = BTreeSet::new();
          while let Some(w) = stack.pop() {
            component.insert(w.clone());
            if w == v {
              break;
            }
          }
          components.push(component);
        }
      })
    }
    // Build the dependency graph
    let mut deps = DependencyGraph::default();
    for (name, def) in &book.defs {
      if book.ctrs.contains_key(name) || !def.check {
        // Don't infer types for constructors or unchecked functions
        continue;
      }
      let mut fn_deps = Default::default();
      collect_dependencies(&def.rule().body, book, &mut vec![], &mut fn_deps);
      deps.insert(name, fn_deps);
    }
    let mut index = 0;
    let mut stack = Vec::new();
    let mut index_map = BTreeMap::new();
    let mut low_link = BTreeMap::new();
    let mut components = Vec::new();
    for name in deps.keys() {
      if !index_map.contains_key(name) {
        strong_connect(name, &deps, &mut index, &mut index_map, &mut low_link, &mut stack, &mut components);
      }
    }
    // Tarjan emits each SCC after everything reachable from it, so the
    // resulting order is usable directly for dependency-first inference.
    let components = components.into_iter().map(|x| x.into_iter().collect()).collect();
    RecGroups(components)
  }
}
/* Inference, unification and type checking */
/// Infers a generalized type scheme for every checked function in the book.
///
/// Constructors, unchecked functions and hvm functions get their annotated
/// types registered directly; checked functions are then inferred one
/// mutually recursive group at a time, in dependency order.
fn infer_book(book: &Book, diags: &mut Diagnostics) -> Result<ProgramTypes, Diagnostics> {
  let groups = RecGroups::from_book(book);
  let mut env = TypeEnv::default();
  // Note: We store the inferred and generalized types in a separate
  // environment, to avoid unnecessary cloning (since no immutable data).
  let mut types = ProgramTypes::default();
  // Add the constructors to the environment.
  for adt in book.adts.values() {
    for ctr in adt.ctrs.values() {
      types.insert(ctr.name.clone(), ctr.typ.generalize(&TypeEnv::default()));
    }
  }
  // Add the types of unchecked functions to the environment.
  for def in book.defs.values() {
    if !def.check {
      types.insert(def.name.clone(), def.typ.generalize(&TypeEnv::default()));
    }
  }
  // Add the types of hvm functions to the environment.
  for def in book.hvm_defs.values() {
    types.insert(def.name.clone(), def.typ.generalize(&TypeEnv::default()));
  }
  // Infer the types of regular functions.
  for group in &groups.0 {
    infer_group(&mut env, book, group, &mut types, diags)?;
  }
  Ok(types)
}
/// Infers, unifies, specializes and stores the types of one mutually
/// recursive group of functions.
fn infer_group(
  env: &mut TypeEnv,
  book: &Book,
  group: &[Name],
  types: &mut ProgramTypes,
  diags: &mut Diagnostics,
) -> Result<(), Diagnostics> {
  let var_gen = &mut VarGen::default();
  // Generate fresh type variables for each function in the group.
  let tvs = group.iter().map(|_| var_gen.fresh()).collect::<Vec<_>>();
  for (name, tv) in group.iter().zip(tvs.iter()) {
    env.insert(name.clone(), Scheme(vec![], tv.clone()));
  }
  // Infer the types of the functions in the group.
  let mut ss = vec![];
  let mut inf_ts = vec![];
  let mut exp_ts = vec![];
  for name in group {
    let def = &book.defs[name];
    let (s, t) = infer(env, book, types, &def.rule().body, var_gen).map_err(|e| {
      diags.add_function_error(e, name.clone(), def.source.clone());
      std::mem::take(diags)
    })?;
    let t = t.subst(&s);
    ss.push(s);
    inf_ts.push(t);
    exp_ts.push(&def.typ);
  }
  // Remove the type variables of the group from the environment.
  // This avoids cloning of already generalized types.
  for name in group.iter() {
    env.0.remove(name);
  }
  // Unify the inferred body with the corresponding type variable.
  let mut s = ss.into_iter().fold(Subst::default(), |acc, s| acc.compose(s));
  let mut ts = vec![];
  for ((bod_t, tv), nam) in inf_ts.into_iter().zip(tvs.iter()).zip(group.iter()) {
    // NOTE(review): unlike the inference and specialization errors in this
    // function, this unification error is propagated via `?` without being
    // tagged with the function name/source — confirm whether intended.
    let (t, s2) = unify_term(&tv.subst(&s), &bod_t, &book.defs[nam].rule().body)?;
    ts.push(t);
    s = s.compose(s2);
  }
  let ts = ts.into_iter().map(|t| t.subst(&s)).collect::<Vec<_>>();
  // Specialize against the expected type, then generalize and store.
  for ((name, exp_t), inf_t) in group.iter().zip(exp_ts.iter()).zip(ts.iter()) {
    let t = specialize(inf_t, exp_t).map_err(|e| {
      diags.add_function_error(e, name.clone(), book.defs[name].source.clone());
      std::mem::take(diags)
    })?;
    types.insert(name.clone(), t.generalize(&TypeEnv::default()));
  }
  diags.fatal(())
}
/// Infer the type of a term in the given environment.
///
/// The type environment must contain bindings for all the free variables of the term.
///
/// The returned substitution records the type constraints imposed on type variables by the term.
/// The returned type is the type of the term.
fn infer(
  env: &mut TypeEnv,
  book: &Book,
  types: &ProgramTypes,
  term: &Term,
  var_gen: &mut VarGen,
) -> Result<(Subst, Type), String> {
  let res = maybe_grow(|| match term {
    // Variables/references: look up locally first, then in the global types.
    Term::Var { nam } | Term::Ref { nam } => {
      if let Some(scheme) = env.0.get(nam) {
        Ok::<_, String>((Subst::default(), scheme.instantiate(var_gen)))
      } else if let Some(scheme) = types.get(nam) {
        Ok((Subst::default(), scheme.instantiate(var_gen)))
      } else {
        // Unbound names are rejected in an earlier pass.
        unreachable!("unbound name '{}'", nam)
      }
    }
    // Lambda: bind the parameter to a fresh monomorphic var, infer the body.
    Term::Lam { tag: Tag::Static, pat, bod } => match pat.as_ref() {
      Pattern::Var(nam) => {
        let tv = var_gen.fresh();
        let old_bnd = env.add_binds([(nam, Scheme(vec![], tv.clone()))]);
        let (s, bod_t) = infer(env, book, types, bod, var_gen)?;
        env.pop_binds(old_bnd);
        let var_t = tv.subst(&s);
        Ok((s, Type::Arr(Box::new(var_t), Box::new(bod_t))))
      }
      _ => unreachable!("{}", term),
    },
    // Application: unify the function type with `arg_t -> app_t`.
    Term::App { tag: Tag::Static, fun, arg } => {
      let (s1, fun_t) = infer(env, book, types, fun, var_gen)?;
      let (s2, arg_t) = infer(&mut env.subst(&s1), book, types, arg, var_gen)?;
      let app_t = var_gen.fresh();
      let (_, s3) = unify_term(&fun_t.subst(&s2), &Type::Arr(Box::new(arg_t), Box::new(app_t.clone())), fun)?;
      let t = app_t.subst(&s3);
      Ok((s3.compose(s2).compose(s1), t))
    }
    Term::Let { pat, val, nxt } => match pat.as_ref() {
      // Plain let: generalize the value's type (let-polymorphism).
      Pattern::Var(nam) => {
        let (s1, val_t) = infer(env, book, types, val, var_gen)?;
        let old_bnd = env.add_binds([(nam, val_t.generalize(&env.subst(&s1)))]);
        let (s2, nxt_t) = infer(&mut env.subst(&s1), book, types, nxt, var_gen)?;
        env.pop_binds(old_bnd);
        Ok((s2.compose(s1), nxt_t))
      }
      Pattern::Fan(FanKind::Tup, Tag::Static, _) => {
        // Tuple elimination behaves like pattern matching.
        // Variables from tuple patterns don't get generalized.
        debug_assert!(!(pat.has_unscoped() || pat.has_nested()));
        let (s1, val_t) = infer(env, book, types, val, var_gen)?;
        let tvs = pat.binds().map(|_| var_gen.fresh()).collect::<Vec<_>>();
        let old_bnd = env.add_binds(pat.binds().zip(tvs.iter().map(|tv| Scheme(vec![], tv.clone()))));
        let (s2, nxt_t) = infer(&mut env.subst(&s1), book, types, nxt, var_gen)?;
        env.pop_binds(old_bnd);
        let tvs = tvs.into_iter().map(|tv| tv.subst(&s2)).collect::<Vec<_>>();
        let (_, s3) = unify_term(&val_t, &Type::Tup(tvs), val)?;
        Ok((s3.compose(s2).compose(s1), nxt_t))
      }
      Pattern::Fan(FanKind::Dup, Tag::Auto, _) => {
        // We pretend that sups don't exist and dups don't collide.
        // All variables must have the same type as the body of the dup.
        debug_assert!(!(pat.has_unscoped() || pat.has_nested()));
        let (s1, mut val_t) = infer(env, book, types, val, var_gen)?;
        let tvs = pat.binds().map(|_| var_gen.fresh()).collect::<Vec<_>>();
        let old_bnd = env.add_binds(pat.binds().zip(tvs.iter().map(|tv| Scheme(vec![], tv.clone()))));
        let (mut s2, nxt_t) = infer(&mut env.subst(&s1), book, types, nxt, var_gen)?;
        env.pop_binds(old_bnd);
        for tv in tvs {
          let (val_t_, s) = unify_term(&val_t, &tv.subst(&s2), val)?;
          val_t = val_t_;
          s2 = s2.compose(s);
        }
        Ok((s2.compose(s1), nxt_t))
      }
      _ => unreachable!(),
    },
    Term::Mat { bnd: _, arg, with_bnd: _, with_arg: _, arms } => {
      // Infer type of the scrutinee
      let (s1, t1) = infer(env, book, types, arg, var_gen)?;
      // Instantiate the expected type of the scrutinee
      let adt_name = book.ctrs.get(arms[0].0.as_ref().unwrap()).unwrap();
      let adt = &book.adts[adt_name];
      let (adt_s, adt_t) = instantiate_adt(adt, var_gen)?;
      // For each case, infer the types and unify them all.
      // Unify the inferred type of the destructured fields with the
      // expected from what we inferred from the scrutinee.
      let (s2, nxt_t) = infer_match_cases(env.subst(&s1), book, types, adt, arms, &adt_s, var_gen)?;
      // Unify the inferred type with the expected type
      let (_, s3) = unify_term(&t1, &adt_t.subst(&s2), arg)?;
      Ok((s3.compose(s2).compose(s1), nxt_t))
    }
    // Numeric literals carry their type directly.
    Term::Num { val } => {
      let t = match val {
        Num::U24(_) => Type::U24,
        Num::I24(_) => Type::I24,
        Num::F24(_) => Type::F24,
      };
      Ok((Subst::default(), t))
    }
    Term::Oper { opr, fst, snd } => {
      let (s1, t1) = infer(env, book, types, fst, var_gen)?;
      let (s2, t2) = infer(&mut env.subst(&s1), book, types, snd, var_gen)?;
      // Both operands must have the same type.
      // NOTE(review): the pairing `t2.subst(&s1)` / `t1.subst(&s2)` looks
      // swapped relative to the usual convention — confirm intended.
      let (t2, s3) = unify_term(&t2.subst(&s1), &t1.subst(&s2), term)?;
      let s_args = s3.compose(s2).compose(s1);
      let t_args = t2.subst(&s_args);
      // Check numeric type matches the operation
      let tv = var_gen.fresh();
      let (t_opr, s_opr) = match opr {
        // Any numeric type
        Op::ADD | Op::SUB | Op::MUL | Op::DIV => {
          unify_term(&t_args, &Type::Number(Box::new(tv.clone())), term)?
        }
        // Comparisons accept any numeric type but always produce a U24.
        Op::EQ | Op::NEQ | Op::LT | Op::GT | Op::GE | Op::LE => {
          let (_, s) = unify_term(&t_args, &Type::Number(Box::new(tv.clone())), term)?;
          (Type::U24, s)
        }
        // Integers
        Op::REM | Op::AND | Op::OR | Op::XOR | Op::SHL | Op::SHR => {
          unify_term(&t_args, &Type::Integer(Box::new(tv.clone())), term)?
        }
        // Floating
        Op::POW => unify_term(&t_args, &Type::F24, term)?,
      };
      let t = t_opr.subst(&s_opr);
      Ok((s_opr.compose(s_args), t))
    }
    Term::Swt { bnd: _, arg, with_bnd: _, with_arg: _, pred, arms } => {
      // The scrutinee of a switch must be a U24.
      let (s1, t1) = infer(env, book, types, arg, var_gen)?;
      let (_, s2) = unify_term(&t1, &Type::U24, arg)?;
      let s_arg = s2.compose(s1);
      let mut env = env.subst(&s_arg);
      let mut ss_nums = vec![];
      let mut ts_nums = vec![];
      // NOTE(review): `arms[1]` as the successor arm is consistent with
      // `rev().skip(1)` only if the switch was normalized to exactly two
      // arms (`0` and `_+`) — presumably guaranteed by an earlier pass.
      for arm in arms.iter().rev().skip(1) {
        let (s, t) = infer(&mut env, book, types, arm, var_gen)?;
        env = env.subst(&s);
        ss_nums.push(s);
        ts_nums.push(t);
      }
      // The predecessor variable is in scope in the successor arm, as a U24.
      let old_bnd = env.add_binds([(pred, Scheme(vec![], Type::U24))]);
      let (s_succ, t_succ) = infer(&mut env, book, types, &arms[1], var_gen)?;
      env.pop_binds(old_bnd);
      let s_arms = ss_nums.into_iter().fold(s_succ, |acc, s| acc.compose(s));
      // All arms must have the same type.
      let mut t_swt = t_succ;
      let mut s_swt = Subst::default();
      for t_num in ts_nums {
        let (t, s) = unify_term(&t_swt, &t_num, term)?;
        t_swt = t;
        s_swt = s.compose(s_swt);
      }
      let s = s_swt.compose(s_arms).compose(s_arg);
      let t = t_swt.subst(&s);
      Ok((s, t))
    }
    // Tuple construction: infer each element independently.
    Term::Fan { fan: FanKind::Tup, tag: Tag::Static, els } => {
      let res = els.iter().map(|el| infer(env, book, types, el, var_gen)).collect::<Result<Vec<_>, _>>()?;
      let (ss, ts): (Vec<Subst>, Vec<Type>) = res.into_iter().unzip();
      let t = Type::Tup(ts);
      let s = ss.into_iter().fold(Subst::default(), |acc, s| acc.compose(s));
      Ok((s, t))
    }
    Term::Era => Ok((Subst::default(), Type::None)),
    Term::Fan { .. } | Term::Lam { tag: _, .. } | Term::App { tag: _, .. } | Term::Link { .. } => {
      unreachable!("'{term}' while type checking. Should never occur in checked functions")
    }
    Term::Use { .. }
    | Term::With { .. }
    | Term::Ask { .. }
    | Term::Nat { .. }
    | Term::Str { .. }
    | Term::List { .. }
    | Term::Fold { .. }
    | Term::Bend { .. }
    | Term::Open { .. }
    | Term::Def { .. }
    | Term::Err => unreachable!("'{term}' while type checking. Should have been removed in earlier pass"),
  })?;
  Ok(res)
}
/// Instantiates the type constructor of an ADT, also returning the
/// ADT var to instantiated var substitution, to be used when
/// instantiating the types of the fields of the eliminated constructors.
fn instantiate_adt(adt: &Adt, var_gen: &mut VarGen) -> Result<(Subst, Type), String> {
let tvs = adt.vars.iter().map(|_| var_gen.fresh());
let s = Subst(adt.vars.iter().zip(tvs).map(|(x, t)| (x.clone(), t)).collect());
let t = Type::Ctr(adt.name.clone(), adt.vars.iter().cloned().map(Type::Var).collect());
let t = t.subst(&s);
Ok((s, t))
}
/// Infers the common result type of the arms of a `match`, unifying the
/// destructured field types with those expected from the scrutinee's ADT.
///
/// `adt_s` maps the ADT's declared type variables to their instantiation
/// for this particular match.
fn infer_match_cases(
  mut env: TypeEnv,
  book: &Book,
  types: &ProgramTypes,
  adt: &Adt,
  arms: &[MatchRule],
  adt_s: &Subst,
  var_gen: &mut VarGen,
) -> Result<(Subst, Type), String> {
  maybe_grow(|| {
    if let Some(((ctr_nam, vars, bod), rest)) = arms.split_first() {
      let ctr = &adt.ctrs[ctr_nam.as_ref().unwrap()];
      // One fresh var per field, we later unify with the expected type.
      let tvs = vars.iter().map(|_| var_gen.fresh()).collect::<Vec<_>>();
      // Infer the body and unify the inferred field types with the expected.
      let old_bnd = env.add_binds(vars.iter().zip(tvs.iter().map(|tv| Scheme(vec![], tv.clone()))));
      let (s1, t1) = infer(&mut env, book, types, bod, var_gen)?;
      env.pop_binds(old_bnd);
      let inf_ts = tvs.into_iter().map(|tv| tv.subst(&s1)).collect::<Vec<_>>();
      let exp_ts = ctr.fields.iter().map(|f| f.typ.subst(adt_s)).collect::<Vec<_>>();
      let s2 = unify_fields(inf_ts.iter().zip(exp_ts.iter()), bod)?;
      // Recurse and unify with the other arms.
      let s = s2.compose(s1);
      let (s_rest, t_rest) = infer_match_cases(env.subst(&s), book, types, adt, rest, adt_s, var_gen)?;
      let (t_final, s_final) = unify_term(&t1.subst(&s), &t_rest, bod)?;
      Ok((s_final.compose(s_rest).compose(s), t_final))
    } else {
      // No arms left: the result is unconstrained.
      Ok((Subst::default(), var_gen.fresh()))
    }
  })
}
/// Unifies each inferred field type with its expected counterpart, then
/// composes the resulting substitutions last-field-first.
fn unify_fields<'a>(ts: impl Iterator<Item = (&'a Type, &'a Type)>, ctx: &Term) -> Result<Subst, String> {
  let ss = ts.map(|(inf, exp)| unify_term(inf, exp, ctx)).collect::<Result<Vec<_>, _>>()?;
  Ok(ss.into_iter().rev().fold(Subst::default(), |acc, (_, s)| acc.compose(s)))
}
/// Like [`unify`], but decorates any failure with the term being checked.
fn unify_term(t1: &Type, t2: &Type, ctx: &Term) -> Result<(Type, Subst), String> {
  unify(t1, t2).map_err(|msg| format!("In {ctx}:\n Can't unify '{t1}' and '{t2}'.{msg}"))
}
/// Unifies two types, returning the unified type and the substitution that
/// makes the inputs equal. Fails with a (possibly empty) message suffix
/// when the types are incompatible.
fn unify(t1: &Type, t2: &Type) -> Result<(Type, Subst), String> {
  maybe_grow(|| match (t1, t2) {
    // A hole unifies with anything without constraining variables.
    (t, Type::Hole) | (Type::Hole, t) => Ok((t.clone(), Subst::default())),
    (t, Type::Var(x)) | (Type::Var(x), t) => {
      // Try to bind variable `x` to `t`
      if let Type::Var(y) = t {
        if y == x {
          // Don't bind a variable to itself
          return Ok((t.clone(), Subst::default()));
        }
      }
      // Occurs check
      if t.free_type_vars().contains(x) {
        return Err(format!(" Variable '{x}' occurs in '{t}'"));
      }
      Ok((t.clone(), Subst(BTreeMap::from([(x.clone(), t.clone())]))))
    }
    // Arrows unify component-wise, threading the substitution left to right.
    (Type::Arr(l1, r1), Type::Arr(l2, r2)) => {
      let (t1, s1) = unify(l1, l2)?;
      let (t2, s2) = unify(&r1.subst(&s1), &r2.subst(&s1))?;
      Ok((Type::Arr(Box::new(t1), Box::new(t2)), s2.compose(s1)))
    }
    (Type::Ctr(name1, ts1), Type::Ctr(name2, ts2)) if name1 == name2 && ts1.len() == ts2.len() => {
      let mut s = Subst::default();
      let mut ts = vec![];
      for (t1, t2) in ts1.iter().zip(ts2.iter()) {
        let (t, s2) = unify(t1, t2)?;
        ts.push(t);
        s = s.compose(s2);
      }
      Ok((Type::Ctr(name1.clone(), ts), s))
    }
    (Type::Tup(els1), Type::Tup(els2)) if els1.len() == els2.len() => {
      let mut s = Subst::default();
      let mut ts = vec![];
      for (t1, t2) in els1.iter().zip(els2.iter()) {
        let (t, s2) = unify(t1, t2)?;
        ts.push(t);
        s = s.compose(s2);
      }
      Ok((Type::Tup(ts), s))
    }
    // Identical concrete leaf types unify trivially.
    t @ ((Type::U24, Type::U24)
    | (Type::F24, Type::F24)
    | (Type::I24, Type::I24)
    | (Type::None, Type::None)) => Ok((t.0.clone(), Subst::default())),
    (Type::Number(t1), Type::Number(t2)) => {
      let (t, s) = unify(t1, t2)?;
      Ok((Type::Number(Box::new(t)), s))
    }
    // Number meets Integer: the result is the more specific Integer.
    (Type::Number(tn), Type::Integer(ti)) | (Type::Integer(ti), Type::Number(tn)) => {
      let (t, s) = unify(ti, tn)?;
      Ok((Type::Integer(Box::new(t)), s))
    }
    (Type::Integer(t1), Type::Integer(t2)) => {
      let (t, s) = unify(t1, t2)?;
      Ok((t, s))
    }
    // A numeric class meets a concrete numeric type: resolve to the concrete one.
    (Type::Number(t1) | Type::Integer(t1), t2 @ (Type::U24 | Type::I24 | Type::F24))
    | (t2 @ (Type::U24 | Type::I24 | Type::F24), Type::Number(t1) | Type::Integer(t1)) => {
      let (t, s) = unify(t1, t2)?;
      Ok((t, s))
    }
    (Type::Any, t) | (t, Type::Any) => {
      let mut s = Subst::default();
      // Recurse to assign variables to `Any` as well
      for child in t.children() {
        let (_, s2) = unify(&Type::Any, child)?;
        s = s2.compose(s);
      }
      Ok((Type::Any, s))
    }
    // Incompatible shapes; the caller adds context to the message.
    _ => Err(String::new()),
  })
}
/// Specializes the inferred type against the type annotation.
/// This way, the annotation can be less general than the inferred type.
///
/// It also forces inferred 'Any' to the annotated, inferred types to
/// annotated 'Any' and fills 'Hole' with the inferred type.
///
/// Errors if the first type is not a superset of the second type.
fn specialize(inf: &Type, ann: &Type) -> Result<Type, String> {
  // Walks both types in lockstep, recording in `s` what each inferred
  // type variable was specialized to; a variable must specialize
  // consistently everywhere it appears.
  fn merge_specialization(inf: &Type, exp: &Type, s: &mut Subst) -> Result<Type, String> {
    maybe_grow(|| match (inf, exp) {
      // These rules have to come before
      (t, Type::Hole) => Ok(t.clone()),
      (Type::Hole, _) => unreachable!("Hole should never appear in the inferred type"),
      (_inf, Type::Any) => Ok(Type::Any),
      (Type::Any, exp) => Ok(exp.clone()),
      (Type::Var(x), new) => {
        if let Some(old) = s.0.get(x) {
          if old == new {
            Ok(new.clone())
          } else {
            Err(format!(" Inferred type variable '{x}' must be both '{old}' and '{new}'"))
          }
        } else {
          s.0.insert(x.clone(), new.clone());
          Ok(new.clone())
        }
      }
      (Type::Arr(l1, r1), Type::Arr(l2, r2)) => {
        let l = merge_specialization(l1, l2, s)?;
        let r = merge_specialization(r1, r2, s)?;
        Ok(Type::Arr(Box::new(l), Box::new(r)))
      }
      (Type::Ctr(name1, ts1), Type::Ctr(name2, ts2)) if name1 == name2 && ts1.len() == ts2.len() => {
        let mut ts = vec![];
        for (t1, t2) in ts1.iter().zip(ts2.iter()) {
          let t = merge_specialization(t1, t2, s)?;
          ts.push(t);
        }
        Ok(Type::Ctr(name1.clone(), ts))
      }
      (Type::Tup(ts1), Type::Tup(ts2)) if ts1.len() == ts2.len() => {
        let mut ts = vec![];
        for (t1, t2) in ts1.iter().zip(ts2.iter()) {
          let t = merge_specialization(t1, t2, s)?;
          ts.push(t);
        }
        Ok(Type::Tup(ts))
      }
      (Type::Number(t1), Type::Number(t2)) => Ok(Type::Number(Box::new(merge_specialization(t1, t2, s)?))),
      (Type::Integer(t1), Type::Integer(t2)) => Ok(Type::Integer(Box::new(merge_specialization(t1, t2, s)?))),
      (Type::U24, Type::U24) | (Type::F24, Type::F24) | (Type::I24, Type::I24) | (Type::None, Type::None) => {
        Ok(inf.clone())
      }
      // Shapes don't match; caller adds context.
      _ => Err(String::new()),
    })
  }
  // Refresh the variable names to avoid conflicts when unifying
  // Names of type vars in the annotation have nothing to do with names in the inferred type.
  let var_gen = &mut VarGen::default();
  let inf2 = inf.generalize(&TypeEnv::default()).instantiate(var_gen);
  let ann2 = ann.generalize(&TypeEnv::default()).instantiate(var_gen);
  let (t, s) = unify(&inf2, &ann2)
    .map_err(|e| format!("Type Error: Expected function type '{ann}' but found '{inf}'.{e}"))?;
  let t = t.subst(&s);
  // Merge the inferred specialization with the expected type.
  // This is done to cast to/from `Any` and `_` types.
  let mut merge_s = Subst::default();
  let t2 = merge_specialization(&t, ann, &mut merge_s).map_err(|e| {
    format!("Type Error: Annotated type '{ann}' is not a subtype of inferred type '{inf2}'.{e}")
  })?;
  Ok(t2.subst(&merge_s))
}
impl std::fmt::Display for Subst {
  /// Renders the substitution one `var => type` mapping per line.
  fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
    writeln!(f, "Subst {{")?;
    self.0.iter().try_for_each(|(x, y)| writeln!(f, " {x} => {y},"))?;
    write!(f, "}}")
  }
}
| rust | Apache-2.0 | d184863f03e796d1d657958a51dd6dd331ade92d | 2026-01-04T15:41:39.511038Z | false |
HigherOrderCO/Bend | https://github.com/HigherOrderCO/Bend/blob/d184863f03e796d1d657958a51dd6dd331ade92d/src/fun/check/check_untyped.rs | src/fun/check/check_untyped.rs | use crate::{
diagnostics::Diagnostics,
fun::{Ctx, FanKind, Pattern, Tag, Term},
maybe_grow,
};
impl Ctx<'_> {
  /// Checks that terms that cannot be typed are only used inside untyped functions.
  pub fn check_untyped_terms(&mut self) -> Result<(), Diagnostics> {
    // Only definitions marked for checking are inspected.
    for def in self.book.defs.values().filter(|def| def.check) {
      for rule in &def.rules {
        if let Err(msg) = rule.body.check_untyped_terms() {
          self.info.add_function_error(msg, def.name.clone(), def.source.clone());
        }
      }
    }
    self.info.fatal(())
  }
}
impl Term {
  /// Rejects term forms that the type system cannot represent.
  /// Only called on the bodies of type-checked functions.
  fn check_untyped_terms(&self) -> Result<(), String> {
    maybe_grow(|| {
      match self {
        Term::Lam { tag: Tag::Static, pat, .. } | Term::Let { pat, .. } => pat.check_untyped_patterns()?,
        Term::Lam { .. } => return Err("Tagged lambda in type-checked function".to_string()),
        Term::Link { nam } => {
          return Err(format!("Unscoped variable '${nam}' in type-checked function"));
        }
        // Untagged applications and tuples are fine.
        Term::App { tag: Tag::Static, .. } | Term::Fan { fan: FanKind::Tup, tag: Tag::Static, .. } => {}
        Term::App { .. } => return Err("Tagged application in type-checked function".to_string()),
        Term::Fan { fan: FanKind::Dup, .. } => {
          return Err("Superposition term in type-checked function".to_string());
        }
        Term::Fan { fan: FanKind::Tup, .. } => {
          return Err("Tagged tuple in type-checked function".to_string());
        }
        _ => {}
      }
      // Recurse into all subterms.
      self.children().try_for_each(|child| child.check_untyped_terms())
    })
  }
}
impl Pattern {
  /// Rejects pattern forms that the type system cannot represent.
  fn check_untyped_patterns(&self) -> Result<(), String> {
    maybe_grow(|| {
      match self {
        Pattern::Chn(x) => {
          return Err(format!("Unscoped variable bind '${x}' in type-checked function"));
        }
        // Auto-tagged dups and untagged tuple eliminations are fine.
        Pattern::Fan(FanKind::Dup, Tag::Auto, _) | Pattern::Fan(FanKind::Tup, Tag::Static, _) => {}
        Pattern::Fan(FanKind::Dup, _, _) => {
          return Err("Tagged duplication in type-checked function".to_string());
        }
        Pattern::Fan(FanKind::Tup, _, _) => {
          return Err("Tagged tuple elimination in type-checked function".to_string());
        }
        _ => {}
      }
      // Recurse into all subpatterns.
      self.children().try_for_each(|pat| pat.check_untyped_patterns())
    })
  }
}
| rust | Apache-2.0 | d184863f03e796d1d657958a51dd6dd331ade92d | 2026-01-04T15:41:39.511038Z | false |
HigherOrderCO/Bend | https://github.com/HigherOrderCO/Bend/blob/d184863f03e796d1d657958a51dd6dd331ade92d/src/fun/check/set_entrypoint.rs | src/fun/check/set_entrypoint.rs | use crate::{
diagnostics::WarningType,
fun::{Book, Ctx, Definition, Name},
ENTRY_POINT, HVM1_ENTRY_POINT,
};
/// Problems with locating or validating the program's entry point.
#[derive(Debug, Clone)]
pub enum EntryErr {
  // No definition with the expected entry point name exists.
  NotFound(Name),
  // More than one candidate entry point was found (their names are listed).
  Multiple(Vec<Name>),
  // The chosen entry point definition has more than one rule.
  MultipleRules,
}
impl Ctx<'_> {
pub fn set_entrypoint(&mut self) {
let mut entrypoint = None;
let (custom, main, hvm1_main) = self.book.get_possible_entry_points();
match (custom, main, hvm1_main) {
(Some(entry), None, None) | (None, Some(entry), None) | (None, None, Some(entry)) => {
match validate_entry_point(entry) {
Ok(name) => entrypoint = Some(name),
Err(err) => self.info.add_book_error(err),
}
}
(Some(a), Some(b), None) | (None, Some(a), Some(b)) | (Some(a), None, Some(b)) => {
self.info.add_book_error(EntryErr::Multiple(vec![a.name.clone(), b.name.clone()]));
match validate_entry_point(a) {
Ok(name) => entrypoint = Some(name),
Err(err) => self.info.add_book_error(err),
}
}
(Some(a), Some(b), Some(c)) => {
self.info.add_book_error(EntryErr::Multiple(vec![a.name.clone(), b.name.clone(), c.name.clone()]));
match validate_entry_point(a) {
Ok(name) => entrypoint = Some(name),
Err(err) => self.info.add_book_error(err),
}
}
(None, None, None) => {
let entrypoint = self.book.entrypoint.clone().unwrap_or(Name::new(ENTRY_POINT));
self.info.add_book_warning(EntryErr::NotFound(entrypoint), WarningType::MissingMain)
}
}
self.book.entrypoint = entrypoint;
}
}
/// Checks that the chosen entry point has a valid shape.
fn validate_entry_point(entry: &Definition) -> Result<Name, EntryErr> {
  // The entry point is restricted to at most one rule.
  match entry.rules.len() {
    0 | 1 => Ok(entry.name.clone()),
    _ => Err(EntryErr::MultipleRules),
  }
}
impl Book {
  /// Returns the candidate entry point definitions, in priority order:
  /// the user-specified name, then the default name, then the hvm1 name.
  fn get_possible_entry_points(&self) -> (Option<&Definition>, Option<&Definition>, Option<&Definition>) {
    let custom = self.entrypoint.as_ref().and_then(|e| self.defs.get(e));
    (custom, self.defs.get(&Name::new(ENTRY_POINT)), self.defs.get(&Name::new(HVM1_ENTRY_POINT)))
  }
}
impl std::fmt::Display for EntryErr {
  fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
    match self {
      EntryErr::NotFound(name) => write!(f, "File has no '{name}' definition."),
      // Two candidates and three candidates use different phrasings.
      EntryErr::Multiple(fnd) => match fnd.as_slice() {
        [fst, snd] => write!(f, "File has both '{fst}' and '{snd}' definitions."),
        _ => write!(f, "File has '{}', '{}' and '{}' definitions.", fnd[0], fnd[1], fnd[2]),
      },
      EntryErr::MultipleRules => write!(f, "Main definition can't have more than one rule."),
    }
  }
}
| rust | Apache-2.0 | d184863f03e796d1d657958a51dd6dd331ade92d | 2026-01-04T15:41:39.511038Z | false |
HigherOrderCO/Bend | https://github.com/HigherOrderCO/Bend/blob/d184863f03e796d1d657958a51dd6dd331ade92d/src/fun/check/mod.rs | src/fun/check/mod.rs | pub mod check_untyped;
pub mod set_entrypoint;
pub mod shared_names;
pub mod type_check;
pub mod unbound_refs;
pub mod unbound_vars;
| rust | Apache-2.0 | d184863f03e796d1d657958a51dd6dd331ade92d | 2026-01-04T15:41:39.511038Z | false |
HigherOrderCO/Bend | https://github.com/HigherOrderCO/Bend/blob/d184863f03e796d1d657958a51dd6dd331ade92d/src/fun/check/unbound_vars.rs | src/fun/check/unbound_vars.rs | use crate::{
diagnostics::Diagnostics,
fun::{transform::desugar_bend, Ctx, Name, Pattern, Term},
maybe_grow,
};
use std::collections::HashMap;
/// Unbound-variable problems found in a definition body.
#[derive(Debug, Clone)]
pub enum UnboundVarErr {
  // A scoped variable was used without being bound.
  Local(Name),
  // An unscoped (global) variable was declared or used an inconsistent
  // number of times; each must be declared once and used once.
  Global { var: Name, declared: usize, used: usize },
}
impl Ctx<'_> {
  /// Checks that there are no unbound variables in all definitions.
  pub fn check_unbound_vars(&mut self) -> Result<(), Diagnostics> {
    for (def_name, def) in self.book.defs.iter_mut() {
      let mut errs = Vec::new();
      for rule in &mut def.rules {
        // Note: Using a Vec instead of a Map is a deliberate optimization.
        let mut scope = Vec::new();
        for pat in &rule.pats {
          scope.extend(pat.binds().map(|x| x.as_ref()));
        }
        rule.body.check_unbound_vars(&mut scope, &mut errs);
      }
      for err in errs {
        self.info.add_function_error(err, def_name.clone(), def.source.clone());
      }
    }
    self.info.fatal(())
  }
}
impl Term {
  /// Checks that all variables in this term are bound.
  ///
  /// Precondition: References have been resolved, implicit binds have been solved.
  /// Local errors are pushed as they are found; unscoped (global) variables
  /// are counted during the traversal and validated afterwards: each must be
  /// declared exactly once and used exactly once.
  pub fn check_unbound_vars<'a>(
    &'a mut self,
    scope: &mut Vec<Option<&'a Name>>,
    errs: &mut Vec<UnboundVarErr>,
  ) {
    let mut globals = HashMap::new();
    check_uses(self, scope, &mut globals, errs);
    // Check global vars
    for (var, (declared, used)) in globals {
      if declared == 1 && used == 1 {
        continue;
      }
      errs.push(UnboundVarErr::Global { var, declared, used });
    }
  }
}
/// Scope has the number of times a name was declared in the current scope
/// Globals has how many times a global var name was declared and used.
///
/// Walks the term, pushing pattern binds onto `scope` before visiting each
/// child and popping them afterwards. Unbound local uses are reported and
/// the offending subterm is replaced with `Term::Err` so later passes don't
/// trip on it.
pub fn check_uses<'a>(
  term: &'a mut Term,
  scope: &mut Vec<Option<&'a Name>>,
  globals: &mut HashMap<Name, (usize, usize)>,
  errs: &mut Vec<UnboundVarErr>,
) {
  maybe_grow(move || match term {
    Term::Var { nam } => {
      if !scope_contains(nam, scope) {
        errs.push(UnboundVarErr::Local(nam.clone()));
        // Replace the unbound use so later passes see a valid term.
        *term = Term::Err;
      }
    }
    // Count a use of an unscoped variable.
    Term::Link { nam } => {
      globals.entry(nam.clone()).or_default().1 += 1;
    }
    _ => {
      // Count declarations of unscoped variables in this term's pattern.
      if let Some(pat) = term.pattern() {
        check_global_binds(pat, globals)
      }
      for (child, binds) in term.children_mut_with_binds() {
        for bind in binds.clone() {
          scope.push(bind.as_ref());
        }
        check_uses(child, scope, globals, errs);
        // Pop exactly the binds pushed for this child.
        for _ in binds {
          scope.pop();
        }
      }
    }
  })
}
/// Counts, into `globals`, the unscoped (`Chn`) variables declared by a pattern.
pub fn check_global_binds(pat: &Pattern, globals: &mut HashMap<Name, (usize, usize)>) {
  if let Pattern::Chn(nam) = pat {
    globals.entry(nam.clone()).or_default().0 += 1;
  } else {
    for child in pat.children() {
      check_global_binds(child, globals)
    }
  }
}
/// Whether `nam` is bound anywhere in the scope stack.
/// Searches from the innermost (most recent) bind outward so the common
/// case of a recently bound variable exits early.
// NOTE(review): comparing `Option<&Name>` against `&Name` relies on a
// custom `PartialEq` impl defined elsewhere — presumably `None` entries
// never compare equal; confirm in the `Name` impls.
fn scope_contains(nam: &Name, scope: &[Option<&Name>]) -> bool {
  scope.iter().rev().any(|scope_nam| scope_nam == nam)
}
impl std::fmt::Display for UnboundVarErr {
  fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
    match self {
      UnboundVarErr::Local(var) => {
        // Special hints for two common mistakes: using the bend keyword
        // outside a 'when' arm, and a missed subtraction ('-' is a valid
        // name character, so 'a-b' parses as one identifier).
        if var == desugar_bend::RECURSIVE_KW {
          write!(
            f,
            "Unbound variable '{}'.\n Note: '{}' is only a keyword inside the 'when' arm of a 'bend'.",
            var,
            desugar_bend::RECURSIVE_KW
          )
        } else if let Some((pre, suf)) = var.rsplit_once('-') {
          write!(
            f,
            "Unbound variable '{var}'. If you wanted to subtract '{pre}' from '{suf}', you must separate it with spaces ('{pre} - {suf}') since '-' is a valid name character."
          )
        } else {
          write!(f, "Unbound variable '{var}'.")
        }
      }
      // A well-formed unscoped variable is declared once and used once;
      // every other (declared, used) combination gets a specific message.
      UnboundVarErr::Global { var, declared, used } => match (declared, used) {
        (0, _) => write!(f, "Unbound unscoped variable '${var}'."),
        (_, 0) => write!(f, "Unscoped variable from lambda 'λ${var}' is never used."),
        (1, _) => write!(f, "Unscoped variable '${var}' used more than once."),
        (_, 1) => write!(f, "Unscoped lambda 'λ${var}' declared more than once."),
        (_, _) => {
          write!(f, "Unscoped lambda 'λ${var}' and unscoped variable '${var}' used more than once.")
        }
      },
    }
  }
}
| rust | Apache-2.0 | d184863f03e796d1d657958a51dd6dd331ade92d | 2026-01-04T15:41:39.511038Z | false |
HigherOrderCO/Bend | https://github.com/HigherOrderCO/Bend/blob/d184863f03e796d1d657958a51dd6dd331ade92d/src/fun/transform/desugar_open.rs | src/fun/transform/desugar_open.rs | use crate::{
diagnostics::Diagnostics,
fun::{Adts, Ctx, Term},
maybe_grow,
};
impl Ctx<'_> {
  /// Desugars every `open` term in the book's definitions.
  /// Errors are accumulated per function and reported via `info.fatal`.
  pub fn desugar_open(&mut self) -> Result<(), Diagnostics> {
    for def in self.book.defs.values_mut() {
      for rule in &mut def.rules {
        let result = rule.body.desugar_open(&self.book.adts);
        if let Err(err) = result {
          self.info.add_function_error(err, def.name.clone(), def.source.clone());
        }
      }
    }
    self.info.fatal(())
  }
}
impl Term {
  /// Desugars one `open` term into a single-armed `match` on the variable.
  ///
  /// Succeeds only when the opened type is a defined ADT with exactly one
  /// constructor; otherwise returns a descriptive error string.
  fn desugar_open(&mut self, adts: &Adts) -> Result<(), String> {
    maybe_grow(|| {
      match self {
        Term::Open { typ, var, bod } => {
          // Desugar inner opens first so the generated match body is final.
          bod.desugar_open(adts)?;
          if let Some(adt) = adts.get(&*typ) {
            if adt.ctrs.len() == 1 {
              let ctr = adt.ctrs.keys().next().unwrap();
              // `open Typ var; bod` => `match var { Typ/Ctr: bod }`,
              // binding the scrutinee under the original var name.
              *self = Term::Mat {
                arg: Box::new(Term::Var { nam: var.clone() }),
                bnd: Some(std::mem::take(var)),
                with_bnd: vec![],
                with_arg: vec![],
                arms: vec![(Some(ctr.clone()), vec![], std::mem::take(bod))],
              }
            } else {
              return Err(format!("Type '{typ}' of an 'open' has more than one constructor"));
            }
          } else {
            return Err(format!("Type '{typ}' of an 'open' is not defined"));
          }
        }
        Term::Def { def, nxt } => {
          // Local definitions: desugar each rule body and then the continuation.
          for rule in def.rules.iter_mut() {
            rule.body.desugar_open(adts)?;
          }
          nxt.desugar_open(adts)?;
        }
        _ => {
          for child in self.children_mut() {
            child.desugar_open(adts)?;
          }
        }
      }
      Ok(())
    })
  }
}
| rust | Apache-2.0 | d184863f03e796d1d657958a51dd6dd331ade92d | 2026-01-04T15:41:39.511038Z | false |
HigherOrderCO/Bend | https://github.com/HigherOrderCO/Bend/blob/d184863f03e796d1d657958a51dd6dd331ade92d/src/fun/transform/float_combinators.rs | src/fun/transform/float_combinators.rs | use crate::{
fun::{Book, Definition, Name, Pattern, Rule, Source, Term},
maybe_grow, multi_iterator,
};
use std::collections::{BTreeMap, HashSet};
pub const NAME_SEP: &str = "__C";
impl Book {
  /// Extracts combinator terms into new definitions.
  ///
  /// Precondition: Variables must have been sanitized.
  ///
  /// The floating algorithm follows these rules:
  /// For each child of the term:
  /// - Recursively float every grandchild term.
  /// - If the child is a combinator:
  ///   * If the child is not "safe", extract it.
  ///   * If the term is a combinator and it's "safe":
  ///     - If the term is currently larger than `max_size`, extract the child.
  ///   * Otherwise, always extract the child to a new definition.
  /// - If the child is not a combinator, we can't extract it since
  ///   it would generate an invalid term.
  ///
  /// Terms are considered combinators if they have no free vars,
  /// no unmatched unscoped binds/vars and are not references (to
  /// avoid infinite recursion).
  ///
  /// See [`Term::is_safe`] for what is considered safe here.
  ///
  /// See [`Term::size`] for the measurement of size.
  /// It should more or less correspond to the compiled inet size.
  pub fn float_combinators(&mut self, max_size: usize) {
    // Work on an immutable snapshot of the book for the safety/recursion queries.
    let book = self.clone();
    let mut ctx = FloatCombinatorsCtx::new(&book, max_size);
    for (def_name, def) in self.defs.iter_mut() {
      // Don't float combinators in the main entrypoint.
      // This avoids making programs unexpectedly too lazy,
      // returning just a reference without executing anything.
      if let Some(main) = self.entrypoint.as_ref() {
        if def_name == main {
          continue;
        }
      }
      let source = def.source.clone();
      let check = def.check;
      let body = &mut def.rule_mut().body;
      // Per-definition state: fresh name counter and current definition size.
      ctx.reset();
      ctx.def_size = body.size();
      body.float_combinators(&mut ctx, def_name, source, check);
    }
    // Register all extracted combinators as top-level definitions.
    self.defs.extend(ctx.combinators.into_iter().map(|(nam, (_, def))| (nam, def)));
  }
}
/// State threaded through the combinator-floating pass.
struct FloatCombinatorsCtx<'b> {
  /// Extracted definitions, keyed by generated name, paired with their "safe" flag.
  pub combinators: BTreeMap<Name, (bool, Definition)>,
  /// Counter for generating unique combinator names within one definition.
  pub name_gen: usize,
  /// Names currently being visited by `is_safe`, to detect (mutual) recursion.
  pub seen: HashSet<Name>,
  /// Snapshot of the book, used to look up definitions and constructors.
  pub book: &'b Book,
  /// Size threshold above which terms get their children extracted.
  pub max_size: usize,
  /// Running size of the definition currently being processed.
  pub def_size: usize,
}
impl<'b> FloatCombinatorsCtx<'b> {
fn new(book: &'b Book, max_size: usize) -> Self {
Self {
combinators: Default::default(),
name_gen: 0,
seen: Default::default(),
book,
max_size,
def_size: 0,
}
}
fn reset(&mut self) {
self.def_size = 0;
self.name_gen = 0;
self.seen = Default::default();
}
}
impl Term {
  /// Recursively extracts large or unsafe combinator subterms of `self`
  /// into new definitions collected in `ctx.combinators`.
  fn float_combinators(
    &mut self,
    ctx: &mut FloatCombinatorsCtx,
    def_name: &Name,
    source: Source,
    check: bool,
  ) {
    maybe_grow(|| {
      // Recursively float the grandchildren terms.
      for child in self.float_children_mut() {
        child.float_combinators(ctx, def_name, source.clone(), check);
      }
      let mut size = self.size();
      let is_combinator = self.is_combinator();
      // Float unsafe children and children that make the term too big.
      for child in self.float_children_mut() {
        let child_is_safe = child.is_safe(ctx);
        let child_size = child.size();
        // The size budget is this term's own size if it is itself extractable,
        // otherwise the whole definition's running size.
        let extract_for_size = if is_combinator { size > ctx.max_size } else { ctx.def_size > ctx.max_size };
        if child.is_combinator() && child_size > 0 && (!child_is_safe || extract_for_size) {
          // Extracting the child shrinks both this term and the definition.
          ctx.def_size -= child_size;
          size -= child_size;
          child.float(ctx, def_name, source.clone(), check, child_is_safe);
        }
      }
    })
  }
  /// Inserts a new definition for the given term in the combinators map.
  /// `self` is replaced in place by a reference to the new definition.
  fn float(
    &mut self,
    ctx: &mut FloatCombinatorsCtx,
    def_name: &Name,
    source: Source,
    check: bool,
    is_safe: bool,
  ) {
    // Generated names look like `{def_name}__C{n}`.
    let comb_name = Name::new(format!("{}{}{}", def_name, NAME_SEP, ctx.name_gen));
    ctx.name_gen += 1;
    let comb_ref = Term::Ref { nam: comb_name.clone() };
    let extracted_term = std::mem::replace(self, comb_ref);
    let rules = vec![Rule { body: extracted_term, pats: Vec::new() }];
    let rule = Definition::new_gen(comb_name.clone(), rules, source, check);
    ctx.combinators.insert(comb_name, (is_safe, rule));
  }
}
impl Term {
  /// A term can be considered safe if it is:
  /// - A Number or an Eraser.
  /// - A Tuple or Superposition where all elements are safe.
  /// - An application or numeric operation where all arguments are safe.
  /// - A safe Lambda, e.g. a nullary constructor or a lambda with safe body.
  /// - A Reference with a safe body.
  ///
  /// A reference to a recursive definition (or mutually recursive) is not safe.
  fn is_safe(&self, ctx: &mut FloatCombinatorsCtx) -> bool {
    maybe_grow(|| match self {
      Term::Num { .. }
      | Term::Era
      | Term::Err
      | Term::Fan { .. }
      | Term::App { .. }
      | Term::Oper { .. }
      | Term::Swt { .. } => self.children().all(|c| c.is_safe(ctx)),
      Term::Lam { .. } => self.is_safe_lambda(ctx),
      Term::Ref { nam } => {
        // Constructors are safe
        if ctx.book.ctrs.contains_key(nam) {
          return true;
        }
        // If recursive, not safe
        if ctx.seen.contains(nam) {
          return false;
        }
        // Mark as in-progress so a cycle back to `nam` is detected above.
        ctx.seen.insert(nam.clone());
        // Check if the function it's referring to is safe
        let safe = if let Some(def) = ctx.book.defs.get(nam) {
          def.rule().body.is_safe(ctx)
        } else if let Some((safe, _)) = ctx.combinators.get(nam) {
          // Already-extracted combinators carry a precomputed safety flag.
          *safe
        } else {
          false
        };
        ctx.seen.remove(nam);
        safe
      }
      // TODO: Variables can be safe depending on how they're used
      // For example, in a well-typed numop they're safe.
      _ => false,
    })
  }
  /// Checks if the term is a lambda sequence with a safe body.
  /// If the body is a variable bound in the lambdas, it's a nullary constructor.
  /// If the body is a reference, it's in inactive position, so always safe.
  fn is_safe_lambda(&self, ctx: &mut FloatCombinatorsCtx) -> bool {
    let mut current = self;
    let mut scope = Vec::new();
    // Walk down the chain of lambdas, collecting their binds.
    while let Term::Lam { pat, bod, .. } = current {
      scope.extend(pat.binds().filter_map(|x| x.as_ref()));
      current = bod;
    }
    match current {
      Term::Var { nam } if scope.contains(&nam) => true,
      Term::Ref { .. } => true,
      term => term.is_safe(ctx),
    }
  }
  /// Whether the sets of declared and used unscoped vars differ in this term.
  pub fn has_unscoped_diff(&self) -> bool {
    let (declared, used) = self.unscoped_vars();
    declared.difference(&used).count() != 0 || used.difference(&declared).count() != 0
  }
  /// A term is an extractable combinator when it has no free vars, its
  /// unscoped binds/uses are self-contained, and it's not a bare reference.
  fn is_combinator(&self) -> bool {
    self.free_vars().is_empty() && !self.has_unscoped_diff() && !matches!(self, Term::Ref { .. })
  }
  /// Size contribution of this node alone (children not included).
  fn base_size(&self) -> usize {
    match self {
      Term::Let { pat, .. } => pat.size(),
      Term::Fan { els, .. } => els.len() - 1,
      Term::Mat { arms, .. } => arms.len(),
      Term::Swt { arms, .. } => 2 * (arms.len() - 1),
      Term::Lam { .. } => 1,
      Term::App { .. } => 1,
      Term::Oper { .. } => 1,
      Term::Var { .. } => 0,
      Term::Link { .. } => 0,
      Term::Use { .. } => 0,
      Term::Num { .. } => 0,
      Term::Ref { .. } => 0,
      Term::Era => 0,
      // Sugared forms are expected to have been desugared before this pass.
      Term::Bend { .. }
      | Term::Fold { .. }
      | Term::Nat { .. }
      | Term::Str { .. }
      | Term::List { .. }
      | Term::With { .. }
      | Term::Ask { .. }
      | Term::Open { .. }
      | Term::Def { .. }
      | Term::Err => unreachable!(),
    }
  }
  /// Total size of the term: its own node cost plus all children's.
  fn size(&self) -> usize {
    maybe_grow(|| {
      let children_size: usize = self.children().map(|c| c.size()).sum();
      self.base_size() + children_size
    })
  }
  /// Iterates over the children that are candidates for floating.
  pub fn float_children_mut(&mut self) -> impl Iterator<Item = &mut Term> {
    multi_iterator!(FloatIter { Zero, Two, Vec, Mat, App, Swt });
    match self {
      Term::App { .. } => {
        // Walk the application spine, yielding each argument and finally
        // the (non-App) head of the spine.
        let mut next = Some(self);
        FloatIter::App(std::iter::from_fn(move || {
          let cur = next.take();
          if let Some(Term::App { fun, arg, .. }) = cur {
            next = Some(&mut *fun);
            Some(&mut **arg)
          } else {
            cur
          }
        }))
      }
      Term::Mat { arg, bnd: _, with_bnd: _, with_arg, arms } => FloatIter::Mat(
        [arg.as_mut()].into_iter().chain(with_arg.iter_mut()).chain(arms.iter_mut().map(|r| &mut r.2)),
      ),
      Term::Swt { arg, bnd: _, with_bnd: _, with_arg, pred: _, arms } => {
        FloatIter::Swt([arg.as_mut()].into_iter().chain(with_arg.iter_mut()).chain(arms.iter_mut()))
      }
      Term::Fan { els, .. } | Term::List { els } => FloatIter::Vec(els),
      Term::Let { val: fst, nxt: snd, .. }
      | Term::Use { val: fst, nxt: snd, .. }
      | Term::Oper { fst, snd, .. } => FloatIter::Two([fst.as_mut(), snd.as_mut()]),
      // Skip through lambda chains; only the innermost body's children float.
      Term::Lam { bod, .. } => bod.float_children_mut(),
      Term::Var { .. }
      | Term::Link { .. }
      | Term::Num { .. }
      | Term::Nat { .. }
      | Term::Str { .. }
      | Term::Ref { .. }
      | Term::Era
      | Term::Err => FloatIter::Zero([]),
      Term::With { .. }
      | Term::Ask { .. }
      | Term::Bend { .. }
      | Term::Fold { .. }
      | Term::Open { .. }
      | Term::Def { .. } => {
        unreachable!()
      }
    }
  }
}
impl Pattern {
  /// Size contribution of a pattern (number of nodes a fan compiles to).
  fn size(&self) -> usize {
    match self {
      Pattern::Var(_) | Pattern::Chn(_) => 0,
      Pattern::Fan(_, _, pats) => {
        // A fan of n elements contributes n-1 nodes, plus its children.
        let children: usize = pats.iter().map(Pattern::size).sum();
        pats.len() - 1 + children
      }
      Pattern::Num(_) | Pattern::Lst(_) | Pattern::Str(_) | Pattern::Ctr(_, _) => unreachable!(),
    }
  }
}
| rust | Apache-2.0 | d184863f03e796d1d657958a51dd6dd331ade92d | 2026-01-04T15:41:39.511038Z | false |
HigherOrderCO/Bend | https://github.com/HigherOrderCO/Bend/blob/d184863f03e796d1d657958a51dd6dd331ade92d/src/fun/transform/desugar_use.rs | src/fun/transform/desugar_use.rs | use crate::{
fun::{Book, Term},
maybe_grow,
};
impl Book {
  /// Inline copies of the declared bind in the `use` expression.
  ///
  /// Example:
  /// ```bend
  /// use id = λx x
  /// (id id id)
  ///
  /// // Transforms to:
  /// (λx x λx x λx x)
  /// ```
  pub fn desugar_use(&mut self) {
    self.defs.values_mut().flat_map(|def| def.rules.iter_mut()).for_each(|rule| rule.body.desugar_use());
  }
  /// Inline copies of the declared bind in `Fold`, `Mat` and `Open` inside `use` expressions.
  pub fn desugar_ctr_use(&mut self) {
    self.defs.values_mut().flat_map(|def| def.rules.iter_mut()).for_each(|rule| rule.body.desugar_ctr_use());
  }
}
impl Term {
  /// Substitutes each `use` bind by its value in the continuation and
  /// removes the `use` node (post-order: children are desugared first).
  pub fn desugar_use(&mut self) {
    maybe_grow(|| {
      for children in self.children_mut() {
        children.desugar_use();
      }
    });
    // Note: the substitution happens outside the growth guard, after all
    // children have already been desugared.
    if let Term::Use { nam: Some(nam), val, nxt } = self {
      nxt.subst(nam, val);
      *self = std::mem::take(nxt);
    }
  }
  /// Like `desugar_use`, but only acts when the `use` value is a plain
  /// variable, substituting via `subst_ctrs` and keeping the `use` node.
  pub fn desugar_ctr_use(&mut self) {
    maybe_grow(|| {
      for children in self.children_mut() {
        children.desugar_ctr_use();
      }
    });
    if let Term::Use { nam: Some(nam), val, nxt } = self {
      if let Term::Var { nam: val } = val.as_ref() {
        nxt.subst_ctrs(nam, val);
      }
    }
  }
}
| rust | Apache-2.0 | d184863f03e796d1d657958a51dd6dd331ade92d | 2026-01-04T15:41:39.511038Z | false |
HigherOrderCO/Bend | https://github.com/HigherOrderCO/Bend/blob/d184863f03e796d1d657958a51dd6dd331ade92d/src/fun/transform/desugar_with_blocks.rs | src/fun/transform/desugar_with_blocks.rs | use crate::{
diagnostics::Diagnostics,
fun::{Ctx, Name, Pattern, Term},
maybe_grow,
};
use std::collections::HashSet;
impl Ctx<'_> {
  /// Converts `ask` terms inside `with` blocks into calls to a monadic bind operation.
  pub fn desugar_with_blocks(&mut self) -> Result<(), Diagnostics> {
    // Snapshot the defined names so we can look for `{Type}/bind` functions.
    let def_names: HashSet<_> = self.book.defs.keys().cloned().collect();
    for def in self.book.defs.values_mut() {
      for rule in &mut def.rules {
        let result = rule.body.desugar_with_blocks(None, &def_names);
        if let Err(e) = result {
          self.info.add_function_error(e, def.name.clone(), def.source.clone());
        }
      }
    }
    self.info.fatal(())
  }
}
impl Term {
  /// Desugars `with`/`ask` into calls to the block type's `bind` function.
  ///
  /// `cur_block` is the type of the innermost enclosing `with` block, or
  /// `None` when outside any block (in which case `ask` is an error).
  pub fn desugar_with_blocks(
    &mut self,
    cur_block: Option<&Name>,
    def_names: &HashSet<Name>,
  ) -> Result<(), String> {
    maybe_grow(|| {
      if let Term::With { typ, bod } = self {
        bod.desugar_with_blocks(Some(typ), def_names)?;
        // Expose the block type's `{typ}/wrap` function under the name `wrap`.
        let wrap_ref = Term::r#ref(&format!("{typ}/wrap"));
        *self = Term::Use { nam: Some(Name::new("wrap")), val: Box::new(wrap_ref), nxt: std::mem::take(bod) };
      }
      if let Term::Ask { pat, val, nxt } = self {
        if let Some(typ) = cur_block {
          let bind_nam = Name::new(format!("{typ}/bind"));
          if def_names.contains(&bind_nam) {
            // `pat <- val; nxt` becomes `({typ}/bind val (deferred λpat nxt))`.
            let nxt = Term::lam(*pat.clone(), std::mem::take(nxt));
            let nxt = nxt.defer();
            *self = Term::call(Term::Ref { nam: bind_nam }, [*val.clone(), nxt]);
          } else {
            return Err(format!("Could not find definition {bind_nam} for type {typ}."));
          }
        } else {
          return Err(format!("Monadic bind operation '{pat} <- ...' used outside of a `do` block."));
        }
      }
      for children in self.children_mut() {
        children.desugar_with_blocks(cur_block, def_names)?;
      }
      Ok(())
    })
  }
  /// Converts a term with free vars `(f x1 .. xn)` into a deferred
  /// call that passes those vars to the term.
  ///
  /// Ex: `(f x1 .. xn)` becomes `@x (x @x1 .. @xn (f x1 .. x2) x1 .. x2)`.
  ///
  /// The user must call this lazy thunk by calling the builtin
  /// `undefer` function, or by applying `@x x` to the term.
  fn defer(self) -> Term {
    let free_vars = self.free_vars().into_keys().collect::<Vec<_>>();
    // Abstract over the free vars, then re-apply them around the thunk.
    let term = Term::rfold_lams(self, free_vars.iter().cloned().map(Some));
    let term = Term::call(Term::Var { nam: Name::new("%x") }, [term]);
    let term = Term::call(term, free_vars.iter().cloned().map(|nam| Term::Var { nam }));
    Term::lam(Pattern::Var(Some(Name::new("%x"))), term)
  }
}
| rust | Apache-2.0 | d184863f03e796d1d657958a51dd6dd331ade92d | 2026-01-04T15:41:39.511038Z | false |
HigherOrderCO/Bend | https://github.com/HigherOrderCO/Bend/blob/d184863f03e796d1d657958a51dd6dd331ade92d/src/fun/transform/linearize_matches.rs | src/fun/transform/linearize_matches.rs | use crate::{
fun::{Book, Name, Pattern, Term},
maybe_grow,
};
use std::collections::{BTreeSet, HashMap, HashSet, VecDeque};
/* Linearize preceding binds */
impl Book {
  /// Pushes binds that precede a match/switch into its arms, stopping at
  /// the first bind used in either the scrutinee or a fixed bind.
  ///
  /// Example:
  /// ```hvm
  /// @a @b @c let d = (b c); switch a {
  ///   0: (A b c d)
  ///   _: (B a-1 b c d)
  /// }
  /// // Since `b`, `c` and `d` would be eta-reducible if linearized,
  /// // they get pushed inside the match.
  /// @a switch a {
  ///   0: @b @c let d = (b c); (A b c d)
  ///   _: @b @c let d = (b c); (B a-1 b c d)
  /// }
  /// ```
  pub fn linearize_match_binds(&mut self) {
    self
      .defs
      .values_mut()
      .flat_map(|def| def.rules.iter_mut())
      .for_each(|rule| rule.body.linearize_match_binds());
  }
}
impl Term {
  /// Linearize any binds preceding a match/switch term, up to the
  /// first bind used in either the scrutinee or the bind.
  pub fn linearize_match_binds(&mut self) {
    self.linearize_match_binds_go(vec![]);
  }
  /// Recursive worker. `bind_terms` accumulates the detached binding
  /// terms (lambdas/lets/uses) seen on the way down to a match.
  fn linearize_match_binds_go(&mut self, mut bind_terms: Vec<Term>) {
    maybe_grow(|| match self {
      // Binding terms
      // Extract them in case they are preceding a match.
      Term::Lam { pat, bod, .. } if !pat.has_unscoped() => {
        // Detach the lambda node and push it onto the pending-binds stack.
        let bod = std::mem::take(bod.as_mut());
        let term = std::mem::replace(self, bod);
        bind_terms.push(term);
        self.linearize_match_binds_go(bind_terms);
      }
      Term::Let { val, nxt, .. } | Term::Use { val, nxt, .. } => {
        val.linearize_match_binds_go(vec![]);
        if val.has_unscoped() {
          // Terms with unscoped can't be linearized since their names must be unique.
          nxt.linearize_match_binds_go(vec![]);
          self.wrap_with_bind_terms(bind_terms);
        } else {
          // Detach the let/use node and push it onto the pending-binds stack.
          let nxt = std::mem::take(nxt.as_mut());
          let term = std::mem::replace(self, nxt);
          bind_terms.push(term);
          self.linearize_match_binds_go(bind_terms);
        }
      }
      // Matching terms
      Term::Mat { .. } | Term::Swt { .. } => {
        self.linearize_binds_single_match(bind_terms);
      }
      // Others
      // Not a match preceded by binds, so put the extracted terms back.
      term => {
        for child in term.children_mut() {
          child.linearize_match_binds_go(vec![]);
        }
        // Recover the extracted terms
        term.wrap_with_bind_terms(bind_terms);
      }
    })
  }
  /// Decides, for one match/switch, which of the accumulated `bind_terms`
  /// can be pushed into the arms and which must stay before the match.
  fn linearize_binds_single_match(&mut self, mut bind_terms: Vec<Term>) {
    let (used_vars, with_bnd, with_arg, arms) = match self {
      Term::Mat { arg, bnd: _, with_bnd, with_arg, arms } => {
        let vars = arg.free_vars().into_keys().collect::<HashSet<_>>();
        let arms = arms.iter_mut().map(|arm| &mut arm.2).collect::<Vec<_>>();
        (vars, with_bnd, with_arg, arms)
      }
      Term::Swt { arg, bnd: _, with_bnd, with_arg, pred: _, arms } => {
        let vars = arg.free_vars().into_keys().collect::<HashSet<_>>();
        let arms = arms.iter_mut().collect();
        (vars, with_bnd, with_arg, arms)
      }
      _ => unreachable!(),
    };
    // Add 'with' args as lets that can be moved
    // (the `Term::Err` body is a placeholder filled by `wrap_with_bind_terms`).
    for (bnd, arg) in with_bnd.iter().zip(with_arg.iter()) {
      let term = Term::Let {
        pat: Box::new(Pattern::Var(bnd.clone())),
        val: Box::new(arg.clone()),
        nxt: Box::new(Term::Err),
      };
      bind_terms.push(term)
    }
    let (mut non_linearized, linearized) = fixed_and_linearized_terms(used_vars, bind_terms);
    // Add the linearized terms to the arms and recurse
    for arm in arms {
      arm.wrap_with_bind_terms(linearized.clone());
      arm.linearize_match_binds_go(vec![]);
    }
    // Remove the linearized binds from the with clause
    let linearized_binds = linearized
      .iter()
      .flat_map(|t| match t {
        Term::Lam { pat, .. } | Term::Let { pat, .. } => pat.binds().flatten().cloned().collect::<Vec<_>>(),
        Term::Use { nam, .. } => {
          if let Some(nam) = nam {
            vec![nam.clone()]
          } else {
            vec![]
          }
        }
        _ => unreachable!(),
      })
      .collect::<BTreeSet<_>>();
    update_with_clause(with_bnd, with_arg, &linearized_binds);
    // Remove the non-linearized 'with' binds from the terms that need
    // to be added back (since we didn't move them).
    non_linearized.retain(|term| {
      if let Term::Let { pat, .. } = term {
        if let Pattern::Var(bnd) = pat.as_ref() {
          if with_bnd.contains(bnd) {
            return false;
          }
        }
      }
      true
    });
    // Add the non-linearized terms back to before the match
    self.wrap_with_bind_terms(non_linearized);
  }
  /// Given a term `self` and a sequence of `bind_terms`, wrap `self` with those binds.
  ///
  /// Example:
  /// ```hvm
  /// self = X
  /// match_terms = [λb *, let c = (a b); *, λd *]
  /// ```
  ///
  /// becomes
  ///
  /// ```hvm
  /// self = λb let c = (a b); λd X
  /// ```
  fn wrap_with_bind_terms(
    &mut self,
    bind_terms: impl IntoIterator<IntoIter = impl DoubleEndedIterator<Item = Term>>,
  ) {
    // Fold from the innermost bind outwards, plugging `acc` into each body slot.
    *self = bind_terms.into_iter().rfold(std::mem::take(self), |acc, mut term| {
      match &mut term {
        Term::Lam { bod: nxt, .. } | Term::Let { nxt, .. } | Term::Use { nxt, .. } => {
          *nxt.as_mut() = acc;
        }
        _ => unreachable!(),
      }
      term
    });
  }
}
/// Separates the bind terms surrounding the match in two partitions,
/// one to be linearized, one to stay where they were.
///
/// We try to move down any binds that would become eta-reducible with linearization
/// and that will not introduce extra duplications.
///
/// This requires the bind to follow some rules:
/// * Can only depend on binds that will be moved
/// * Can't come before any bind that will not be moved.
/// * Must be a scoped bind.
///
/// Examples:
///
/// ```hvm
/// @a @b @c switch b { 0: c; _: (c b-1) }
/// // Will linearize `c` but not `a` since it comes before a lambda that can't be moved
/// // Becomes
/// @a @b switch b { 0: @c c; _: @c (c b-1) }
/// ```
///
/// ```hvm
/// @a let b = a; @c let e = b; let d = c; switch a { 0: X; _: Y }
/// // Will not linearize `let b = a` since it would duplicate `a`
/// // Will linearize `c` since it's a lambda that is not depended on by the argument
/// // Will not linearize `let e = b` since it would duplicate `b`
/// // Will linearize `let d = c` since it depends only on variables that will be moved
/// // and is not depended on by the argument
/// ```
fn fixed_and_linearized_terms(used_in_arg: HashSet<Name>, bind_terms: Vec<Term>) -> (Vec<Term>, Vec<Term>) {
  let fixed_binds = binds_fixed_by_dependency(used_in_arg, &bind_terms);
  let mut fixed = VecDeque::new();
  let mut linearized = VecDeque::new();
  // Once a fixed lambda is hit (walking back to front), nothing earlier may move.
  let mut stop = false;
  for term in bind_terms.into_iter().rev() {
    // A term can move only if none of its binds were marked as fixed.
    let to_linearize = match &term {
      Term::Use { nam, .. } => nam.as_ref().map_or(true, |nam| !fixed_binds.contains(nam)),
      Term::Let { pat, .. } => pat.binds().flatten().all(|nam| !fixed_binds.contains(nam)),
      Term::Lam { pat, .. } => pat.binds().flatten().all(|nam| !fixed_binds.contains(nam)),
      _ => unreachable!(),
    };
    let to_linearize = to_linearize && !stop;
    if to_linearize {
      linearized.push_front(term);
    } else {
      if matches!(term, Term::Lam { .. }) {
        stop = true;
      }
      fixed.push_front(term);
    }
  }
  (fixed.into_iter().collect(), linearized.into_iter().collect())
}
/// Get which binds are fixed because they are in the dependency graph
/// of a free var or of a var used in the match arg.
fn binds_fixed_by_dependency(used_in_arg: HashSet<Name>, bind_terms: &[Term]) -> HashSet<Name> {
  let mut fixed_binds = used_in_arg;
  // Find the use dependencies of each bind
  let mut binds = vec![];
  let mut dependency_digraph = HashMap::new();
  for term in bind_terms {
    // Gather what are the binds of this term and what vars it is directly using
    let (term_binds, term_uses) = match term {
      Term::Lam { pat, .. } => {
        let binds = pat.binds().flatten().cloned().collect::<Vec<_>>();
        (binds, vec![])
      }
      Term::Let { pat, val, .. } => {
        let binds = pat.binds().flatten().cloned().collect::<Vec<_>>();
        let uses = val.free_vars().into_keys().collect();
        (binds, uses)
      }
      Term::Use { nam, val, .. } => {
        let binds = if let Some(nam) = nam { vec![nam.clone()] } else { vec![] };
        let uses = val.free_vars().into_keys().collect();
        (binds, uses)
      }
      _ => unreachable!(),
    };
    for bind in term_binds {
      // Every bind of the term shares the term's use-set as dependencies.
      dependency_digraph.insert(bind.clone(), term_uses.clone());
      binds.push(bind);
    }
  }
  // Mark binds that depend on free vars as fixed
  for (bind, deps) in dependency_digraph.iter() {
    if deps.iter().any(|dep| !binds.contains(dep)) {
      fixed_binds.insert(bind.clone());
    }
  }
  // Convert to undirected graph
  let mut dependency_graph: HashMap<Name, HashSet<Name>> =
    HashMap::from_iter(binds.iter().map(|k| (k.clone(), HashSet::new())));
  for (bind, deps) in dependency_digraph {
    for dep in deps {
      if !binds.contains(&dep) {
        // Free vars also become graph nodes so connectivity includes them.
        dependency_graph.insert(dep.clone(), HashSet::new());
      }
      dependency_graph.get_mut(&dep).unwrap().insert(bind.clone());
      dependency_graph.get_mut(&bind).unwrap().insert(dep);
    }
  }
  // Find which binds are connected to the vars used in the match arg or to free vars.
  let mut used_component = HashSet::new();
  let mut visited = HashSet::new();
  let mut to_visit = fixed_binds.iter().collect::<Vec<_>>();
  while let Some(node) = to_visit.pop() {
    if visited.contains(node) {
      continue;
    }
    used_component.insert(node.clone());
    visited.insert(node);
    // Add these dependencies to be checked (if it's not a free var in the match arg)
    if let Some(deps) = dependency_graph.get(node) {
      to_visit.extend(deps);
    }
  }
  // Mark lambdas that come before a fixed lambda as also fixed
  let mut fixed_start = false;
  let mut fixed_lams = HashSet::new();
  for term in bind_terms.iter().rev() {
    if let Term::Lam { pat, .. } = term {
      if pat.binds().flatten().any(|p| used_component.contains(p)) {
        fixed_start = true;
      }
      if fixed_start {
        for bind in pat.binds().flatten() {
          fixed_lams.insert(bind.clone());
        }
      }
    }
  }
  let mut fixed_binds = used_component;
  // Mark binds that depend on fixed lambdas as also fixed.
  let mut visited = HashSet::new();
  let mut to_visit = fixed_lams.iter().collect::<Vec<_>>();
  while let Some(node) = to_visit.pop() {
    if visited.contains(node) {
      continue;
    }
    fixed_binds.insert(node.clone());
    visited.insert(node);
    // Add these dependencies to be checked (if it's not a free var in the match arg)
    if let Some(deps) = dependency_graph.get(node) {
      to_visit.extend(deps);
    }
  }
  fixed_binds
}
/// Removes from the `with` clause every bind (and its matching argument)
/// that is in `vars_to_lift`.
///
/// `with_bnd` and `with_arg` are parallel vectors; surviving pairs keep
/// their relative order.
fn update_with_clause(
  with_bnd: &mut Vec<Option<Name>>,
  with_arg: &mut Vec<Term>,
  vars_to_lift: &BTreeSet<Name>,
) {
  // Rebuild both vectors in a single pass instead of doing repeated
  // `Vec::remove` calls with index bookkeeping (which is O(n^2)).
  let old_bnd = std::mem::take(with_bnd);
  let old_arg = std::mem::take(with_arg);
  for (bnd, arg) in old_bnd.into_iter().zip(old_arg) {
    let lifted = matches!(&bnd, Some(b) if vars_to_lift.contains(b));
    if !lifted {
      with_bnd.push(bnd);
      with_arg.push(arg);
    }
  }
}
/* Linearize all used vars */
impl Book {
  /// Lifts every variable used inside match/switch arms, for all definitions.
  pub fn linearize_matches(&mut self) {
    self.defs.values_mut().flat_map(|def| def.rules.iter_mut()).for_each(|rule| rule.body.linearize_matches());
  }
}
impl Term {
  /// Recursively lifts free vars out of every match/switch inside this term.
  fn linearize_matches(&mut self) {
    maybe_grow(|| {
      // Post-order: handle the children first, then this node itself.
      for child in self.children_mut() {
        child.linearize_matches();
      }
      match self {
        Term::Mat { .. } | Term::Swt { .. } => {
          lift_match_vars(self);
        }
        _ => {}
      }
    })
  }
}
/// Converts free vars inside the match arms into lambdas with
/// applications around the match to pass them the external value.
///
/// Makes the rules extractable and linear (no need for dups even
/// when a variable is used in multiple rules).
///
/// Obs: This does not modify unscoped variables.
pub fn lift_match_vars(match_term: &mut Term) -> &mut Term {
  // Collect match arms with binds
  let (with_bnd, with_arg, arms) = match match_term {
    Term::Mat { arg: _, bnd: _, with_bnd, with_arg, arms: rules } => {
      let args =
        rules.iter().map(|(_, binds, body)| (binds.iter().flatten().cloned().collect(), body)).collect();
      (with_bnd.clone(), with_arg.clone(), args)
    }
    Term::Swt { arg: _, bnd: _, with_bnd, with_arg, pred, arms } => {
      // The last switch arm is the successor case, which binds the predecessor.
      let (succ, nums) = arms.split_last_mut().unwrap();
      let mut arms = nums.iter().map(|body| (vec![], body)).collect::<Vec<_>>();
      arms.push((vec![pred.clone().unwrap()], succ));
      (with_bnd.clone(), with_arg.clone(), arms)
    }
    _ => unreachable!(),
  };
  // Collect all free vars in the match arms
  let mut free_vars = Vec::<Vec<_>>::new();
  for (binds, body) in arms {
    let mut arm_free_vars = body.free_vars();
    // The arm's own binds are not free in the arm.
    for bind in binds {
      arm_free_vars.shift_remove(&bind);
    }
    free_vars.push(arm_free_vars.into_keys().collect());
  }
  // Collect the vars to lift
  // We need consistent iteration order.
  let vars_to_lift: BTreeSet<Name> = free_vars.into_iter().flatten().collect();
  // Add lambdas to the arms
  match match_term {
    Term::Mat { arg: _, bnd: _, with_bnd, with_arg, arms } => {
      update_with_clause(with_bnd, with_arg, &vars_to_lift);
      for arm in arms {
        let old_body = std::mem::take(&mut arm.2);
        arm.2 = Term::rfold_lams(old_body, vars_to_lift.iter().cloned().map(Some));
      }
    }
    Term::Swt { arg: _, bnd: _, with_bnd, with_arg, pred: _, arms } => {
      update_with_clause(with_bnd, with_arg, &vars_to_lift);
      for arm in arms {
        let old_body = std::mem::take(arm);
        *arm = Term::rfold_lams(old_body, vars_to_lift.iter().cloned().map(Some));
      }
    }
    _ => unreachable!(),
  }
  // Add apps to the match
  // Lifted 'with' binds are applied with their original argument;
  // everything else is passed through as a plain variable.
  let args = vars_to_lift
    .into_iter()
    .map(|nam| {
      if let Some(idx) = with_bnd.iter().position(|x| x == &nam) {
        with_arg[idx].clone()
      } else {
        Term::Var { nam }
      }
    })
    .collect::<Vec<_>>();
  let term = Term::call(std::mem::take(match_term), args);
  *match_term = term;
  get_match_reference(match_term)
}
/// Get a reference to the match again.
/// Returns a reference (not an owned value) so the new surrounding
/// applications are kept while the match can still be modified further.
fn get_match_reference(mut match_term: &mut Term) -> &mut Term {
  loop {
    match match_term {
      // Found the match/switch node underneath the added applications.
      Term::Mat { .. } | Term::Swt { .. } => return match_term,
      // Step one application towards the function position.
      Term::App { fun, .. } => match_term = fun.as_mut(),
      _ => unreachable!(),
    }
  }
}
/* Linearize `with` vars */
impl Book {
  /// Linearizes the variables bound by the `with` clauses of all match terms.
  pub fn linearize_match_with(&mut self) {
    self
      .defs
      .values_mut()
      .flat_map(|def| def.rules.iter_mut())
      .for_each(|rule| rule.body.linearize_match_with());
  }
}
impl Term {
  /// Moves the `with` binds of a match/switch into lambdas on every arm and
  /// applies the corresponding `with` arguments around the match.
  fn linearize_match_with(&mut self) {
    maybe_grow(|| {
      for child in self.children_mut() {
        child.linearize_match_with();
      }
    });
    match self {
      Term::Mat { arg: _, bnd: _, with_bnd, with_arg, arms } => {
        // Each arm gets one lambda per `with` bind.
        for rule in arms.iter_mut() {
          let body = std::mem::take(&mut rule.2);
          rule.2 = Term::rfold_lams(body, with_bnd.clone().into_iter());
        }
        *with_bnd = Vec::new();
        // Apply the `with` arguments around the match itself.
        let call_args = std::mem::take(with_arg);
        *self = Term::call(std::mem::take(self), call_args);
      }
      Term::Swt { arg: _, bnd: _, with_bnd, with_arg, pred: _, arms } => {
        for arm in arms.iter_mut() {
          let body = std::mem::take(arm);
          *arm = Term::rfold_lams(body, with_bnd.clone().into_iter());
        }
        *with_bnd = Vec::new();
        let call_args = std::mem::take(with_arg);
        *self = Term::call(std::mem::take(self), call_args);
      }
      _ => {}
    }
  }
}
| rust | Apache-2.0 | d184863f03e796d1d657958a51dd6dd331ade92d | 2026-01-04T15:41:39.511038Z | false |
HigherOrderCO/Bend | https://github.com/HigherOrderCO/Bend/blob/d184863f03e796d1d657958a51dd6dd331ade92d/src/fun/transform/desugar_bend.rs | src/fun/transform/desugar_bend.rs | use crate::{
diagnostics::Diagnostics,
fun::{Ctx, Definition, Name, Rule, Source, Term},
maybe_grow,
};
use indexmap::IndexMap;
/// Keyword that stands for the recursive call inside a `bend` body.
pub const RECURSIVE_KW: &str = "fork";
/// Separator used when generating names for functions extracted from `bend`s.
const NEW_FN_SEP: &str = "__bend";
impl Ctx<'_> {
  /// Replaces every `bend` term with a generated recursive function plus a call to it.
  pub fn desugar_bend(&mut self) -> Result<(), Diagnostics> {
    let mut new_defs = IndexMap::new();
    for def in self.book.defs.values_mut() {
      // Counter for naming the functions extracted from this definition.
      let mut fresh = 0;
      'rules: for rule in def.rules.iter_mut() {
        let result =
          rule.body.desugar_bend(&def.name, &mut fresh, &mut new_defs, def.source.clone(), def.check);
        if let Err(err) = result {
          self.info.add_function_error(err, def.name.clone(), def.source.clone());
          // Give up on the rest of this definition's rules after an error.
          break 'rules;
        }
      }
    }
    self.book.defs.extend(new_defs);
    self.info.fatal(())
  }
}
impl Term {
  /// Converts one `bend` term into a fresh recursive definition plus a call to it.
  ///
  /// `fresh` numbers the generated definitions within the current function;
  /// new definitions are collected into `new_defs`.
  fn desugar_bend(
    &mut self,
    def_name: &Name,
    fresh: &mut usize,
    new_defs: &mut IndexMap<Name, Definition>,
    source: Source,
    check: bool,
  ) -> Result<(), String> {
    maybe_grow(|| {
      // Recursively encode bends in the children
      for child in self.children_mut() {
        child.desugar_bend(def_name, fresh, new_defs, source.clone(), check)?;
      }
      // Convert a bend into a new recursive function and call it.
      if let Term::Bend { .. } = self {
        // Can't have unmatched unscoped because this'll be extracted
        if self.has_unscoped_diff() {
          return Err("Can't have non self-contained unscoped variables in a 'bend'".into());
        }
        let Term::Bend { bnd, arg, cond, step, base } = self else { unreachable!() };
        // Generated names look like `{def_name}__bend{fresh}`.
        let new_nam = Name::new(format!("{}{}{}", def_name, NEW_FN_SEP, fresh));
        *fresh += 1;
        // Gather the free variables
        // They will be implicitly captured by the new function
        let mut free_vars = step.free_vars();
        free_vars.shift_remove(&Name::new(RECURSIVE_KW));
        free_vars.extend(base.free_vars());
        free_vars.extend(cond.free_vars());
        for bnd in bnd.iter().flatten() {
          free_vars.shift_remove(bnd);
        }
        let free_vars = free_vars.into_keys().collect::<Vec<_>>();
        // Add a substitution of `fork`, a use term with a partially applied recursive call
        let step = Term::Use {
          nam: Some(Name::new(RECURSIVE_KW)),
          val: Box::new(Term::call(
            Term::Ref { nam: new_nam.clone() },
            free_vars.iter().cloned().map(|nam| Term::Var { nam }),
          )),
          nxt: Box::new(std::mem::take(step.as_mut())),
        };
        // Create the function body for the bend.
        // The condition becomes a switch: arm 0 is the base case, arm _ the step.
        let body = Term::Swt {
          arg: Box::new(std::mem::take(cond)),
          bnd: Some(Name::new("_")),
          with_bnd: vec![],
          with_arg: vec![],
          pred: Some(Name::new("_-1")),
          arms: vec![std::mem::take(base.as_mut()), step],
        };
        // Bind the explicit bend state vars, then the captured free vars.
        let body = Term::rfold_lams(body, std::mem::take(bnd).into_iter());
        let body = Term::rfold_lams(body, free_vars.iter().cloned().map(Some));
        // Make a definition from the new function
        let def = Definition::new_gen(new_nam.clone(), vec![Rule { pats: vec![], body }], source, check);
        new_defs.insert(new_nam.clone(), def);
        // Call the new function in the original term.
        let call =
          Term::call(Term::Ref { nam: new_nam }, free_vars.iter().map(|v| Term::Var { nam: v.clone() }));
        *self = Term::call(call, arg.drain(..));
      }
      Ok(())
    })
  }
}
| rust | Apache-2.0 | d184863f03e796d1d657958a51dd6dd331ade92d | 2026-01-04T15:41:39.511038Z | false |
HigherOrderCO/Bend | https://github.com/HigherOrderCO/Bend/blob/d184863f03e796d1d657958a51dd6dd331ade92d/src/fun/transform/fix_match_terms.rs | src/fun/transform/fix_match_terms.rs | use crate::{
diagnostics::{Diagnostics, WarningType, ERR_INDENT_SIZE},
fun::{Adts, Constructors, CtrField, Ctx, MatchRule, Name, Num, Term},
maybe_grow,
};
use std::collections::HashMap;
/// Diagnostics produced while normalizing `match`/`fold`/`switch` terms.
enum FixMatchErr {
  /// An arm uses a constructor from a different ADT than the first arm's.
  AdtMismatch { expected: Name, found: Name, ctr: Name },
  /// A constructor of the matched type has no corresponding arm.
  NonExhaustiveMatch { typ: Name, missing: Name },
  /// The first arm is a variable pattern, so the match always takes it.
  IrrefutableMatch { var: Option<Name> },
  /// Arms after a variable-pattern arm can never be reached.
  UnreachableMatchArms { var: Option<Name> },
  /// The same constructor appears in more than one arm.
  RedundantArm { ctr: Name },
}
impl Ctx<'_> {
  /// Convert all match and switch expressions to a normalized form.
  /// * For matches, resolve the constructors and create the name of the field variables.
  /// * For switches, the resolution and name bind is already done during parsing.
  /// * Check for redundant arms and non-exhaustive matches.
  /// * Converts the initial bind to an alias on every arm, rebuilding the eliminated constructor
  /// * Since the bind is not needed anywhere else, it's erased from the term.
  ///
  /// Example:
  /// For the program
  /// ```hvm
  /// data MyList = (Cons h t) | Nil
  /// match x {
  ///   Cons: (A x.h x.t)
  ///   Nil: switch %arg = (Foo y) { 0: B; 1: C; _ %arg-2: D }
  /// }
  /// ```
  /// The following AST transformations will be made:
  /// * The binds `x.h` and `x.t` will be generated and stored in the match term.
  /// * If it was missing one of the match cases, we'd get an error.
  /// * If it included one of the cases more than once (including wildcard patterns), we'd get a warning.
  /// ```hvm
  /// match * = x {
  ///   Cons x.h x.t: use x = (Cons x.h x.t); (A x.h x.t)
  ///   Nil: use x = Nil;
  ///     switch * = (Foo y) {
  ///       0: use %arg = 0; B;
  ///       1: use %arg = 1; C;
  ///       _: use %arg = (+ %arg-2 2); D
  ///     }
  /// }
  /// ```
  pub fn fix_match_terms(&mut self) -> Result<(), Diagnostics> {
    for def in self.book.defs.values_mut() {
      for rule in def.rules.iter_mut() {
        let errs = rule.body.fix_match_terms(&self.book.ctrs, &self.book.adts);
        // ADT mismatches and non-exhaustive matches are hard errors;
        // everything else is reported as a (suppressible) warning.
        for err in errs {
          match err {
            FixMatchErr::AdtMismatch { .. } | FixMatchErr::NonExhaustiveMatch { .. } => {
              self.info.add_function_error(err, def.name.clone(), def.source.clone())
            }
            FixMatchErr::IrrefutableMatch { .. } => self.info.add_function_warning(
              err,
              WarningType::IrrefutableMatch,
              def.name.clone(),
              def.source.clone(),
            ),
            FixMatchErr::UnreachableMatchArms { .. } => self.info.add_function_warning(
              err,
              WarningType::UnreachableMatch,
              def.name.clone(),
              def.source.clone(),
            ),
            FixMatchErr::RedundantArm { .. } => self.info.add_function_warning(
              err,
              WarningType::RedundantMatch,
              def.name.clone(),
              def.source.clone(),
            ),
          }
        }
      }
    }
    self.info.fatal(())
  }
}
impl Term {
  /// Recursively normalizes the match/fold/switch terms in this term
  /// (see `Ctx::fix_match_terms`), returning the diagnostics found.
  fn fix_match_terms(&mut self, ctrs: &Constructors, adts: &Adts) -> Vec<FixMatchErr> {
    maybe_grow(|| {
      let mut errs = Vec::new();
      for child in self.children_mut() {
        let mut e = child.fix_match_terms(ctrs, adts);
        errs.append(&mut e);
      }
      // Normalize the arms first; `fix_match` may replace `self` entirely
      // (irrefutable match), in which case the arms below won't exist anymore.
      if matches!(self, Term::Mat { .. } | Term::Fold { .. }) {
        self.fix_match(&mut errs, ctrs, adts);
      }
      match self {
        Term::Def { def, nxt } => {
          // Local definitions have their own rule bodies to normalize.
          for rule in def.rules.iter_mut() {
            errs.extend(rule.body.fix_match_terms(ctrs, adts));
          }
          errs.extend(nxt.fix_match_terms(ctrs, adts));
        }
        // Add a use term to each arm rebuilding the matched variable
        Term::Mat { arg: _, bnd, with_bnd: _, with_arg: _, arms }
        | Term::Fold { bnd, arg: _, with_bnd: _, with_arg: _, arms } => {
          for (ctr, fields, body) in arms {
            if let Some(ctr) = ctr {
              // `use bnd = (Ctr field0 ... fieldN); body`
              *body = Term::Use {
                nam: bnd.clone(),
                val: Box::new(Term::call(
                  Term::Ref { nam: ctr.clone() },
                  fields.iter().flatten().cloned().map(|nam| Term::Var { nam }),
                )),
                nxt: Box::new(std::mem::take(body)),
              };
            }
          }
        }
        Term::Swt { arg: _, bnd, with_bnd: _, with_arg: _, pred, arms } => {
          // In arm `i` the matched value is the literal `i`; in the last
          // (default) arm it's the predecessor variable plus the arm count.
          let n_nums = arms.len() - 1;
          for (i, arm) in arms.iter_mut().enumerate() {
            let orig = if i == n_nums {
              Term::add_num(Term::Var { nam: pred.clone().unwrap() }, Num::U24(i as u32))
            } else {
              Term::Num { val: Num::U24(i as u32) }
            };
            *arm = Term::Use { nam: bnd.clone(), val: Box::new(orig), nxt: Box::new(std::mem::take(arm)) };
          }
        }
        _ => {}
      }
      // Remove the bound name
      match self {
        Term::Mat { bnd, .. } | Term::Swt { bnd, .. } | Term::Fold { bnd, .. } => *bnd = None,
        _ => {}
      }
      errs
    })
  }
  /// Normalizes the arms of a match/fold term: resolves each arm's
  /// constructor, generates the field bind names, and fills in one arm per
  /// constructor of the ADT (erroring on missing arms). If the first arm is
  /// a variable pattern, the match is irrefutable and the whole term is
  /// replaced by that arm wrapped in `use` terms.
  ///
  /// Panics if `self` is not a `Mat` or `Fold` term.
  fn fix_match(&mut self, errs: &mut Vec<FixMatchErr>, ctrs: &Constructors, adts: &Adts) {
    let (Term::Mat { bnd, arg, with_bnd, with_arg, arms }
    | Term::Fold { bnd, arg, with_bnd, with_arg, arms }) = self
    else {
      unreachable!()
    };
    let bnd = bnd.clone().unwrap();
    // Normalize arms, making one arm for each constructor of the matched adt.
    if let Some(ctr_nam) = &arms[0].0 {
      if let Some(adt_nam) = ctrs.get(ctr_nam) {
        // First arm matches a constructor as expected, so we can normalize the arms.
        let adt_ctrs = &adts[adt_nam].ctrs;
        // Decide which constructor corresponds to which arm of the match.
        let mut bodies = fixed_match_arms(&bnd, arms, adt_nam, adt_ctrs.keys(), ctrs, adts, errs);
        // Build the match arms, with all constructors
        let mut new_rules = vec![];
        for (ctr_nam, ctr) in adt_ctrs.iter() {
          // Field binds are named `bnd.field`.
          let fields = ctr.fields.iter().map(|f| Some(match_field(&bnd, &f.nam))).collect::<Vec<_>>();
          let body = if let Some(Some(body)) = bodies.remove(ctr_nam) {
            body
          } else {
            errs.push(FixMatchErr::NonExhaustiveMatch { typ: adt_nam.clone(), missing: ctr_nam.clone() });
            Term::Err
          };
          new_rules.push((Some(ctr_nam.clone()), fields, body));
        }
        *arms = new_rules;
        return;
      }
    }
    // First arm was not matching a constructor, irrefutable match, convert into a use term.
    errs.push(FixMatchErr::IrrefutableMatch { var: arms[0].0.clone() });
    let match_var = arms[0].0.take();
    let arg = std::mem::take(arg);
    let with_bnd = std::mem::take(with_bnd);
    let with_arg = std::mem::take(with_arg);
    // Replaces `self` by its irrefutable arm
    *self = std::mem::take(&mut arms[0].2);
    // `with` clause desugaring
    // Performs the same as `Term::linearize_match_with`.
    // Note that it only wraps the arm with function calls if `with_bnd` and `with_arg` aren't empty.
    *self = Term::rfold_lams(std::mem::take(self), with_bnd.into_iter());
    *self = Term::call(std::mem::take(self), with_arg);
    if let Some(var) = match_var {
      // Alias both the match bind and the arm's variable to the argument.
      *self = Term::Use {
        nam: Some(bnd.clone()),
        val: arg,
        nxt: Box::new(Term::Use {
          nam: Some(var),
          val: Box::new(Term::Var { nam: bnd }),
          nxt: Box::new(std::mem::take(self)),
        }),
      }
    }
  }
}
/// Given the rules of a match term, return the bodies that match
/// each of the constructors of the matched ADT.
///
/// If no rules match a certain constructor, return None in the map,
/// indicating a non-exhaustive match.
fn fixed_match_arms<'a>(
  bnd: &Name,
  rules: &mut Vec<MatchRule>,
  adt_nam: &Name,
  adt_ctrs: impl Iterator<Item = &'a Name>,
  ctrs: &Constructors,
  adts: &Adts,
  errs: &mut Vec<FixMatchErr>,
) -> HashMap<&'a Name, Option<Term>> {
  // Start with every constructor uncovered.
  let mut bodies = HashMap::<&Name, Option<Term>>::from_iter(adt_ctrs.map(|ctr| (ctr, None)));
  for rule_idx in 0..rules.len() {
    // If Ctr arm, use the body of this rule for this constructor.
    if let Some(ctr_nam) = &rules[rule_idx].0 {
      if let Some(found_adt) = ctrs.get(ctr_nam) {
        if found_adt == adt_nam {
          let body = bodies.get_mut(ctr_nam).unwrap();
          if body.is_none() {
            // Use this rule for this constructor
            *body = Some(rules[rule_idx].2.clone());
          } else {
            // Constructor already covered by an earlier arm.
            errs.push(FixMatchErr::RedundantArm { ctr: ctr_nam.clone() });
          }
        } else {
          // Constructor belongs to a different ADT than the first arm's.
          errs.push(FixMatchErr::AdtMismatch {
            expected: adt_nam.clone(),
            found: found_adt.clone(),
            ctr: ctr_nam.clone(),
          })
        }
        continue;
      }
    }
    // Otherwise, Var arm, use the body of this rule for all non-covered constructors.
    for (ctr, body) in bodies.iter_mut() {
      if body.is_none() {
        let mut new_body = rules[rule_idx].2.clone();
        if let Some(var) = &rules[rule_idx].0 {
          // Rebind the variable to the rebuilt constructor in each arm.
          new_body = Term::Use {
            nam: Some(var.clone()),
            val: Box::new(rebuild_ctr(bnd, ctr, &adts[adt_nam].ctrs[&**ctr].fields)),
            nxt: Box::new(new_body),
          };
        }
        *body = Some(new_body);
      }
    }
    // A var arm covers everything; any later arms are unreachable.
    if rule_idx != rules.len() - 1 {
      errs.push(FixMatchErr::UnreachableMatchArms { var: rules[rule_idx].0.clone() });
      rules.truncate(rule_idx + 1);
    }
    break;
  }
  bodies
}
/// Builds the name of the variable bound for field `field` of matched value `arg`.
fn match_field(arg: &Name, field: &Name) -> Name {
  Name::new(format!("{}.{}", arg, field))
}
/// Rebuilds the matched constructor `ctr` by applying its reference to the
/// field variables generated for `arg`.
fn rebuild_ctr(arg: &Name, ctr: &Name, fields: &[CtrField]) -> Term {
  let field_vars = fields.iter().map(|field| Term::Var { nam: match_field(arg, &field.nam) });
  Term::call(Term::Ref { nam: ctr.clone() }, field_vars)
}
// User-facing rendering of the match-normalization diagnostics.
impl std::fmt::Display for FixMatchErr {
  fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
    match self {
      FixMatchErr::AdtMismatch { expected, found, ctr } => write!(
        f,
        "Type mismatch in 'match' expression: Expected a constructor of type '{expected}', found '{ctr}' of type '{found}'"
      ),
      FixMatchErr::NonExhaustiveMatch { typ, missing } => {
        write!(f, "Non-exhaustive 'match' expression of type '{typ}'. Case '{missing}' not covered.")
      }
      FixMatchErr::IrrefutableMatch { var } => {
        writeln!(
          f,
          "Irrefutable 'match' expression. All cases after variable pattern '{}' will be ignored.",
          var.as_ref().unwrap_or(&Name::new("*")),
        )?;
        writeln!(
          f,
          "{:ERR_INDENT_SIZE$}Note that to use a 'match' expression, the matched constructors need to be defined in a 'data' definition.",
          "",
        )?;
        write!(
          f,
          "{:ERR_INDENT_SIZE$}If this is not a mistake, consider using a 'let' expression instead.",
          ""
        )
      }
      FixMatchErr::UnreachableMatchArms { var } => write!(
        f,
        "Unreachable arms in 'match' expression. All cases after '{}' will be ignored.",
        var.as_ref().unwrap_or(&Name::new("*"))
      ),
      FixMatchErr::RedundantArm { ctr } => {
        write!(f, "Redundant arm in 'match' expression. Case '{ctr}' appears more than once.")
      }
    }
  }
}
| rust | Apache-2.0 | d184863f03e796d1d657958a51dd6dd331ade92d | 2026-01-04T15:41:39.511038Z | false |
HigherOrderCO/Bend | https://github.com/HigherOrderCO/Bend/blob/d184863f03e796d1d657958a51dd6dd331ade92d/src/fun/transform/desugar_match_defs.rs | src/fun/transform/desugar_match_defs.rs | use crate::{
diagnostics::{Diagnostics, WarningType},
fun::{builtins, Adts, Constructors, Ctx, Definition, FanKind, Name, Num, Pattern, Rule, Tag, Term},
maybe_grow,
};
use itertools::Itertools;
use std::collections::{BTreeSet, HashSet};
/// Diagnostics produced while desugaring pattern-matching definitions.
pub enum DesugarMatchDefErr {
  /// A constructor of the matched ADT is not covered by any rule.
  AdtNotExhaustive { adt: Name, ctr: Name },
  /// A match on number literals is missing a default (variable) case.
  NumMissingDefault,
  /// Two rules use patterns of different types in the same argument column.
  TypeMismatch { expected: Type, found: Type, pat: Pattern },
  /// The same name is bound more than once in a rule's patterns.
  RepeatedBind { bind: Name },
  /// A rule shadowed by earlier rules that can never be matched.
  UnreachableRule { idx: usize, nam: Name, pats: Vec<Pattern> },
}
impl Ctx<'_> {
  /// Converts equational-style pattern matching function definitions into trees of match terms.
  pub fn desugar_match_defs(&mut self) -> Result<(), Diagnostics> {
    for (def_name, def) in self.book.defs.iter_mut() {
      let errs = def.desugar_match_def(&self.book.ctrs, &self.book.adts);
      // Exhaustiveness/type problems are hard errors; repeated binds and
      // unreachable rules are only warnings.
      for err in errs {
        match err {
          DesugarMatchDefErr::AdtNotExhaustive { .. }
          | DesugarMatchDefErr::NumMissingDefault
          | DesugarMatchDefErr::TypeMismatch { .. } => {
            self.info.add_function_error(err, def_name.clone(), def.source.clone())
          }
          DesugarMatchDefErr::RepeatedBind { .. } => self.info.add_function_warning(
            err,
            WarningType::RepeatedBind,
            def_name.clone(),
            def.source.clone(),
          ),
          DesugarMatchDefErr::UnreachableRule { .. } => self.info.add_function_warning(
            err,
            WarningType::UnreachableMatch,
            def_name.clone(),
            def.source.clone(),
          ),
        }
      }
    }
    self.info.fatal(())
  }
}
impl Definition {
  /// Converts the equational pattern-matching rules of this definition into
  /// a single rule whose body is a tree of match terms
  /// (see `simplify_rule_match`).
  ///
  /// Returns all diagnostics found, including warnings for original rules
  /// that were never used by the generated tree (unreachable rules).
  pub fn desugar_match_def(&mut self, ctrs: &Constructors, adts: &Adts) -> Vec<DesugarMatchDefErr> {
    let mut errs = vec![];
    // First desugar any definitions nested inside the rule bodies.
    for rule in self.rules.iter_mut() {
      desugar_inner_match_defs(&mut rule.body, ctrs, adts, &mut errs);
    }
    let repeated_bind_errs = fix_repeated_binds(&mut self.rules);
    errs.extend(repeated_bind_errs);
    // One generated variable `%argN` per argument of the definition.
    let args = (0..self.arity()).map(|i| Name::new(format!("%arg{i}"))).collect::<Vec<_>>();
    let rules = std::mem::take(&mut self.rules);
    let idx = (0..rules.len()).collect::<Vec<_>>();
    // Tracks which of the original rules were actually reached.
    let mut used = BTreeSet::new();
    match simplify_rule_match(args.clone(), rules.clone(), idx.clone(), vec![], &mut used, ctrs, adts) {
      Ok(body) => {
        // Re-wrap the match tree with lambdas for the original arguments.
        let body = Term::rfold_lams(body, args.into_iter().map(Some));
        self.rules = vec![Rule { pats: vec![], body }];
        // Any rule index never inserted into `used` is unreachable.
        for i in idx {
          if !used.contains(&i) {
            let e = DesugarMatchDefErr::UnreachableRule {
              idx: i,
              nam: self.name.clone(),
              pats: rules[i].pats.clone(),
            };
            errs.push(e);
          }
        }
      }
      Err(e) => errs.push(e),
    }
    errs
  }
}
/// Walks a term and desugars the pattern-matching rules of any local
/// definitions (`Term::Def`) found inside it, accumulating diagnostics in `errs`.
fn desugar_inner_match_defs(
  term: &mut Term,
  ctrs: &Constructors,
  adts: &Adts,
  errs: &mut Vec<DesugarMatchDefErr>,
) {
  maybe_grow(|| {
    if let Term::Def { def, nxt } = term {
      // A local definition: desugar its own rules, then continue in the body.
      errs.extend(def.desugar_match_def(ctrs, adts));
      desugar_inner_match_defs(nxt, ctrs, adts, errs);
    } else {
      // Any other term: just recurse into the children.
      for child in term.children_mut() {
        desugar_inner_match_defs(child, ctrs, adts, errs);
      }
    }
  })
}
/// When a rule has repeated bind, the only one that is actually useful is the last one.
///
/// Example: In `(Foo x x x x) = x`, the function should return the fourth argument.
///
/// To avoid having to deal with this, we can just erase any repeated binds.
/// ```hvm
/// (Foo a (Succ a) (Cons a)) = (a a)
/// // After this transformation, becomes:
/// (Foo * (Succ *) (Cons a)) = (a a)
/// ```
///
/// Returns a `RepeatedBind` warning for each erased name.
fn fix_repeated_binds(rules: &mut [Rule]) -> Vec<DesugarMatchDefErr> {
  let mut errs = vec![];
  for rule in rules {
    let mut binds = HashSet::new();
    // Walk the binds in reverse so the *last* occurrence of each name is the
    // one that survives; earlier duplicates are erased.
    rule.pats.iter_mut().flat_map(|p| p.binds_mut()).rev().for_each(|nam| {
      if binds.contains(nam) {
        // Repeated bind, not reachable and can be erased.
        if let Some(nam) = nam {
          errs.push(DesugarMatchDefErr::RepeatedBind { bind: nam.clone() });
        }
        *nam = None;
      } else {
        binds.insert(&*nam);
      }
    });
  }
  errs
}
/// Creates the match tree for a given pattern matching function definition.
/// For each constructor, a match case is created.
///
/// The match cases follow the same order as the order the constructors are in
/// the ADT declaration.
///
/// If there are constructors of different types for the same arg, returns a type error.
///
/// If no patterns match one of the constructors, returns a non-exhaustive match error.
///
/// Any nested subpatterns are extracted and moved into a nested match
/// expression, together with the remaining match arguments.
///
/// Linearizes all the arguments that are used in at least one of the bodies.
///
/// `args`: Name of the generated argument variables that still have to be processed.
/// `rules`: The rules to simplify.
/// `idx`: The original index of the rules, to check for unreachable rules.
/// `with`: Name of the variables to be inserted in the `with` clauses.
fn simplify_rule_match(
  args: Vec<Name>,
  rules: Vec<Rule>,
  idx: Vec<usize>,
  with: Vec<Name>,
  used: &mut BTreeSet<usize>,
  ctrs: &Constructors,
  adts: &Adts,
) -> Result<Term, DesugarMatchDefErr> {
  if args.is_empty() {
    // No arguments left to match: the first remaining rule wins.
    used.insert(idx[0]);
    Ok(rules.into_iter().next().unwrap().body)
  } else if rules[0].pats.iter().all(|p| p.is_wildcard()) {
    // First row is all wildcards: it matches unconditionally.
    Ok(irrefutable_fst_row_rule(args, rules.into_iter().next().unwrap(), idx[0], used))
  } else {
    // Dispatch on the inferred type of the first argument's pattern column.
    let typ = Type::infer_from_def_arg(&rules, 0, ctrs)?;
    match typ {
      Type::Any => var_rule(args, rules, idx, with, used, ctrs, adts),
      Type::Fan(fan, tag, tup_len) => fan_rule(args, rules, idx, with, used, fan, tag, tup_len, ctrs, adts),
      Type::Num => num_rule(args, rules, idx, with, used, ctrs, adts),
      Type::Adt(adt_name) => switch_rule(args, rules, idx, with, adt_name, used, ctrs, adts),
    }
  }
}
/// Irrefutable first row rule.
/// Short-circuits the encoding in case the first rule always matches.
/// This is useful to avoid unnecessary pattern matching.
fn irrefutable_fst_row_rule(args: Vec<Name>, rule: Rule, idx: usize, used: &mut BTreeSet<usize>) -> Term {
  let mut term = rule.body;
  for (arg, pat) in args.into_iter().zip(rule.pats.into_iter()) {
    match pat {
      // Unnamed wildcard: nothing to bind.
      Pattern::Var(None) => {}
      // Named variable: alias it to the argument with a `use` term.
      Pattern::Var(Some(var)) => {
        term = Term::Use { nam: Some(var), val: Box::new(Term::Var { nam: arg }), nxt: Box::new(term) };
      }
      // `Chn` pattern: rebind through a `let`, keeping the pattern as-is.
      Pattern::Chn(var) => {
        term = Term::Let {
          pat: Box::new(Pattern::Chn(var)),
          val: Box::new(Term::Var { nam: arg }),
          nxt: Box::new(term),
        };
      }
      // An irrefutable row contains only wildcard-like patterns.
      _ => unreachable!(),
    }
  }
  used.insert(idx);
  term
}
/// Var rule.
/// `case x0 ... xN { var p1 ... pN: (Body var p1 ... pN) }`
/// becomes
/// `case x1 ... xN { p1 ... pN: use var = x0; (Body var p1 ... pN) }`
fn var_rule(
  mut args: Vec<Name>,
  rules: Vec<Rule>,
  idx: Vec<usize>,
  mut with: Vec<Name>,
  used: &mut BTreeSet<usize>,
  ctrs: &Constructors,
  adts: &Adts,
) -> Result<Term, DesugarMatchDefErr> {
  let arg = args[0].clone();
  let new_args = args.split_off(1);
  let mut new_rules = vec![];
  for mut rule in rules {
    // Drop the first pattern; if it binds a name, alias it to the argument.
    let new_pats = rule.pats.split_off(1);
    let pat = rule.pats.pop().unwrap();
    if let Pattern::Var(Some(nam)) = &pat {
      rule.body = Term::Use {
        nam: Some(nam.clone()),
        val: Box::new(Term::Var { nam: arg.clone() }),
        nxt: Box::new(std::mem::take(&mut rule.body)),
      };
    }
    let new_rule = Rule { pats: new_pats, body: rule.body };
    new_rules.push(new_rule);
  }
  // The consumed argument still needs to be linearized by deeper matches.
  with.push(arg);
  simplify_rule_match(new_args, new_rules, idx, with, used, ctrs, adts)
}
/// Tuple rule.
/// ```hvm
/// case x0 ... xN {
///   (p0_0, ... p0_M) p1 ... pN:
///     (Body p0_0 ... p0_M p1 ... pN)
/// }
/// ```
/// becomes
/// ```hvm
/// let (x0.0, ... x0.M) = x0;
/// case x0.0 ... x0.M x1 ... xN {
///   p0_0 ... p0_M p1 ... pN:
///     (Body p0_0 ... p0_M p1 ... pN)
/// }
/// ```
#[allow(clippy::too_many_arguments)]
fn fan_rule(
  mut args: Vec<Name>,
  rules: Vec<Rule>,
  idx: Vec<usize>,
  with: Vec<Name>,
  used: &mut BTreeSet<usize>,
  fan: FanKind,
  tag: Tag,
  len: usize,
  ctrs: &Constructors,
  adts: &Adts,
) -> Result<Term, DesugarMatchDefErr> {
  let arg = args[0].clone();
  let old_args = args.split_off(1);
  // One new argument per tuple element, named `arg.i`.
  let new_args = (0..len).map(|i| Name::new(format!("{arg}.{i}")));
  let mut new_rules = vec![];
  for mut rule in rules {
    let pat = rule.pats[0].clone();
    let old_pats = rule.pats.split_off(1);
    // Extract subpatterns from the tuple pattern
    let mut new_pats = match pat {
      Pattern::Fan(.., sub_pats) => sub_pats,
      Pattern::Var(var) => {
        if let Some(var) = var {
          // Rebuild the tuple if it was a var pattern
          let tup =
            Term::Fan { fan, tag: tag.clone(), els: new_args.clone().map(|nam| Term::Var { nam }).collect() };
          rule.body =
            Term::Use { nam: Some(var), val: Box::new(tup), nxt: Box::new(std::mem::take(&mut rule.body)) };
        }
        // A var pattern matches any tuple: one wildcard bind per element.
        new_args.clone().map(|nam| Pattern::Var(Some(nam))).collect()
      }
      _ => unreachable!(),
    };
    new_pats.extend(old_pats);
    let new_rule = Rule { pats: new_pats, body: rule.body };
    new_rules.push(new_rule);
  }
  // Destructure the argument once with a `let`, then match the elements.
  let bnd = new_args.clone().map(|x| Pattern::Var(Some(x))).collect();
  let args = new_args.chain(old_args).collect();
  let nxt = simplify_rule_match(args, new_rules, idx, with, used, ctrs, adts)?;
  let term = Term::Let {
    pat: Box::new(Pattern::Fan(fan, tag.clone(), bnd)),
    val: Box::new(Term::Var { nam: arg }),
    nxt: Box::new(nxt),
  };
  Ok(term)
}
/// Num rule.
/// Compiles a column of number-literal patterns into a cascade of native
/// `switch` terms. The matched numbers are visited in ascending order and
/// each switch subtracts the distance from the previous number, so every
/// test is a comparison against 0. The (mandatory) variable pattern
/// provides the default arm at the bottom of the cascade.
fn num_rule(
  mut args: Vec<Name>,
  rules: Vec<Rule>,
  idx: Vec<usize>,
  with: Vec<Name>,
  used: &mut BTreeSet<usize>,
  ctrs: &Constructors,
  adts: &Adts,
) -> Result<Term, DesugarMatchDefErr> {
  // Number match must always have a default case
  if !rules.iter().any(|r| r.pats[0].is_wildcard()) {
    return Err(DesugarMatchDefErr::NumMissingDefault);
  }
  let arg = args[0].clone();
  let args = args.split_off(1);
  // Predecessor variable bound by the innermost switch of the cascade.
  let pred_var = Name::new(format!("{arg}-1"));
  // Since numbers have infinite (2^60) constructors, they require special treatment.
  // We first iterate over each present number then get the default.
  let nums = rules
    .iter()
    .filter_map(|r| if let Pattern::Num(n) = r.pats[0] { Some(n) } else { None })
    .collect::<BTreeSet<_>>()
    .into_iter()
    .collect::<Vec<_>>();
  // Number cases
  let mut num_bodies = vec![];
  for num in nums.iter() {
    let mut new_rules = vec![];
    let mut new_idx = vec![];
    for (rule, &idx) in rules.iter().zip(&idx) {
      match &rule.pats[0] {
        // Exact literal match: keep the body as-is.
        Pattern::Num(n) if n == num => {
          let body = rule.body.clone();
          let rule = Rule { pats: rule.pats[1..].to_vec(), body };
          new_rules.push(rule);
          new_idx.push(idx);
        }
        // Var pattern also covers this literal; bind the var to it.
        Pattern::Var(var) => {
          let mut body = rule.body.clone();
          if let Some(var) = var {
            body = Term::Use {
              nam: Some(var.clone()),
              val: Box::new(Term::Num { val: Num::U24(*num) }),
              nxt: Box::new(std::mem::take(&mut body)),
            };
          }
          let rule = Rule { pats: rule.pats[1..].to_vec(), body };
          new_rules.push(rule);
          new_idx.push(idx);
        }
        // Other literals don't cover this number.
        _ => (),
      }
    }
    let body = simplify_rule_match(args.clone(), new_rules, new_idx, with.clone(), used, ctrs, adts)?;
    num_bodies.push(body);
  }
  // Default case
  let mut new_rules = vec![];
  let mut new_idx = vec![];
  for (rule, &idx) in rules.into_iter().zip(&idx) {
    if let Pattern::Var(var) = &rule.pats[0] {
      let mut body = rule.body.clone();
      if let Some(var) = var {
        // Recover the original value from the innermost predecessor:
        // it's `pred + (last_num + 1)`.
        let last_num = *nums.last().unwrap();
        let cur_num = 1 + last_num;
        let var_recovered = Term::add_num(Term::Var { nam: pred_var.clone() }, Num::U24(cur_num));
        body = Term::Use { nam: Some(var.clone()), val: Box::new(var_recovered), nxt: Box::new(body) };
        // Rewrite `var - cur_num` in the body to use the predecessor directly.
        fast_pred_access(&mut body, cur_num, var, &pred_var);
      }
      let rule = Rule { pats: rule.pats[1..].to_vec(), body };
      new_rules.push(rule);
      new_idx.push(idx);
    }
  }
  let mut default_with = with.clone();
  default_with.push(pred_var.clone());
  let default_body = simplify_rule_match(args.clone(), new_rules, new_idx, default_with, used, ctrs, adts)?;
  // Linearize previously matched vars and current args.
  let with = with.into_iter().chain(args).collect::<Vec<_>>();
  let with_bnd = with.iter().cloned().map(Some).collect::<Vec<_>>();
  let with_arg = with.iter().cloned().map(|nam| Term::Var { nam }).collect::<Vec<_>>();
  // Build the cascade bottom-up, starting from the default body.
  let term = num_bodies.into_iter().enumerate().rfold(default_body, |term, (i, body)| {
    let val = if i > 0 {
      // switch arg = (pred +1 +num_i-1 - num_i) { 0: body_i; _: acc }
      // nums[i] >= nums[i-1]+1, so we do a sub here.
      Term::sub_num(Term::Var { nam: pred_var.clone() }, Num::U24(nums[i] - 1 - nums[i - 1]))
    } else {
      // switch arg = (arg -num_0) { 0: body_0; _: acc}
      Term::sub_num(Term::Var { nam: arg.clone() }, Num::U24(nums[i]))
    };
    Term::Swt {
      arg: Box::new(val),
      bnd: Some(arg.clone()),
      with_bnd: with_bnd.clone(),
      with_arg: with_arg.clone(),
      pred: Some(pred_var.clone()),
      arms: vec![body, term],
    }
  });
  Ok(term)
}
/// Rewrites occurrences of `(- var cur_num)` in `body` into direct uses of
/// the switch predecessor variable `pred_var`, avoiding the subtraction.
fn fast_pred_access(body: &mut Term, cur_num: u32, var: &Name, pred_var: &Name) {
  maybe_grow(|| {
    // Detect the exact shape `(- var cur_num)` with a literal second operand.
    let is_pred_sub = match body {
      Term::Oper { opr: crate::fun::Op::SUB, fst, snd } => matches!(
        (&**fst, &**snd),
        (Term::Var { nam }, Term::Num { val: crate::fun::Num::U24(val) }) if nam == var && *val == cur_num
      ),
      _ => false,
    };
    if is_pred_sub {
      *body = Term::Var { nam: pred_var.clone() };
    }
    for child in body.children_mut() {
      fast_pred_access(child, cur_num, var, pred_var)
    }
  })
}
/// When the first column has constructors, create a branch on the constructors
/// of the first arg.
///
/// The extracted nested patterns and remaining args are handled recursively in
/// a nested expression for each match arm.
///
/// If we imagine a complex match expression representing what's left of the
/// encoding of a pattern matching function:
/// ```hvm
/// data MyType = (CtrA ctrA_field0 ... ctrA_fieldA) | (CtrB ctrB_field0 ... ctrB_fieldB) | CtrC | ...
///
/// case x0 ... xN {
///   (CtrA p0_0_0 ... p0_A) p0_1 ... p0_N : (Body0 p0_0_0 ... p0_0_A p0_1 ... p0_N)
///   ...
///   varI pI_1 ... pI_N: (BodyI varI pI_1 ... pI_N)
///   ...
///   (CtrB pJ_0_0 ... pJ_0_B) pJ_1 ... pJ_N: (BodyJ pJ_0_0 ... pJ_0_B pJ_1 ... pJ_N)
///   ...
///   (CtrC) pK_1 ... pK_N: (BodyK p_1 ... pK_N)
///   ...
/// }
/// ```
/// is converted into
/// ```hvm
/// match x0 {
///   CtrA: case x.ctrA_field0 ... x.ctrA_fieldA x1 ... xN {
///     p0_0_0 ... p0_0_B p0_1 ... p0_N :
///       (Body0 p0_0_0 ... p0_0_B )
///     x.ctrA_field0 ... x.ctrA_fieldA pI_1 ... pI_N:
///       use varI = (CtrA x.ctrA_field0 ... x.ctrA_fieldN); (BodyI varI pI_1 ... pI_N)
///     ...
///   }
///   CtrB: case x.ctrB_field0 ... x.ctrB_fieldB x1 ... xN {
///     x.ctrB_field0 ... x.ctrB_fieldB pI_1 ... pI_N:
///       use varI = (CtrB x.ctrB_field0 ... x.ctrB_fieldB); (BodyI varI pI_1 ... pI_N)
///     pJ_0_0 ... pJ_0_B pJ_1 ... pJ_N :
///       (BodyJ pJ_0_0 ... pJ_0_B pJ_1 ... pJ_N)
///     ...
///   }
///   CtrC: case * x1 ... xN {
///     * pI_1 ... pI_N:
///       use varI = CtrC; (BodyI varI pI_1 ... pI_N)
///     * pK_1 ... pK_N:
///       (BodyK p_1 ... pK_N)
///     ...
///   }
///   ...
/// }
/// ```
/// Where `case` represents a call of the [`simplify_rule_match`] function.
#[allow(clippy::too_many_arguments)]
fn switch_rule(
  mut args: Vec<Name>,
  rules: Vec<Rule>,
  idx: Vec<usize>,
  with: Vec<Name>,
  adt_name: Name,
  used: &mut BTreeSet<usize>,
  ctrs: &Constructors,
  adts: &Adts,
) -> Result<Term, DesugarMatchDefErr> {
  let arg = args[0].clone();
  let old_args = args.split_off(1);
  let mut new_arms = vec![];
  for (ctr_nam, ctr) in &adts[&adt_name].ctrs {
    // One new argument per field of this constructor, named `arg.field`.
    let new_args = ctr.fields.iter().map(|f| Name::new(format!("{}.{}", arg, f.nam)));
    let args = new_args.clone().chain(old_args.clone()).collect();
    let mut new_rules = vec![];
    let mut new_idx = vec![];
    for (rule, &idx) in rules.iter().zip(&idx) {
      let old_pats = rule.pats[1..].to_vec();
      match &rule.pats[0] {
        // Same ctr, extract subpatterns.
        // (Ctr pat0_0 ... pat0_m) pat1 ... patN: body
        // becomes
        // pat0_0 ... pat0_m pat1 ... patN: body
        Pattern::Ctr(found_ctr, new_pats) if ctr_nam == found_ctr => {
          let pats = new_pats.iter().cloned().chain(old_pats).collect();
          let body = rule.body.clone();
          let rule = Rule { pats, body };
          new_rules.push(rule);
          new_idx.push(idx);
        }
        // Var, match and rebuild the constructor.
        // var pat1 ... patN: body
        // becomes
        // arg0.field0 ... arg0.fieldM pat1 ... patN:
        //   use var = (Ctr arg0.field0 ... arg0.fieldM); body
        Pattern::Var(var) => {
          let new_pats = new_args.clone().map(|n| Pattern::Var(Some(n)));
          let pats = new_pats.chain(old_pats.clone()).collect();
          let mut body = rule.body.clone();
          let reconstructed_var =
            Term::call(Term::Ref { nam: ctr_nam.clone() }, new_args.clone().map(|nam| Term::Var { nam }));
          if let Some(var) = var {
            body =
              Term::Use { nam: Some(var.clone()), val: Box::new(reconstructed_var), nxt: Box::new(body) };
          }
          let rule = Rule { pats, body };
          new_rules.push(rule);
          new_idx.push(idx);
        }
        // Patterns for other constructors don't contribute to this arm.
        _ => (),
      }
    }
    // No rule covers this constructor: the match is not exhaustive.
    if new_rules.is_empty() {
      return Err(DesugarMatchDefErr::AdtNotExhaustive { adt: adt_name, ctr: ctr_nam.clone() });
    }
    let body = simplify_rule_match(args, new_rules, new_idx, with.clone(), used, ctrs, adts)?;
    new_arms.push((Some(ctr_nam.clone()), new_args.map(Some).collect(), body));
  }
  // Linearize previously matched vars and current args.
  let with = with.into_iter().chain(old_args).collect::<Vec<_>>();
  let with_bnd = with.iter().cloned().map(Some).collect::<Vec<_>>();
  let with_arg = with.iter().cloned().map(|nam| Term::Var { nam }).collect::<Vec<_>>();
  let term = Term::Mat {
    arg: Box::new(Term::Var { nam: arg.clone() }),
    bnd: Some(arg.clone()),
    with_bnd,
    with_arg,
    arms: new_arms,
  };
  Ok(term)
}
/// Pattern types.
/// Used to classify the pattern column of a match argument
/// (see [`Type::infer_from_def_arg`]).
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum Type {
  /// Variables/wildcards.
  Any,
  /// A native tuple: (fan kind, tag, arity).
  Fan(FanKind, Tag, usize),
  /// A sequence of arbitrary numbers ending in a variable.
  Num,
  /// Adt constructors declared with the `data` syntax.
  Adt(Name),
}
impl Type {
  /// Infers the type of one argument column of a pattern matrix by folding
  /// over the pattern each rule uses at position `arg_idx`.
  ///
  /// Wildcards are compatible with anything; any two differing non-wildcard
  /// types produce a `TypeMismatch` error.
  fn infer_from_def_arg(
    rules: &[Rule],
    arg_idx: usize,
    ctrs: &Constructors,
  ) -> Result<Type, DesugarMatchDefErr> {
    rules.iter().map(|rule| &rule.pats[arg_idx]).try_fold(Type::Any, |expected, pat| {
      match (expected, pat.to_type(ctrs)) {
        // A wildcard unifies with anything; keep the more specific type.
        (Type::Any, found) => Ok(found),
        (expected, Type::Any) => Ok(expected),
        (expected, found) if expected == found => Ok(expected),
        (expected, found) => {
          Err(DesugarMatchDefErr::TypeMismatch { expected, found, pat: pat.clone() })
        }
      }
    })
  }
}
impl Pattern {
  /// Returns the [`Type`] this pattern belongs to, for column type inference.
  ///
  /// List and string patterns are typed as the builtin `List`/`String` ADTs.
  /// Panics if a constructor pattern refers to an unknown constructor.
  fn to_type(&self, ctrs: &Constructors) -> Type {
    match self {
      Pattern::Var(_) | Pattern::Chn(_) => Type::Any,
      Pattern::Ctr(ctr_nam, _) => {
        let adt_nam = ctrs.get(ctr_nam).unwrap_or_else(|| panic!("Unknown constructor '{ctr_nam}'"));
        Type::Adt(adt_nam.clone())
      }
      Pattern::Fan(is_tup, tag, args) => Type::Fan(*is_tup, tag.clone(), args.len()),
      Pattern::Num(_) => Type::Num,
      Pattern::Lst(..) => Type::Adt(Name::new(builtins::LIST)),
      Pattern::Str(..) => Type::Adt(Name::new(builtins::STRING)),
    }
  }
}
// User-facing rendering of pattern types, used in type mismatch messages.
impl std::fmt::Display for Type {
  fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
    match self {
      Type::Any => write!(f, "any"),
      Type::Fan(FanKind::Tup, tag, n) => write!(f, "{}{n}-tuple", tag.display_padded()),
      Type::Fan(FanKind::Dup, tag, n) => write!(f, "{}{n}-dup", tag.display_padded()),
      Type::Num => write!(f, "number"),
      Type::Adt(nam) => write!(f, "{nam}"),
    }
  }
}
// User-facing rendering of the match-def desugaring diagnostics.
impl std::fmt::Display for DesugarMatchDefErr {
  fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
    match self {
      DesugarMatchDefErr::AdtNotExhaustive { adt, ctr } => {
        write!(f, "Non-exhaustive pattern matching rule. Constructor '{ctr}' of type '{adt}' not covered")
      }
      DesugarMatchDefErr::TypeMismatch { expected, found, pat } => {
        write!(
          f,
          "Type mismatch in pattern matching rule. Expected a constructor of type '{}', found '{}' with type '{}'.",
          expected, pat, found
        )
      }
      DesugarMatchDefErr::NumMissingDefault => {
        write!(f, "Non-exhaustive pattern matching rule. Default case of number type not covered.")
      }
      DesugarMatchDefErr::RepeatedBind { bind } => {
        write!(f, "Repeated bind in pattern matching rule: '{bind}'.")
      }
      DesugarMatchDefErr::UnreachableRule { idx, nam, pats } => {
        write!(
          f,
          "Unreachable pattern matching rule '({}{})' (rule index {idx}).",
          nam,
          pats.iter().map(|p| format!(" {p}")).join("")
        )
      }
    }
  }
}
| rust | Apache-2.0 | d184863f03e796d1d657958a51dd6dd331ade92d | 2026-01-04T15:41:39.511038Z | false |
HigherOrderCO/Bend | https://github.com/HigherOrderCO/Bend/blob/d184863f03e796d1d657958a51dd6dd331ade92d/src/fun/transform/encode_match_terms.rs | src/fun/transform/encode_match_terms.rs | use crate::{
fun::{Book, MatchRule, Name, Pattern, Term},
maybe_grow, AdtEncoding,
};
impl Book {
  /// Encodes pattern matching expressions in the book into their
  /// core form. Must be run after [`Ctx::fix_match_terms`].
  ///
  /// ADT matches are encoded based on `adt_encoding`.
  ///
  /// Num matches are encoded as a sequence of native num matches (on 0 and 1+).
  pub fn encode_matches(&mut self, adt_encoding: AdtEncoding) {
    // Visit the body of every rule of every definition.
    let bodies = self.defs.values_mut().flat_map(|def| def.rules.iter_mut()).map(|rule| &mut rule.body);
    for body in bodies {
      body.encode_matches(adt_encoding);
    }
  }
}
impl Term {
  /// Replaces the match and switch terms in this term (and its children)
  /// by their core-language encoding.
  ///
  /// Both kinds of term must already be linearized: their `with` clauses
  /// are required to be empty (checked by the asserts below).
  pub fn encode_matches(&mut self, adt_encoding: AdtEncoding) {
    maybe_grow(|| {
      for child in self.children_mut() {
        child.encode_matches(adt_encoding)
      }
      if let Term::Mat { arg, bnd: _, with_bnd, with_arg, arms } = self {
        assert!(with_bnd.is_empty());
        assert!(with_arg.is_empty());
        let arg = std::mem::take(arg.as_mut());
        let rules = std::mem::take(arms);
        *self = encode_match(arg, rules, adt_encoding);
      } else if let Term::Swt { arg, bnd: _, with_bnd, with_arg, pred, arms } = self {
        assert!(with_bnd.is_empty());
        assert!(with_arg.is_empty());
        let arg = std::mem::take(arg.as_mut());
        let pred = std::mem::take(pred);
        let rules = std::mem::take(arms);
        *self = encode_switch(arg, pred, rules);
      }
    })
  }
}
/// Encodes an ADT match as an application of the scrutinee to its arms.
/// * Scott: `(arg λfields0 body0 ... λfieldsN bodyN)` — one lambda-encoded
///   arm per constructor.
/// * NumScott: `(arg λtag switch tag { 0: arm0; _: switch tag-1 { ... } })` —
///   a single function that dispatches on the constructor's numeric tag.
fn encode_match(arg: Term, rules: Vec<MatchRule>, adt_encoding: AdtEncoding) -> Term {
  match adt_encoding {
    AdtEncoding::Scott => {
      let arms = rules.into_iter().map(|rule| Term::rfold_lams(rule.2, rule.1.into_iter()));
      Term::call(arg, arms)
    }
    AdtEncoding::NumScott => {
      // Builds the nested tag switches, one level per arm;
      // the last arm needs no test.
      fn make_switches(arms: &mut [Term]) -> Term {
        maybe_grow(|| match arms {
          [] => Term::Err,
          [arm] => Term::lam(Pattern::Var(None), std::mem::take(arm)),
          [arm, rest @ ..] => Term::lam(
            Pattern::Var(Some(Name::new("%tag"))),
            Term::Swt {
              arg: Box::new(Term::Var { nam: Name::new("%tag") }),
              bnd: None,
              with_bnd: vec![],
              with_arg: vec![],
              pred: None,
              arms: vec![std::mem::take(arm), make_switches(rest)],
            },
          ),
        })
      }
      let mut arms =
        rules.into_iter().map(|rule| Term::rfold_lams(rule.2, rule.1.into_iter())).collect::<Vec<_>>();
      let term = if arms.len() == 1 {
        // λx (x λtag switch tag {0: Ctr0; _: * })
        let arm = arms.pop().unwrap();
        let term = Term::Swt {
          arg: Box::new(Term::Var { nam: Name::new("%tag") }),
          bnd: None,
          with_bnd: vec![],
          with_arg: vec![],
          pred: None,
          arms: vec![arm, Term::Era],
        };
        Term::lam(Pattern::Var(Some(Name::new("%tag"))), term)
      } else {
        // λx (x λtag switch tag {0: Ctr0; _: switch tag-1 { ... } })
        make_switches(arms.as_mut_slice())
      };
      Term::call(arg, [term])
    }
  }
}
/// Convert into a sequence of native switches, decrementing by 1 each switch.
/// switch n {0: A; 1: B; _: (C n-2)} converted to
/// switch n {0: A; _: @%x match %x {0: B; _: @n-2 (C n-2)}}
///
/// `pred` is the optional name bound to the predecessor in the final `_` arm.
fn encode_switch(arg: Term, pred: Option<Name>, mut rules: Vec<Term>) -> Term {
  // Create the cascade of switches
  let match_var = Name::new("%x");
  // The last rule is the successor (`_`) arm; it binds the predecessor name.
  let (succ, nums) = rules.split_last_mut().unwrap();
  let last_arm = Term::lam(Pattern::Var(pred), std::mem::take(succ));
  // Fold right-to-left so each numeric arm wraps the switches that follow it.
  nums.iter_mut().enumerate().rfold(last_arm, |term, (i, rule)| {
    let arms = vec![std::mem::take(rule), term];
    if i == 0 {
      // Outermost switch matches on the original argument directly.
      Term::Swt {
        arg: Box::new(arg.clone()),
        bnd: None,
        with_bnd: vec![],
        with_arg: vec![],
        pred: None,
        arms,
      }
    } else {
      // Inner switches match on the lambda-bound decremented value `%x`.
      let swt = Term::Swt {
        arg: Box::new(Term::Var { nam: match_var.clone() }),
        bnd: None,
        with_bnd: vec![],
        with_arg: vec![],
        pred: None,
        arms,
      };
      Term::lam(Pattern::Var(Some(match_var.clone())), swt)
    }
  })
}
| rust | Apache-2.0 | d184863f03e796d1d657958a51dd6dd331ade92d | 2026-01-04T15:41:39.511038Z | false |
HigherOrderCO/Bend | https://github.com/HigherOrderCO/Bend/blob/d184863f03e796d1d657958a51dd6dd331ade92d/src/fun/transform/resolve_refs.rs | src/fun/transform/resolve_refs.rs | use crate::{
diagnostics::Diagnostics,
fun::{Ctx, Name, Pattern, Term},
maybe_grow,
};
use std::collections::{HashMap, HashSet};
/// Error for when the `main` entry point is referenced inside the program.
/// NOTE(review): not constructed in this file — confirm it is used elsewhere.
#[derive(Debug, Clone)]
pub struct ReferencedMainErr;
impl Ctx<'_> {
  /// Decides if names inside a term belong to a Var or to a Ref.
  /// Converts `Term::Var(nam)` into `Term::Ref(nam)` when the name
  /// refers to a function definition and there is no variable in
  /// scope shadowing that definition.
  ///
  /// Precondition: Refs are encoded as vars, Constructors are resolved.
  ///
  /// Postcondition: Refs are encoded as refs, with the correct def id.
  pub fn resolve_refs(&mut self) -> Result<(), Diagnostics> {
    // Names of all resolvable definitions: regular defs plus native HVM defs.
    let def_names =
      self.book.defs.keys().cloned().chain(self.book.hvm_defs.keys().cloned()).collect::<HashSet<_>>();
    for (def_name, def) in &mut self.book.defs {
      for rule in def.rules.iter_mut() {
        // The rule's pattern binds start out in scope, shadowing same-named defs.
        let mut scope = HashMap::new();
        for name in rule.pats.iter().flat_map(Pattern::binds) {
          push_scope(name.as_ref(), &mut scope);
        }
        let res =
          rule.body.resolve_refs(&def_names, self.book.entrypoint.as_ref(), &mut scope, &mut self.info);
        // Accumulate per-rule errors instead of aborting on the first one.
        self.info.take_rule_err(res, def_name.clone());
      }
    }
    self.info.fatal(())
  }
}
impl Term {
  /// Walks the term, swapping `Var` nodes for `Ref` nodes when they name a
  /// definition that is not currently shadowed by a local binding.
  ///
  /// `scope` counts, for each name, how many enclosing binders shadow it;
  /// a count of 0 (or an absent entry) means the name is free and eligible
  /// to become a ref.
  pub fn resolve_refs<'a>(
    &'a mut self,
    def_names: &HashSet<Name>,
    main: Option<&Name>,
    scope: &mut HashMap<&'a Name, usize>,
    info: &mut Diagnostics,
  ) -> Result<(), String> {
    maybe_grow(move || {
      match self {
        Term::Var { nam } => {
          if is_var_in_scope(nam, scope) {
            // If the variable is actually a reference to main, don't swap and return an error.
            if let Some(main) = main {
              if nam == main {
                return Err("Main definition can't be referenced inside the program.".to_string());
              }
            }
            // If the variable is actually a reference to a function, swap the term.
            if def_names.contains(nam) {
              *self = Term::r#ref(nam);
            }
          }
        }
        Term::Def { def, nxt } => {
          // Local definitions get a fresh scope seeded with their rule patterns.
          for rule in def.rules.iter_mut() {
            let mut scope = HashMap::new();
            for name in rule.pats.iter().flat_map(Pattern::binds) {
              push_scope(name.as_ref(), &mut scope);
            }
            let res = rule.body.resolve_refs(def_names, main, &mut scope, info);
            info.take_rule_err(res, def.name.clone());
          }
          nxt.resolve_refs(def_names, main, scope, info)?;
        }
        _ => {
          // Generic traversal: a binder shadows its name only inside the child
          // it scopes over, so push before and pop after visiting each child.
          for (child, binds) in self.children_mut_with_binds() {
            for bind in binds.clone() {
              push_scope(bind.as_ref(), scope);
            }
            child.resolve_refs(def_names, main, scope, info)?;
            for bind in binds.rev() {
              pop_scope(bind.as_ref(), scope);
            }
          }
        }
      }
      Ok(())
    })
  }
}
/// Increments the shadowing count of `name` (no-op for anonymous binds).
fn push_scope<'a>(name: Option<&'a Name>, scope: &mut HashMap<&'a Name, usize>) {
  if let Some(name) = name {
    *scope.entry(name).or_default() += 1;
  }
}
/// Decrements the shadowing count of `name` (no-op for anonymous binds).
fn pop_scope<'a>(name: Option<&'a Name>, scope: &mut HashMap<&'a Name, usize>) {
  if let Some(name) = name {
    *scope.entry(name).or_default() -= 1;
  }
}
/// True when `name` is not shadowed by any enclosing binder
/// (absent from the scope map, or present with a count of zero).
fn is_var_in_scope<'a>(name: &'a Name, scope: &HashMap<&'a Name, usize>) -> bool {
  scope.get(name).map_or(true, |count| *count == 0)
}
| rust | Apache-2.0 | d184863f03e796d1d657958a51dd6dd331ade92d | 2026-01-04T15:41:39.511038Z | false |
HigherOrderCO/Bend | https://github.com/HigherOrderCO/Bend/blob/d184863f03e796d1d657958a51dd6dd331ade92d/src/fun/transform/resugar_list.rs | src/fun/transform/resugar_list.rs | use crate::{
fun::{builtins, Pattern, Tag, Term},
maybe_grow, AdtEncoding,
};
impl Term {
  /// Converts lambda-encoded lists ending with List/Nil to list literals.
  pub fn resugar_lists(&mut self, adt_encoding: AdtEncoding) {
    match adt_encoding {
      AdtEncoding::Scott => self.resugar_lists_scott(),
      AdtEncoding::NumScott => self.resugar_lists_num_scott(),
    }
  }
  /// Converts num-scott-encoded lists ending with List/Nil to list literals.
  fn resugar_lists_num_scott(&mut self) {
    maybe_grow(|| {
      // Search for a List/Cons pattern in the term and try to build a list from that point on.
      // If successful, replace the term with the list.
      // If not, keep as-is.
      // Nil: List/nil
      if let Term::Ref { nam } = self {
        if nam == builtins::LNIL {
          *self = Term::List { els: vec![] };
        }
      }
      // Cons: @x (x CONS_TAG <term> <term>)
      if let Term::Lam { tag: Tag::Static, pat, bod } = self {
        if let Pattern::Var(Some(var_lam)) = pat.as_mut() {
          if let Term::App { tag: Tag::Static, fun, arg: tail } = bod.as_mut() {
            if let Term::App { tag: Tag::Static, fun, arg: head } = fun.as_mut() {
              if let Term::App { tag: Tag::Static, fun, arg } = fun.as_mut() {
                if let Term::Var { nam: var_app } = fun.as_mut() {
                  if let Term::Ref { nam } = arg.as_mut() {
                    if var_lam == var_app && nam == builtins::LCONS_TAG_REF {
                      // `head` was moved into the accumulator; on failure it comes
                      // back as the last element of the `Err` payload.
                      let l = build_list_num_scott(tail.as_mut(), vec![std::mem::take(head)]);
                      match l {
                        Ok(l) => *self = Term::List { els: l.into_iter().map(|x| *x).collect() },
                        // Was not a list term, keep as-is.
                        Err(mut l) => {
                          *head = l.pop().unwrap();
                          assert!(l.is_empty())
                        }
                      }
                    }
                  }
                }
              }
            }
          }
        }
      }
      // Cons: (List/Cons <term> <term>)
      if let Term::App { tag: Tag::Static, fun, arg: tail } = self {
        if let Term::App { tag: Tag::Static, fun, arg: head } = fun.as_mut() {
          if let Term::Ref { nam } = fun.as_mut() {
            if nam == builtins::LCONS {
              let l = build_list_num_scott(tail.as_mut(), vec![std::mem::take(head)]);
              match l {
                Ok(l) => *self = Term::List { els: l.into_iter().map(|x| *x).collect() },
                // Was not a list term, keep as-is.
                Err(mut l) => {
                  *head = l.pop().unwrap();
                  assert!(l.is_empty())
                }
              }
            }
          }
        }
      }
      for child in self.children_mut() {
        child.resugar_lists_num_scott();
      }
    })
  }
  /// Converts scott-encoded lists ending with List/Nil to list literals.
  fn resugar_lists_scott(&mut self) {
    maybe_grow(|| {
      // Search for a List/Cons pattern in the term and try to build a list from that point on.
      // If successful, replace the term with the list.
      // If not, keep as-is.
      // Nil: List/nil
      if let Term::Ref { nam } = self {
        if nam == builtins::LNIL {
          *self = Term::List { els: vec![] };
        }
      }
      // Cons: @* @c (c <term> <term>)
      if let Term::Lam { tag: Tag::Static, pat, bod } = self {
        if let Pattern::Var(None) = pat.as_mut() {
          if let Term::Lam { tag: Tag::Static, pat, bod } = bod.as_mut() {
            if let Pattern::Var(Some(var_lam)) = pat.as_mut() {
              if let Term::App { tag: Tag::Static, fun, arg: tail } = bod.as_mut() {
                if let Term::App { tag: Tag::Static, fun, arg: head } = fun.as_mut() {
                  if let Term::Var { nam: var_app } = fun.as_mut() {
                    if var_lam == var_app {
                      let l = build_list_scott(tail.as_mut(), vec![std::mem::take(head)]);
                      match l {
                        Ok(l) => *self = Term::List { els: l.into_iter().map(|x| *x).collect() },
                        // Was not a list term, keep as-is.
                        Err(mut l) => {
                          *head = l.pop().unwrap();
                          assert!(l.is_empty())
                        }
                      }
                    }
                  }
                }
              }
            }
          }
        }
      }
      // Cons: (List/Cons <term> <term>)
      if let Term::App { tag: Tag::Static, fun, arg: tail } = self {
        if let Term::App { tag: Tag::Static, fun, arg: head } = fun.as_mut() {
          if let Term::Ref { nam } = fun.as_mut() {
            if nam == builtins::LCONS {
              let l = build_list_scott(tail.as_mut(), vec![std::mem::take(head)]);
              match l {
                Ok(l) => *self = Term::List { els: l.into_iter().map(|x| *x).collect() },
                // Was not a list term, keep as-is.
                Err(mut l) => {
                  *head = l.pop().unwrap();
                  assert!(l.is_empty())
                }
              }
            }
          }
        }
      }
      for child in self.children_mut() {
        child.resugar_lists_scott();
      }
    })
  }
}
// TODO: We have to do weird manipulations with Box<Term> because of the borrow checker.
// When we used use box patterns this was a way simpler match statement.
#[allow(clippy::vec_box)]
/// Accumulates the elements of a num-scott-encoded list spine into `l`.
/// Returns `Ok(elements)` when the spine ends in `List/nil`; otherwise
/// returns `Err` with the accumulator, whose last element is the head that
/// was taken from the current node and must be restored by the caller.
fn build_list_num_scott(term: &mut Term, mut l: Vec<Box<Term>>) -> Result<Vec<Box<Term>>, Vec<Box<Term>>> {
  maybe_grow(|| {
    // Nil: List/nil
    if let Term::Ref { nam } = term {
      if nam == builtins::LNIL {
        return Ok(l);
      }
    }
    // Cons: @x (x CONS_TAG <term> <term>)
    if let Term::Lam { tag: Tag::Static, pat, bod } = term {
      if let Pattern::Var(Some(var_lam)) = pat.as_mut() {
        if let Term::App { tag: Tag::Static, fun, arg: tail } = bod.as_mut() {
          if let Term::App { tag: Tag::Static, fun, arg: head } = fun.as_mut() {
            if let Term::App { tag: Tag::Static, fun, arg } = fun.as_mut() {
              if let Term::Var { nam: var_app } = fun.as_mut() {
                if let Term::Ref { nam } = arg.as_mut() {
                  if var_lam == var_app && nam == builtins::LCONS_TAG_REF {
                    // New list element, append and recurse
                    l.push(std::mem::take(head));
                    let l = build_list_num_scott(tail, l);
                    match l {
                      Ok(l) => return Ok(l),
                      Err(mut l) => {
                        // If it wasn't a list, we have to put it back.
                        *head = l.pop().unwrap();
                        return Err(l);
                      }
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
    // Cons: (List/Cons <term> <term>)
    if let Term::App { tag: Tag::Static, fun, arg: tail } = term {
      if let Term::App { tag: Tag::Static, fun, arg: head } = fun.as_mut() {
        if let Term::Ref { nam } = fun.as_mut() {
          if nam == builtins::LCONS {
            // New list element, append and recurse
            l.push(std::mem::take(head));
            let l = build_list_num_scott(tail, l);
            match l {
              Ok(l) => return Ok(l),
              Err(mut l) => {
                // If it wasn't a list, we have to put it back.
                *head = l.pop().unwrap();
                return Err(l);
              }
            }
          }
        }
      }
    }
    // Not a list term, stop
    Err(l)
  })
}
#[allow(clippy::vec_box)]
/// Accumulates the elements of a scott-encoded list spine into `l`.
/// Returns `Ok(elements)` when the spine ends in `List/nil`; otherwise
/// returns `Err` with the accumulator, whose last element is the head that
/// was taken from the current node and must be restored by the caller.
fn build_list_scott(term: &mut Term, mut l: Vec<Box<Term>>) -> Result<Vec<Box<Term>>, Vec<Box<Term>>> {
  maybe_grow(|| {
    // Nil: List/nil
    if let Term::Ref { nam } = term {
      if nam == builtins::LNIL {
        return Ok(l);
      }
    }
    // Cons: @* @c (c <term> <term>)
    if let Term::Lam { tag: Tag::Static, pat, bod } = term {
      if let Pattern::Var(None) = pat.as_mut() {
        if let Term::Lam { tag: Tag::Static, pat, bod } = bod.as_mut() {
          if let Pattern::Var(Some(var_lam)) = pat.as_mut() {
            if let Term::App { tag: Tag::Static, fun, arg: tail } = bod.as_mut() {
              if let Term::App { tag: Tag::Static, fun, arg: head } = fun.as_mut() {
                if let Term::Var { nam: var_app } = fun.as_mut() {
                  if var_lam == var_app {
                    // New list element, append and recurse
                    l.push(std::mem::take(head));
                    let l = build_list_scott(tail, l);
                    match l {
                      Ok(l) => return Ok(l),
                      Err(mut l) => {
                        // If it wasn't a list, we have to put it back.
                        *head = l.pop().unwrap();
                        return Err(l);
                      }
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
    // Cons: (List/Cons <term> <term>)
    if let Term::App { tag: Tag::Static, fun, arg: tail } = term {
      if let Term::App { tag: Tag::Static, fun, arg: head } = fun.as_mut() {
        if let Term::Ref { nam } = fun.as_mut() {
          if nam == builtins::LCONS {
            // New list element, append and recurse
            l.push(std::mem::take(head));
            let l = build_list_scott(tail, l);
            match l {
              Ok(l) => return Ok(l),
              Err(mut l) => {
                // If it wasn't a list, we have to put it back.
                *head = l.pop().unwrap();
                return Err(l);
              }
            }
          }
        }
      }
    }
    // Not a list term, stop
    Err(l)
  })
}
| rust | Apache-2.0 | d184863f03e796d1d657958a51dd6dd331ade92d | 2026-01-04T15:41:39.511038Z | false |
HigherOrderCO/Bend | https://github.com/HigherOrderCO/Bend/blob/d184863f03e796d1d657958a51dd6dd331ade92d/src/fun/transform/linearize_vars.rs | src/fun/transform/linearize_vars.rs | use crate::{
fun::{Book, FanKind, Name, Pattern, Tag, Term},
maybe_grow, multi_iterator,
};
use std::collections::HashMap;
/// Makes every variable use affine:
/// - binds used 0 times are erased,
/// - binds used exactly once are left alone,
/// - binds used more than once are split with dups.
///
/// `let` bindings are additionally substituted into their single use site,
/// or anchored at the root dup when used multiple times.
///
/// Precondition: All variables are bound and have unique names within each definition.
impl Book {
  pub fn linearize_vars(&mut self) {
    self.defs.values_mut().for_each(|def| def.rule_mut().body.linearize_vars());
  }
}
impl Term {
  /// Linearizes the variable uses of this single term (erase unused binds,
  /// dup multi-use ones, substitute single-use lets).
  pub fn linearize_vars(&mut self) {
    let mut var_uses = HashMap::new();
    term_to_linear(self, &mut var_uses);
  }
}
/// Core of the linearization pass: counts and renames variable uses
/// bottom-up, erasing unused binds and dup'ing multi-use ones.
/// `var_uses` tracks, per name, how many uses have been instantiated so far.
fn term_to_linear(term: &mut Term, var_uses: &mut HashMap<Name, u64>) {
  maybe_grow(|| {
    if let Term::Let { pat, val, nxt } = term {
      if let Pattern::Var(Some(nam)) = pat.as_ref() {
        // TODO: This is swapping the order of how the bindings are
        // used, since it's not following the usual AST order (first
        // val, then nxt). Doesn't change behaviour, but looks strange.
        term_to_linear(nxt, var_uses);
        let uses = get_var_uses(Some(nam), var_uses);
        term_to_linear(val, var_uses);
        match uses {
          0 => {
            // Unused let: erase the binder but keep `val` in place.
            let Term::Let { pat, .. } = term else { unreachable!() };
            **pat = Pattern::Var(None);
          }
          1 => {
            // Used once: inline the bound value at its single use site.
            nxt.subst(nam, val.as_ref());
            *term = std::mem::take(nxt.as_mut());
          }
          _ => {
            // Used many times: turn the binder into a dup pattern.
            let new_pat = duplicate_pat(nam, uses);
            let Term::Let { pat, .. } = term else { unreachable!() };
            *pat = new_pat;
          }
        }
        return;
      }
    }
    if let Term::Var { nam } = term {
      // Each use of a variable gets a distinct dup'd name (`x`, `x_2`, ...).
      let instantiated_count = var_uses.entry(nam.clone()).or_default();
      *instantiated_count += 1;
      *nam = dup_name(nam, *instantiated_count);
      return;
    }
    for (child, binds) in term.children_mut_with_binds_mut() {
      term_to_linear(child, var_uses);
      for bind in binds {
        let uses = get_var_uses(bind.as_ref(), var_uses);
        match uses {
          // Erase binding
          0 => *bind = None,
          // Keep as-is
          1 => (),
          // Duplicate binding
          uses => {
            debug_assert!(uses > 1);
            let nam = bind.as_ref().unwrap();
            *child = Term::Let {
              pat: duplicate_pat(nam, uses),
              val: Box::new(Term::Var { nam: nam.clone() }),
              nxt: Box::new(std::mem::take(child)),
            }
          }
        }
      }
    }
  })
}
/// How many times the (possibly anonymous) bind `nam` was used; 0 when
/// anonymous or never counted.
fn get_var_uses(nam: Option<&Name>, var_uses: &HashMap<Name, u64>) -> u64 {
  match nam {
    Some(nam) => var_uses.get(nam).copied().unwrap_or(0),
    None => 0,
  }
}
/// Builds a `dup` pattern that binds `uses` copies of `nam`,
/// named `nam`, `nam_2`, ..., `nam_{uses}` (see [`dup_name`]).
fn duplicate_pat(nam: &Name, uses: u64) -> Box<Pattern> {
  Box::new(Pattern::Fan(
    FanKind::Dup,
    Tag::Auto,
    // Inclusive range instead of the non-idiomatic `1..uses + 1`.
    (1..=uses).map(|i| Pattern::Var(Some(dup_name(nam, i)))).collect(),
  ))
}
/// Name for the `uses`-th dup'd copy of `nam`: the first copy keeps the
/// original name, later copies get a `_{n}` suffix.
fn dup_name(nam: &Name, uses: u64) -> Name {
  match uses {
    1 => nam.clone(),
    n => Name::new(format!("{nam}_{n}")),
  }
}
impl Term {
  /// Because multiple children can share the same binds, this function is very restricted.
  /// Should only be called after desugaring bends/folds/matches/switches.
  ///
  /// Yields each direct child together with mutable access to the binds
  /// scoping over it, so callers can erase or rename individual binders.
  pub fn children_mut_with_binds_mut(
    &mut self,
  ) -> impl DoubleEndedIterator<Item = (&mut Term, impl DoubleEndedIterator<Item = &mut Option<Name>>)> {
    multi_iterator!(ChildrenIter { Zero, One, Two, Vec, Swt });
    multi_iterator!(BindsIter { Zero, One, Pat });
    match self {
      Term::Swt { arg, bnd, with_bnd, with_arg, pred, arms } => {
        // At this stage switches must already be bare (no binds/with/pred).
        debug_assert!(bnd.is_none());
        debug_assert!(with_bnd.is_empty());
        debug_assert!(with_arg.is_empty());
        debug_assert!(pred.is_none());
        ChildrenIter::Swt(
          [(arg.as_mut(), BindsIter::Zero([]))]
            .into_iter()
            .chain(arms.iter_mut().map(|x| (x, BindsIter::Zero([])))),
        )
      }
      Term::Fan { els, .. } | Term::List { els } => {
        ChildrenIter::Vec(els.iter_mut().map(|el| (el, BindsIter::Zero([]))))
      }
      Term::Use { nam, val, nxt } => {
        ChildrenIter::Two([(val.as_mut(), BindsIter::Zero([])), (nxt.as_mut(), BindsIter::One([nam]))])
      }
      Term::Let { pat, val, nxt, .. } | Term::Ask { pat, val, nxt, .. } => ChildrenIter::Two([
        (val.as_mut(), BindsIter::Zero([])),
        (nxt.as_mut(), BindsIter::Pat(pat.binds_mut())),
      ]),
      Term::App { fun: fst, arg: snd, .. } | Term::Oper { fst, snd, .. } => {
        ChildrenIter::Two([(fst.as_mut(), BindsIter::Zero([])), (snd.as_mut(), BindsIter::Zero([]))])
      }
      Term::Lam { pat, bod, .. } => ChildrenIter::One([(bod.as_mut(), BindsIter::Pat(pat.binds_mut()))]),
      Term::With { bod, .. } => ChildrenIter::One([(bod.as_mut(), BindsIter::Zero([]))]),
      // Leaves: no children to yield.
      Term::Var { .. }
      | Term::Link { .. }
      | Term::Num { .. }
      | Term::Nat { .. }
      | Term::Str { .. }
      | Term::Ref { .. }
      | Term::Era
      | Term::Err => ChildrenIter::Zero([]),
      Term::Mat { .. } => unreachable!("'match' should be removed in earlier pass"),
      Term::Fold { .. } => unreachable!("'fold' should be removed in earlier pass"),
      Term::Bend { .. } => unreachable!("'bend' should be removed in earlier pass"),
      Term::Open { .. } => unreachable!("'open' should be removed in earlier pass"),
      Term::Def { .. } => unreachable!("'def' should be removed in earlier pass"),
    }
  }
}
| rust | Apache-2.0 | d184863f03e796d1d657958a51dd6dd331ade92d | 2026-01-04T15:41:39.511038Z | false |
HigherOrderCO/Bend | https://github.com/HigherOrderCO/Bend/blob/d184863f03e796d1d657958a51dd6dd331ade92d/src/fun/transform/expand_generated.rs | src/fun/transform/expand_generated.rs | use std::collections::{BTreeSet, HashMap, HashSet};
use crate::{
fun::{Book, Name, Term},
maybe_grow,
};
/// Inlines references to compiler-generated definitions that are not
/// recursive. Used after readback.
impl Term {
  pub fn expand_generated(&mut self, book: &Book, recursive_defs: &RecursiveDefs) {
    maybe_grow(|| {
      // Decide first, then assign, so the borrow of `self` ends before the write.
      let expansion = match &*self {
        Term::Ref { nam } if nam.is_generated() && !recursive_defs.contains(nam) => {
          Some(book.defs.get(nam).unwrap().rule().body.clone())
        }
        _ => None,
      };
      if let Some(body) = expansion {
        *self = body;
      }
      for child in self.children_mut() {
        child.expand_generated(book, recursive_defs);
      }
    })
  }
}
/// Maps each definition name to the set of definitions it references.
type DepGraph = HashMap<Name, HashSet<Name>>;
/// Reference cycles found in the dependency graph, each as a path of names.
type Cycles = Vec<Vec<Name>>;
/// Names of definitions that take part in some reference cycle.
type RecursiveDefs = BTreeSet<Name>;
impl Book {
  /// Returns the names of all definitions that participate in a reference
  /// cycle, i.e. that are directly or mutually recursive.
  pub fn recursive_defs(&self) -> RecursiveDefs {
    let deps = book_def_deps(self);
    cycles(&deps).into_iter().flatten().collect()
  }
}
/// Finds all cycles in the dependency graph by running a DFS from every
/// not-yet-visited node.
fn cycles(deps: &DepGraph) -> Cycles {
  let mut found = vec![];
  let mut visited = HashSet::new();
  for root in deps.keys() {
    if visited.contains(root) {
      continue;
    }
    find_cycles(deps, root, &mut visited, &mut found);
  }
  found
}
/// Iterative DFS from `nam`, recording every cycle found via the path carried
/// on the stack. Reaching an already-visited node that appears in the current
/// path closes a cycle.
/// NOTE(review): nodes visited via a previous root are skipped entirely, so
/// cycles reachable only through them are not re-reported from this root —
/// confirm this is sufficient for recursion detection.
fn find_cycles(deps: &DepGraph, nam: &Name, visited: &mut HashSet<Name>, cycles: &mut Cycles) {
  let mut stack = vec![(nam.clone(), vec![])];
  while let Some((current, path)) = stack.pop() {
    if visited.contains(&current) {
      // Check if the current ref is already in the stack, which indicates a cycle.
      if let Some(cycle_start) = path.iter().position(|n| n == &current) {
        // If found, add the cycle to the cycles vector.
        cycles.push(path[cycle_start..].to_vec());
      }
      continue;
    }
    // If the ref has not been visited yet, mark it as visited.
    visited.insert(current.clone());
    // Add the current ref to the stack to keep track of the path.
    let mut new_path = path.clone();
    new_path.push(current.clone());
    // Search for cycles from each dependency.
    if let Some(deps) = deps.get(&current) {
      for dep in deps {
        stack.push((dep.clone(), new_path.clone()));
      }
    }
  }
}
fn book_def_deps(book: &Book) -> DepGraph {
book.defs.iter().map(|(nam, def)| (nam.clone(), def_deps(def))).collect()
}
/// Collects the names of all definitions referenced by `def`'s body.
fn def_deps(def: &crate::fun::Definition) -> HashSet<Name> {
  let mut refs = HashSet::new();
  // Iterative traversal over the term tree, gathering every `Ref` name.
  let mut to_visit = vec![&def.rule().body];
  while let Some(term) = to_visit.pop() {
    if let Term::Ref { nam } = term {
      refs.insert(nam.clone());
    }
    to_visit.extend(term.children());
  }
  refs
}
| rust | Apache-2.0 | d184863f03e796d1d657958a51dd6dd331ade92d | 2026-01-04T15:41:39.511038Z | false |
HigherOrderCO/Bend | https://github.com/HigherOrderCO/Bend/blob/d184863f03e796d1d657958a51dd6dd331ade92d/src/fun/transform/resugar_string.rs | src/fun/transform/resugar_string.rs | use crate::{
fun::{builtins, Name, Num, Pattern, Tag, Term},
maybe_grow, AdtEncoding,
};
impl Term {
  /// Converts lambda-encoded strings ending with String/nil to string literals.
  pub fn resugar_strings(&mut self, adt_encoding: AdtEncoding) {
    match adt_encoding {
      AdtEncoding::Scott => self.try_resugar_strings_with(Self::resugar_strings_scott),
      AdtEncoding::NumScott => self.try_resugar_strings_with(Self::resugar_strings_num_scott),
    }
  }
  /// Converts encoded strings to string literals using the provided extraction function.
  fn try_resugar_strings_with(&mut self, extract_fn: fn(&Term) -> Option<(char, &Term)>) {
    maybe_grow(|| {
      // Try to resugar nil or cons patterns. If unsuccessful, recurse into child terms.
      if !self.try_resugar_strings_nil() && !self.try_resugar_strings_cons(extract_fn) {
        for child in self.children_mut() {
          child.try_resugar_strings_with(extract_fn);
        }
      }
    })
  }
  /// Attempts to resugar a nil term (String/nil) to an empty string literal.
  fn try_resugar_strings_nil(&mut self) -> bool {
    matches!(self, Term::Ref { nam } if nam == builtins::SNIL).then(|| *self = Term::str("")).is_some()
  }
  /// Attempts to resugar a cons term to a string literal.
  fn try_resugar_strings_cons(&mut self, extract_fn: fn(&Term) -> Option<(char, &Term)>) -> bool {
    // Try the encoding-specific shape first, then the plain `(String/cons ..)` shape.
    self
      .try_resugar_strings_cons_with(extract_fn)
      .or_else(|| self.try_resugar_strings_cons_common())
      .map(|str| *self = Term::str(&str))
      .is_some()
  }
  /// Attempts to resugar a cons term using the provided extraction function.
  fn try_resugar_strings_cons_with(&self, extract_fn: fn(&Term) -> Option<(char, &Term)>) -> Option<String> {
    extract_fn(self)
      .and_then(|(head_char, tail)| Self::build_strings_common(tail, head_char.to_string(), extract_fn))
  }
  /// Attempts to resugar a cons term using the common extraction method.
  fn try_resugar_strings_cons_common(&self) -> Option<String> {
    if let Term::App { tag: Tag::Static, fun, arg: tail } = self {
      if let Term::App { tag: Tag::Static, fun: inner_fun, arg: head } = fun.as_ref() {
        if let (Term::Ref { nam }, Term::Num { val: Num::U24(head_val) }) =
          (inner_fun.as_ref(), head.as_ref())
        {
          if nam == builtins::SCONS {
            let head_char = char::from_u32(*head_val).unwrap_or(char::REPLACEMENT_CHARACTER);
            return Self::build_strings_common(tail, head_char.to_string(), Self::extract_strings_common);
          }
        }
      }
    }
    None
  }
  /// Builds a string from a term structure using the provided extraction function.
  fn build_strings_common(
    term: &Term,
    mut s: String,
    extract_fn: fn(&Term) -> Option<(char, &Term)>,
  ) -> Option<String> {
    maybe_grow(|| {
      let mut current = term;
      loop {
        match current {
          // If we reach a nil term, we've completed the string
          Term::Ref { nam } if nam == builtins::SNIL => return Some(s),
          _ => {
            // Extract the next character and continue building the string
            let (head, next) = extract_fn(current).or_else(|| Self::extract_strings_common(current))?;
            s.push(head);
            current = next;
          }
        }
      }
    })
  }
  /// Extracts a character and the remaining term from a Scott-encoded string term.
  /// The structure of this function mimics the shape of the AST for easier visualization.
  fn resugar_strings_scott(term: &Term) -> Option<(char, &Term)> {
    if let Term::Lam { tag: Tag::Static, pat: outer_pat, bod } = term {
      if let Pattern::Var(None) = outer_pat.as_ref() {
        if let Term::Lam { tag: Tag::Static, pat: inner_pat, bod: inner_bod } = bod.as_ref() {
          if let Pattern::Var(Some(var_lam)) = inner_pat.as_ref() {
            if let Term::App { tag: Tag::Static, fun, arg: tail } = inner_bod.as_ref() {
              if let Term::App { tag: Tag::Static, fun: inner_fun, arg: head } = fun.as_ref() {
                if let (Term::Var { nam: var_app }, Term::Num { val: Num::U24(head_val) }) =
                  (inner_fun.as_ref(), head.as_ref())
                {
                  if var_lam == var_app {
                    // Code points outside the valid range decode to U+FFFD.
                    let head_char = char::from_u32(*head_val).unwrap_or(char::REPLACEMENT_CHARACTER);
                    return Some((head_char, tail));
                  }
                }
              }
            }
          }
        }
      }
    }
    None
  }
  /// Extracts a character and the remaining term from a NumScott-encoded string term.
  /// The structure of this function mimics the shape of the AST for easier visualization.
  fn resugar_strings_num_scott(term: &Term) -> Option<(char, &Term)> {
    if let Term::Lam { tag: Tag::Static, pat, bod } = term {
      if let Pattern::Var(Some(var_lam)) = pat.as_ref() {
        if let Term::App { tag: Tag::Static, fun, arg: tail } = bod.as_ref() {
          if let Term::App { tag: Tag::Static, fun, arg: head } = fun.as_ref() {
            if let Term::App { tag: Tag::Static, fun, arg } = fun.as_ref() {
              if let (
                Term::Var { nam: var_app },
                Term::Ref { nam: Name(ref_nam) },
                Term::Num { val: Num::U24(head_val) },
              ) = (fun.as_ref(), arg.as_ref(), head.as_ref())
              {
                if var_lam == var_app && ref_nam == builtins::SCONS_TAG_REF {
                  let head_char = char::from_u32(*head_val).unwrap_or(char::REPLACEMENT_CHARACTER);
                  return Some((head_char, tail));
                }
              }
            }
          }
        }
      }
    }
    None
  }
  /// Extracts a character and the remaining term from a common-encoded string term.
  /// The structure of this function mimics the shape of the AST for easier visualization.
  fn extract_strings_common(term: &Term) -> Option<(char, &Term)> {
    if let Term::App { tag: Tag::Static, fun, arg: tail } = term {
      if let Term::App { tag: Tag::Static, fun, arg: head } = fun.as_ref() {
        if let (Term::Ref { nam }, Term::Num { val: Num::U24(head_val) }) = (fun.as_ref(), head.as_ref()) {
          if nam == builtins::SCONS {
            let head_char = char::from_u32(*head_val).unwrap_or(char::REPLACEMENT_CHARACTER);
            return Some((head_char, tail));
          }
        }
      }
    }
    None
  }
}
| rust | Apache-2.0 | d184863f03e796d1d657958a51dd6dd331ade92d | 2026-01-04T15:41:39.511038Z | false |
HigherOrderCO/Bend | https://github.com/HigherOrderCO/Bend/blob/d184863f03e796d1d657958a51dd6dd331ade92d/src/fun/transform/definition_pruning.rs | src/fun/transform/definition_pruning.rs | use crate::{
diagnostics::WarningType,
fun::{Book, Ctx, Name, SourceKind, Term},
maybe_grow,
};
use hvm::ast::{Net, Tree};
use std::collections::{hash_map::Entry, HashMap};
/// Why a definition is considered "used" during pruning.
#[derive(Clone, Copy, Debug, PartialEq)]
enum Used {
  /// Definition is accessible from the main entry point, should never be pruned.
  Main,
  /// Definition is not accessible from main, but is accessible from non-builtin definitions.
  NonBuiltin,
  /// Definition is not accessible from main, but is a user-defined constructor.
  Ctr,
}
/// For each definition name, the strongest reason it is considered used.
type Definitions = HashMap<Name, Used>;
impl Ctx<'_> {
  /// If `prune_all`, removes all unused definitions and adts starting from Main.
  /// Otherwise, prunes only the builtins not accessible from any non-built-in definition.
  ///
  /// Emits unused definition warnings.
  pub fn prune(&mut self, prune_all: bool) {
    let mut used = Definitions::new();
    // Get the functions that are accessible from the main entry point.
    if let Some(main) = &self.book.entrypoint {
      let def = self.book.defs.get(main).unwrap();
      used.insert(main.clone(), Used::Main);
      for rule in def.rules.iter() {
        self.book.find_used_definitions_from_term(&rule.body, Used::Main, &mut used);
      }
    }
    // Get the functions that are accessible from non-builtins.
    // Main-reachable defs keep their `Used::Main` marking (checked first).
    for def in self.book.defs.values() {
      if !def.is_builtin() && !(used.get(&def.name) == Some(&Used::Main)) {
        if self.book.ctrs.contains_key(&def.name) {
          used.insert(def.name.clone(), Used::Ctr);
        } else {
          used.insert(def.name.clone(), Used::NonBuiltin);
        }
        for rule in def.rules.iter() {
          self.book.find_used_definitions_from_term(&rule.body, Used::NonBuiltin, &mut used);
        }
      }
    }
    for def in self.book.hvm_defs.values() {
      if !def.source.is_builtin() && !(used.get(&def.name) == Some(&Used::Main)) {
        used.insert(def.name.clone(), Used::NonBuiltin);
        self.book.find_used_definitions_from_hvm_net(&def.body, Used::NonBuiltin, &mut used);
      }
    }
    // Removes a definition from whichever table (regular or native HVM) holds it.
    fn rm_def(book: &mut Book, def_name: &Name) {
      if book.defs.contains_key(def_name) {
        book.defs.shift_remove(def_name);
      } else if book.hvm_defs.contains_key(def_name) {
        book.hvm_defs.shift_remove(def_name);
      } else {
        unreachable!()
      }
    }
    // Remove unused definitions.
    // Names/sources are collected up front so `rm_def` may mutate the book.
    let defs = self.book.defs.iter().map(|(nam, def)| (nam.clone(), def.source.clone()));
    let hvm_defs = self.book.hvm_defs.iter().map(|(nam, def)| (nam.clone(), def.source.clone()));
    let names = defs.chain(hvm_defs).collect::<Vec<_>>();
    for (def, src) in names {
      if let Some(use_) = used.get(&def) {
        match use_ {
          Used::Main => {
            // Used by the main entry point, never pruned;
          }
          Used::NonBuiltin => {
            // Used by a non-builtin definition.
            // Prune if `prune_all`, otherwise show a warning.
            if prune_all {
              rm_def(self.book, &def);
            } else if !def.is_generated() && !matches!(src.kind, SourceKind::Generated) {
              self.info.add_function_warning(
                "Definition is unused.",
                WarningType::UnusedDefinition,
                def,
                src,
              );
            }
          }
          Used::Ctr => {
            // Unused, but a user-defined constructor.
            // Prune if `prune_all`, otherwise nothing.
            if prune_all {
              rm_def(self.book, &def);
            } else {
              // Don't show warning if it's a user-defined constructor.
            }
          }
        }
      } else {
        // Unused builtin, can always be pruned.
        rm_def(self.book, &def);
      }
    }
  }
}
impl Book {
  /// Finds all used definitions on the book, starting from the given term.
  fn find_used_definitions_from_term(&self, term: &Term, used: Used, uses: &mut Definitions) {
    maybe_grow(|| {
      let mut to_find = vec![term];
      while let Some(term) = to_find.pop() {
        match term {
          Term::Ref { nam: def_name } => self.insert_used(def_name, used, uses),
          Term::List { .. } => {
            // List literals desugar to the List constructors, so those count as used.
            self.insert_used(&Name::new(crate::fun::builtins::LCONS), used, uses);
            self.insert_used(&Name::new(crate::fun::builtins::LNIL), used, uses);
          }
          Term::Str { .. } => {
            // String literals desugar to the String constructors.
            self.insert_used(&Name::new(crate::fun::builtins::SCONS), used, uses);
            self.insert_used(&Name::new(crate::fun::builtins::SNIL), used, uses);
          }
          _ => {}
        }
        for child in term.children() {
          to_find.push(child);
        }
      }
    })
  }
  /// Finds all definitions referenced from a native HVM net (root plus redex bag).
  fn find_used_definitions_from_hvm_net(&self, net: &Net, used: Used, uses: &mut Definitions) {
    maybe_grow(|| {
      let mut to_find = [&net.root]
        .into_iter()
        .chain(net.rbag.iter().flat_map(|(_, lft, rgt)| [lft, rgt]))
        .collect::<Vec<_>>();
      while let Some(term) = to_find.pop() {
        match term {
          Tree::Ref { nam } => self.insert_used(&Name::new(nam), used, uses),
          Tree::Con { fst, snd }
          | Tree::Dup { fst, snd }
          | Tree::Opr { fst, snd }
          | Tree::Swi { fst, snd } => {
            to_find.push(fst);
            to_find.push(snd);
          }
          Tree::Era | Tree::Var { .. } | Tree::Num { .. } => {}
        }
      }
    })
  }
  /// Marks `def_name` as used and transitively marks everything it references.
  /// Only the first marking wins (a vacant entry), so each def is walked once.
  fn insert_used(&self, def_name: &Name, used: Used, uses: &mut Definitions) {
    if let Entry::Vacant(e) = uses.entry(def_name.clone()) {
      e.insert(used);
      // This needs to be done for each rule in case the pass it's
      // ran from has not encoded the pattern match.
      // E.g.: the `flatten_rules` golden test
      if let Some(def) = self.defs.get(def_name) {
        for rule in &def.rules {
          self.find_used_definitions_from_term(&rule.body, used, uses);
        }
      } else if let Some(def) = self.hvm_defs.get(def_name) {
        self.find_used_definitions_from_hvm_net(&def.body, used, uses);
      } else {
        unreachable!()
      }
    }
  }
}
| rust | Apache-2.0 | d184863f03e796d1d657958a51dd6dd331ade92d | 2026-01-04T15:41:39.511038Z | false |
HigherOrderCO/Bend | https://github.com/HigherOrderCO/Bend/blob/d184863f03e796d1d657958a51dd6dd331ade92d/src/fun/transform/expand_main.rs | src/fun/transform/expand_main.rs | use crate::{
fun::{Book, Name, Pattern, Term},
maybe_grow,
};
use std::collections::HashMap;
impl Book {
  /// Expands the main function so that it is not just a reference.
  /// While technically correct, directly returning a reference is never what users want.
  pub fn expand_main(&mut self) {
    if self.entrypoint.is_none() {
      return;
    }
    let main = self.defs.get_mut(self.entrypoint.as_ref().unwrap()).unwrap();
    // Take the body out so the book can be borrowed immutably while expanding.
    let mut main_bod = std::mem::take(&mut main.rule_mut().body);
    // Seed the seen-set with main itself so recursive refs expand at most once.
    let mut seen = vec![self.entrypoint.as_ref().unwrap().clone()];
    main_bod.expand_ref_return(self, &mut seen, &mut 0);
    // Undo the `float_combinators` pass for main, to recover the strictness of the main function.
    main_bod.expand_floated_combinators(self);
    // Re-fetch main: the expansion above borrowed the whole book in between.
    let main = self.defs.get_mut(self.entrypoint.as_ref().unwrap()).unwrap();
    main.rule_mut().body = main_bod;
  }
}
impl Term {
  /// Expands references in the main function that are in "return" position.
  ///
  /// This applies to:
  /// - When main returns a reference.
  /// - When main returns a lambda whose body is a reference.
  /// - When main returns a pair or superposition and one of its elements is a reference.
  ///
  /// Only expand recursive functions once.
  fn expand_ref_return(&mut self, book: &Book, seen: &mut Vec<Name>, globals_count: &mut usize) {
    maybe_grow(|| match self {
      Term::Ref { nam } => {
        if seen.contains(nam) {
          // Don't expand recursive references
        } else if let Some(def) = book.defs.get(nam) {
          // Regular function, expand
          seen.push(nam.clone());
          let mut body = def.rule().body.clone();
          // Rename unscoped vars in the copied body so that multiple
          // expansions of the same definition don't clash.
          body.rename_unscoped(globals_count, &mut HashMap::new());
          *self = body;
          self.expand_ref_return(book, seen, globals_count);
          seen.pop().unwrap();
        } else {
          // Not a regular function, don't expand
        }
      }
      Term::Fan { els, .. } | Term::List { els } => {
        for el in els {
          el.expand_ref_return(book, seen, globals_count);
        }
      }
      // If an application is just a constructor, we expand the arguments.
      // That way we can write programs like
      // `main = [do_thing1, do_thing2, do_thing3]`
      Term::App { .. } => {
        let (fun, args) = self.multi_arg_app();
        if let Term::Ref { nam } = fun {
          if book.ctrs.contains_key(nam) {
            for arg in args {
              // If the argument is a 0-ary constructor, we don't need to expand it.
              if let Term::Ref { nam } = arg {
                if let Some(adt_nam) = book.ctrs.get(nam) {
                  if book.adts.get(adt_nam).unwrap().ctrs.get(nam).unwrap().fields.is_empty() {
                    continue;
                  }
                }
              }
              // Otherwise, we expand the argument.
              arg.expand_ref_return(book, seen, globals_count);
            }
          }
        }
      }
      // Binders: the body is still in "return" position, keep expanding there.
      Term::Lam { bod: nxt, .. }
      | Term::With { bod: nxt, .. }
      | Term::Open { bod: nxt, .. }
      | Term::Let { nxt, .. }
      | Term::Ask { nxt, .. }
      | Term::Use { nxt, .. } => nxt.expand_ref_return(book, seen, globals_count),
      // Anything else is not a reference in return position; leave untouched.
      Term::Var { .. }
      | Term::Link { .. }
      | Term::Num { .. }
      | Term::Nat { .. }
      | Term::Str { .. }
      | Term::Oper { .. }
      | Term::Mat { .. }
      | Term::Swt { .. }
      | Term::Fold { .. }
      | Term::Bend { .. }
      | Term::Def { .. }
      | Term::Era
      | Term::Err => {}
    })
  }

  /// Inlines the bodies of definitions created by the `float_combinators`
  /// pass back into this term (recognized by the separator in their name).
  fn expand_floated_combinators(&mut self, book: &Book) {
    maybe_grow(|| {
      if let Term::Ref { nam } = self {
        if nam.contains(super::float_combinators::NAME_SEP) {
          *self = book.defs.get(nam).unwrap().rule().body.clone();
        }
      }
      for child in self.children_mut() {
        child.expand_floated_combinators(book);
      }
    })
  }

  /// Read the term as an n-ary application.
  ///
  /// Returns the head of the application spine plus the collected arguments.
  /// Note: arguments are pushed outermost-first, i.e. the last-applied
  /// argument comes first in the returned Vec.
  fn multi_arg_app(&mut self) -> (&mut Term, Vec<&mut Term>) {
    fn go<'a>(term: &'a mut Term, args: &mut Vec<&'a mut Term>) -> &'a mut Term {
      match term {
        Term::App { fun, arg, .. } => {
          args.push(arg);
          go(fun, args)
        }
        _ => term,
      }
    }
    let mut args = vec![];
    let fun = go(self, &mut args);
    (fun, args)
  }
}
impl Term {
/// Since expanded functions can contain unscoped variables, and
/// unscoped variable names must be unique, we need to rename them
/// to avoid conflicts.
fn rename_unscoped(&mut self, unscoped_count: &mut usize, unscoped_map: &mut HashMap<Name, Name>) {
match self {
Term::Let { pat, .. } | Term::Lam { pat, .. } => pat.rename_unscoped(unscoped_count, unscoped_map),
Term::Link { nam } => rename_unscoped(nam, unscoped_count, unscoped_map),
_ => {
// Isn't an unscoped bind or use, do nothing, just recurse.
}
}
for child in self.children_mut() {
child.rename_unscoped(unscoped_count, unscoped_map);
}
}
}
impl Pattern {
  /// Renames any unscoped binds (`Chn`) in this pattern, then recurses
  /// into sub-patterns.
  fn rename_unscoped(&mut self, unscoped_count: &mut usize, unscoped_map: &mut HashMap<Name, Name>) {
    maybe_grow(|| {
      // Only `Chn` introduces an unscoped name; all other patterns just recurse.
      if let Pattern::Chn(nam) = self {
        rename_unscoped(nam, unscoped_count, unscoped_map);
      }
      for child in self.children_mut() {
        child.rename_unscoped(unscoped_count, unscoped_map);
      }
    })
  }
}
/// Generates a new name for an unscoped variable.
///
/// Reuses the replacement if this name was renamed before; otherwise mints
/// a fresh `name%N` and records the mapping.
fn rename_unscoped(nam: &mut Name, unscoped_count: &mut usize, unscoped_map: &mut HashMap<Name, Name>) {
  match unscoped_map.get(nam) {
    Some(new_nam) => *nam = new_nam.clone(),
    None => {
      let new_nam = Name::new(format!("{}%{}", nam, unscoped_count));
      unscoped_map.insert(nam.clone(), new_nam.clone());
      *unscoped_count += 1;
      *nam = new_nam;
    }
  }
}
| rust | Apache-2.0 | d184863f03e796d1d657958a51dd6dd331ade92d | 2026-01-04T15:41:39.511038Z | false |
HigherOrderCO/Bend | https://github.com/HigherOrderCO/Bend/blob/d184863f03e796d1d657958a51dd6dd331ade92d/src/fun/transform/encode_adts.rs | src/fun/transform/encode_adts.rs | use crate::{
fun::{Book, Definition, Name, Num, Pattern, Rule, Source, Term},
AdtEncoding,
};
impl Book {
  /// Defines a function for each constructor in each ADT in the book.
  ///
  /// Depending on `adt_encoding`, constructors are lowered to plain Scott
  /// encodings or to tagged NumScott encodings (the latter also generate a
  /// `Ctr/tag` definition holding the constructor's numeric tag).
  pub fn encode_adts(&mut self, adt_encoding: AdtEncoding) {
    let mut defs = vec![];
    for (_, adt) in self.adts.iter() {
      for (ctr_idx, (ctr_name, ctr)) in adt.ctrs.iter().enumerate() {
        let body = match adt_encoding {
          AdtEncoding::Scott => {
            // Only the Scott encoding needs the full list of constructor
            // names (one lambda per constructor), so collect it here rather
            // than unconditionally before the match.
            let ctrs: Vec<_> = adt.ctrs.keys().cloned().collect();
            encode_ctr_scott(ctr.fields.iter().map(|f| &f.nam), ctrs, ctr_name)
          }
          AdtEncoding::NumScott => {
            let tag = encode_num_scott_tag(ctr_idx as u32, ctr_name, adt.source.clone());
            defs.push((tag.name.clone(), tag.clone()));
            encode_ctr_num_scott(ctr.fields.iter().map(|f| &f.nam), &tag.name)
          }
        };
        let rules = vec![Rule { pats: vec![], body }];
        let def = Definition {
          name: ctr_name.clone(),
          typ: ctr.typ.clone(),
          check: true,
          rules,
          source: adt.source.clone(),
        };
        defs.push((ctr_name.clone(), def));
      }
    }
    self.defs.extend(defs);
  }
}
/// Builds the Scott encoding of a constructor:
/// `λfield1 .. λfieldn λCtr1 .. λCtrm (Ctr_i field1 .. fieldn)`
fn encode_ctr_scott<'a>(
  ctr_args: impl DoubleEndedIterator<Item = &'a Name> + Clone,
  ctrs: Vec<Name>,
  ctr_name: &Name,
) -> Term {
  let field_vars: Vec<Term> = ctr_args.clone().map(|nam| Term::Var { nam: nam.clone() }).collect();
  let app = Term::call(Term::Var { nam: ctr_name.clone() }, field_vars);
  let lam = Term::rfold_lams(app, ctrs.into_iter().map(Some));
  Term::rfold_lams(lam, ctr_args.map(|nam| Some(nam.clone())))
}
/// Builds the NumScott encoding of a constructor:
/// `λfield1 .. λfieldn λx (x Ctr/tag field1 .. fieldn)`
fn encode_ctr_num_scott<'a>(ctr_args: impl DoubleEndedIterator<Item = &'a Name> + Clone, tag: &str) -> Term {
  let scrut = Name::new("%x");
  // (x TAG a1 .. an)
  let body = Term::app(Term::Var { nam: scrut.clone() }, Term::r#ref(tag));
  let body = Term::call(body, ctr_args.clone().cloned().map(|nam| Term::Var { nam }));
  // λx (...)
  let body = Term::lam(Pattern::Var(Some(scrut)), body);
  // λa1 .. λan λx (...)
  Term::rfold_lams(body, ctr_args.cloned().map(Some))
}
/// Creates the generated `Ctr/tag` definition holding a constructor's
/// numeric tag value.
fn encode_num_scott_tag(tag: u32, ctr_name: &Name, source: Source) -> Definition {
  let body = Term::Num { val: Num::U24(tag) };
  let rules = vec![Rule { pats: vec![], body }];
  Definition::new_gen(Name::new(format!("{ctr_name}/tag")), rules, source, true)
}
| rust | Apache-2.0 | d184863f03e796d1d657958a51dd6dd331ade92d | 2026-01-04T15:41:39.511038Z | false |
HigherOrderCO/Bend | https://github.com/HigherOrderCO/Bend/blob/d184863f03e796d1d657958a51dd6dd331ade92d/src/fun/transform/mod.rs | src/fun/transform/mod.rs | pub mod apply_args;
pub mod definition_merge;
pub mod definition_pruning;
pub mod desugar_bend;
pub mod desugar_fold;
pub mod desugar_match_defs;
pub mod desugar_open;
pub mod desugar_use;
pub mod desugar_with_blocks;
pub mod encode_adts;
pub mod encode_match_terms;
pub mod expand_generated;
pub mod expand_main;
pub mod fix_match_defs;
pub mod fix_match_terms;
pub mod float_combinators;
pub mod lift_local_defs;
pub mod linearize_matches;
pub mod linearize_vars;
pub mod resolve_refs;
pub mod resolve_type_ctrs;
pub mod resugar_list;
pub mod resugar_string;
pub mod unique_names;
| rust | Apache-2.0 | d184863f03e796d1d657958a51dd6dd331ade92d | 2026-01-04T15:41:39.511038Z | false |
HigherOrderCO/Bend | https://github.com/HigherOrderCO/Bend/blob/d184863f03e796d1d657958a51dd6dd331ade92d/src/fun/transform/fix_match_defs.rs | src/fun/transform/fix_match_defs.rs | use crate::{
diagnostics::Diagnostics,
fun::{Adts, Constructors, Ctx, Pattern, Rule, Term},
};
impl Ctx<'_> {
  /// Ensures every pattern matching definition has a correct left-hand side.
  ///
  /// Does not check exhaustiveness of rules or type mismatches
  /// (inter-constructor/type properties).
  pub fn fix_match_defs(&mut self) -> Result<(), Diagnostics> {
    for def in self.book.defs.values_mut() {
      let arity = def.arity();
      let mut errs = vec![];
      for rule in def.rules.iter_mut() {
        rule.fix_match_defs(arity, &self.book.ctrs, &self.book.adts, &mut errs);
      }
      // Attach all collected messages to this definition.
      for err in errs {
        self.info.add_function_error(err, def.name.clone(), def.source.clone());
      }
    }
    self.info.fatal(())
  }
}
impl Rule {
  /// Checks this rule's arity and patterns against the definition,
  /// pushing any error messages into `errs`.
  fn fix_match_defs(&mut self, def_arity: usize, ctrs: &Constructors, adts: &Adts, errs: &mut Vec<String>) {
    let rule_arity = self.arity();
    if rule_arity != def_arity {
      errs.push(format!(
        "Incorrect pattern matching rule arity. Expected {} args, found {}.",
        def_arity, rule_arity
      ));
    }
    for pat in self.pats.iter_mut() {
      pat.resolve_pat(ctrs);
      pat.check_good_ctr(ctrs, adts, errs);
    }
    self.body.fix_match_defs(ctrs, adts, errs);
  }
}
impl Term {
  /// Recursively fixes local `def`s nested inside this term.
  fn fix_match_defs(&mut self, ctrs: &Constructors, adts: &Adts, errs: &mut Vec<String>) {
    if let Term::Def { def, nxt } = self {
      let def_arity = def.arity();
      for rule in def.rules.iter_mut() {
        rule.fix_match_defs(def_arity, ctrs, adts, errs);
      }
      nxt.fix_match_defs(ctrs, adts, errs);
    } else {
      for child in self.children_mut() {
        child.fix_match_defs(ctrs, adts, errs);
      }
    }
  }
}
impl Pattern {
  /// If a var pattern actually refers to an ADT constructor, convert it into a constructor pattern.
  fn resolve_pat(&mut self, ctrs: &Constructors) {
    if let Pattern::Var(Some(nam)) = self {
      if ctrs.contains_key(nam) {
        let nam = std::mem::take(nam);
        *self = Pattern::Ctr(nam, vec![]);
      }
    }
    for child in self.children_mut() {
      child.resolve_pat(ctrs);
    }
  }

  /// Check that ADT constructor pats are correct, meaning defined in a `data` and with correct arity.
  fn check_good_ctr(&self, ctrs: &Constructors, adts: &Adts, errs: &mut Vec<String>) {
    if let Pattern::Ctr(nam, args) = self {
      match ctrs.get(nam) {
        Some(adt) => {
          let expected = adts[adt].ctrs[nam].fields.len();
          let found = args.len();
          if expected != found {
            errs.push(format!(
              "Incorrect arity for constructor '{}' of type '{}' in pattern matching rule. Expected {} fields, found {}",
              nam, adt, expected, found
            ));
          }
        }
        None => errs.push(format!("Unbound constructor '{nam}' in pattern matching rule.")),
      }
    }
    for child in self.children() {
      child.check_good_ctr(ctrs, adts, errs);
    }
  }
}
| rust | Apache-2.0 | d184863f03e796d1d657958a51dd6dd331ade92d | 2026-01-04T15:41:39.511038Z | false |
HigherOrderCO/Bend | https://github.com/HigherOrderCO/Bend/blob/d184863f03e796d1d657958a51dd6dd331ade92d/src/fun/transform/resolve_type_ctrs.rs | src/fun/transform/resolve_type_ctrs.rs | use crate::{
diagnostics::Diagnostics,
fun::{Adts, Ctx, Type},
maybe_grow,
};
impl Ctx<'_> {
  /// Resolves type constructors in the book.
  pub fn resolve_type_ctrs(&mut self) -> Result<(), Diagnostics> {
    // Resolve the type of every definition.
    for def in self.book.defs.values_mut() {
      let res = def.typ.resolve_type_ctrs(&self.book.adts);
      self.info.take_rule_err(res, def.name.clone());
    }
    // Clone the ADT table so we can read it while mutating the ADTs themselves.
    let adts = self.book.adts.clone();
    for adt in self.book.adts.values_mut() {
      for ctr in adt.ctrs.values_mut() {
        self.info.take_rule_err(ctr.typ.resolve_type_ctrs(&adts), ctr.name.clone());
        for field in ctr.fields.iter_mut() {
          self.info.take_rule_err(field.typ.resolve_type_ctrs(&adts), ctr.name.clone());
        }
      }
    }
    self.info.fatal(())
  }
}
impl Type {
  /// Resolves type constructors in the type.
  ///
  /// Turns `Type::Var`s whose name matches a declared ADT into zero-argument
  /// `Type::Ctr`s, and errors on `Type::Ctr`s naming no known ADT.
  pub fn resolve_type_ctrs(&mut self, adts: &Adts) -> Result<(), String> {
    maybe_grow(|| {
      match self {
        Type::Var(nam) => {
          // If the variable actually refers to a type, we change the type to a constructor.
          if adts.contains_key(nam) {
            *self = Type::Ctr(nam.clone(), vec![]);
          }
        }
        Type::Ctr(name, args) => {
          if !adts.contains_key(name) {
            return Err(format!("Found unknown type constructor '{name}'."));
          }
          for arg in args {
            arg.resolve_type_ctrs(adts)?;
          }
        }
        Type::Tup(els) => {
          for el in els {
            el.resolve_type_ctrs(adts)?;
          }
        }
        Type::Arr(lft, rgt) => {
          lft.resolve_type_ctrs(adts)?;
          rgt.resolve_type_ctrs(adts)?;
        }
        // Number/Integer wrap an inner type that may itself mention ADTs.
        Type::Number(t) | Type::Integer(t) => t.resolve_type_ctrs(adts)?,
        // Leaf types: nothing to resolve.
        Type::Any | Type::Hole | Type::None | Type::U24 | Type::I24 | Type::F24 => {}
      }
      Ok(())
    })
  }
}
| rust | Apache-2.0 | d184863f03e796d1d657958a51dd6dd331ade92d | 2026-01-04T15:41:39.511038Z | false |
HigherOrderCO/Bend | https://github.com/HigherOrderCO/Bend/blob/d184863f03e796d1d657958a51dd6dd331ade92d/src/fun/transform/definition_merge.rs | src/fun/transform/definition_merge.rs | use crate::{
fun::{Book, Definition, Name, Rule, Term},
maybe_grow,
};
use indexmap::{IndexMap, IndexSet};
use itertools::Itertools;
use std::collections::BTreeMap;
/// Separator used to join the names of merged definitions into the name of the combined definition.
pub const MERGE_SEPARATOR: &str = "__M_";
impl Book {
  /// Merges definitions that have the same structure into one definition.
  /// Expects variables to be linear.
  ///
  /// Some of the origins of the rules will be lost in this stage,
  /// Should not be preceded by passes that cares about the origins.
  pub fn merge_definitions(&mut self) {
    let defs: Vec<_> = self.defs.keys().cloned().collect();
    self.merge(defs.into_iter());
  }

  /// Checks and merges identical definitions given by `defs`.
  /// We never merge the entrypoint function with something else.
  fn merge(&mut self, defs: impl Iterator<Item = Name>) {
    let name = self.entrypoint.clone();
    // Sets of definitions that are identical, indexed by the body term.
    let equal_terms =
      self.collect_terms(defs.filter(|def_name| !name.as_ref().is_some_and(|m| m == def_name)));
    // Map of old name to new merged name
    let mut name_map = BTreeMap::new();
    for (term, equal_defs) in equal_terms {
      // def1_$_def2_$_def3
      let new_name = Name::new(equal_defs.iter().join(MERGE_SEPARATOR));
      if equal_defs.len() > 1 {
        // Merging some defs
        // The source of the generated definition will be based on the first one we get from `equal_defs`.
        // In the future, we might want to change this to point to every source of every definition
        // it's based on.
        // This could be done by having SourceKind::Generated contain a Vec<Source> or Vec<Definition>.
        let any_def_name = equal_defs.iter().next().unwrap(); // we know we can unwrap since equal_defs.len() > 1
        // Add the merged def
        let source = self.defs[any_def_name].source.clone();
        let rules = vec![Rule { pats: vec![], body: term }];
        // Note: This will erase types, so type checking needs to come before this.
        let new_def = Definition::new_gen(new_name.clone(), rules, source, false);
        self.defs.insert(new_name.clone(), new_def);
        // Remove the old ones and write the map of old names to new ones.
        for name in equal_defs {
          self.defs.swap_remove(&name);
          name_map.insert(name, new_name.clone());
        }
      } else {
        // Not merging, just put the body back
        let def_name = equal_defs.into_iter().next().unwrap();
        let def = self.defs.get_mut(&def_name).unwrap();
        def.rule_mut().body = term;
      }
    }
    // Redirecting refs may make more definitions identical, so this
    // recursively re-merges until a fixed point.
    self.update_refs(&name_map);
  }

  /// Takes the body of each given definition, grouping definition names
  /// by identical bodies.
  fn collect_terms(&mut self, def_entries: impl Iterator<Item = Name>) -> IndexMap<Term, IndexSet<Name>> {
    let mut equal_terms: IndexMap<Term, IndexSet<Name>> = IndexMap::new();
    for def_name in def_entries {
      let def = self.defs.get_mut(&def_name).unwrap();
      let term = std::mem::take(&mut def.rule_mut().body);
      equal_terms.entry(term).or_default().insert(def_name);
    }
    equal_terms
  }

  /// Rewrites references according to `name_map` and re-merges any
  /// definitions whose bodies changed as a result.
  fn update_refs(&mut self, name_map: &BTreeMap<Name, Name>) {
    let mut updated_defs = Vec::new();
    for def in self.defs.values_mut() {
      if Term::subst_ref_to_ref(&mut def.rule_mut().body, name_map) {
        updated_defs.push(def.name.clone());
      }
    }
    if !updated_defs.is_empty() {
      self.merge(updated_defs.into_iter());
    }
  }
}
impl Term {
  /// Performs reference substitution within a term replacing any references found in
  /// `ref_map` with their corresponding targets.
  ///
  /// Returns whether any substitution actually took place.
  pub fn subst_ref_to_ref(term: &mut Term, ref_map: &BTreeMap<Name, Name>) -> bool {
    maybe_grow(|| {
      if let Term::Ref { nam: def_name } = term {
        match ref_map.get(def_name) {
          Some(target_name) => {
            *def_name = target_name.clone();
            true
          }
          None => false,
        }
      } else {
        let mut changed = false;
        for child in term.children_mut() {
          changed |= Term::subst_ref_to_ref(child, ref_map);
        }
        changed
      }
    })
  }
}
| rust | Apache-2.0 | d184863f03e796d1d657958a51dd6dd331ade92d | 2026-01-04T15:41:39.511038Z | false |
HigherOrderCO/Bend | https://github.com/HigherOrderCO/Bend/blob/d184863f03e796d1d657958a51dd6dd331ade92d/src/fun/transform/lift_local_defs.rs | src/fun/transform/lift_local_defs.rs | use std::collections::BTreeSet;
use indexmap::IndexMap;
use crate::{
fun::{Book, Definition, Name, Pattern, Rule, Term},
maybe_grow,
};
impl Book {
  /// Lifts every local `def` out of its enclosing definition into a new
  /// top-level definition, adding the lifted defs to the book at the end.
  pub fn lift_local_defs(&mut self) {
    let mut lifted = IndexMap::new();
    for (name, def) in self.defs.iter_mut() {
      // Counter so sibling local defs get distinct generated names.
      let mut gen = 0;
      for rule in def.rules.iter_mut() {
        rule.body.lift_local_defs(name, def.check, &mut lifted, &mut gen);
      }
    }
    self.defs.extend(lifted);
  }
}
impl Rule {
pub fn binds(&self) -> impl DoubleEndedIterator<Item = &Option<Name>> + Clone {
self.pats.iter().flat_map(Pattern::binds)
}
}
impl Term {
  /// Replaces each local `def` in this term with a `use` of a lifted
  /// top-level definition, accumulating the new definitions in `defs`.
  ///
  /// `gen` numbers the lifted definitions so sibling local defs with the
  /// same name get distinct top-level names.
  pub fn lift_local_defs(
    &mut self,
    parent: &Name,
    check: bool,
    defs: &mut IndexMap<Name, Definition>,
    gen: &mut usize,
  ) {
    maybe_grow(|| match self {
      Term::Def { def, nxt } => {
        let local_name = Name::new(format!("{}__local_{}_{}", parent, gen, def.name));
        // First lift defs nested inside this one, naming them under `local_name`.
        for rule in def.rules.iter_mut() {
          rule.body.lift_local_defs(&local_name, check, defs, gen);
        }
        nxt.lift_local_defs(parent, check, defs, gen);
        *gen += 1;
        // Defs already lifted out of this one (they share the name prefix);
        // they must not count as captured free variables.
        let inner_defs =
          defs.keys().filter(|name| name.starts_with(local_name.as_ref())).cloned().collect::<BTreeSet<_>>();
        let (r#use, fvs, mut rules) =
          gen_use(inner_defs, &local_name, &def.name, nxt, std::mem::take(&mut def.rules));
        let source = std::mem::take(&mut def.source);
        *self = r#use;
        // Bind the captured free variables as leading lambdas on the lifted def.
        apply_closure(&mut rules, &fvs);
        let new_def = Definition::new_gen(local_name.clone(), rules, source, check);
        defs.insert(local_name.clone(), new_def);
      }
      _ => {
        for child in self.children_mut() {
          child.lift_local_defs(parent, check, defs, gen);
        }
      }
    })
  }
}
/// Builds the `use` node that replaces a lifted local def, plus its rewritten rules.
///
/// Returns:
/// - a `use` term binding `nam` to a call of the lifted definition applied
///   to every captured free variable,
/// - the set of captured free variables (the closure),
/// - the rules, each rewritten so uses of `nam` inside them resolve to the
///   same call (allowing recursion).
fn gen_use(
  inner_defs: BTreeSet<Name>,
  local_name: &Name,
  nam: &Name,
  nxt: &mut Box<Term>,
  mut rules: Vec<Rule>,
) -> (Term, BTreeSet<Name>, Vec<Rule>) {
  // Free vars of all rule bodies, minus already-lifted inner defs,
  // minus the rules' own pattern binds, minus the def's own name.
  let mut fvs = BTreeSet::<Name>::new();
  for rule in rules.iter() {
    fvs.extend(rule.body.free_vars().into_keys().collect::<BTreeSet<_>>());
  }
  fvs.retain(|fv| !inner_defs.contains(fv));
  for rule in rules.iter() {
    for bind in rule.binds().flatten() {
      fvs.remove(bind);
    }
  }
  fvs.remove(nam);
  // The call that stands for the local def: the lifted ref applied to the closure.
  let call = Term::call(
    Term::Ref { nam: local_name.clone() },
    fvs.iter().cloned().map(|nam| Term::Var { nam }).collect::<Vec<_>>(),
  );
  for rule in rules.iter_mut() {
    let slf = std::mem::take(&mut rule.body);
    rule.body = Term::Use { nam: Some(nam.clone()), val: Box::new(call.clone()), nxt: Box::new(slf) };
  }
  let r#use = Term::Use { nam: Some(nam.clone()), val: Box::new(call.clone()), nxt: std::mem::take(nxt) };
  (r#use, fvs, rules)
}
/// Prepends lambdas binding the captured free variables to each rule body.
fn apply_closure(rules: &mut [Rule], fvs: &BTreeSet<Name>) {
  for rule in rules.iter_mut() {
    let body = std::mem::take(&mut rule.body);
    rule.body = Term::rfold_lams(body, fvs.iter().map(|fv| Some(fv.clone())));
  }
}
| rust | Apache-2.0 | d184863f03e796d1d657958a51dd6dd331ade92d | 2026-01-04T15:41:39.511038Z | false |
HigherOrderCO/Bend | https://github.com/HigherOrderCO/Bend/blob/d184863f03e796d1d657958a51dd6dd331ade92d/src/fun/transform/desugar_fold.rs | src/fun/transform/desugar_fold.rs | use std::collections::HashSet;
use crate::{
diagnostics::Diagnostics,
fun::{Adts, Constructors, Ctx, Definition, Name, Pattern, Rule, Source, Term},
maybe_grow,
};
impl Ctx<'_> {
  /// Desugars `fold` expressions into recursive `match`es.
  /// ```bend
  /// foo xs =
  ///   ...
  ///   fold bind = init with x1 x2 {
  ///     Type/Ctr1: (Foo bind.rec_fld bind.fld x1 x2 free_var)
  ///     Type/Ctr2: (Bar bind.fld x1 x2)
  ///   }
  /// ```
  /// Desugars to:
  /// ```bend
  /// foo xs =
  ///   ...
  ///   (foo__fold0 init x1 x2 free_var)
  ///
  /// foo__fold0 = @bind match bind {
  ///   Type/Ctr1: (Foo (foo_fold0 bind.rec_fld x1 x2 free_var) bind.fld x1 x2 free_var)
  ///   Type/Ctr2: (Bar bind.fld x1 x2)
  /// }
  /// ```
  pub fn desugar_fold(&mut self) -> Result<(), Diagnostics> {
    // Definitions generated for each fold; added to the book at the end.
    let mut new_defs = vec![];
    for def in self.book.defs.values_mut() {
      // Counter so multiple folds inside one definition get distinct names.
      let mut fresh = 0;
      for rule in def.rules.iter_mut() {
        let mut ctx = DesugarFoldCtx {
          def_name: &def.name,
          fresh: &mut fresh,
          new_defs: &mut new_defs,
          ctrs: &self.book.ctrs,
          adts: &self.book.adts,
          source: def.source.clone(),
          check: def.check,
        };
        let res = rule.body.desugar_fold(&mut ctx);
        if let Err(e) = res {
          self.info.add_function_error(e, def.name.clone(), def.source.clone());
        }
      }
    }
    self.book.defs.extend(new_defs.into_iter().map(|def| (def.name.clone(), def)));
    self.info.fatal(())
  }
}
/// Shared state threaded through the `fold` desugaring of one definition.
struct DesugarFoldCtx<'a> {
  // Name of the definition being desugared; used to name the generated helpers.
  pub def_name: &'a Name,
  // Counter for generating distinct `__foldN` helper names.
  pub fresh: &'a mut usize,
  // Helper definitions generated so far; inserted into the book afterwards.
  pub new_defs: &'a mut Vec<Definition>,
  // Constructor -> ADT table, used to find the matched type.
  pub ctrs: &'a Constructors,
  pub adts: &'a Adts,
  // Source info copied onto generated definitions.
  pub source: Source,
  // Whether generated definitions should be type-checked.
  pub check: bool,
}
impl Term {
  /// Replaces each `fold` in this term with a call to a freshly generated
  /// recursive `match` definition (see `Ctx::desugar_fold` for the shape).
  fn desugar_fold(&mut self, ctx: &mut DesugarFoldCtx<'_>) -> Result<(), String> {
    maybe_grow(|| {
      // Desugar inner folds first (bottom-up).
      for child in self.children_mut() {
        child.desugar_fold(ctx)?;
      }
      if let Term::Fold { .. } = self {
        // Can't have unmatched unscoped because this'll be extracted
        if self.has_unscoped_diff() {
          return Err("Can't have non self-contained unscoped variables in a 'fold'".into());
        }
        let Term::Fold { bnd: _, arg, with_bnd, with_arg, arms } = self else { unreachable!() };
        // Gather the free variables
        let mut free_vars = HashSet::new();
        for arm in arms.iter() {
          let mut arm_free_vars = arm.2.free_vars().into_keys().collect::<HashSet<_>>();
          // Fields bound by the arm's pattern are not free.
          for field in arm.1.iter().flatten() {
            arm_free_vars.remove(field);
          }
          free_vars.extend(arm_free_vars);
        }
        for var in with_bnd.iter().flatten() {
          free_vars.remove(var);
        }
        let free_vars = free_vars.into_iter().collect::<Vec<_>>();
        let new_nam = Name::new(format!("{}__fold{}", ctx.def_name, ctx.fresh));
        *ctx.fresh += 1;
        // Substitute the implicit recursive calls to call the new function
        // NOTE(review): the unwraps assume every arm names a known constructor
        // with its fields filled in — presumably guaranteed by earlier
        // match-fixing passes; confirm against the pass ordering.
        let ctr = arms[0].0.as_ref().unwrap();
        let adt_nam = ctx.ctrs.get(ctr).unwrap();
        let ctrs = &ctx.adts.get(adt_nam).unwrap().ctrs;
        for arm in arms.iter_mut() {
          let ctr = arm.0.as_ref().unwrap();
          // Fields flagged `rec` in the ADT become recursive calls.
          let recursive = arm
            .1
            .iter()
            .zip(&ctrs.get(ctr).unwrap().fields)
            .filter_map(|(var, field)| if field.rec { Some(var.as_ref().unwrap().clone()) } else { None })
            .collect::<HashSet<_>>();
          arm.2.call_recursive(&new_nam, &recursive, &free_vars);
        }
        // Create the new function
        let x_nam = Name::new("%x");
        let body = Term::Mat {
          arg: Box::new(Term::Var { nam: x_nam.clone() }),
          bnd: None,
          with_bnd: with_bnd.clone(),
          with_arg: with_bnd.iter().map(|nam| Term::var_or_era(nam.clone())).collect(),
          arms: std::mem::take(arms),
        };
        // λx λfree.. λwith.. match x { .. }  (built inside-out)
        let body = Term::rfold_lams(body, with_bnd.iter().cloned());
        let body = Term::rfold_lams(body, free_vars.iter().map(|nam| Some(nam.clone())));
        let body = Term::lam(Pattern::Var(Some(x_nam)), body);
        let def = Definition::new_gen(
          new_nam.clone(),
          vec![Rule { pats: vec![], body }],
          ctx.source.clone(),
          ctx.check,
        );
        ctx.new_defs.push(def);
        // Call the new function
        let call = Term::call(Term::Ref { nam: new_nam.clone() }, [std::mem::take(arg.as_mut())]);
        let call = Term::call(call, free_vars.iter().cloned().map(|nam| Term::Var { nam }));
        let call = Term::call(call, with_arg.iter().cloned());
        *self = call;
      }
      Ok(())
    })
  }

  /// Replaces variable uses of the recursive fields with calls to
  /// `def_name` applied to the field plus every captured free variable.
  fn call_recursive(&mut self, def_name: &Name, recursive: &HashSet<Name>, free_vars: &[Name]) {
    maybe_grow(|| {
      for child in self.children_mut() {
        child.call_recursive(def_name, recursive, free_vars);
      }
      // If we found a recursive field, replace with a call to the new function.
      if let Term::Var { nam } = self {
        if recursive.contains(nam) {
          let call = Term::call(Term::Ref { nam: def_name.clone() }, [std::mem::take(self)]);
          let call = Term::call(call, free_vars.iter().cloned().map(|nam| Term::Var { nam }));
          *self = call;
        }
      }
    })
  }
}
| rust | Apache-2.0 | d184863f03e796d1d657958a51dd6dd331ade92d | 2026-01-04T15:41:39.511038Z | false |
HigherOrderCO/Bend | https://github.com/HigherOrderCO/Bend/blob/d184863f03e796d1d657958a51dd6dd331ade92d/src/fun/transform/unique_names.rs | src/fun/transform/unique_names.rs | // Pass to give all variables in a definition unique names.
use crate::{
fun::{Book, Name, Term},
maybe_grow,
};
use std::collections::HashMap;
impl Book {
  /// Makes all variables in each definition have a new unique name.
  /// Skips unbound variables.
  /// Precondition: Definition references have been resolved.
  pub fn make_var_names_unique(&mut self) {
    self.defs.values_mut().for_each(|def| def.rule_mut().body.make_var_names_unique());
  }
}
impl Term {
  /// Renames every bound variable in this term to a fresh unique name.
  pub fn make_var_names_unique(&mut self) {
    let mut namegen = UniqueNameGenerator::default();
    namegen.unique_names_in_term(self);
  }
}
// Numeric id assigned to each distinct variable binding.
type VarId = u64;
/// Assigns fresh numeric ids to variable bindings and resolves uses to
/// the id of their innermost visible binder.
#[derive(Default)]
pub struct UniqueNameGenerator {
  // For each original name, a stack of ids; the top of the stack is the
  // innermost (currently visible) binding of that name.
  name_map: HashMap<Name, Vec<VarId>>,
  // Next fresh id to hand out; increases monotonically.
  name_count: VarId,
}
impl UniqueNameGenerator {
  // Recursively assign an id to each variable in the term, then convert each id into a unique name.
  pub fn unique_names_in_term(&mut self, term: &mut Term) {
    // Note: we can't use the children iterators here because we mutate the binds,
    // which are shared across multiple children.
    maybe_grow(|| match term {
      // A variable use takes the id of its innermost visible binder.
      Term::Var { nam } => *nam = self.use_var(nam),
      Term::Mat { bnd, arg, with_bnd, with_arg, arms }
      | Term::Fold { bnd, arg, with_bnd, with_arg, arms } => {
        // Process args
        self.unique_names_in_term(arg);
        for arg in with_arg {
          self.unique_names_in_term(arg);
        }
        // Add binds shared by all arms
        self.push(bnd.as_ref());
        for bnd in with_bnd.iter() {
          self.push(bnd.as_ref());
        }
        // Process arms
        for arm in arms {
          // Add binds unique to each arm
          for bnd in arm.1.iter() {
            self.push(bnd.as_ref());
          }
          // Process arm body
          self.unique_names_in_term(&mut arm.2);
          // Remove binds unique to each arm
          for bnd in arm.1.iter_mut() {
            *bnd = self.pop(bnd.as_ref());
          }
        }
        // Remove binds shared by all arms
        for bnd in with_bnd {
          *bnd = self.pop(bnd.as_ref());
        }
        *bnd = self.pop(bnd.as_ref());
      }
      Term::Swt { bnd, arg, with_bnd, with_arg, pred, arms } => {
        self.unique_names_in_term(arg);
        for arg in with_arg {
          self.unique_names_in_term(arg);
        }
        self.push(bnd.as_ref());
        for bnd in with_bnd.iter() {
          self.push(bnd.as_ref());
        }
        // The predicate bind is only pushed around the last (successor) arm.
        let (succ, nums) = arms.split_last_mut().unwrap();
        for arm in nums.iter_mut() {
          self.unique_names_in_term(arm);
        }
        self.push(pred.as_ref());
        self.unique_names_in_term(succ);
        *pred = self.pop(pred.as_ref());
        for bnd in with_bnd {
          *bnd = self.pop(bnd.as_ref());
        }
        *bnd = self.pop(bnd.as_ref());
      }
      Term::Bend { bnd, arg, cond, step, base } => {
        for arg in arg {
          self.unique_names_in_term(arg);
        }
        for bnd in bnd.iter() {
          self.push(bnd.as_ref());
        }
        self.unique_names_in_term(cond);
        self.unique_names_in_term(step);
        self.unique_names_in_term(base);
        for bnd in bnd {
          *bnd = self.pop(bnd.as_ref());
        }
      }
      Term::Let { pat, val, nxt } | Term::Ask { pat, val, nxt } => {
        // The bound value is processed outside the scope of the pattern's binds.
        self.unique_names_in_term(val);
        for bnd in pat.binds() {
          self.push(bnd.as_ref());
        }
        self.unique_names_in_term(nxt);
        for bind in pat.binds_mut() {
          *bind = self.pop(bind.as_ref());
        }
      }
      Term::Use { nam, val, nxt } => {
        self.unique_names_in_term(val);
        self.push(nam.as_ref());
        self.unique_names_in_term(nxt);
        *nam = self.pop(nam.as_ref());
      }
      Term::Lam { tag: _, pat, bod } => {
        for bind in pat.binds() {
          self.push(bind.as_ref());
        }
        self.unique_names_in_term(bod);
        for bind in pat.binds_mut() {
          *bind = self.pop(bind.as_ref());
        }
      }
      Term::Fan { fan: _, tag: _, els } | Term::List { els } => {
        for el in els {
          self.unique_names_in_term(el);
        }
      }
      Term::App { tag: _, fun: fst, arg: snd } | Term::Oper { opr: _, fst, snd } => {
        self.unique_names_in_term(fst);
        self.unique_names_in_term(snd);
      }
      Term::With { typ: _, bod } => {
        self.unique_names_in_term(bod);
      }
      // Leaves: no variable binds or uses to rename (refs name definitions, not vars).
      Term::Link { .. }
      | Term::Num { .. }
      | Term::Nat { .. }
      | Term::Str { .. }
      | Term::Ref { .. }
      | Term::Era
      | Term::Err => {}
      Term::Open { .. } => unreachable!("'open' should be removed in earlier pass"),
      Term::Def { .. } => unreachable!("'def' should be removed in earlier pass"),
    })
  }

  /// Enters the scope of bind `nam`, assigning it the next fresh id.
  fn push(&mut self, nam: Option<&Name>) {
    if let Some(name) = nam {
      if let Some(ids) = self.name_map.get_mut(name) {
        ids.push(self.name_count);
      } else {
        self.name_map.insert(name.clone(), vec![self.name_count]);
      }
      self.name_count += 1;
    }
  }

  /// Leaves the scope of bind `nam`, returning its unique replacement name.
  fn pop(&mut self, nam: Option<&Name>) -> Option<Name> {
    if let Some(name) = nam {
      let var_id = self.name_map.get_mut(name).unwrap().pop().unwrap();
      if self.name_map[name].is_empty() {
        self.name_map.remove(name);
      }
      Some(Name::from(var_id))
    } else {
      None
    }
  }

  /// Resolves a variable use to the unique name of its innermost binder.
  fn use_var(&self, nam: &Name) -> Name {
    if let Some(vars) = self.name_map.get(nam) {
      let var_id = *vars.last().unwrap();
      Name::from(var_id)
    } else {
      // Skip unbound variables.
      // With this, we can use this function before checking for unbound vars.
      nam.clone()
    }
  }
}
| rust | Apache-2.0 | d184863f03e796d1d657958a51dd6dd331ade92d | 2026-01-04T15:41:39.511038Z | false |
HigherOrderCO/Bend | https://github.com/HigherOrderCO/Bend/blob/d184863f03e796d1d657958a51dd6dd331ade92d/src/fun/transform/apply_args.rs | src/fun/transform/apply_args.rs | use crate::{
diagnostics::Diagnostics,
fun::{Ctx, Pattern, Rule, Term},
};
impl Ctx<'_> {
  /// Applies the arguments to the program being run by applying them to the main function.
  ///
  /// Example:
  /// ```hvm
  /// main x1 x2 x3 = (MainBody x1 x2 x3)
  /// ```
  /// Calling with `bend run <file> arg1 arg2 arg3`, it becomes:
  /// ```hvm
  /// main = (λx1 λx2 λx3 (MainBody x1 x2 x3) arg1 arg2 arg3)
  /// ```
  pub fn apply_args(&mut self, args: Option<Vec<Term>>) -> Result<(), Diagnostics> {
    if let Some(entrypoint) = &self.book.entrypoint {
      let main_def = &mut self.book.defs[entrypoint];
      // Since we fatal error, no need to exit early
      let n_rules = main_def.rules.len();
      if n_rules != 1 {
        self.info.add_function_error(
          format!("Expected the entrypoint function to have only one rule, found {n_rules}."),
          entrypoint.clone(),
          main_def.source.clone(),
        );
      }
      // Turn `main x1 .. xn = body` into `main = λx1 .. λxn body`
      // (patterns are folded in reverse so lambdas end up in order).
      let mut main_body = std::mem::take(&mut main_def.rules[0].body);
      for pat in main_def.rules[0].pats.iter().rev() {
        if let Pattern::Var(var) = pat {
          main_body = Term::lam(Pattern::Var(var.clone()), main_body);
        } else {
          self.info.add_function_error(
            format!("Expected the entrypoint function to only have variable patterns, found '{pat}'."),
            entrypoint.clone(),
            main_def.source.clone(),
          );
        }
      }
      // Apply the CLI arguments (if any) to the lambda-wrapped body.
      if let Some(args) = args {
        main_body = Term::call(main_body, args);
      }
      main_def.rules = vec![Rule { pats: vec![], body: main_body }];
    }
    self.info.fatal(())
  }
}
| rust | Apache-2.0 | d184863f03e796d1d657958a51dd6dd331ade92d | 2026-01-04T15:41:39.511038Z | false |
HigherOrderCO/Bend | https://github.com/HigherOrderCO/Bend/blob/d184863f03e796d1d657958a51dd6dd331ade92d/src/imports/packages.rs | src/imports/packages.rs | use super::{loader::PackageLoader, normalize_path, BoundSource, ImportCtx, ImportType, ImportsMap};
use crate::{
diagnostics::Diagnostics,
fun::{load_book::do_parse_book, parser::ParseBook, Name},
};
use indexmap::{IndexMap, IndexSet};
use std::{cell::RefCell, collections::VecDeque, path::PathBuf};
/// Collection of parsed books being loaded as packages, keyed by source path.
#[derive(Default)]
pub struct Packages {
  /// Map from source name to parsed book.
  /// Index 0 holds the root book (see `Packages::new`).
  pub books: IndexMap<Name, RefCell<ParseBook>>,
  /// Already loaded ADTs information to be used when applying ADT binds.
  /// Source path -> ADT names -> constructor names.
  pub loaded_adts: IndexMap<Name, IndexMap<Name, Vec<Name>>>,
  /// Queue of books indexes that still needs to load its imports.
  load_queue: VecDeque<usize>,
}
impl Packages {
pub fn new(book: ParseBook) -> Self {
Self {
books: IndexMap::from([(book.source.clone(), book.into())]),
load_queue: VecDeque::new(),
loaded_adts: IndexMap::new(),
}
}
/// Loads each import statement recursively into a Source -> ParseBook map.
/// Inserts into the ImportsMap of each book all the imported names.
pub fn load_imports(
  &mut self,
  loader: &mut impl PackageLoader,
  diag: &mut Diagnostics,
) -> Result<ParseBook, Diagnostics> {
  // Load the root book's imports first; each newly loaded book enqueues itself.
  self.load_imports_go(0, None, loader)?;
  while let Some(idx) = self.load_queue.pop_front() {
    // Relative imports of a loaded book resolve against its own directory.
    let parent_dir = {
      let book = self.books[idx].borrow();
      book.source.rsplit_once('/').map(|(s, _)| Name::new(s))
    };
    self.load_imports_go(idx, parent_dir, loader)?;
  }
  // With everything loaded, resolve the name binds of every book.
  for idx in 0..self.books.len() {
    self.load_binds(idx, diag);
  }
  // Index 0 is the root book given to `Packages::new`.
  let (_, book) = self.books.swap_remove_index(0).unwrap();
  diag.fatal(book.into_inner())
}
fn load_imports_go(
&mut self,
idx: usize,
dir: Option<Name>,
loader: &mut impl PackageLoader,
) -> Result<(), Diagnostics> {
let mut sources = IndexMap::new();
{
let mut book = self.books[idx].borrow_mut();
let names = &mut book.import_ctx.imports;
for import in names {
if import.relative {
if let Some(ref dir) = dir {
let path = format!("{}/{}", dir, import.path);
let normalized = normalize_path(&PathBuf::from(path));
import.path = Name::new(normalized.to_string_lossy());
}
}
let loaded = loader.load(import)?;
sources.extend(loaded);
}
}
for (psrc, code) in sources {
let module = do_parse_book(&code, &PathBuf::from(psrc.as_ref()), ParseBook::default())?;
self.load_queue.push_back(self.books.len());
self.books.insert(psrc, module.into());
}
Ok(())
}
/// Maps the `ImportType` of each import to the top level names it relates,
/// checks if it is valid, resolves `BoundSource::Either`, and adds to the book ImportMap.
fn load_binds(&mut self, idx: usize, diag: &mut Diagnostics) {
let book = &mut self.books[idx].borrow_mut();
let ImportCtx { imports, map } = &mut book.import_ctx;
for import in imports {
match (&mut import.src, &import.imp_type) {
(BoundSource::Either(src, pkgs), ImportType::Single(nam, alias)) => {
if self.unique_top_level_names(src).contains(nam) {
let err = format!("Both file '{src}.bend' and folder '{src}' contains the import '{nam}'");
diag.add_book_error(err);
continue;
}
self.add_file_from_dir(pkgs, nam, alias, map, diag);
import.src = BoundSource::Dir(std::mem::take(pkgs));
}
(BoundSource::Either(src, pkgs), ImportType::List(names)) => {
for (name, alias) in names {
let added = self.add_file_from_dir(pkgs, name, alias, map, diag);
if !added {
if !self.unique_top_level_names(src).contains(name) {
let err = format!("Package '{src}' does not contain the top level name '{name}'");
diag.add_book_error(err);
continue;
}
pkgs.insert(name.clone(), src.clone());
self.add_aliased_bind(src, name, alias, map, diag);
}
}
import.src = BoundSource::Dir(std::mem::take(pkgs));
}
(BoundSource::Either(src, pkgs), ImportType::Glob) => {
let names = self.unique_top_level_names(src);
let mut error = false;
for nam in pkgs.keys() {
if names.contains(nam) {
let err = format!("Both file '{src}.bend' and folder '{src}' contains the import '{nam}'");
diag.add_book_error(err);
error = true;
}
}
if error {
continue;
}
self.add_glob_from_dir(pkgs, map, diag);
for sub in &names {
pkgs.insert(sub.clone(), src.clone());
}
map.add_binds(&names, src, diag);
import.src = BoundSource::Dir(std::mem::take(pkgs));
}
(BoundSource::File(src), ImportType::Single(name, alias)) => {
if !self.unique_top_level_names(src).contains(name) {
let err = format!("Package '{src}' does not contain the top level name '{name}'");
diag.add_book_error(err);
continue;
}
self.add_aliased_bind(src, name, alias, map, diag);
}
(BoundSource::File(src), ImportType::List(names)) => {
let src_names = self.unique_top_level_names(src);
let mut error = false;
for (sub, _) in names {
if !src_names.contains(sub) {
let err = format!("Package '{src}' does not contain the top level name '{sub}'");
diag.add_book_error(err);
error = true;
}
}
if error {
continue;
}
for (name, alias) in names {
self.add_aliased_bind(src, name, alias, map, diag);
}
}
(BoundSource::File(src), ImportType::Glob) => {
let names = self.unique_top_level_names(src);
map.add_binds(&names, src, diag);
}
(BoundSource::Dir(pkgs), ImportType::Single(nam, alias)) => {
self.add_file_from_dir(pkgs, nam, alias, map, diag);
}
(BoundSource::Dir(pkgs), ImportType::List(names)) => {
for (nam, alias) in names {
self.add_file_from_dir(pkgs, nam, alias, map, diag);
}
}
(BoundSource::Dir(pkgs), ImportType::Glob) => {
self.add_glob_from_dir(pkgs, map, diag);
}
(BoundSource::None, _) => unreachable!(),
}
}
}
fn add_aliased_bind(
&self,
src: &mut Name,
name: &Name,
alias: &Option<Name>,
map: &mut ImportsMap,
diag: &mut Diagnostics,
) {
let alias = alias.as_ref();
if let Some(adt) = self.books.get(src).unwrap().borrow().adts.get(name) {
let names = adt.ctrs.iter().map(|(n, _)| n);
map.add_nested_binds(src, alias.unwrap_or(name), names, diag);
}
map.add_aliased_bind(src, name, alias, diag);
}
fn add_file_from_dir(
&self,
pkgs: &IndexMap<Name, Name>,
nam: &Name,
alias: &Option<Name>,
map: &mut ImportsMap,
diag: &mut Diagnostics,
) -> bool {
if let Some(src) = pkgs.get(nam) {
let names = self.unique_top_level_names(src);
map.add_file_nested_binds(src, nam, alias.as_ref(), names, diag);
true
} else {
false
}
}
fn add_glob_from_dir(&self, pkgs: &IndexMap<Name, Name>, map: &mut ImportsMap, diag: &mut Diagnostics) {
for (nam, src) in pkgs {
let names = self.unique_top_level_names(src);
map.add_file_nested_binds(src, nam, None, names, diag);
}
}
fn unique_top_level_names(&self, src: &Name) -> IndexSet<Name> {
let bound_book = self.books.get(src).unwrap().borrow();
bound_book.top_level_names().cloned().collect()
}
}
| rust | Apache-2.0 | d184863f03e796d1d657958a51dd6dd331ade92d | 2026-01-04T15:41:39.511038Z | false |
HigherOrderCO/Bend | https://github.com/HigherOrderCO/Bend/blob/d184863f03e796d1d657958a51dd6dd331ade92d/src/imports/mod.rs | src/imports/mod.rs | use crate::{
diagnostics::{Diagnostics, WarningType},
fun::Name,
};
use indexmap::{IndexMap, IndexSet};
use itertools::Itertools;
use std::fmt::Display;
pub mod book;
pub mod loader;
pub mod packages;
pub use loader::*;
/// Map from a bound (locally visible) name to the canonical name it refers to.
pub type BindMap = IndexMap<Name, Name>;
/// All import information collected for a single book.
#[derive(Debug, Clone, Default)]
pub struct ImportCtx {
  /// Imports declared in the program source.
  imports: Vec<Import>,
  /// Map from bound names to source package.
  map: ImportsMap,
}
impl ImportCtx {
  /// Records one more import statement from the program source.
  pub fn add_import(&mut self, import: Import) {
    self.imports.push(import);
  }

  /// Consumes the context, yielding the declared imports.
  pub fn to_imports(self) -> Vec<Import> {
    self.imports
  }

  /// Collects the resolved source name(s) of every import.
  /// Panics if any import is still ambiguous (`BoundSource::Either`).
  pub fn sources(&self) -> Vec<&Name> {
    let mut srcs: Vec<&Name> = Vec::new();
    for import in self.imports.iter() {
      match &import.src {
        BoundSource::File(file) => srcs.push(file),
        BoundSource::Dir(files) => srcs.extend(files.values()),
        BoundSource::None => {}
        BoundSource::Either(_, _) => unreachable!("This should be resolved into `File` or `Dir` by now"),
      }
    }
    srcs
  }
}
/// A single import statement from the source program.
#[derive(Debug, Clone)]
pub struct Import {
  // Path written in the import statement (normalized during loading).
  pub path: Name,
  // Which names the statement brings in: single, list, or glob.
  pub imp_type: ImportType,
  // Whether the path is relative to the importing file's directory.
  pub relative: bool,
  // Source(s) the import resolves to; `BoundSource::None` until loaded.
  pub src: BoundSource,
}
impl Import {
  /// Builds an import that has not been resolved to a source yet.
  pub fn new(path: Name, imp_type: ImportType, relative: bool) -> Self {
    let src = BoundSource::None;
    Self { src, path, imp_type, relative }
  }
}
/// Which names an import statement requests from its path.
#[derive(Debug, Clone)]
pub enum ImportType {
  // One name, with an optional alias.
  Single(Name, Option<Name>),
  // Several names, each with an optional alias.
  List(Vec<(Name, Option<Name>)>),
  // Every name reachable from the path (`*`).
  Glob,
}
impl Display for ImportType {
  /// Renders the import as written in source: `name`, `(a, b, …)`, or `*`.
  fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
    match self {
      Self::Single(name, _) => write!(f, "{name}"),
      Self::List(entries) => {
        let names = entries.iter().map(|(name, _)| name).join(", ");
        write!(f, "({names})")
      }
      Self::Glob => write!(f, "*"),
    }
  }
}
/// The concrete source(s) an import path was resolved to by the loader.
#[derive(Debug, Clone)]
pub enum BoundSource {
  // Not resolved yet.
  None,
  // A single `.bend` file.
  File(Name),
  // A folder: map from file name to its full source name.
  Dir(IndexMap<Name, Name>),
  /// If the bound source is ambiguous between a file or a directory
  Either(Name, IndexMap<Name, Name>),
}
/// Names made visible by imports, mapped to the canonical source they refer to.
#[derive(Debug, Clone, Default)]
struct ImportsMap {
  // Bound name -> canonical source name (e.g. `path/def`).
  binds: BindMap,
}
impl ImportsMap {
  /// Whether some bind already points at the source name `s`.
  pub fn contains_source(&self, s: &Name) -> bool {
    self.binds.values().contains(s)
  }

  /// Inserts `bind -> src`, warning when `bind` shadows an earlier import.
  fn add_bind(&mut self, src: &str, bind: Name, diag: &mut Diagnostics) {
    if let Some(old) = self.binds.get(&bind) {
      let warn = format!("The import '{src}' shadows the imported name '{old}'");
      diag.add_book_warning(warn, WarningType::ImportShadow);
    }
    self.binds.insert(bind, Name::new(src));
  }

  /// Binds `src/sub` under `alias` (or under `sub` when no alias is given).
  fn add_aliased_bind(&mut self, src: &Name, sub: &Name, alias: Option<&Name>, diag: &mut Diagnostics) {
    let src = format!("{}/{}", src, sub);
    let aliased = alias.unwrap_or(sub);
    self.add_bind(&src, aliased.clone(), diag);
  }

  /// Binds every name in `names` to `src/name`, without aliases.
  fn add_binds(&mut self, names: &IndexSet<Name>, src: &Name, diag: &mut Diagnostics) {
    for sub in names {
      self.add_aliased_bind(src, sub, None, diag);
    }
  }

  /// Adds all names to the ImportMap in the form `alias/name`.
  /// If one of the names is equal to the file name, adds as `alias` instead.
  fn add_file_nested_binds(
    &mut self,
    src: &Name,
    file: &Name,
    alias: Option<&Name>,
    names: IndexSet<Name>,
    diag: &mut Diagnostics,
  ) {
    let aliased = alias.unwrap_or(file);
    // A name equal to the file itself is bound separately below.
    self.add_nested_binds(src, aliased, names.iter().filter(|&n| n != file), diag);
    if names.contains(file) {
      let src = format!("{}/{}", src, file);
      self.add_bind(&src, aliased.clone(), diag);
    }
  }

  /// Adds all names to the ImportMap in the form `bind/name`.
  fn add_nested_binds<'a>(
    &mut self,
    src: &Name,
    bind: &Name,
    names: impl Iterator<Item = &'a Name>,
    diag: &mut Diagnostics,
  ) {
    for name in names {
      let src = format!("{}/{}", src, name);
      let bind = Name::new(format!("{bind}/{name}"));
      self.add_bind(&src, bind, diag);
    }
  }
}
| rust | Apache-2.0 | d184863f03e796d1d657958a51dd6dd331ade92d | 2026-01-04T15:41:39.511038Z | false |
HigherOrderCO/Bend | https://github.com/HigherOrderCO/Bend/blob/d184863f03e796d1d657958a51dd6dd331ade92d/src/imports/loader.rs | src/imports/loader.rs | use super::{BoundSource, Import, ImportType};
use crate::fun::Name;
use indexmap::IndexMap;
use std::{
collections::HashSet,
path::{Component, Path, PathBuf},
};
/// Map from a package's canonical source name to its file contents.
pub type Sources = IndexMap<Name, String>;
/// Trait to load packages from various sources.
pub trait PackageLoader {
  /// Load a package specified by the `import` parameter.
  ///
  /// # Parameters
  ///
  /// - `import`: A mutable reference to an `Import` structure, which contains:
  ///   - `path`: The path to the package or directory to be imported.
  ///   - `imp_type`: The type of import, which can specify a single name, a list of names, or all names in a path.
  ///   - `relative`: A boolean indicating if the path is relative to the current directory.
  ///   - `src`: A `BoundSource` to be updated with the names of the located files.
  ///
  /// # Behavior
  ///
  /// The `load` method is responsible for locating and loading the requested package(s).
  /// The loaded packages are returned as a `Sources` map, where the key is the package name and the value is its content.
  /// Implementers must:
  ///
  /// - Track already loaded sources to avoid loading and returning them again.
  /// - Update `import.src` with the names of the found packages, even if they are not included in the `Sources` map.
  ///
  /// The implementation should handle the following import types:
  /// - **Single**: Load a specific file by its name.
  /// - **List**: Load a list of specified files or names from a specific file.
  /// - **Glob**: Load all files in a directory or all names from a specific file.
  ///
  /// # Errors
  ///
  /// Returns a human-readable message when the import cannot be located or loaded.
  fn load(&mut self, import: &mut Import) -> Result<Sources, String>;
}
/// Default implementation of `PackageLoader` that loads packages from the local directory.
pub struct DefaultLoader {
  // Directory containing the entry-point file; lookups are rooted here.
  local_path: PathBuf,
  // Canonical names already handled, so a file is loaded at most once.
  loaded: HashSet<Name>,
  // File stem of the program entry point; importing it is an error.
  entrypoint: Name,
}
impl DefaultLoader {
  /// Creates a loader rooted at the parent directory of `local_path`,
  /// using that file's stem as the entry-point name.
  pub fn new(local_path: &Path) -> Self {
    let entrypoint = Name::new(local_path.file_stem().unwrap().to_string_lossy());
    let local_path = local_path.parent().unwrap().to_path_buf();
    Self { local_path, loaded: HashSet::new(), entrypoint }
  }

  /// Reads the file at `path` (with a `.bend` extension) into `src`, keyed by
  /// the normalized `file_path` name.
  ///
  /// Returns `Ok(None)` when the file can not be read, `Ok(Some(name))`
  /// otherwise, and errors when importing the program's entry point.
  fn read_file(&mut self, path: &Path, file_path: &str, src: &mut Sources) -> Result<Option<Name>, String> {
    let normalized = normalize_path(&PathBuf::from(file_path));
    let file_path = Name::new(normalized.to_string_lossy());
    if self.entrypoint == file_path {
      return Err("Can not import the entry point of the program.".to_string());
    }

    if !self.is_loaded(&file_path) {
      // NOTE(review): the name is marked as loaded even when the read below
      // fails, so a failed read is never retried — confirm this is intended.
      self.loaded.insert(file_path.clone());

      let path = path.with_extension("bend");
      let Ok(code) = std::fs::read_to_string(path) else { return Ok(None) };
      src.insert(file_path.clone(), code);
    }

    Ok(Some(file_path))
  }

  /// Reads `file_name` inside `full_path`, naming it `folder/file_name`
  /// (or just `file_name` when `folder` is empty).
  fn read_file_in_folder(
    &mut self,
    full_path: &Path,
    folder: &str,
    file_name: &str,
    src: &mut Sources,
  ) -> Result<Option<Name>, String> {
    let full_path = full_path.join(file_name);

    if folder.is_empty() {
      self.read_file(&full_path, file_name, src)
    } else {
      let file_name = &format!("{}/{}", folder, file_name);
      self.read_file(&full_path, file_name, src)
    }
  }

  /// Resolves `path` under `base_path`, which may match a `.bend` file, a
  /// directory, or both (`BoundSource::Either`). Returns `Ok(None)` when
  /// nothing matched.
  fn read_path(
    &mut self,
    base_path: &Path,
    path: &Name,
    imp_type: &ImportType,
  ) -> Result<Option<(BoundSource, Sources)>, String> {
    let full_path = base_path.join(path.as_ref());
    let mut src = IndexMap::new();
    let (mut file, mut dir) = (None, None);

    if full_path.with_extension("bend").is_file() {
      file = self.read_file(&full_path, path.as_ref(), &mut src)?;
    }

    if full_path.is_dir() || path.is_empty() {
      let mut names = IndexMap::new();

      match imp_type {
        ImportType::Single(file, _) => {
          if let Some(name) = self.read_file_in_folder(&full_path, path, file, &mut src)? {
            names.insert(file.clone(), name);
          }
        }
        ImportType::List(list) => {
          for (file, _) in list {
            if let Some(name) = self.read_file_in_folder(&full_path, path, file, &mut src)? {
              names.insert(file.clone(), name);
            }
          }
        }
        ImportType::Glob => {
          // Propagate directory-read failures instead of panicking
          // (the directory may be unreadable even after the `is_dir` check).
          let entries = full_path
            .read_dir()
            .map_err(|e| format!("Failed to read directory '{}': {e}", full_path.display()))?;
          for entry in entries.flatten() {
            let file = PathBuf::from(&entry.file_name());

            if let Some("bend") = file.extension().and_then(|f| f.to_str()) {
              let file = file.file_stem().unwrap().to_string_lossy();
              if let Some(name) = self.read_file_in_folder(&full_path, path, &file, &mut src)? {
                names.insert(Name::new(file), name);
              }
            }
          }
        }
      }

      if !names.is_empty() {
        dir = Some(names);
      }
    }

    let src = match (file, dir) {
      (Some(f), None) => Some((BoundSource::File(f), src)),
      (None, Some(d)) => Some((BoundSource::Dir(d), src)),
      (Some(f), Some(d)) => Some((BoundSource::Either(f, d), src)),
      (None, None) => None,
    };

    Ok(src)
  }

  /// Whether `name` was already handled by a previous `read_file`.
  fn is_loaded(&self, name: &Name) -> bool {
    self.loaded.contains(name)
  }
}
/// Search path entries (relative to the local directory) for non-relative imports.
pub const BEND_PATH: &[&str] = &[""];
impl PackageLoader for DefaultLoader {
  /// Resolves `import` against the local search folders, recording the bound
  /// source on the import and returning the newly loaded file contents.
  fn load(&mut self, import: &mut Import) -> Result<Sources, String> {
    let mut sources = Sources::new();

    let Import { path, imp_type, relative, src } = import;

    // Relative imports only look in the importer's directory;
    // absolute ones search every entry of `BEND_PATH`.
    let folders = if *relative {
      vec![self.local_path.clone()]
    } else {
      BEND_PATH.iter().map(|p| self.local_path.join(p)).collect()
    };

    // Use the first folder that resolves the import.
    for base in folders {
      let Some((names, new_pkgs)) = self.read_path(&base, path, imp_type)? else { continue };

      *src = names;
      sources.extend(new_pkgs);
      break;
    }

    if let BoundSource::None = src {
      // `format!` already yields a `String`; no extra conversion needed.
      return Err(format!("Failed to import '{}' from '{}'", imp_type, path));
    }

    Ok(sources)
  }
}
// Taken from 'cargo/util/paths.rs'
/// Lexically normalizes a path: drops `.` segments and resolves `..` against
/// the components seen so far, without touching the filesystem.
pub fn normalize_path(path: &Path) -> PathBuf {
  let mut parts = path.components().peekable();

  // A leading prefix (e.g. a Windows drive) is kept verbatim.
  let mut out = match parts.peek().copied() {
    Some(prefix @ Component::Prefix(..)) => {
      parts.next();
      PathBuf::from(prefix.as_os_str())
    }
    _ => PathBuf::new(),
  };

  for part in parts {
    match part {
      // The prefix, if any, was consumed above.
      Component::Prefix(..) => unreachable!(),
      Component::RootDir => out.push(part.as_os_str()),
      // `.` segments are dropped.
      Component::CurDir => {}
      // `..` pops the last kept segment (no-op at the root).
      Component::ParentDir => {
        out.pop();
      }
      Component::Normal(segment) => out.push(segment),
    }
  }
  out
}
| rust | Apache-2.0 | d184863f03e796d1d657958a51dd6dd331ade92d | 2026-01-04T15:41:39.511038Z | false |
HigherOrderCO/Bend | https://github.com/HigherOrderCO/Bend/blob/d184863f03e796d1d657958a51dd6dd331ade92d/src/imports/book.rs | src/imports/book.rs | use super::{BindMap, ImportsMap, PackageLoader};
use crate::{
diagnostics::{Diagnostics, DiagnosticsConfig},
fun::{
parser::ParseBook, Adt, AdtCtr, Book, Definition, HvmDefinition, Name, Pattern, Source, SourceKind, Term,
},
imp::{self, Expr, MatchArm, Stmt},
imports::packages::Packages,
maybe_grow,
};
use indexmap::{map::Entry, IndexMap};
use itertools::Itertools;
impl ParseBook {
  /// Loads and applies imports recursively to a ParseBook,
  /// transforming definitions and ADTs to a canonical name,
  /// and adding `use` binds so that names are accessible by their alias.
  ///
  /// # Details
  ///
  /// The process involves:
  ///
  /// 1. Loading imports recursively using the provided `loader`.
  /// 2. Transforming definitions and ADTs with naming transformations.
  /// 3. Adding binds for aliases and old names in their respective definitions.
  /// 4. Converting the ParseBook into its functional form.
  /// 5. Perform any necessary post-processing.
  pub fn load_imports(
    self,
    mut loader: impl PackageLoader,
    diag_config: DiagnosticsConfig,
  ) -> Result<Book, Diagnostics> {
    let diag = &mut Diagnostics::new(diag_config);
    let pkgs = &mut Packages::new(self);
    // Load all the imports recursively, saving them in `pkgs`.
    // `book` is the root book with the entry point.
    let mut book = pkgs.load_imports(&mut loader, diag)?;
    // Apply the imports to the book
    book.apply_imports(None, diag, pkgs)?;
    diag.fatal(())?;
    // Print the non-fatal diagnostics collected so far.
    eprint!("{}", diag);
    // Convert the parse-level AST into the internal functional representation.
    let mut book = book.to_fun()?;
    // Process terms that contains constructors names and can't be updated by `desugar_use`.
    book.desugar_ctr_use();
    Ok(book)
  }
  /// Loads the imported books recursively into the importing book,
  /// then apply imported names or aliases binds to its definitions.
  fn apply_imports(
    &mut self,
    main_imports: Option<&ImportsMap>,
    diag: &mut Diagnostics,
    pkgs: &mut Packages,
  ) -> Result<(), Diagnostics> {
    self.load_packages(main_imports, diag, pkgs)?;
    self.apply_import_binds(main_imports, pkgs);
    Ok(())
  }
  /// Consumes the book imported packages,
  /// applying the imports recursively of every nested book.
  fn load_packages(
    &mut self,
    main_imports: Option<&ImportsMap>,
    diag: &mut Diagnostics,
    pkgs: &mut Packages,
  ) -> Result<(), Diagnostics> {
    let sources = self.import_ctx.sources().into_iter().cloned().collect_vec();
    for src in sources {
      // Each package is removed from `pkgs` and merged exactly once.
      let Some(package) = pkgs.books.swap_remove(&src) else { continue };
      let mut package = package.into_inner();
      // Can not be done outside the loop/function because of the borrow checker.
      // Just serves to pass only the import map of the first call to `apply_imports_go`.
      let main_imports = main_imports.unwrap_or(&self.import_ctx.map);
      package.apply_imports(Some(main_imports), diag, pkgs)?;
      // Rename ADTs and defs, applying binds from old names to new names
      package.apply_adts(&src, main_imports);
      package.apply_defs(&src, main_imports);
      let Book { defs, hvm_defs, adts, .. } = package.to_fun()?;
      // Add the ADTs to the importing book,
      // saving the constructors names to be used when applying ADTs binds.
      for (name, adt) in adts {
        let adts = pkgs.loaded_adts.entry(src.clone()).or_default();
        adts.insert(name.clone(), adt.ctrs.keys().cloned().collect_vec());
        self.add_imported_adt(name, adt, diag);
      }
      // The names on the indexmap are the original ones, so we ignore them
      for def in defs.into_values() {
        self.add_imported_def(def, diag);
      }
      // The names on the indexmap are the original ones, so we ignore them
      for def in hvm_defs.into_values() {
        self.add_imported_hvm_def(def, diag);
      }
    }
    Ok(())
  }
  /// Applies a chain of `use bind = src` to every local definition.
  ///
  /// Must be used after `load_packages`
  fn apply_import_binds(&mut self, main_imports: Option<&ImportsMap>, pkgs: &Packages) {
    // Can not be done outside the function because of the borrow checker.
    // Just serves to pass only the import map of the first call to `apply_imports_go`.
    let main_imports = main_imports.unwrap_or(&self.import_ctx.map);
    let mut local_imports = BindMap::new();
    let mut adt_imports = BindMap::new();
    // Collect local imports binds, starting with `__` if not imported by the main book.
    // NOTE(review): `|` below is the non-short-circuiting boolean or — all three
    // checks always run; presumably equivalent to `||` here. Confirm intended.
    'outer: for (bind, src) in self.import_ctx.map.binds.iter().rev() {
      if self.contains_def(bind) | self.ctrs.contains_key(bind) | self.adts.contains_key(bind) {
        // TODO: Here we should show warnings for shadowing of imported names by local def/ctr/adt
        // It can be done, but when importing with `ImportType::Single` files in the same folder,
        // it gives a false positive warning
        continue;
      }
      let nam = if main_imports.contains_source(src) { src.clone() } else { Name::new(format!("__{}", src)) };
      // Checks if the bind is an loaded ADT name,
      // If so, add the constructors binds as `bind/ctr` instead.
      for pkg in self.import_ctx.sources() {
        if let Some(book) = pkgs.loaded_adts.get(pkg) {
          if let Some(ctrs) = book.get(&nam) {
            for ctr in ctrs.iter().rev() {
              // Drop the `__` mangling prefix if present — assumes `__` only
              // appears as the mangle separator (TODO: confirm).
              let full_ctr_name = ctr.split("__").nth(1).unwrap_or(ctr.as_ref());
              // Canonical ctr names always start with their source path.
              let ctr_name = full_ctr_name.strip_prefix(src.as_ref()).unwrap();
              let bind = Name::new(format!("{}{}", bind, ctr_name));
              local_imports.insert(bind, ctr.clone());
            }
            // Add a mapping of the ADT name
            adt_imports.insert(bind.clone(), nam.clone());
            continue 'outer;
          }
        }
      }
      // Not a constructor, so just insert the bind.
      local_imports.insert(bind.clone(), nam);
    }
    for (_, def) in self.local_defs_mut() {
      def.apply_binds(true, &local_imports);
      def.apply_type_binds(&adt_imports);
    }
  }
  /// Applying the necessary naming transformations to the book ADTs,
  /// adding `use ctr = ctr_src` chains to every local definition and
  /// substituting the name of type ctrs for the canonical ones.
  fn apply_adts(&mut self, src: &Name, main_imports: &ImportsMap) {
    let adts = std::mem::take(&mut self.adts);
    let mut new_adts = IndexMap::new();
    let mut adts_map = vec![];
    let mut ctrs_map = IndexMap::new();
    let mut new_ctrs = IndexMap::new();
    // Rename the ADTs and constructors to their canonical name,
    // starting with `__` if not imported by the main book.
    for (mut name, mut adt) in adts {
      if adt.source.is_local() {
        adt.source.kind = SourceKind::Imported;
        let old_name = name.clone();
        name = Name::new(format!("{}/{}", src, name));
        let mangle_name = !main_imports.contains_source(&name);
        let mut mangle_adt_name = mangle_name;
        for (old_nam, ctr) in std::mem::take(&mut adt.ctrs) {
          let mut ctr_name = Name::new(format!("{}/{}", src, old_nam));
          let mangle_ctr = mangle_name && !main_imports.contains_source(&ctr_name);
          if mangle_ctr {
            // Any mangled constructor forces the ADT name to be mangled too.
            mangle_adt_name = true;
            ctr_name = Name::new(format!("__{}", ctr_name));
          }
          let ctr = AdtCtr { name: ctr_name.clone(), ..ctr };
          new_ctrs.insert(ctr_name.clone(), name.clone());
          ctrs_map.insert(old_nam, ctr_name.clone());
          adt.ctrs.insert(ctr_name, ctr);
        }
        if mangle_adt_name {
          name = Name::new(format!("__{}", name));
        }
        adt.name = name.clone();
        adts_map.push((old_name, name.clone()));
      }
      new_adts.insert(name.clone(), adt);
    }
    // Apply the binds for the type constructors in the constructor types
    for (_, adt) in &mut new_adts {
      for (_, ctr) in &mut adt.ctrs {
        for (from, to) in &adts_map {
          ctr.typ.subst_ctr(from, to);
        }
      }
    }
    let adts_map = adts_map.into_iter().collect::<IndexMap<_, _>>();
    for (_, def) in self.local_defs_mut() {
      // Applies the binds for the new constructor names for every definition.
      def.apply_binds(true, &ctrs_map);
      // Apply the binds for the type constructors in the def types and in the `def` terms.
      def.apply_type_binds(&adts_map);
    }
    self.adts = new_adts;
    self.ctrs = new_ctrs;
  }
  /// Apply the necessary naming transformations to the book definitions,
  /// adding `use def = def_src` chains to every local definition.
  fn apply_defs(&mut self, src: &Name, main_imports: &ImportsMap) {
    let mut canonical_map: IndexMap<_, _> = IndexMap::new();
    // Rename the definitions to their canonical name
    // Starting with `__` if not imported by the main book.
    for (_, def) in self.local_defs_mut() {
      def.canonicalize_name(src, main_imports, &mut canonical_map);
    }
    // Applies the binds for the new names for every definition
    for (_, def) in self.local_defs_mut() {
      def.apply_binds(false, &canonical_map);
      def.source_mut().kind = SourceKind::Imported;
    }
  }
}
/// Helper functions
impl ParseBook {
  /// Iterates over every top-level name in this book:
  /// imp/fun/hvm definitions, ADTs and their constructors.
  pub fn top_level_names(&self) -> impl Iterator<Item = &Name> {
    let imp_defs = self.imp_defs.keys();
    let fun_defs = self.fun_defs.keys();
    let hvm_defs = self.hvm_defs.keys();
    let adts = self.adts.keys();
    let ctrs = self.ctrs.keys();
    imp_defs.chain(fun_defs).chain(hvm_defs).chain(adts).chain(ctrs)
  }

  /// Adds an imported ADT and its constructors to the book,
  /// reporting an error on any name conflict.
  fn add_imported_adt(&mut self, nam: Name, adt: Adt, diag: &mut Diagnostics) {
    // `contains_key` instead of `get(..).is_some()`.
    if self.adts.contains_key(&nam) {
      let err = format!("The imported datatype '{nam}' conflicts with the datatype '{nam}'.");
      diag.add_book_error(err);
    } else {
      for ctr in adt.ctrs.keys() {
        if self.contains_def(ctr) {
          let err = format!("The imported constructor '{ctr}' conflicts with the definition '{ctr}'.");
          diag.add_book_error(err);
        }

        match self.ctrs.entry(ctr.clone()) {
          Entry::Vacant(e) => _ = e.insert(nam.clone()),
          Entry::Occupied(e) => {
            let ctr = e.key();
            let err = format!("The imported constructor '{ctr}' conflicts with the constructor '{ctr}'.");
            diag.add_book_error(err);
          }
        }
      }

      self.adts.insert(nam, adt);
    }
  }

  /// Adds an imported functional definition, unless its name conflicts.
  fn add_imported_def(&mut self, def: Definition, diag: &mut Diagnostics) {
    if !self.has_def_conflict(&def.name, diag) {
      self.fun_defs.insert(def.name.clone(), def);
    }
  }

  /// Adds an imported HVM definition, unless its name conflicts.
  fn add_imported_hvm_def(&mut self, def: HvmDefinition, diag: &mut Diagnostics) {
    if !self.has_def_conflict(&def.name, diag) {
      self.hvm_defs.insert(def.name.clone(), def);
    }
  }

  /// Reports (and returns `true` on) a conflict between an imported
  /// definition name and an existing definition or constructor.
  fn has_def_conflict(&mut self, name: &Name, diag: &mut Diagnostics) -> bool {
    if self.contains_def(name) {
      let err = format!("The imported definition '{name}' conflicts with the definition '{name}'.");
      diag.add_book_error(err);
      true
    } else if self.ctrs.contains_key(name) {
      let err = format!("The imported definition '{name}' conflicts with the constructor '{name}'.");
      diag.add_book_error(err);
      true
    } else {
      false
    }
  }

  /// All definitions of this book (fun, imp and hvm) whose source is still
  /// local, as trait objects sharing the `Def` interface.
  fn local_defs_mut(&mut self) -> impl Iterator<Item = (&Name, &mut dyn Def)> {
    let fun = self.fun_defs.iter_mut().map(|(nam, def)| (nam, def as &mut dyn Def));
    let imp = self.imp_defs.iter_mut().map(|(nam, def)| (nam, def as &mut dyn Def));
    let hvm = self.hvm_defs.iter_mut().map(|(nam, def)| (nam, def as &mut dyn Def));
    fun.chain(imp).chain(hvm).filter(|(_, def)| def.source().is_local())
  }
}
/// Common functions for the different definition types
trait Def {
  /// Renames this definition to its canonical `src/name` form
  /// (prefixed with `__` when not imported by the main book),
  /// recording the old -> new mapping in `binds`.
  fn canonicalize_name(&mut self, src: &Name, main_imports: &ImportsMap, binds: &mut BindMap) {
    let def_name = self.name_mut();
    let mut new_name = Name::new(format!("{}/{}", src, def_name));
    if !main_imports.contains_source(&new_name) {
      new_name = Name::new(format!("__{}", new_name));
    }
    binds.insert(def_name.clone(), new_name.clone());
    *def_name = new_name;
  }
  /// Applies the binds for definition names by placing `use` terms.
  ///
  /// If we know that the bind map doesn't contain any constructor names,
  /// we skip renaming rule patterns.
  fn apply_binds(&mut self, maybe_constructor: bool, binds: &BindMap);
  /// Substitutes type-constructor names in the definition's type
  /// (and, where applicable, in its body).
  fn apply_type_binds(&mut self, binds: &BindMap);
  fn source(&self) -> &Source;
  fn source_mut(&mut self) -> &mut Source;
  fn name_mut(&mut self) -> &mut Name;
}
impl Def for Definition {
  /// Renames constructor patterns (when `maybe_constructor`) and chains
  /// `use` terms over each rule body so bound names resolve to their sources.
  fn apply_binds(&mut self, maybe_constructor: bool, binds: &BindMap) {
    // Recursively renames `Ctr` and named `Var` patterns found in `binds`.
    fn rename_ctr_pattern(pat: &mut Pattern, binds: &BindMap) {
      for pat in pat.children_mut() {
        rename_ctr_pattern(pat, binds);
      }
      match pat {
        Pattern::Ctr(nam, _) => {
          if let Some(alias) = binds.get(nam) {
            *nam = alias.clone();
          }
        }
        Pattern::Var(Some(nam)) => {
          if let Some(alias) = binds.get(nam) {
            *nam = alias.clone();
          }
        }
        _ => {}
      }
    }
    for rule in &mut self.rules {
      if maybe_constructor {
        for pat in &mut rule.pats {
          rename_ctr_pattern(pat, binds);
        }
      }
      let bod = std::mem::take(&mut rule.body);
      rule.body = bod.fold_uses(binds.iter().rev());
    }
  }
  /// Substitutes type-constructor names in the def type and
  /// in the type constructors of every rule body.
  fn apply_type_binds(&mut self, binds: &BindMap) {
    for (from, to) in binds.iter().rev() {
      self.typ.subst_ctr(from, to);
      for rule in &mut self.rules {
        rule.body.subst_type_ctrs(from, to);
      }
    }
  }
  fn source(&self) -> &Source {
    &self.source
  }
  fn source_mut(&mut self) -> &mut Source {
    &mut self.source
  }
  fn name_mut(&mut self) -> &mut Name {
    &mut self.name
  }
}
impl Def for imp::Definition {
  /// Imperative bodies carry no patterns, so only `use` statements are chained.
  fn apply_binds(&mut self, _maybe_constructor: bool, binds: &BindMap) {
    let bod = std::mem::take(&mut self.body);
    self.body = bod.fold_uses(binds.iter().rev());
  }
  /// Substitutes type-constructor names in the def type and in every
  /// statement of the body that mentions a type (`with`, `open`, local defs).
  fn apply_type_binds(&mut self, binds: &BindMap) {
    // Walks every nested statement; `maybe_grow` guards the recursion
    // (presumably growing the stack for deep bodies — see its definition).
    fn subst_type_ctrs_stmt(stmt: &mut Stmt, from: &Name, to: &Name) {
      maybe_grow(|| match stmt {
        Stmt::Assign { nxt, .. } => {
          if let Some(nxt) = nxt {
            subst_type_ctrs_stmt(nxt, from, to);
          }
        }
        Stmt::InPlace { nxt, .. } => {
          subst_type_ctrs_stmt(nxt, from, to);
        }
        Stmt::If { then, otherwise, nxt, .. } => {
          subst_type_ctrs_stmt(then, from, to);
          subst_type_ctrs_stmt(otherwise, from, to);
          if let Some(nxt) = nxt {
            subst_type_ctrs_stmt(nxt, from, to);
          }
        }
        Stmt::Match { arms, nxt, .. } => {
          for MatchArm { lft: _, rgt } in arms {
            subst_type_ctrs_stmt(rgt, from, to);
          }
          if let Some(nxt) = nxt {
            subst_type_ctrs_stmt(nxt, from, to);
          }
        }
        Stmt::Switch { arms, nxt, .. } => {
          for arm in arms {
            subst_type_ctrs_stmt(arm, from, to);
          }
          if let Some(nxt) = nxt {
            subst_type_ctrs_stmt(nxt, from, to);
          }
        }
        Stmt::Bend { step, base, nxt, .. } => {
          subst_type_ctrs_stmt(step, from, to);
          subst_type_ctrs_stmt(base, from, to);
          if let Some(nxt) = nxt {
            subst_type_ctrs_stmt(nxt, from, to);
          }
        }
        Stmt::Fold { arms, nxt, .. } => {
          for MatchArm { lft: _, rgt } in arms {
            subst_type_ctrs_stmt(rgt, from, to);
          }
          if let Some(nxt) = nxt {
            subst_type_ctrs_stmt(nxt, from, to);
          }
        }
        Stmt::With { typ, bod, nxt } => {
          // `with` names a type directly, so rename it here.
          if typ == from {
            *typ = to.clone();
          }
          subst_type_ctrs_stmt(bod, from, to);
          if let Some(nxt) = nxt {
            subst_type_ctrs_stmt(nxt, from, to);
          }
        }
        Stmt::Ask { nxt, .. } => {
          if let Some(nxt) = nxt {
            subst_type_ctrs_stmt(nxt, from, to);
          }
        }
        Stmt::Return { .. } => {}
        Stmt::Open { typ, nxt, .. } => {
          // `open` also names a type directly.
          if typ == from {
            *typ = to.clone();
          }
          subst_type_ctrs_stmt(nxt, from, to);
        }
        Stmt::Use { nxt, .. } => {
          subst_type_ctrs_stmt(nxt, from, to);
        }
        Stmt::LocalDef { def, nxt } => {
          // Nested definitions get the same single-entry bind applied.
          def.apply_type_binds(&[(from.clone(), to.clone())].into_iter().collect());
          subst_type_ctrs_stmt(nxt, from, to);
        }
        Stmt::Err => {}
      })
    }
    for (from, to) in binds.iter().rev() {
      self.typ.subst_ctr(from, to);
      subst_type_ctrs_stmt(&mut self.body, from, to);
    }
  }
  fn source(&self) -> &Source {
    &self.source
  }
  fn source_mut(&mut self) -> &mut Source {
    &mut self.source
  }
  fn name_mut(&mut self) -> &mut Name {
    &mut self.name
  }
}
impl Def for HvmDefinition {
  /// Do nothing, can not apply binds to a HvmDefinition.
  fn apply_binds(&mut self, _maybe_constructor: bool, _binds: &BindMap) {}

  /// Only the definition's type can mention constructors; substitute there.
  fn apply_type_binds(&mut self, binds: &BindMap) {
    for (from, to) in binds.iter().rev() {
      self.typ.subst_ctr(from, to);
    }
  }

  fn source(&self) -> &Source {
    &self.source
  }

  fn source_mut(&mut self) -> &mut Source {
    &mut self.source
  }

  fn name_mut(&mut self) -> &mut Name {
    &mut self.name
  }

  // `canonicalize_name` uses the trait's default implementation; the override
  // previously here duplicated it token for token.
}
impl Term {
  /// Wraps `self` in one `use bind = nam;` layer per map entry, innermost first.
  fn fold_uses<'a>(self, map: impl Iterator<Item = (&'a Name, &'a Name)>) -> Self {
    let mut term = self;
    for (bind, nam) in map {
      term = Self::Use {
        nam: Some(bind.clone()),
        val: Box::new(Self::Var { nam: nam.clone() }),
        nxt: Box::new(term),
      };
    }
    term
  }
}
impl Stmt {
  /// Wraps `self` in one `use bind = nam;` statement per map entry, innermost first.
  fn fold_uses<'a>(self, map: impl Iterator<Item = (&'a Name, &'a Name)>) -> Self {
    let mut stmt = self;
    for (bind, nam) in map {
      stmt = Self::Use {
        nam: bind.clone(),
        val: Box::new(Expr::Var { nam: nam.clone() }),
        nxt: Box::new(stmt),
      };
    }
    stmt
  }
}
| rust | Apache-2.0 | d184863f03e796d1d657958a51dd6dd331ade92d | 2026-01-04T15:41:39.511038Z | false |
HigherOrderCO/Bend | https://github.com/HigherOrderCO/Bend/blob/d184863f03e796d1d657958a51dd6dd331ade92d/tests/golden_tests.rs | tests/golden_tests.rs | //! This module runs snapshot tests for compiling and running Bend programs.
//!
//! The result of each test is saved as a snapshot and used as golden output
//! for future tests. This allows us to test regressions in compilation and
//! have a history of how certain programs compiled and ran.
//!
//! These tests use `cargo-insta`. To run the tests, run `cargo insta test`.
//! If there are any changes to the snapshots, they'll be highlighted by the
//! CLI tool. Then, run `cargo insta review` to review these changes.
use bend::{
check_book, compile_book, desugar_book,
diagnostics::{Diagnostics, DiagnosticsConfig, Severity},
fun::{
load_book::do_parse_book, net_to_term::net_to_term, parser::ParseBook, term_to_net::Labels, Book, Ctx,
Name,
},
hvm::hvm_book_show_pretty,
imports::DefaultLoader,
load_to_book,
net::hvm_to_net::hvm_to_net,
run_book, AdtEncoding, CompileOpts, RunOpts,
};
use insta::assert_snapshot;
use itertools::Itertools;
use std::{
collections::HashMap,
fmt::Write,
io::Read,
path::{Path, PathBuf},
};
use stdext::function_name;
use walkdir::WalkDir;
// Since running a program requires messing with stdout and stderr,
// if we run multiple at the same time, their outputs can get mixed.
// So we put a mutex to execute only one "run" test at a time.
static RUN_MUTEX: std::sync::Mutex<()> = std::sync::Mutex::new(());
// Directory (relative to the crate root) holding the golden-test inputs.
const TESTS_PATH: &str = "/tests/golden_tests/";
// Signature shared by all per-file test drivers:
// (file contents, file path) -> text to snapshot.
type RunFn = dyn Fn(&str, &Path) -> Result<String, Diagnostics>;
/// Parse a single Bend file (with the builtin book preloaded) into a `Book`.
pub fn parse_book_single_file(code: &str, origin: &Path) -> Result<Book, Diagnostics> {
    let parsed = do_parse_book(code, origin, ParseBook::builtins())?;
    parsed.to_fun()
}
/// Runs one golden-test file through every given driver and compares the
/// combined output against the stored `insta` snapshot named after the file.
fn run_single_golden_test(path: &Path, run: &[&RunFn]) -> Result<(), String> {
println!("{}", path.display())😉
let code = std::fs::read_to_string(path).map_err(|e| e.to_string())?;
// Snapshot name is the path relative to the golden-tests directory.
let file_name = path.to_str().and_then(|path| path.rsplit_once(TESTS_PATH)).unwrap().1;
// unfortunately we need to do this
let file_path = format!("{}{}", &TESTS_PATH[1..], file_name);
let file_path = Path::new(&file_path);
// NOTE(review): `file_path` is constant in the loop below, so this map
// always holds exactly one key; it is effectively a Vec of driver outputs.
let mut results: HashMap<&Path, Vec<String>> = HashMap::new();
for fun in run {
// A driver error is rendered into the snapshot instead of failing the test.
let result = fun(&code, file_path).unwrap_or_else(|err| err.to_string());
results.entry(file_path).or_default().push(result);
}
let results = results.into_values().map(|v| v.join("\n")).collect_vec();
let mut settings = insta::Settings::clone_current();
settings.set_prepend_module_to_snapshot(false);
settings.set_omit_expression(true);
settings.set_input_file(path);
settings.bind(|| {
for result in results {
assert_snapshot!(file_name, result);
}
});
Ok(())
}
/// Convenience wrapper: run a golden-test directory with a single driver.
fn run_golden_test_dir(test_name: &str, run: &RunFn) {
run_golden_test_dir_multiple(test_name, &[run])
}
/// Walks `tests/golden_tests/<name>` (non-recursively) and runs every `.bend`
/// file through `run_single_golden_test` with all given drivers.
///
/// `test_name` comes from `function_name!()` (i.e. `module::test`), so the
/// directory name is the part after the last `:`.
fn run_golden_test_dir_multiple(test_name: &str, run: &[&RunFn]) {
let root = PathBuf::from(format!(
"{}{TESTS_PATH}{}",
env!("CARGO_MANIFEST_DIR"),
test_name.rsplit_once(':').unwrap().1
));
// Keep the root and directories so traversal proceeds; only `.bend` files
// are accepted as inputs (`max_depth(1)` = no recursion into subdirs).
let walker = WalkDir::new(&root).sort_by_file_name().max_depth(1).into_iter().filter_entry(|e| {
let path = e.path();
path == root || path.is_dir() || (path.is_file() && path.extension().is_some_and(|x| x == "bend"))
});
for entry in walker {
let entry = entry.unwrap();
let path = entry.path();
if path.is_file() {
eprintln!("Testing {}", path.display());
run_single_golden_test(path, run).unwrap();
}
}
}
/* Snapshot/regression/golden tests
Each tests runs all the files in tests/golden_tests/<test name>.
The test functions decide how exactly to process the test programs
and what to save as a snapshot.
*/
/// Compiles a file with regular compilation options.
#[test]
fn compile_file() {
run_golden_test_dir(function_name!(), &|code, path| {
let mut book = parse_book_single_file(code, path)?;
let compile_opts = CompileOpts::default();
// Unused definitions are expected in test inputs; don't fail on them.
let diagnostics_cfg = DiagnosticsConfig { unused_definition: Severity::Allow, ..Default::default() };
let res = compile_book(&mut book, compile_opts, diagnostics_cfg, None)?;
// Snapshot both the diagnostics and the compiled HVM book.
Ok(format!("{}{}", res.diagnostics, hvm_book_show_pretty(&res.hvm_book)))
})
}
/// Compiles a file with `-Oall` option.
#[test]
fn compile_file_o_all() {
run_golden_test_dir(function_name!(), &|code, path| {
let mut book = parse_book_single_file(code, path)?;
// Enable every optimization pass.
let opts = CompileOpts::default().set_all();
let diagnostics_cfg = DiagnosticsConfig {
// Recursion cycles get reported but must not fail the test.
recursion_cycle: Severity::Warning,
unused_definition: Severity::Allow,
..Default::default()
};
let res = compile_book(&mut book, opts, diagnostics_cfg, None)?;
Ok(format!("{}{}", res.diagnostics, hvm_book_show_pretty(&res.hvm_book)))
})
}
/// Compiles a file with `-Ono-all` option.
#[test]
fn compile_file_o_no_all() {
run_golden_test_dir(function_name!(), &|code, path| {
let mut book = parse_book_single_file(code, path)?;
// Disable every optional compilation pass.
let compile_opts = CompileOpts::default().set_no_all();
let diagnostics_cfg = DiagnosticsConfig::default();
let res = compile_book(&mut book, compile_opts, diagnostics_cfg, None)?;
// Only the compiled output is snapshotted here, not the diagnostics.
Ok(hvm_book_show_pretty(&res.hvm_book).to_string())
})
}
/// Runs a file, but with linear readback enabled.
#[test]
fn linear_readback() {
run_golden_test_dir(function_name!(), &|code, path| {
// Serialize "run" tests: they capture stdout/stderr (see RUN_MUTEX).
let _guard = RUN_MUTEX.lock().unwrap();
let book = parse_book_single_file(code, path)?;
let compile_opts = CompileOpts::default().set_all();
let diagnostics_cfg = DiagnosticsConfig::default();
let (term, _, diags) = run_book(
book,
RunOpts { linear_readback: true, ..Default::default() },
compile_opts,
diagnostics_cfg,
None,
"run",
)?
.unwrap();
let res = format!("{diags}{term}");
Ok(res)
});
}
/// Runs a file with regular compilation options, but rejecting all warnings.
/// Runs once for each ADT encoding.
#[test]
fn run_file() {
run_golden_test_dir_multiple(
function_name!(),
&[(&|code, path| {
// Serialize "run" tests: they capture stdout/stderr (see RUN_MUTEX).
let _guard = RUN_MUTEX.lock().unwrap();
let book = parse_book_single_file(code, path)?;
// Every warning is an error, except unused definitions.
let diagnostics_cfg = DiagnosticsConfig {
unused_definition: Severity::Allow,
..DiagnosticsConfig::new(Severity::Error, true)
};
let run_opts = RunOpts::default();
let mut res = String::new();
// Run once per ADT encoding; both outputs go into one snapshot.
for adt_encoding in [AdtEncoding::NumScott, AdtEncoding::Scott] {
let compile_opts = CompileOpts { adt_encoding, ..CompileOpts::default() };
let (term, _, diags) =
run_book(book.clone(), run_opts.clone(), compile_opts, diagnostics_cfg, None, "run")?.unwrap();
res.push_str(&format!("{adt_encoding}:\n{diags}{term}\n\n"));
}
Ok(res)
})],
)
}
/// Runs bend programs, all sharing a common lib to test the import system.
#[test]
fn import_system() {
run_golden_test_dir_multiple(
function_name!(),
&[(&|code, path| {
// Serialize "run" tests: they capture stdout/stderr (see RUN_MUTEX).
let _guard = RUN_MUTEX.lock().unwrap();
let diagnostics_cfg = DiagnosticsConfig {
unused_definition: Severity::Allow,
..DiagnosticsConfig::new(Severity::Error, true)
};
// Unlike other tests, resolve imports with the default loader so the
// shared lib next to the test files is actually loaded.
let book = load_to_book(path, code, DefaultLoader::new(path), diagnostics_cfg)?;
let run_opts = RunOpts::default();
let mut res = String::new();
let compile_opts = CompileOpts::default();
let (term, _, diags) = run_book(book, run_opts, compile_opts, diagnostics_cfg, None, "run")?.unwrap();
res.push_str(&format!("{diags}{term}\n\n"));
Ok(res)
})],
)
}
/// Reads back an HVM net.
#[test]
fn readback_hvm() {
run_golden_test_dir(function_name!(), &|code, _| {
// The input is raw HVM net syntax; the file path is irrelevant here.
let mut p = hvm::ast::CoreParser::new(code);
let net = p.parse_net()?;
let book = Book::default();
let compat_net = hvm_to_net(&net);
let mut diags = Diagnostics::default();
// Non-linear readback (`false`); snapshot diagnostics plus the term.
let term = net_to_term(&compat_net, &book, &Labels::default(), false, &mut diags);
Ok(format!("{}{}", diags, term))
})
}
/// Runs compilation up to fixing, simplifying and linearizing matches.
#[test]
fn simplify_matches() {
run_golden_test_dir(function_name!(), &|code, path| {
let diagnostics_cfg = DiagnosticsConfig {
unused_definition: Severity::Allow,
irrefutable_match: Severity::Warning,
unreachable_match: Severity::Warning,
..DiagnosticsConfig::new(Severity::Error, true)
};
let mut book = parse_book_single_file(code, path)?;
let mut ctx = Ctx::new(&mut book, diagnostics_cfg);
// Replays the compiler pipeline up to and including match linearization;
// the pass order below is significant and presumably mirrors
// `desugar_book` — keep them in sync.
ctx.check_shared_names();
ctx.book.encode_adts(AdtEncoding::NumScott);
ctx.fix_match_defs()?;
ctx.desugar_open()?;
ctx.book.encode_builtins();
ctx.resolve_refs()?;
ctx.resolve_type_ctrs()?;
ctx.desugar_match_defs()?;
ctx.fix_match_terms()?;
ctx.book.lift_local_defs();
ctx.desugar_bend()?;
ctx.desugar_fold()?;
ctx.desugar_with_blocks()?;
ctx.check_unbound_vars()?;
ctx.book.make_var_names_unique();
ctx.book.desugar_use();
ctx.book.linearize_match_binds();
ctx.book.linearize_match_with();
ctx.check_unbound_vars()?;
ctx.book.make_var_names_unique();
ctx.book.desugar_use();
ctx.book.make_var_names_unique();
ctx.prune(false);
Ok(format!("{}\n{}", ctx.book, ctx.info))
})
}
/// Runs compilation up to encoding `match` terms as lambdas.
#[test]
fn encode_pattern_match() {
run_golden_test_dir(function_name!(), &|code, path| {
let mut result = String::new();
// Exercise both ADT encodings; each run is snapshotted separately below.
for adt_encoding in [AdtEncoding::Scott, AdtEncoding::NumScott] {
let diagnostics_cfg = DiagnosticsConfig::default();
let mut book = parse_book_single_file(code, path)?;
let mut ctx = Ctx::new(&mut book, diagnostics_cfg);
// Same pipeline as `simplify_matches`, continued through
// `encode_matches` and `linearize_vars`.
ctx.check_shared_names();
ctx.book.encode_adts(adt_encoding);
ctx.fix_match_defs()?;
ctx.desugar_open()?;
ctx.book.encode_builtins();
ctx.resolve_refs()?;
ctx.desugar_match_defs()?;
ctx.fix_match_terms()?;
ctx.book.lift_local_defs();
ctx.desugar_bend()?;
ctx.desugar_fold()?;
ctx.desugar_with_blocks()?;
ctx.check_unbound_vars()?;
ctx.book.make_var_names_unique();
ctx.book.desugar_use();
ctx.book.linearize_match_binds();
ctx.book.linearize_match_with();
ctx.book.encode_matches(adt_encoding);
ctx.check_unbound_vars()?;
ctx.book.make_var_names_unique();
ctx.book.desugar_use();
ctx.book.make_var_names_unique();
ctx.book.linearize_vars();
ctx.prune(false);
writeln!(result, "{adt_encoding}\n{}\n", ctx.book).unwrap();
}
Ok(result)
})
}
/// Parses a file, but does not desugar or compile it.
#[test]
fn parse_file() {
run_golden_test_dir(function_name!(), &|code, path| {
let mut book = parse_book_single_file(code, path)?;
let mut ctx = Ctx::new(&mut book, Default::default());
// Minimal processing only: entrypoint, encodings, ref resolution, prune.
ctx.set_entrypoint();
ctx.book.encode_adts(AdtEncoding::NumScott);
ctx.book.encode_builtins();
ctx.resolve_refs().expect("Resolve refs");
ctx.prune(false);
Ok(book.to_string())
})
}
/// Runs the check command on a file.
#[test]
fn check_file() {
run_golden_test_dir(function_name!(), &|code, path| {
let compile_opts = CompileOpts::default();
// Warnings are errors, except unused definitions.
let diagnostics_cfg = DiagnosticsConfig {
unused_definition: Severity::Allow,
..DiagnosticsConfig::new(Severity::Error, true)
};
let mut book = parse_book_single_file(code, path)?;
check_book(&mut book, diagnostics_cfg, compile_opts)?;
// Checking must not alter the book; snapshot it as-is.
Ok(book.to_string())
})
}
/// Runs compilation up to the last term-level pass (`bend desugar` command).
#[test]
fn desugar_file() {
run_golden_test_dir(function_name!(), &|code, path| {
let compile_opts = CompileOpts::default();
// Warnings are errors, except unused definitions.
let diagnostics_cfg = DiagnosticsConfig {
unused_definition: Severity::Allow,
..DiagnosticsConfig::new(Severity::Error, true)
};
let mut book = parse_book_single_file(code, path)?;
desugar_book(&mut book, compile_opts, diagnostics_cfg, None)?;
Ok(book.to_string())
})
}
/// Runs a file that is expected to hang.
#[test]
#[ignore = "bug - the subprocess created by run_book leaks"]
fn hangs() {
// Seconds to wait before deciding the program really hangs.
let expected_normalization_time = 5;
run_golden_test_dir(function_name!(), &move |code, path| {
let _guard = RUN_MUTEX.lock().unwrap();
let book = parse_book_single_file(code, path)?;
let compile_opts = CompileOpts::default().set_all();
let diagnostics_cfg = DiagnosticsConfig::new(Severity::Allow, false);
// Run in a worker thread so we can time out; when the program does hang,
// the thread (and run_book's subprocess) is leaked — hence the #[ignore].
let thread = std::thread::spawn(move || {
run_book(book, RunOpts::default(), compile_opts, diagnostics_cfg, None, "run")
});
std::thread::sleep(std::time::Duration::from_secs(expected_normalization_time));
if !thread.is_finished() {
Ok("Hangs".into())
} else if let Err(diags) = thread.join().unwrap() {
Err(format!("Doesn't hang. (Compilation failed)\n{diags}").into())
} else {
Err("Doesn't hang. (Ran to the end)".to_string().into())
}
})
}
/// Compiles a file with a custom entrypoint.
#[test]
fn compile_entrypoint() {
run_golden_test_dir(function_name!(), &|code, path| {
let mut book = parse_book_single_file(code, path)?;
// Override the default entrypoint with "foo".
book.entrypoint = Some(Name::new("foo"));
// All warnings are errors for this test.
// (Was `DiagnosticsConfig { ..DiagnosticsConfig::new(..) }` — a struct
// update with zero fields, which is just the inner expression.)
let diagnostics_cfg = DiagnosticsConfig::new(Severity::Error, true);
let res = compile_book(&mut book, CompileOpts::default(), diagnostics_cfg, None)?;
Ok(format!("{}{}", res.diagnostics, hvm_book_show_pretty(&res.hvm_book)))
})
}
/// Runs a file with a custom entrypoint.
#[test]
#[ignore = "while execution with different entrypoints is not implemented for hvm32"]
fn run_entrypoint() {
run_golden_test_dir(function_name!(), &|code, path| {
let _guard = RUN_MUTEX.lock().unwrap();
let mut book = parse_book_single_file(code, path)?;
// Override the default entrypoint with "foo".
book.entrypoint = Some(Name::new("foo"));
let compile_opts = CompileOpts::default().set_all();
// All warnings are errors for this test.
// (Was a zero-field struct update around `DiagnosticsConfig::new`.)
let diagnostics_cfg = DiagnosticsConfig::new(Severity::Error, true);
let (term, _, diags) =
run_book(book, RunOpts::default(), compile_opts, diagnostics_cfg, None, "run")?.unwrap();
let res = format!("{diags}{term}");
Ok(res)
})
}
/// Runs a Bend CLI command.
#[test]
fn cli() {
run_golden_test_dir(function_name!(), &|_code, path| {
let _guard = RUN_MUTEX.lock().unwrap();
// CLI arguments live in a sibling `<name>.args` file, one arg per line.
let mut args_path = PathBuf::from(path);
assert!(args_path.set_extension("args"));
let mut args_buf = String::with_capacity(16);
let mut args_file = std::fs::File::open(args_path).expect("File exists");
args_file.read_to_string(&mut args_buf).expect("Read args");
let args = args_buf.lines();
// Snapshot stderr followed by stdout of the real `bend` binary.
let output =
std::process::Command::new(env!("CARGO_BIN_EXE_bend")).args(args).output().expect("Run command");
let res =
format!("{}{}", String::from_utf8_lossy(&output.stderr), String::from_utf8_lossy(&output.stdout));
Ok(res)
})
}
/// Compiles a file to check for mutual recursion.
#[test]
fn mutual_recursion() {
run_golden_test_dir(function_name!(), &|code, path| {
let diagnostics_cfg =
DiagnosticsConfig { recursion_cycle: Severity::Error, ..DiagnosticsConfig::new(Severity::Allow, true) };
let mut book = parse_book_single_file(code, path)?;
let opts = CompileOpts { merge: true, ..CompileOpts::default() };
let res = compile_book(&mut book, opts, diagnostics_cfg, None)?;
Ok(format!("{}{}", res.diagnostics, hvm_book_show_pretty(&res.hvm_book)))
})
}
/// Runs a file that uses IO.
#[test]
fn io() {
run_golden_test_dir(function_name!(), &|code, path| {
let _guard = RUN_MUTEX.lock().unwrap();
let book = parse_book_single_file(code, path)?;
let compile_opts = CompileOpts::default();
let diagnostics_cfg = DiagnosticsConfig::default();
// IO programs are executed through the C runtime ("run-c").
let (term, _, diags) =
run_book(book, RunOpts::default(), compile_opts, diagnostics_cfg, None, "run-c")?.unwrap();
let res = format!("{diags}{term}");
Ok(format!("Strict mode:\n{res}"))
})
}
/// Runs a file that uses the prelude.
#[test]
fn prelude() {
run_golden_test_dir(function_name!(), &|code, path| {
let _guard = RUN_MUTEX.lock().unwrap();
let book = parse_book_single_file(code, path)?;
let compile_opts = CompileOpts::default();
// All warnings are errors: prelude usage must compile cleanly.
let diagnostics_cfg = DiagnosticsConfig::new(Severity::Error, true);
let (term, _, diags) =
run_book(book, RunOpts::default(), compile_opts, diagnostics_cfg, None, "run-c")?.unwrap();
let res = format!("{diags}{term}");
Ok(format!("Strict mode:\n{res}"))
})
}
/// Runs all examples in the examples folder.
#[test]
fn examples() -> Result<(), Diagnostics> {
let examples_path = PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("examples");
for entry in WalkDir::new(examples_path)
.min_depth(1)
.into_iter()
.filter_map(|e| e.ok())
// `is_some_and` for consistency with the filter in
// `run_golden_test_dir_multiple` (was `map_or(false, ..)`).
.filter(|e| e.path().extension().is_some_and(|ext| ext == "bend"))
{
// Recover from a poisoned mutex so one failed example doesn't stop the rest.
let _guard = RUN_MUTEX.lock().unwrap_or_else(|e| e.into_inner());
let path = entry.path();
eprintln!("Testing {}", path.display());
let code = std::fs::read_to_string(path).map_err(|e| e.to_string())?;
let book = parse_book_single_file(&code, path).unwrap();
let compile_opts = CompileOpts::default();
let diagnostics_cfg = DiagnosticsConfig::default();
let (term, _, diags) =
run_book(book, RunOpts::default(), compile_opts, diagnostics_cfg, None, "run-c")?.unwrap();
let res = format!("{diags}{term}");
let mut settings = insta::Settings::clone_current();
settings.set_prepend_module_to_snapshot(false);
settings.set_omit_expression(true);
settings.set_input_file(path);
settings.bind(|| {
assert_snapshot!(format!("examples__{}", path.file_name().unwrap().to_str().unwrap()), res);
});
}
Ok(())
}
/// Test that the Scott encoding correctly triggers unused definition warnings.
#[test]
fn scott_triggers_unused() {
run_golden_test_dir(function_name!(), &|code, path| {
let mut book = parse_book_single_file(code, path)?;
let opts = CompileOpts::default();
// Unused definitions are hard errors: the warning must fire here.
let diagnostics_cfg =
DiagnosticsConfig { unused_definition: Severity::Error, ..DiagnosticsConfig::default() };
let res = compile_book(&mut book, opts, diagnostics_cfg, None)?;
Ok(format!("{}{}", res.diagnostics, hvm_book_show_pretty(&res.hvm_book)))
})
}
// TODO: also run the long string file to test the readback
/// Compiles a file that is very large and takes a long time to compile.
/// Only outputs if compilation worked without errors.
#[test]
fn compile_long() {
run_golden_test_dir(function_name!(), &|code, path| {
let mut book = parse_book_single_file(code, path)?;
let opts = CompileOpts::default().set_all();
let diagnostics_cfg = DiagnosticsConfig {
recursion_cycle: Severity::Warning,
unused_definition: Severity::Allow,
..Default::default()
};
// `opts` is not used again, so move it instead of cloning (redundant clone).
compile_book(&mut book, opts, diagnostics_cfg, None)?;
// Success alone is the snapshot; the compiled output would be huge.
Ok("Compiled".to_string())
})
}
| rust | Apache-2.0 | d184863f03e796d1d657958a51dd6dd331ade92d | 2026-01-04T15:41:39.511038Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/compute_tools/src/config.rs | compute_tools/src/config.rs | use anyhow::Result;
use std::fmt::Write as FmtWrite;
use std::fs::{File, OpenOptions};
use std::io;
use std::io::Write;
use std::io::prelude::*;
use std::path::Path;
use compute_api::responses::TlsConfig;
use compute_api::spec::{
ComputeAudit, ComputeMode, ComputeSpec, DatabricksSettings, GenericOption,
};
use crate::compute::ComputeNodeParams;
use crate::pg_helpers::{
DatabricksSettingsExt as _, GenericOptionExt, GenericOptionsSearch, PgOptionsSerialize,
escape_conf_value,
};
use crate::tls::{self, SERVER_CRT, SERVER_KEY};
use utils::shard::{ShardIndex, ShardNumber};
/// Check that `line` is inside a text file and put it there if it is not.
/// Create file if it doesn't exist.
///
/// Returns `Ok(false)` when the line was already present, `Ok(true)` when it
/// was appended.
pub fn line_in_file(path: &Path, line: &str) -> Result<bool> {
    let mut file = OpenOptions::new()
        .read(true)
        .write(true)
        .create(true)
        .append(false)
        .truncate(false)
        .open(path)?;
    // Scan the existing lines; after the loop the cursor sits at EOF, so the
    // `write!` below effectively appends.
    let mut separator_newlines = 0;
    for existing_line in io::BufReader::new(&file).lines() {
        if existing_line? == line {
            return Ok(false);
        }
        // The file has at least one line: separate the new line with '\n'.
        separator_newlines = 1;
    }
    write!(file, "{}{}", "\n".repeat(separator_newlines), line)?;
    Ok(true)
}
/// Create or completely rewrite configuration file specified by `path`
///
/// Writes `postgresql.conf` under `pgdata_path` from scratch, in order:
/// the verbatim `postgresql.conf` payload from the spec; Neon storage
/// settings (pageserver/safekeeper connection info and ids); TLS, locale,
/// compute-mode and audit settings; spec-provided extra settings; and
/// finally the overrides that must win over everything else. Postgres
/// takes the last occurrence of a setting, so section order matters.
#[allow(clippy::too_many_arguments)]
pub fn write_postgres_conf(
pgdata_path: &Path,
params: &ComputeNodeParams,
spec: &ComputeSpec,
postgres_port: Option<u16>,
extension_server_port: u16,
tls_config: &Option<TlsConfig>,
databricks_settings: Option<&DatabricksSettings>,
lakebase_mode: bool,
) -> Result<()> {
let path = pgdata_path.join("postgresql.conf");
// File::create() destroys the file content if it exists.
let mut file = File::create(path)?;
// Write the postgresql.conf content from the spec file as is.
if let Some(conf) = &spec.cluster.postgresql_conf {
writeln!(file, "{conf}")?;
}
// Add options for connecting to storage
writeln!(file, "# Neon storage settings")?;
writeln!(file)?;
if let Some(conninfo) = &spec.pageserver_connection_info {
// Stripe size GUC should be defined prior to connection string
if let Some(stripe_size) = conninfo.stripe_size {
writeln!(
file,
"# from compute spec's pageserver_connection_info.stripe_size field"
)?;
writeln!(file, "neon.stripe_size={stripe_size}")?;
}
let mut libpq_urls: Option<Vec<String>> = Some(Vec::new());
let num_shards = if conninfo.shard_count.0 == 0 {
1 // unsharded, treat it as a single shard
} else {
conninfo.shard_count.0
};
for shard_number in 0..num_shards {
let shard_index = ShardIndex {
shard_number: ShardNumber(shard_number),
shard_count: conninfo.shard_count,
};
let info = conninfo.shards.get(&shard_index).ok_or_else(|| {
anyhow::anyhow!(
"shard {shard_index} missing from pageserver_connection_info shard map"
)
})?;
// Only the first (primary) pageserver of each shard goes into the
// connstring.
let first_pageserver = info
.pageservers
.first()
.expect("must have at least one pageserver");
// Add the libpq URL to the array, or if the URL is missing, reset the array
// forgetting any previous entries. All servers must have a libpq URL, or none
// at all.
if let Some(url) = &first_pageserver.libpq_url {
if let Some(ref mut urls) = libpq_urls {
urls.push(url.clone());
}
} else {
libpq_urls = None
}
}
if let Some(libpq_urls) = libpq_urls {
writeln!(
file,
"# derived from compute spec's pageserver_connection_info field"
)?;
writeln!(
file,
"neon.pageserver_connstring={}",
escape_conf_value(&libpq_urls.join(","))
)?;
} else {
writeln!(file, "# no neon.pageserver_connstring")?;
}
} else {
// Legacy spec fields, used when pageserver_connection_info is absent.
// Stripe size GUC should be defined prior to connection string
if let Some(stripe_size) = spec.shard_stripe_size {
writeln!(file, "# from compute spec's shard_stripe_size field")?;
writeln!(file, "neon.stripe_size={stripe_size}")?;
}
if let Some(s) = &spec.pageserver_connstring {
writeln!(file, "# from compute spec's pageserver_connstring field")?;
writeln!(file, "neon.pageserver_connstring={}", escape_conf_value(s))?;
}
}
if !spec.safekeeper_connstrings.is_empty() {
let mut neon_safekeepers_value = String::new();
tracing::info!(
"safekeepers_connstrings is not zero, gen: {:?}",
spec.safekeepers_generation
);
// If generation is given, prepend sk list with g#number:
if let Some(generation) = spec.safekeepers_generation {
write!(neon_safekeepers_value, "g#{generation}:")?;
}
neon_safekeepers_value.push_str(&spec.safekeeper_connstrings.join(","));
writeln!(
file,
"neon.safekeepers={}",
escape_conf_value(&neon_safekeepers_value)
)?;
}
if let Some(s) = &spec.tenant_id {
writeln!(file, "neon.tenant_id={}", escape_conf_value(&s.to_string()))?;
}
if let Some(s) = &spec.timeline_id {
writeln!(
file,
"neon.timeline_id={}",
escape_conf_value(&s.to_string())
)?;
}
if let Some(s) = &spec.project_id {
writeln!(file, "neon.project_id={}", escape_conf_value(s))?;
}
if let Some(s) = &spec.branch_id {
writeln!(file, "neon.branch_id={}", escape_conf_value(s))?;
}
if let Some(s) = &spec.endpoint_id {
writeln!(file, "neon.endpoint_id={}", escape_conf_value(s))?;
}
// tls
if let Some(tls_config) = tls_config {
writeln!(file, "ssl = on")?;
// postgres requires the keyfile to be in a secure file,
// currently too complicated to ensure that at the VM level,
// so we just copy them to another file instead. :shrug:
tls::update_key_path_blocking(pgdata_path, tls_config);
// these are the default, but good to be explicit.
writeln!(file, "ssl_cert_file = '{SERVER_CRT}'")?;
writeln!(file, "ssl_key_file = '{SERVER_KEY}'")?;
}
// Locales
if cfg!(target_os = "macos") {
writeln!(file, "lc_messages='C'")?;
writeln!(file, "lc_monetary='C'")?;
writeln!(file, "lc_time='C'")?;
writeln!(file, "lc_numeric='C'")?;
} else {
writeln!(file, "lc_messages='C.UTF-8'")?;
writeln!(file, "lc_monetary='C.UTF-8'")?;
writeln!(file, "lc_time='C.UTF-8'")?;
writeln!(file, "lc_numeric='C.UTF-8'")?;
}
writeln!(file, "neon.compute_mode={}", spec.mode.to_type_str())?;
match spec.mode {
ComputeMode::Primary => {}
ComputeMode::Static(lsn) => {
// hot_standby is 'on' by default, but let's be explicit
writeln!(file, "hot_standby=on")?;
writeln!(file, "recovery_target_lsn='{lsn}'")?;
}
ComputeMode::Replica => {
// hot_standby is 'on' by default, but let's be explicit
writeln!(file, "hot_standby=on")?;
}
}
if cfg!(target_os = "linux") {
// Check /proc/sys/vm/overcommit_memory -- if it equals 2 (i.e. linux memory overcommit is
// disabled), then the control plane has enabled swap and we should set
// dynamic_shared_memory_type = 'mmap'.
//
// This is (maybe?) temporary - for more, see https://github.com/neondatabase/cloud/issues/12047.
let overcommit_memory_contents = std::fs::read_to_string("/proc/sys/vm/overcommit_memory")
// ignore any errors - they may be expected to occur under certain situations (e.g. when
// not running in Linux).
.unwrap_or_else(|_| String::new());
if overcommit_memory_contents.trim() == "2" {
let opt = GenericOption {
name: "dynamic_shared_memory_type".to_owned(),
value: Some("mmap".to_owned()),
vartype: "enum".to_owned(),
};
writeln!(file, "{}", opt.to_pg_setting())?;
}
}
writeln!(
file,
"neon.privileged_role_name={}",
escape_conf_value(params.privileged_role_name.as_str())
)?;
// If there are any extra options in the 'settings' field, append those
if spec.cluster.settings.is_some() {
writeln!(file, "# Managed by compute_ctl: begin")?;
write!(file, "{}", spec.cluster.settings.as_pg_settings())?;
writeln!(file, "# Managed by compute_ctl: end")?;
}
// If base audit logging is enabled, configure it.
// In this setup, the audit log will be written to the standard postgresql log.
//
// If compliance audit logging is enabled, configure pgaudit.
//
// Note, that this is called after the settings from spec are written.
// This way we always override the settings from the spec
// and don't allow the user or the control plane admin to change them.
match spec.audit_log_level {
ComputeAudit::Disabled => {}
ComputeAudit::Log | ComputeAudit::Base => {
writeln!(file, "# Managed by compute_ctl base audit settings: start")?;
writeln!(file, "pgaudit.log='ddl,role'")?;
// Disable logging of catalog queries to reduce the noise
writeln!(file, "pgaudit.log_catalog=off")?;
if let Some(libs) = spec.cluster.settings.find("shared_preload_libraries") {
let mut extra_shared_preload_libraries = String::new();
if !libs.contains("pgaudit") {
extra_shared_preload_libraries.push_str(",pgaudit");
}
writeln!(
file,
"shared_preload_libraries='{libs}{extra_shared_preload_libraries}'"
)?;
} else {
// Typically, this should be unreacheable,
// because we always set at least some shared_preload_libraries in the spec
// but let's handle it explicitly anyway.
writeln!(file, "shared_preload_libraries='neon,pgaudit'")?;
}
writeln!(file, "# Managed by compute_ctl base audit settings: end")?;
}
ComputeAudit::Hipaa | ComputeAudit::Extended | ComputeAudit::Full => {
writeln!(
file,
"# Managed by compute_ctl compliance audit settings: begin"
)?;
// Enable logging of parameters.
// This is very verbose and may contain sensitive data.
if spec.audit_log_level == ComputeAudit::Full {
writeln!(file, "pgaudit.log_parameter=on")?;
writeln!(file, "pgaudit.log='all'")?;
} else {
writeln!(file, "pgaudit.log_parameter=off")?;
writeln!(file, "pgaudit.log='all, -misc'")?;
}
// Disable logging of catalog queries
// The catalog doesn't contain sensitive data, so we don't need to audit it.
writeln!(file, "pgaudit.log_catalog=off")?;
// Set log rotation to 5 minutes
// TODO: tune this after performance testing
writeln!(file, "pgaudit.log_rotation_age=5")?;
// Enable audit logs for pg_session_jwt extension
// TODO: Consider a good approach for shipping pg_session_jwt logs to the same sink as
// pgAudit - additional context in https://github.com/neondatabase/cloud/issues/28863
//
// writeln!(file, "pg_session_jwt.audit_log=on")?;
// Add audit shared_preload_libraries, if they are not present.
//
// The caller who sets the flag is responsible for ensuring that the necessary
// shared_preload_libraries are present in the compute image,
// otherwise the compute start will fail.
if let Some(libs) = spec.cluster.settings.find("shared_preload_libraries") {
let mut extra_shared_preload_libraries = String::new();
if !libs.contains("pgaudit") {
extra_shared_preload_libraries.push_str(",pgaudit");
}
if !libs.contains("pgauditlogtofile") {
extra_shared_preload_libraries.push_str(",pgauditlogtofile");
}
writeln!(
file,
"shared_preload_libraries='{libs}{extra_shared_preload_libraries}'"
)?;
} else {
// Typically, this should be unreacheable,
// because we always set at least some shared_preload_libraries in the spec
// but let's handle it explicitly anyway.
writeln!(
file,
"shared_preload_libraries='neon,pgaudit,pgauditlogtofile'"
)?;
}
writeln!(
file,
"# Managed by compute_ctl compliance audit settings: end"
)?;
}
}
writeln!(file, "neon.extension_server_port={extension_server_port}")?;
if spec.drop_subscriptions_before_start {
writeln!(file, "neon.disable_logical_replication_subscribers=true")?;
} else {
// be explicit about the default value
writeln!(file, "neon.disable_logical_replication_subscribers=false")?;
}
// We need Postgres to send logs to rsyslog so that we can forward them
// further to customers' log aggregation systems.
if spec.logs_export_host.is_some() {
writeln!(file, "log_destination='stderr,syslog'")?;
}
if lakebase_mode {
// Explicitly set the port based on the connstr, overriding any previous port setting.
// Note: It is important that we don't specify a different port again after this.
let port = postgres_port.expect("port must be present in connstr");
writeln!(file, "port = {port}")?;
// This is databricks specific settings.
// This should be at the end of the file but before `compute_ctl_temp_override.conf` below
// so that it can override any settings above.
// `compute_ctl_temp_override.conf` is intended to override any settings above during specific operations.
// To prevent potential breakage in the future, we keep it above `compute_ctl_temp_override.conf`.
writeln!(file, "# Databricks settings start")?;
if let Some(settings) = databricks_settings {
writeln!(file, "{}", settings.as_pg_settings())?;
}
writeln!(file, "# Databricks settings end")?;
}
// This is essential to keep this line at the end of the file,
// because it is intended to override any settings above.
writeln!(file, "include_if_exists = 'compute_ctl_temp_override.conf'")?;
Ok(())
}
/// Write `options` into `compute_ctl_temp_override.conf`, run `exec`, then
/// empty the file again. Postgres picks the overrides up via the trailing
/// `include_if_exists` line of `postgresql.conf`. The (now empty) file is
/// left in place, only truncated — presumably so a concurrent config reload
/// never sees it missing; confirm before changing to removal.
pub fn with_compute_ctl_tmp_override<F>(pgdata_path: &Path, options: &str, exec: F) -> Result<()>
where
    F: FnOnce() -> Result<()>,
{
    let override_path = pgdata_path.join("compute_ctl_temp_override.conf");
    let mut override_file = File::create(override_path)?;
    write!(override_file, "{options}")?;
    let result = exec();
    // Clear the overrides even when `exec` failed.
    override_file.set_len(0)?;
    result
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/compute_tools/src/swap.rs | compute_tools/src/swap.rs | use std::path::Path;
use anyhow::{Context, anyhow};
use tracing::{instrument, warn};
pub const RESIZE_SWAP_BIN: &str = "/neonvm/bin/resize-swap";
#[instrument]
pub fn resize_swap(size_bytes: u64) -> anyhow::Result<()> {
    // run `/neonvm/bin/resize-swap --once {size_bytes}`
    //
    // Passing '--once' causes resize-swap to delete itself after successful completion, which
    // means that if compute_ctl restarts later, we won't end up calling 'swapoff' while
    // postgres is running.
    //
    // NOTE: resize-swap is not very clever. If present, --once MUST be the first arg.
    let attempt = || -> anyhow::Result<()> {
        let mut child = std::process::Command::new("/usr/bin/sudo")
            .arg(RESIZE_SWAP_BIN)
            .arg("--once")
            .arg(size_bytes.to_string())
            .spawn()
            .context("spawn() failed")?;
        let status = child.wait().context("wait() failed")?;
        if status.success() {
            return Ok(());
        }
        // The command failed. Maybe it was because the resize-swap file doesn't exist?
        // The --once flag causes it to delete itself on success so we don't disable swap
        // while postgres is running; maybe this is fine.
        match Path::new(RESIZE_SWAP_BIN).try_exists() {
            // The path doesn't exist; we're actually ok
            Ok(false) => {
                warn!("ignoring \"not found\" error from resize-swap to avoid swapoff while compute is running");
                Ok(())
            }
            Err(_) | Ok(true) => Err(anyhow!("process exited with {status}")),
        }
    };
    // wrap any prior error with the overall context that we couldn't run the command
    attempt().with_context(|| {
        format!("could not run `/usr/bin/sudo {RESIZE_SWAP_BIN} --once {size_bytes}`")
    })
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/compute_tools/src/tls.rs | compute_tools/src/tls.rs | use std::{io::Write, os::unix::fs::OpenOptionsExt, path::Path, time::Duration};
use anyhow::{Context, Result, bail};
use compute_api::responses::TlsConfig;
use ring::digest;
use x509_cert::Certificate;
// SHA-256 digest of a certificate file, used to detect on-disk changes.
#[derive(Clone, Copy)]
pub struct CertDigest(digest::Digest);
/// Spawn a background task that re-hashes `cert_path` every 60 seconds and
/// publishes a new digest on the returned watch channel whenever the file
/// contents change. The task exits once every receiver has been dropped.
pub async fn watch_cert_for_changes(cert_path: String) -> tokio::sync::watch::Receiver<CertDigest> {
let mut digest = compute_digest(&cert_path).await;
let (tx, rx) = tokio::sync::watch::channel(digest);
tokio::spawn(async move {
while !tx.is_closed() {
let new_digest = compute_digest(&cert_path).await;
// Only notify watchers when the digest actually changed.
if digest.0.as_ref() != new_digest.0.as_ref() {
digest = new_digest;
_ = tx.send(digest);
}
tokio::time::sleep(Duration::from_secs(60)).await
}
});
rx
}
async fn compute_digest(cert_path: &str) -> CertDigest {
loop {
match try_compute_digest(cert_path).await {
Ok(d) => break d,
Err(e) => {
tracing::error!("could not read cert file {e:?}");
tokio::time::sleep(Duration::from_secs(1)).await
}
}
}
}
/// Read the certificate file and return its SHA-256 digest.
/// sha256 is extremely collision resistant, so the digest can safely be
/// treated as a unique fingerprint of the file contents.
async fn try_compute_digest(cert_path: &str) -> Result<CertDigest> {
    let contents = tokio::fs::read(cert_path).await?;
    Ok(CertDigest(digest::digest(&digest::SHA256, &contents)))
}
/// File name of the server certificate inside the postgres data directory.
pub const SERVER_CRT: &str = "server.crt";
/// File name of the server private key inside the postgres data directory.
pub const SERVER_KEY: &str = "server.key";
/// Copy the TLS key/cert pair into the postgres data directory, retrying
/// once a second until the copy (including the key/cert consistency check)
/// succeeds.
pub fn update_key_path_blocking(pg_data: &Path, tls_config: &TlsConfig) {
    while let Err(e) = try_update_key_path_blocking(pg_data, tls_config) {
        tracing::error!(error = ?e, "could not create key file");
        std::thread::sleep(Duration::from_secs(1))
    }
}
// Postgres requires the keypath be "secure". This means
// 1. Owned by the postgres user.
// 2. Have permission 600.
fn try_update_key_path_blocking(pg_data: &Path, tls_config: &TlsConfig) -> Result<()> {
    let key = std::fs::read_to_string(&tls_config.key_path)?;
    let crt = std::fs::read_to_string(&tls_config.cert_path)?;

    // to mitigate a race condition during renewal.
    verify_key_cert(&key, &crt)?;

    // Create/truncate both files with mode 600 so postgres accepts them.
    let open_secure = |name: &str| {
        std::fs::OpenOptions::new()
            .write(true)
            .create(true)
            .truncate(true)
            .mode(0o600)
            .open(pg_data.join(name))
    };

    // Open both files before writing either, matching the original ordering.
    let mut key_file = open_secure(SERVER_KEY)?;
    let mut crt_file = open_secure(SERVER_CRT)?;

    key_file.write_all(key.as_bytes())?;
    crt_file.write_all(crt.as_bytes())?;
    Ok(())
}
/// Verify that `key` is the private key matching the leaf certificate in `cert`.
///
/// Only ECDSA with SHA-256 (P-256 keys) is supported; any other signature
/// algorithm is rejected. Note this checks key/cert consistency only — it does
/// not validate expiry or the chain.
fn verify_key_cert(key: &str, cert: &str) -> Result<()> {
    use x509_cert::der::oid::db::rfc5912::ECDSA_WITH_SHA_256;

    let certs = Certificate::load_pem_chain(cert.as_bytes())
        .context("decoding PEM encoded certificates")?;

    // First certificate is our server-cert,
    // all the rest of the certs are the CA cert chain.
    let Some(cert) = certs.first() else {
        bail!("no certificates found");
    };

    match cert.signature_algorithm.oid {
        ECDSA_WITH_SHA_256 => {
            let key = p256::SecretKey::from_sec1_pem(key).context("parse key")?;
            // Compare the public key derived from the private key against the
            // public key embedded in the certificate (both in SEC1 encoding).
            let a = key.public_key().to_sec1_bytes();
            let b = cert
                .tbs_certificate
                .subject_public_key_info
                .subject_public_key
                .raw_bytes();
            if *a != *b {
                bail!("private key file does not match certificate")
            }
        }
        _ => bail!("unknown TLS key type"),
    }
    Ok(())
}
#[cfg(test)]
mod tests {
    use super::verify_key_cert;

    /// Real certificate chain file, generated by cert-manager in dev.
    /// The server auth certificate has expired since 2025-04-24T15:41:35Z.
    /// (Expiry is irrelevant here: `verify_key_cert` only checks that the key
    /// matches the leaf certificate's public key.)
    const CERT: &str = "
-----BEGIN CERTIFICATE-----
MIICCDCCAa+gAwIBAgIQKhLomFcNULbZA/bPdGzaSzAKBggqhkjOPQQDAjBEMQsw
CQYDVQQGEwJVUzESMBAGA1UEChMJTmVvbiBJbmMuMSEwHwYDVQQDExhOZW9uIEs4
cyBJbnRlcm1lZGlhdGUgQ0EwHhcNMjUwNDIzMTU0MTM1WhcNMjUwNDI0MTU0MTM1
WjBBMT8wPQYDVQQDEzZjb21wdXRlLXdpc3B5LWdyYXNzLXcwY21laWp3LmRlZmF1
bHQuc3ZjLmNsdXN0ZXIubG9jYWwwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAATF
QCcG2m/EVHAiZtSsYgVnHgoTjUL/Jtwfdrpvz2t0bVRZmBmSKhlo53uPV9Y5eKFG
AmR54p9/gT2eO3xU7vAgo4GFMIGCMA4GA1UdDwEB/wQEAwIFoDAMBgNVHRMBAf8E
AjAAMB8GA1UdIwQYMBaAFFR2JAhXkeiNQNEixTvAYIwxUu3QMEEGA1UdEQQ6MDiC
NmNvbXB1dGUtd2lzcHktZ3Jhc3MtdzBjbWVpancuZGVmYXVsdC5zdmMuY2x1c3Rl
ci5sb2NhbDAKBggqhkjOPQQDAgNHADBEAiBLG22wKG8XS9e9RxBT+kmUx/kIThcP
DIpp7jx0PrFcdQIgEMTdnXpx5Cv/Z0NIEDxtMHUD7G0vuRPfztki36JuakM=
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIICFzCCAb6gAwIBAgIUbbX98N2Ip6lWAONRk8dU9hSz+YIwCgYIKoZIzj0EAwIw
RDELMAkGA1UEBhMCVVMxEjAQBgNVBAoTCU5lb24gSW5jLjEhMB8GA1UEAxMYTmVv
biBBV1MgSW50ZXJtZWRpYXRlIENBMB4XDTI1MDQyMjE1MTAxMFoXDTI1MDcyMTE1
MTAxMFowRDELMAkGA1UEBhMCVVMxEjAQBgNVBAoTCU5lb24gSW5jLjEhMB8GA1UE
AxMYTmVvbiBLOHMgSW50ZXJtZWRpYXRlIENBMFkwEwYHKoZIzj0CAQYIKoZIzj0D
AQcDQgAE5++m5owqNI4BPMTVNIUQH0qvU7pYhdpHGVGhdj/Lgars6ROvE6uSNQV4
SAmJN5HBzj5/6kLQaTPWpXW7EHXjK6OBjTCBijAOBgNVHQ8BAf8EBAMCAQYwEgYD
VR0TAQH/BAgwBgEB/wIBADAdBgNVHQ4EFgQUVHYkCFeR6I1A0SLFO8BgjDFS7dAw
HwYDVR0jBBgwFoAUgHfNXfyKtHO0V9qoLOWCjkNiaI8wJAYDVR0eAQH/BBowGKAW
MBSCEi5zdmMuY2x1c3Rlci5sb2NhbDAKBggqhkjOPQQDAgNHADBEAiBObVFFdXaL
QpOXmN60dYUNnQRwjKreFduEkQgOdOlssgIgVAdJJQFgvlrvEOBhY8j5WyeKRwUN
k/ALs6KpgaFBCGY=
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIB4jCCAYegAwIBAgIUFlxWFn/11yoGdmD+6gf+yQMToS0wCgYIKoZIzj0EAwIw
ODELMAkGA1UEBhMCVVMxEjAQBgNVBAoTCU5lb24gSW5jLjEVMBMGA1UEAxMMTmVv
biBSb290IENBMB4XDTI1MDQwMzA3MTUyMloXDTI2MDQwMzA3MTUyMlowRDELMAkG
A1UEBhMCVVMxEjAQBgNVBAoTCU5lb24gSW5jLjEhMB8GA1UEAxMYTmVvbiBBV1Mg
SW50ZXJtZWRpYXRlIENBMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEqonG/IQ6
ZxtEtOUTkkoNopPieXDO5CBKUkNFTGeJEB7OxRlSpYJgsBpaYIaD6Vc4sVk3thIF
p+pLw52idQOIN6NjMGEwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8w
HQYDVR0OBBYEFIB3zV38irRztFfaqCzlgo5DYmiPMB8GA1UdIwQYMBaAFKh7M4/G
FHvr/ORDQZt4bMLlJvHCMAoGCCqGSM49BAMCA0kAMEYCIQCbS4x7QPslONzBYbjC
UQaQ0QLDW4CJHvQ4u4gbWFG87wIhAJMsHQHjP9qTT27Q65zQCR7O8QeLAfha1jrH
Ag/LsxSr
-----END CERTIFICATE-----
";

    /// The key corresponding to [`CERT`]
    const KEY: &str = "
-----BEGIN EC PRIVATE KEY-----
MHcCAQEEIDnAnrqmIJjndCLWP1iIO5X3X63Aia48TGpGuMXwvm6IoAoGCCqGSM49
AwEHoUQDQgAExUAnBtpvxFRwImbUrGIFZx4KE41C/ybcH3a6b89rdG1UWZgZkioZ
aOd7j1fWOXihRgJkeeKff4E9njt8VO7wIA==
-----END EC PRIVATE KEY-----
";

    /// An incorrect key.
    const INCORRECT_KEY: &str = "
-----BEGIN EC PRIVATE KEY-----
MHcCAQEEIL6WqqBDyvM0HWz7Ir5M5+jhFWB7IzOClGn26OPrzHCXoAoGCCqGSM49
AwEHoUQDQgAE7XVvdOy5lfwtNKb+gJEUtnG+DrnnXLY5LsHDeGQKV9PTRcEMeCrG
YZzHyML4P6Sr4yi2ts+4B9i47uvAG8+XwQ==
-----END EC PRIVATE KEY-----
";

    // Happy path: the key matches the leaf certificate.
    #[test]
    fn certificate_verification() {
        verify_key_cert(KEY, CERT).unwrap();
    }

    // Mismatched key must be rejected with the specific error message.
    #[test]
    #[should_panic(expected = "private key file does not match certificate")]
    fn certificate_verification_fail() {
        verify_key_cert(INCORRECT_KEY, CERT).unwrap();
    }
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/compute_tools/src/catalog.rs | compute_tools/src/catalog.rs | use std::path::Path;
use std::process::Stdio;
use std::result::Result;
use std::sync::Arc;
use compute_api::responses::CatalogObjects;
use futures::Stream;
use postgres::NoTls;
use tokio::io::{AsyncBufReadExt, BufReader};
use tokio::process::Command;
use tokio::spawn;
use tokio_stream::{self as stream, StreamExt};
use tokio_util::codec::{BytesCodec, FramedRead};
use tracing::warn;
use crate::compute::ComputeNode;
use crate::pg_helpers::{get_existing_dbs_async, get_existing_roles_async, postgres_conf_for_db};
/// Connect to the local postgres and collect the currently existing roles and
/// databases for reporting to the control plane.
pub async fn get_dbs_and_roles(compute: &Arc<ComputeNode>) -> anyhow::Result<CatalogObjects> {
    let conf = compute.get_tokio_conn_conf(Some("compute_ctl:get_dbs_and_roles"));

    let (client, connection): (tokio_postgres::Client, _) = conf.connect(NoTls).await?;

    // The connection object drives the wire protocol; keep it running in the
    // background for as long as the client is in use.
    spawn(async move {
        if let Err(e) = connection.await {
            eprintln!("connection error: {e}");
        }
    });

    let roles = get_existing_roles_async(&client).await?;
    let dbs_by_name = get_existing_dbs_async(&client).await?;

    Ok(CatalogObjects {
        roles,
        databases: dbs_by_name.into_values().collect(),
    })
}
/// Errors produced while streaming a database schema dump via pg_dump.
#[derive(Debug, thiserror::Error)]
pub enum SchemaDumpError {
    /// The requested database does not exist in this compute.
    #[error("database does not exist")]
    DatabaseDoesNotExist,
    /// Spawning or communicating with the pg_dump child process failed.
    #[error("failed to execute pg_dump")]
    IO(#[from] std::io::Error),
    /// Internal invariant violation, e.g. a malformed connection config.
    #[error("unexpected I/O error")]
    Unexpected,
}
// It uses the pg_dump utility to dump the schema of the specified database.
// The output is streamed back to the caller and supposed to be streamed via HTTP.
//
// Before return the result with the output, it checks that pg_dump produced any output.
// If not, it tries to parse the stderr output to determine if the database does not exist
// and special error is returned.
//
// To make sure that the process is killed when the caller drops the stream, we use tokio kill_on_drop feature.
/// Stream the schema-only `pg_dump` of `dbname` as a byte stream suitable for
/// an HTTP response body. See the comment block above for the full contract.
pub async fn get_database_schema(
    compute: &Arc<ComputeNode>,
    dbname: &str,
) -> Result<impl Stream<Item = Result<bytes::Bytes, std::io::Error>> + use<>, SchemaDumpError> {
    let pgbin = &compute.params.pgbin;
    let basepath = Path::new(pgbin).parent().unwrap();
    let pgdump = basepath.join("pg_dump");

    // Replace the DB in the connection string and disable it to parts.
    // This is the only option to handle DBs with special characters.
    let conf = postgres_conf_for_db(&compute.params.connstr, dbname)
        .map_err(|_| SchemaDumpError::Unexpected)?;
    let host = conf
        .get_hosts()
        .first()
        .ok_or(SchemaDumpError::Unexpected)?;
    let host = match host {
        tokio_postgres::config::Host::Tcp(ip) => ip.to_string(),
        #[cfg(unix)]
        tokio_postgres::config::Host::Unix(path) => path.to_string_lossy().to_string(),
    };
    let port = conf
        .get_ports()
        .first()
        .ok_or(SchemaDumpError::Unexpected)?;
    let user = conf.get_user().ok_or(SchemaDumpError::Unexpected)?;
    let dbname = conf.get_dbname().ok_or(SchemaDumpError::Unexpected)?;

    let mut cmd = Command::new(pgdump)
        // XXX: this seems to be the only option to deal with DBs with `=` in the name
        // See <https://www.postgresql.org/message-id/flat/20151023003445.931.91267%40wrigleys.postgresql.org>
        .env("PGDATABASE", dbname)
        .arg("--host")
        .arg(host)
        .arg("--port")
        .arg(port.to_string())
        .arg("--username")
        .arg(user)
        .arg("--schema-only")
        .stdout(Stdio::piped())
        .stderr(Stdio::piped())
        // Ensure pg_dump is terminated if the caller drops the stream early.
        .kill_on_drop(true)
        .spawn()?;

    let stdout = cmd
        .stdout
        .take()
        .ok_or_else(|| std::io::Error::other("Failed to capture stdout."))?;
    let stderr = cmd
        .stderr
        .take()
        .ok_or_else(|| std::io::Error::other("Failed to capture stderr."))?;

    let mut stdout_reader = FramedRead::new(stdout, BytesCodec::new());
    let stderr_reader = BufReader::new(stderr);

    // Wait for the first chunk: it distinguishes "dump is flowing" from
    // "pg_dump failed before producing any output".
    let first_chunk = match stdout_reader.next().await {
        Some(Ok(bytes)) if !bytes.is_empty() => bytes,
        Some(Err(e)) => {
            return Err(SchemaDumpError::IO(e));
        }
        _ => {
            // No stdout at all: inspect stderr to distinguish a missing
            // database from any other startup failure.
            let mut lines = stderr_reader.lines();
            if let Some(line) = lines.next_line().await? {
                if line.contains(&format!("FATAL: database \"{dbname}\" does not exist")) {
                    return Err(SchemaDumpError::DatabaseDoesNotExist);
                }
                warn!("pg_dump stderr: {}", line)
            }
            // Keep draining stderr in the background so the child cannot
            // block on a full pipe.
            tokio::spawn(async move {
                while let Ok(Some(line)) = lines.next_line().await {
                    warn!("pg_dump stderr: {}", line)
                }
            });

            return Err(SchemaDumpError::IO(std::io::Error::other(
                "failed to start pg_dump",
            )));
        }
    };
    let initial_stream = stream::once(Ok(first_chunk.freeze()));
    // Consume stderr and log warnings
    tokio::spawn(async move {
        let mut lines = stderr_reader.lines();
        while let Ok(Some(line)) = lines.next_line().await {
            warn!("pg_dump stderr: {}", line)
        }
    });

    #[allow(dead_code)]
    struct SchemaStream<S> {
        // We keep a reference to the child process to ensure it stays alive
        // while the stream is being consumed. When SchemaStream is dropped,
        // cmd will be dropped, which triggers kill_on_drop and terminates pg_dump
        cmd: tokio::process::Child,
        stream: S,
    }

    impl<S> Stream for SchemaStream<S>
    where
        S: Stream<Item = Result<bytes::Bytes, std::io::Error>> + Unpin,
    {
        type Item = Result<bytes::Bytes, std::io::Error>;

        // Delegate polling straight to the inner stream; the wrapper exists
        // only to tie the child's lifetime to the stream's.
        fn poll_next(
            mut self: std::pin::Pin<&mut Self>,
            cx: &mut std::task::Context<'_>,
        ) -> std::task::Poll<Option<Self::Item>> {
            Stream::poll_next(std::pin::Pin::new(&mut self.stream), cx)
        }
    }

    let schema_stream = SchemaStream {
        cmd,
        stream: initial_stream.chain(stdout_reader.map(|res| res.map(|b| b.freeze()))),
    };

    Ok(schema_stream)
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/compute_tools/src/extension_server.rs | compute_tools/src/extension_server.rs | // Download extension files from the extension store
// and put them in the right place in the postgres directory (share / lib)
/*
The layout of the S3 bucket is as follows:
5615610098 // this is an extension build number
├── v14
│ ├── extensions
│ │ ├── anon.tar.zst
│ │ └── embedding.tar.zst
│ └── ext_index.json
└── v15
├── extensions
│ ├── anon.tar.zst
│ └── embedding.tar.zst
└── ext_index.json
5615261079
├── v14
│ ├── extensions
│ │ └── anon.tar.zst
│ └── ext_index.json
└── v15
├── extensions
│ └── anon.tar.zst
└── ext_index.json
5623261088
├── v14
│ ├── extensions
│ │ └── embedding.tar.zst
│ └── ext_index.json
└── v15
├── extensions
│ └── embedding.tar.zst
└── ext_index.json
Note that build number cannot be part of prefix because we might need extensions
from other build numbers.
ext_index.json stores the control files and location of extension archives
It also stores a list of public extensions and a library_index
We don't need to duplicate extension.tar.zst files.
We only need to upload a new one if it is updated.
(Although currently we just upload every time anyways, hopefully will change
this sometime)
*access* is controlled by spec
More specifically, here is an example ext_index.json
{
"public_extensions": [
"anon",
"pg_buffercache"
],
"library_index": {
"anon": "anon",
"pg_buffercache": "pg_buffercache"
},
"extension_data": {
"pg_buffercache": {
"control_data": {
"pg_buffercache.control": "# pg_buffercache extension \ncomment = 'examine the shared buffer cache' \ndefault_version = '1.3' \nmodule_pathname = '$libdir/pg_buffercache' \nrelocatable = true \ntrusted=true"
},
"archive_path": "5670669815/v14/extensions/pg_buffercache.tar.zst"
},
"anon": {
"control_data": {
"anon.control": "# PostgreSQL Anonymizer (anon) extension \ncomment = 'Data anonymization tools' \ndefault_version = '1.1.0' \ndirectory='extension/anon' \nrelocatable = false \nrequires = 'pgcrypto' \nsuperuser = false \nmodule_pathname = '$libdir/anon' \ntrusted = true \n"
},
"archive_path": "5670669815/v14/extensions/anon.tar.zst"
}
}
}
*/
use std::path::Path;
use std::str;
use crate::metrics::{REMOTE_EXT_REQUESTS_TOTAL, UNKNOWN_HTTP_STATUS};
use anyhow::{Context, Result, bail};
use bytes::Bytes;
use compute_api::spec::RemoteExtSpec;
use postgres_versioninfo::PgMajorVersion;
use regex::Regex;
use remote_storage::*;
use reqwest::StatusCode;
use tar::Archive;
use tracing::info;
use tracing::log::warn;
use url::Url;
use zstd::stream::read::Decoder;
/// Run `pg_config <argument>` (where argument is a flag like `--version` or
/// `--sharedir`) for the installation next to `pgbin`, returning trimmed stdout.
///
/// Panics if `pgbin` doesn't end in "postgres" or pg_config cannot be executed.
fn get_pg_config(argument: &str, pgbin: &str) -> String {
    let bindir = pgbin.strip_suffix("postgres").expect("bad pgbin");
    let pgconfig = format!("{bindir}/pg_config");

    let output = std::process::Command::new(pgconfig)
        .arg(argument)
        .output()
        .expect("pg_config error");

    std::str::from_utf8(&output.stdout)
        .expect("pg_config error")
        .trim()
        .to_string()
}
/// Determine the postgres major version (v14..v17) by parsing the
/// (platform-specific) human-readable output of `pg_config --version`,
/// e.g. "PostgreSQL 15.4".
pub fn get_pg_version(pgbin: &str) -> PgMajorVersion {
    parse_pg_version(&get_pg_config("--version", pgbin))
}
/// Same as [`get_pg_version`], rendered as a string such as "v15".
pub fn get_pg_version_string(pgbin: &str) -> String {
    let major = get_pg_version(pgbin);
    major.v_str()
}
/// Parse a `pg_config --version` style string into a supported major version.
///
/// # Panics
/// Panics on unsupported majors (anything outside 14..=17) or on strings that
/// don't match the expected "PostgreSQL <major><suffix>" shape.
fn parse_pg_version(human_version: &str) -> PgMajorVersion {
    use PgMajorVersion::*;

    // Normal releases have version strings like "PostgreSQL 15.4". But there
    // are also pre-release versions like "PostgreSQL 17devel" or "PostgreSQL
    // 16beta2" or "PostgreSQL 17rc1". And with the --with-extra-version
    // configure option, you can tack any string to the version number,
    // e.g. "PostgreSQL 15.4foobar". The trailing `.+` requires at least one
    // character after the major number, so a bare "PostgreSQL 14" is rejected.
    if let Some(captures) = Regex::new(r"^PostgreSQL (?<major>\d+).+")
        .unwrap()
        .captures(human_version)
    {
        // NOTE: no need to check `captures.len()` — for this pattern it is
        // always 2 (implicit whole-match group + the named "major" group).
        match &captures["major"] {
            "14" => return PG14,
            "15" => return PG15,
            "16" => return PG16,
            "17" => return PG17,
            _ => {}
        }
    }
    // Fixed typo in the panic message ("Unsuported" -> "Unsupported"); the
    // #[should_panic] tests do not assert on this message.
    panic!("Unsupported postgres version {human_version}");
}
// download the archive for a given extension,
// unzip it, and place files in the appropriate locations (share/lib)
//
// Returns the size in bytes of the downloaded (compressed) archive.
pub async fn download_extension(
    ext_name: &str,
    ext_path: &RemotePath,
    remote_ext_base_url: &Url,
    pgbin: &str,
) -> Result<u64> {
    info!("Download extension {:?} from {:?}", ext_name, ext_path);

    // TODO add retry logic
    let download_buffer =
        match download_extension_tar(remote_ext_base_url, &ext_path.to_string()).await {
            Ok(buffer) => buffer,
            Err(error_message) => {
                return Err(anyhow::anyhow!(
                    "error downloading extension {:?}: {:?}",
                    ext_name,
                    error_message
                ));
            }
        };

    let download_size = download_buffer.len() as u64;
    info!("Download size {:?}", download_size);
    // it's unclear whether it is more performant to decompress into memory or not
    // TODO: decompressing into memory can be avoided
    let decoder = Decoder::new(download_buffer.as_ref())?;
    let mut archive = Archive::new(decoder);

    // Unpack into a staging directory next to the postgres installation.
    let unzip_dest = pgbin
        .strip_suffix("/bin/postgres")
        .expect("bad pgbin")
        .to_string()
        + "/download_extensions";
    archive.unpack(&unzip_dest)?;
    info!("Download + unzip {:?} completed successfully", &ext_path);

    let sharedir_paths = (
        unzip_dest.to_string() + "/share/extension",
        Path::new(&get_pg_config("--sharedir", pgbin)).join("extension"),
    );
    let libdir_paths = (
        unzip_dest.to_string() + "/lib",
        Path::new(&get_pg_config("--pkglibdir", pgbin)).to_path_buf(),
    );
    // move contents of the libdir / sharedir in unzipped archive to the correct local paths
    for paths in [sharedir_paths, libdir_paths] {
        let (zip_dir, real_dir) = paths;

        let dir = match std::fs::read_dir(&zip_dir) {
            Ok(dir) => dir,
            Err(e) => match e.kind() {
                // In the event of a SQL-only extension, there would be nothing
                // to move from the lib/ directory, so note that in the log and
                // move on.
                std::io::ErrorKind::NotFound => {
                    info!("nothing to move from {}", zip_dir);
                    continue;
                }
                _ => return Err(anyhow::anyhow!(e)),
            },
        };

        info!("mv {zip_dir:?}/* {real_dir:?}");
        for file in dir {
            let old_file = file?.path();
            let new_file =
                Path::new(&real_dir).join(old_file.file_name().context("error parsing file")?);
            info!("moving {old_file:?} to {new_file:?}");

            // extension download failed: Directory not empty (os error 39)
            match std::fs::rename(old_file, new_file) {
                Ok(()) => info!("move succeeded"),
                Err(e) => {
                    warn!("move failed, probably because the extension already exists: {e}")
                }
            }
        }
    }
    info!("done moving extension {ext_name}");
    Ok(download_size)
}
/// Create extension control files from the remote extension spec, writing them
/// into the local postgres sharedir. Extensions this compute is not allowed to
/// use (per the public/custom allow-lists) are skipped.
pub fn create_control_files(remote_extensions: &RemoteExtSpec, pgbin: &str) {
    let local_sharedir = Path::new(&get_pg_config("--sharedir", pgbin)).join("extension");

    for (ext_name, ext_data) in remote_extensions.extension_data.iter() {
        // An extension is skipped only when BOTH allow-lists are present and
        // neither contains it; an absent list does not deny anything.
        let denied_by_public = remote_extensions
            .public_extensions
            .as_ref()
            .is_some_and(|public| !public.contains(ext_name));
        let denied_by_custom = remote_extensions
            .custom_extensions
            .as_ref()
            .is_some_and(|custom| !custom.contains(ext_name));
        if denied_by_public && denied_by_custom {
            continue; // skip this extension, it is not allowed
        }

        for (control_name, control_content) in &ext_data.control_data {
            let control_path = local_sharedir.join(control_name);
            if control_path.exists() {
                warn!(
                    "control file {:?} exists both locally and remotely. ignoring the remote version.",
                    control_path
                );
            } else {
                info!("writing file {:?}{:?}", control_path, control_content);
                std::fs::write(control_path, control_content).unwrap();
            }
        }
    }
}
// Do request to extension storage proxy, e.g.,
// curl http://pg-ext-s3-gateway.pg-ext-s3-gateway.svc.cluster.local/latest/v15/extensions/anon.tar.zst
// using HTTP GET and return the response body as bytes. A request metric is
// recorded whether the request succeeds or fails.
async fn download_extension_tar(remote_ext_base_url: &Url, ext_path: &str) -> Result<Bytes> {
    let uri = remote_ext_base_url.join(ext_path).with_context(|| {
        format!(
            "failed to create the remote extension URI for {ext_path} using {remote_ext_base_url}"
        )
    })?;

    // The file name is only used as a metrics label; fall back to "unknown"
    // when the final path component is missing or not valid UTF-8.
    let filename = match Path::new(ext_path).file_name().and_then(|f| f.to_str()) {
        Some(name) => name.to_string(),
        None => "unknown".to_string(),
    };

    info!("Downloading extension file '{}' from uri {}", filename, uri);

    match do_extension_server_request(uri).await {
        Ok(resp) => {
            info!("Successfully downloaded remote extension data {}", ext_path);
            REMOTE_EXT_REQUESTS_TOTAL
                .with_label_values(&[&StatusCode::OK.to_string(), &filename])
                .inc();
            Ok(resp)
        }
        Err((msg, status)) => {
            REMOTE_EXT_REQUESTS_TOTAL
                .with_label_values(&[&status, &filename])
                .inc();
            bail!(msg);
        }
    }
}
// Do a single remote extensions server request.
// Return result or (error message + stringified status code) in case of any failures.
async fn do_extension_server_request(uri: Url) -> Result<Bytes, (String, String)> {
    let resp = reqwest::get(uri).await.map_err(|e| {
        (
            format!("could not perform remote extensions server request: {e:?}"),
            // No HTTP response was received at all, so there is no real status
            // code to put in metrics; use the "unknown" placeholder instead.
            UNKNOWN_HTTP_STATUS.to_string(),
        )
    })?;
    let status = resp.status();

    match status {
        StatusCode::OK => match resp.bytes().await {
            Ok(resp) => Ok(resp),
            Err(e) => Err((
                format!("could not read remote extensions server response: {e:?}"),
                // It's fine to return and report error with status as 200 OK,
                // because we still failed to read the response.
                status.to_string(),
            )),
        },
        StatusCode::SERVICE_UNAVAILABLE => Err((
            "remote extensions server is temporarily unavailable".to_string(),
            status.to_string(),
        )),
        _ => Err((
            format!("unexpected remote extensions server response status code: {status}"),
            status.to_string(),
        )),
    }
}
#[cfg(test)]
mod tests {
    use super::parse_pg_version;

    // Covers release, patch, distro-suffixed, and pre-release version strings.
    #[test]
    fn test_parse_pg_version() {
        use postgres_versioninfo::PgMajorVersion::*;
        assert_eq!(parse_pg_version("PostgreSQL 15.4"), PG15);
        assert_eq!(parse_pg_version("PostgreSQL 15.14"), PG15);
        assert_eq!(
            parse_pg_version("PostgreSQL 15.4 (Ubuntu 15.4-0ubuntu0.23.04.1)"),
            PG15
        );

        assert_eq!(parse_pg_version("PostgreSQL 14.15"), PG14);
        assert_eq!(parse_pg_version("PostgreSQL 14.0"), PG14);
        assert_eq!(
            parse_pg_version("PostgreSQL 14.9 (Debian 14.9-1.pgdg120+1"),
            PG14
        );

        assert_eq!(parse_pg_version("PostgreSQL 16devel"), PG16);
        assert_eq!(parse_pg_version("PostgreSQL 16beta1"), PG16);
        assert_eq!(parse_pg_version("PostgreSQL 16rc2"), PG16);
        assert_eq!(parse_pg_version("PostgreSQL 16extra"), PG16);
    }

    // Majors outside 14..=17 must panic.
    #[test]
    #[should_panic]
    fn test_parse_pg_unsupported_version() {
        parse_pg_version("PostgreSQL 13.14");
    }

    // A bare major with no suffix ("PostgreSQL 14") does not match the
    // pattern and must panic.
    #[test]
    #[should_panic]
    fn test_parse_pg_incorrect_version_format() {
        parse_pg_version("PostgreSQL 14");
    }
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/compute_tools/src/lib.rs | compute_tools/src/lib.rs | //! Various tools and helpers to handle cluster / compute node (Postgres)
//! configuration.
#![deny(unsafe_code)]
#![deny(clippy::undocumented_unsafe_blocks)]
pub mod checker;
pub mod communicator_socket_client;
pub mod config;
pub mod configurator;
pub mod http;
#[macro_use]
pub mod logger;
pub mod catalog;
pub mod compute;
pub mod compute_prewarm;
pub mod compute_promote;
pub mod disk_quota;
pub mod extension_server;
pub mod hadron_metrics;
pub mod installed_extensions;
pub mod local_proxy;
pub mod lsn_lease;
pub mod metrics;
mod migration;
pub mod monitor;
pub mod params;
pub mod pg_helpers;
pub mod pg_isready;
pub mod pgbouncer;
pub mod rsyslog;
pub mod spec;
mod spec_apply;
pub mod swap;
pub mod sync_sk;
pub mod tls;
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/compute_tools/src/logger.rs | compute_tools/src/logger.rs | use std::collections::HashMap;
use std::sync::{LazyLock, RwLock};
use tracing::Subscriber;
use tracing::info;
use tracing_appender;
use tracing_subscriber::prelude::*;
use tracing_subscriber::{fmt, layer::SubscriberExt, registry::LookupSpan};
/// Initialize logging to stderr, and OpenTelemetry tracing and exporter.
///
/// Logging is configured using either `default_log_level` or
/// `RUST_LOG` environment variable as default log level.
///
/// OpenTelemetry is configured with OTLP/HTTP exporter. It picks up
/// configuration from environment variables. For example, to change the destination,
/// set `OTEL_EXPORTER_OTLP_ENDPOINT=http://jaeger:4318`. See
/// `tracing-utils` package description.
///
pub fn init_tracing_and_logging(
    default_log_level: &str,
    log_dir_opt: &Option<String>,
) -> anyhow::Result<(
    Option<tracing_utils::Provider>,
    Option<tracing_appender::non_blocking::WorkerGuard>,
)> {
    // Initialize Logging
    let env_filter = tracing_subscriber::EnvFilter::try_from_default_env()
        .unwrap_or_else(|_| tracing_subscriber::EnvFilter::new(default_log_level));

    // Standard output streams
    let fmt_layer = tracing_subscriber::fmt::layer()
        .with_ansi(false)
        .with_target(false)
        .with_writer(std::io::stderr);

    // Logs with file rotation. Files in `$log_dir/pgcctl.yyyy-MM-dd`
    // Only enabled when a log directory is configured; the returned guard
    // must stay alive or buffered log lines are dropped.
    let (json_to_file_layer, _file_logs_guard) = if let Some(log_dir) = log_dir_opt {
        std::fs::create_dir_all(log_dir)?;
        let file_logs_appender = tracing_appender::rolling::RollingFileAppender::builder()
            .rotation(tracing_appender::rolling::Rotation::DAILY)
            .filename_prefix("pgcctl")
            // Lib appends to existing files, so we will keep files for up to 2 days even on restart loops.
            // At minimum, log-daemon will have 1 day to detect and upload a file (if created right before midnight).
            .max_log_files(2)
            .build(log_dir)
            .expect("Initializing rolling file appender should succeed");
        let (file_logs_writer, _file_logs_guard) =
            tracing_appender::non_blocking(file_logs_appender);
        let json_to_file_layer = tracing_subscriber::fmt::layer()
            .with_ansi(false)
            .with_target(false)
            .event_format(PgJsonLogShapeFormatter)
            .with_writer(file_logs_writer);
        (Some(json_to_file_layer), Some(_file_logs_guard))
    } else {
        (None, None)
    };

    // Initialize OpenTelemetry
    let provider =
        tracing_utils::init_tracing("compute_ctl", tracing_utils::ExportConfig::default());
    let otlp_layer = provider.as_ref().map(tracing_utils::layer);

    // Put it all together
    tracing_subscriber::registry()
        .with(env_filter)
        .with(otlp_layer)
        .with(fmt_layer)
        .with(json_to_file_layer)
        .init();
    tracing::info!("logging and tracing started");

    // Route panics through tracing so they land in the same sinks as other logs.
    utils::logging::replace_panic_hook_with_tracing_panic_hook().forget();

    Ok((provider, _file_logs_guard))
}
/// Replace every newline with a zero-width space (U+200B) so that multi-line
/// payloads collapse onto a single log line and stay easy to grep for.
pub fn inlinify(s: &str) -> String {
    s.split('\n').collect::<Vec<_>>().join("\u{200B}")
}
/// Build an OpenTelemetry context from the TRACEPARENT/TRACESTATE environment
/// variables; returns `None` when neither variable is set.
pub fn startup_context_from_env() -> Option<opentelemetry::Context> {
    // Extract OpenTelemetry context for the startup actions from the
    // TRACEPARENT and TRACESTATE env variables, and attach it to the current
    // tracing context.
    //
    // This is used to propagate the context for the 'start_compute' operation
    // from the neon control plane. This allows linking together the wider
    // 'start_compute' operation that creates the compute container, with the
    // startup actions here within the container.
    //
    // There is no standard for passing context in env variables, but a lot of
    // tools use TRACEPARENT/TRACESTATE, so we use that convention too. See
    // https://github.com/open-telemetry/opentelemetry-specification/issues/740
    //
    // Switch to the startup context here, and exit it once the startup has
    // completed and Postgres is up and running.
    //
    // If this pod is pre-created without binding it to any particular endpoint
    // yet, this isn't the right place to enter the startup context. In that
    // case, the control plane should pass the tracing context as part of the
    // /configure API call.
    //
    // NOTE: This is supposed to only cover the *startup* actions. Once
    // postgres is configured and up-and-running, we exit this span. Any other
    // actions that are performed on incoming HTTP requests, for example, are
    // performed in separate spans.
    //
    // XXX: If the pod is restarted, we perform the startup actions in the same
    // context as the original startup actions, which probably doesn't make
    // sense.
    let mut startup_tracing_carrier: HashMap<String, String> = HashMap::new();
    if let Ok(val) = std::env::var("TRACEPARENT") {
        startup_tracing_carrier.insert("traceparent".to_string(), val);
    }
    if let Ok(val) = std::env::var("TRACESTATE") {
        startup_tracing_carrier.insert("tracestate".to_string(), val);
    }
    if !startup_tracing_carrier.is_empty() {
        use opentelemetry::propagation::TextMapPropagator;
        use opentelemetry_sdk::propagation::TraceContextPropagator;
        info!("got startup tracing context from env variables");
        Some(TraceContextPropagator::new().extract(&startup_tracing_carrier))
    } else {
        None
    }
}
/// Track relevant id's
///
/// Rendered as a JSON fragment (`"pg_instance_id": "...", "pg_compute_id": "..."`)
/// that the file-log formatter splices into every record.
const UNKNOWN_IDS: &str = r#""pg_instance_id": "", "pg_compute_id": """#;

static IDS: LazyLock<RwLock<String>> = LazyLock::new(|| RwLock::new(UNKNOWN_IDS.to_string()));

/// Update the instance/compute ids embedded in file-log records.
///
/// A missing id is rendered as an empty string. Fails only if the lock is
/// poisoned (i.e. a previous writer panicked while holding it).
pub fn update_ids(instance_id: &Option<String>, compute_id: &Option<String>) -> anyhow::Result<()> {
    let ids = format!(
        r#""pg_instance_id": "{}", "pg_compute_id": "{}""#,
        // as_deref() is the idiomatic shorthand for as_ref().map(|s| s.as_str())
        instance_id.as_deref().unwrap_or_default(),
        compute_id.as_deref().unwrap_or_default()
    );
    let mut guard = IDS
        .write()
        .map_err(|e| anyhow::anyhow!("Log set id's rwlock poisoned: {}", e))?;
    *guard = ids;
    Ok(())
}
/// Massage compute_ctl logs into PG json log shape so we can use the same Lumberjack setup.
struct PgJsonLogShapeFormatter;

impl<S, N> fmt::format::FormatEvent<S, N> for PgJsonLogShapeFormatter
where
    S: Subscriber + for<'a> LookupSpan<'a>,
    N: for<'a> fmt::format::FormatFields<'a> + 'static,
{
    fn format_event(
        &self,
        ctx: &fmt::FmtContext<'_, S, N>,
        mut writer: fmt::format::Writer<'_>,
        event: &tracing::Event<'_>,
    ) -> std::fmt::Result {
        // Format values from the event's metadata, and open message string
        let metadata = event.metadata();
        {
            let ids_guard = IDS.read();
            let ids = ids_guard
                .as_ref()
                .map(|guard| guard.as_str())
                // Suppress so that we don't lose all uploaded/ file logs if something goes super wrong. We would notice the missing id's.
                .unwrap_or(UNKNOWN_IDS);
            write!(
                &mut writer,
                r#"{{"timestamp": "{}", "error_severity": "{}", "file_name": "{}", "backend_type": "compute_ctl_self", {}, "message": "#,
                chrono::Utc::now().format("%Y-%m-%d %H:%M:%S%.3f GMT"),
                metadata.level(),
                metadata.target(),
                ids
            )?;
        }

        let mut message = String::new();
        let message_writer = fmt::format::Writer::new(&mut message);
        // Gather the message
        ctx.field_format().format_fields(message_writer, event)?;

        // TODO: any better options than to copy-paste this OSS span formatter?
        // impl<S, N, T> FormatEvent<S, N> for Format<Full, T>
        // https://docs.rs/tracing-subscriber/latest/tracing_subscriber/fmt/trait.FormatEvent.html#impl-FormatEvent%3CS,+N%3E-for-Format%3CFull,+T%3E

        // write message, close bracket, and new line
        // (serde_json::to_string JSON-escapes the message and adds the quotes)
        writeln!(writer, "{}}}", serde_json::to_string(&message).unwrap())
    }
}
#[cfg(feature = "testing")]
#[cfg(test)]
mod test {
    use super::*;
    use std::{cell::RefCell, io};

    // Use thread_local! instead of Mutex for test isolation
    thread_local! {
        static WRITER_OUTPUT: RefCell<String> = const { RefCell::new(String::new()) };
    }

    /// Writer that appends everything to the thread-local WRITER_OUTPUT buffer
    /// so the test can inspect what the formatter produced.
    #[derive(Clone, Default)]
    struct StaticStringWriter;

    impl io::Write for StaticStringWriter {
        fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
            let output = String::from_utf8(buf.to_vec()).expect("Invalid UTF-8 in test output");
            WRITER_OUTPUT.with(|s| s.borrow_mut().push_str(&output));
            Ok(buf.len())
        }

        fn flush(&mut self) -> io::Result<()> {
            Ok(())
        }
    }

    impl fmt::MakeWriter<'_> for StaticStringWriter {
        type Writer = Self;

        fn make_writer(&self) -> Self::Writer {
            Self
        }
    }

    // End-to-end check of PgJsonLogShapeFormatter: every event becomes one
    // JSON line with the expected fields, ids, and a JSON-escaped message.
    #[test]
    fn test_log_pg_json_shape_formatter() {
        // Use a scoped subscriber to prevent global state pollution
        let subscriber = tracing_subscriber::registry().with(
            tracing_subscriber::fmt::layer()
                .with_ansi(false)
                .with_target(false)
                .event_format(PgJsonLogShapeFormatter)
                .with_writer(StaticStringWriter),
        );

        let _ = update_ids(&Some("000".to_string()), &Some("111".to_string()));

        // Clear any previous test state
        WRITER_OUTPUT.with(|s| s.borrow_mut().clear());

        let messages = [
            "test message",
            r#"json escape check: name="BatchSpanProcessor.Flush.ExportError" reason="Other(reqwest::Error { kind: Request, url: \"http://localhost:4318/v1/traces\", source: hyper_
util::client::legacy::Error(Connect, ConnectError(\"tcp connect error\", Os { code: 111, kind: ConnectionRefused, message: \"Connection refused\" })) })" Failed during the export process"#,
        ];

        tracing::subscriber::with_default(subscriber, || {
            for message in messages {
                tracing::info!(message);
            }
        });
        // Emitted outside the scoped subscriber: must NOT appear in the output.
        tracing::info!("not test message");

        // Get captured output
        let output = WRITER_OUTPUT.with(|s| s.borrow().clone());
        let json_strings: Vec<&str> = output.lines().collect();
        assert_eq!(
            json_strings.len(),
            messages.len(),
            "Log didn't have the expected number of json strings."
        );

        let json_string_shape_regex = regex::Regex::new(
            r#"\{"timestamp": "\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d{3} GMT", "error_severity": "INFO", "file_name": ".+", "backend_type": "compute_ctl_self", "pg_instance_id": "000", "pg_compute_id": "111", "message": ".+"\}"#
        ).unwrap();

        for (i, expected_message) in messages.iter().enumerate() {
            let json_string = json_strings[i];
            assert!(
                json_string_shape_regex.is_match(json_string),
                "Json log didn't match expected pattern:\n{json_string}",
            );
            let parsed_json: serde_json::Value = serde_json::from_str(json_string).unwrap();
            let actual_message = parsed_json["message"].as_str().unwrap();
            assert_eq!(*expected_message, actual_message);
        }
    }
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/compute_tools/src/params.rs | compute_tools/src/params.rs | pub const DEFAULT_LOG_LEVEL: &str = "info";
// From Postgres docs:
// To ease transition from the md5 method to the newer SCRAM method, if md5 is specified
// as a method in pg_hba.conf but the user's password on the server is encrypted for SCRAM
// (see below), then SCRAM-based authentication will automatically be chosen instead.
// https://www.postgresql.org/docs/15/auth-password.html
//
// So it's safe to set md5 here, as `control-plane` anyway uses SCRAM for all roles.
pub const PG_HBA_ALL_MD5: &str = "host\tall\t\tall\t\tall\t\tmd5";
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/compute_tools/src/spec_apply.rs | compute_tools/src/spec_apply.rs | use std::collections::{HashMap, HashSet};
use std::fmt::{Debug, Formatter};
use std::future::Future;
use std::iter::{empty, once};
use std::sync::Arc;
use anyhow::{Context, Result};
use compute_api::responses::ComputeStatus;
use compute_api::spec::{ComputeAudit, ComputeSpec, Database, PgIdent, Role};
use futures::future::join_all;
use tokio::sync::RwLock;
use tokio_postgres::Client;
use tokio_postgres::error::SqlState;
use tracing::{Instrument, debug, error, info, info_span, instrument, warn};
use crate::compute::{ComputeNode, ComputeNodeParams, ComputeState, create_databricks_roles};
use crate::hadron_metrics::COMPUTE_CONFIGURE_STATEMENT_TIMEOUT_ERRORS;
use crate::pg_helpers::{
DatabaseExt, Escaping, GenericOptionsSearch, RoleExt, get_existing_dbs_async,
get_existing_roles_async,
};
use crate::spec_apply::ApplySpecPhase::{
AddDatabricksGrants, AlterDatabricksRoles, CreateAndAlterDatabases, CreateAndAlterRoles,
CreateAvailabilityCheck, CreateDatabricksMisc, CreateDatabricksRoles, CreatePgauditExtension,
CreatePgauditlogtofileExtension, CreatePrivilegedRole, CreateSchemaNeon,
DisablePostgresDBPgAudit, DropInvalidDatabases, DropRoles, FinalizeDropLogicalSubscriptions,
HandleDatabricksAuthExtension, HandleNeonExtension, HandleOtherExtensions,
RenameAndDeleteDatabases, RenameRoles, RunInEachDatabase,
};
use crate::spec_apply::PerDatabasePhase::{
ChangeSchemaPerms, DeleteDBRoleReferences, DropLogicalSubscriptions,
};
impl ComputeNode {
/// Apply the spec to the running PostgreSQL instance.
/// The caller can decide to run with multiple clients in parallel, or
/// single mode. Either way, the commands executed will be the same, and
/// only commands run in different databases are parallelized.
#[instrument(skip_all)]
pub fn apply_spec_sql(
&self,
spec: Arc<ComputeSpec>,
conf: Arc<tokio_postgres::Config>,
concurrency: usize,
) -> Result<()> {
info!("Applying config with max {} concurrency", concurrency);
debug!("Config: {:?}", spec);
let rt = tokio::runtime::Handle::current();
rt.block_on(async {
// Proceed with post-startup configuration. Note, that order of operations is important.
let client = Self::get_maintenance_client(&conf).await?;
let spec = spec.clone();
let params = Arc::new(self.params.clone());
let databases = get_existing_dbs_async(&client).await?;
let roles = get_existing_roles_async(&client)
.await?
.into_iter()
.map(|role| (role.name.clone(), role))
.collect::<HashMap<String, Role>>();
// Check if we need to drop subscriptions before starting the endpoint.
//
// It is important to do this operation exactly once when endpoint starts on a new branch.
// Otherwise, we may drop not inherited, but newly created subscriptions.
//
// We cannot rely only on spec.drop_subscriptions_before_start flag,
// because if for some reason compute restarts inside VM,
// it will start again with the same spec and flag value.
//
// To handle this, we save the fact of the operation in the database
// in the neon.drop_subscriptions_done table.
// If the table does not exist, we assume that the operation was never performed, so we must do it.
// If table exists, we check if the operation was performed on the current timelilne.
//
let mut drop_subscriptions_done = false;
if spec.drop_subscriptions_before_start {
let timeline_id = self.get_timeline_id().context("timeline_id must be set")?;
info!("Checking if drop subscription operation was already performed for timeline_id: {}", timeline_id);
drop_subscriptions_done = match
client.query("select 1 from neon.drop_subscriptions_done where timeline_id OPERATOR(pg_catalog.=) $1", &[&timeline_id.to_string()]).await {
Ok(result) => !result.is_empty(),
Err(e) =>
{
match e.code() {
Some(&SqlState::UNDEFINED_TABLE) => false,
_ => {
// We don't expect any other error here, except for the schema/table not existing
error!("Error checking if drop subscription operation was already performed: {}", e);
return Err(e.into());
}
}
}
}
};
let jwks_roles = Arc::new(
spec.as_ref()
.local_proxy_config
.iter()
.flat_map(|it| &it.jwks)
.flatten()
.flat_map(|setting| &setting.role_names)
.cloned()
.collect::<HashSet<_>>(),
);
let ctx = Arc::new(tokio::sync::RwLock::new(MutableApplyContext {
roles,
dbs: databases,
}));
// Apply special pre drop database phase.
// NOTE: we use the code of RunInEachDatabase phase for parallelism
// and connection management, but we don't really run it in *each* database,
// only in databases, we're about to drop.
info!("Applying PerDatabase (pre-dropdb) phase");
let concurrency_token = Arc::new(tokio::sync::Semaphore::new(concurrency));
// Run the phase for each database that we're about to drop.
let db_processes = spec
.delta_operations
.iter()
.flatten()
.filter_map(move |op| {
if op.action.as_str() == "delete_db" {
Some(op.name.clone())
} else {
None
}
})
.map(|dbname| {
let spec = spec.clone();
let ctx = ctx.clone();
let jwks_roles = jwks_roles.clone();
let mut conf = conf.as_ref().clone();
let concurrency_token = concurrency_token.clone();
// We only need dbname field for this phase, so set other fields to dummy values
let db = DB::UserDB(Database {
name: dbname.clone(),
owner: "cloud_admin".to_string(),
options: None,
restrict_conn: false,
invalid: false,
});
debug!("Applying per-database phases for Database {:?}", &db);
match &db {
DB::SystemDB => {}
DB::UserDB(db) => {
conf.dbname(db.name.as_str());
}
}
let conf = Arc::new(conf);
let fut = Self::apply_spec_sql_db(
params.clone(),
spec.clone(),
conf,
ctx.clone(),
jwks_roles.clone(),
concurrency_token.clone(),
db,
[DropLogicalSubscriptions].to_vec(),
self.params.lakebase_mode,
);
Ok(tokio::spawn(fut))
})
.collect::<Vec<Result<_, anyhow::Error>>>();
for process in db_processes.into_iter() {
let handle = process?;
if let Err(e) = handle.await? {
// Handle the error case where the database does not exist
// We do not check whether the DB exists or not in the deletion phase,
// so we shouldn't be strict about it in pre-deletion cleanup as well.
if e.to_string().contains("does not exist") {
warn!("Error dropping subscription: {}", e);
} else {
return Err(e);
}
};
}
let phases = if self.params.lakebase_mode {
vec![
CreatePrivilegedRole,
// BEGIN_HADRON
CreateDatabricksRoles,
AlterDatabricksRoles,
// END_HADRON
DropInvalidDatabases,
RenameRoles,
CreateAndAlterRoles,
RenameAndDeleteDatabases,
CreateAndAlterDatabases,
CreateSchemaNeon,
]
} else {
vec![
CreatePrivilegedRole,
DropInvalidDatabases,
RenameRoles,
CreateAndAlterRoles,
RenameAndDeleteDatabases,
CreateAndAlterDatabases,
CreateSchemaNeon,
]
};
for phase in phases {
info!("Applying phase {:?}", &phase);
apply_operations(
params.clone(),
spec.clone(),
ctx.clone(),
jwks_roles.clone(),
phase,
|| async { Ok(&client) },
self.params.lakebase_mode,
)
.await?;
}
info!("Applying RunInEachDatabase2 phase");
let concurrency_token = Arc::new(tokio::sync::Semaphore::new(concurrency));
let db_processes = spec
.cluster
.databases
.iter()
.map(|db| DB::new(db.clone()))
// include
.chain(once(DB::SystemDB))
.map(|db| {
let spec = spec.clone();
let ctx = ctx.clone();
let jwks_roles = jwks_roles.clone();
let mut conf = conf.as_ref().clone();
let concurrency_token = concurrency_token.clone();
let db = db.clone();
debug!("Applying per-database phases for Database {:?}", &db);
match &db {
DB::SystemDB => {}
DB::UserDB(db) => {
conf.dbname(db.name.as_str());
}
}
let conf = Arc::new(conf);
let mut phases = vec![
DeleteDBRoleReferences,
ChangeSchemaPerms,
];
if spec.drop_subscriptions_before_start && !drop_subscriptions_done {
info!("Adding DropLogicalSubscriptions phase because drop_subscriptions_before_start is set");
phases.push(DropLogicalSubscriptions);
}
let fut = Self::apply_spec_sql_db(
params.clone(),
spec.clone(),
conf,
ctx.clone(),
jwks_roles.clone(),
concurrency_token.clone(),
db,
phases,
self.params.lakebase_mode,
);
Ok(tokio::spawn(fut))
})
.collect::<Vec<Result<_, anyhow::Error>>>();
for process in db_processes.into_iter() {
let handle = process?;
handle.await??;
}
let mut phases = if self.params.lakebase_mode {
vec![
HandleOtherExtensions,
HandleNeonExtension, // This step depends on CreateSchemaNeon
// BEGIN_HADRON
HandleDatabricksAuthExtension,
// END_HADRON
CreateAvailabilityCheck,
DropRoles,
// BEGIN_HADRON
AddDatabricksGrants,
CreateDatabricksMisc,
// END_HADRON
]
} else {
vec![
HandleOtherExtensions,
HandleNeonExtension, // This step depends on CreateSchemaNeon
CreateAvailabilityCheck,
DropRoles,
]
};
// This step depends on CreateSchemaNeon
if spec.drop_subscriptions_before_start && !drop_subscriptions_done {
info!("Adding FinalizeDropLogicalSubscriptions phase because drop_subscriptions_before_start is set");
phases.push(FinalizeDropLogicalSubscriptions);
}
// Keep DisablePostgresDBPgAudit phase at the end,
// so that all config operations are audit logged.
match spec.audit_log_level
{
ComputeAudit::Hipaa | ComputeAudit::Extended | ComputeAudit::Full => {
phases.push(CreatePgauditExtension);
phases.push(CreatePgauditlogtofileExtension);
phases.push(DisablePostgresDBPgAudit);
}
ComputeAudit::Log | ComputeAudit::Base => {
phases.push(CreatePgauditExtension);
phases.push(DisablePostgresDBPgAudit);
}
ComputeAudit::Disabled => {}
}
for phase in phases {
debug!("Applying phase {:?}", &phase);
apply_operations(
params.clone(),
spec.clone(),
ctx.clone(),
jwks_roles.clone(),
phase,
|| async { Ok(&client) },
self.params.lakebase_mode,
)
.await?;
}
Ok::<(), anyhow::Error>(())
})?;
Ok(())
}
/// Apply SQL migrations of the RunInEachDatabase phase.
///
/// May opt to not connect to databases that don't have any scheduled
/// operations. The function is concurrency-controlled with the provided
/// semaphore. The caller has to make sure the semaphore isn't exhausted.
#[allow(clippy::too_many_arguments)] // TODO: needs bigger refactoring
async fn apply_spec_sql_db(
    params: Arc<ComputeNodeParams>,
    spec: Arc<ComputeSpec>,
    conf: Arc<tokio_postgres::Config>,
    ctx: Arc<tokio::sync::RwLock<MutableApplyContext>>,
    jwks_roles: Arc<HashSet<String>>,
    concurrency_token: Arc<tokio::sync::Semaphore>,
    db: DB,
    subphases: Vec<PerDatabasePhase>,
    lakebase_mode: bool,
) -> Result<()> {
    // Bound the number of concurrently-connected databases; the permit is
    // held for the whole duration of this function.
    let _permit = concurrency_token.acquire().await?;

    // Lazily-created connection to `db`, shared across all subphases; it is
    // only established the first time a subphase actually needs a client.
    let mut client_conn = None;

    for subphase in subphases {
        apply_operations(
            params.clone(),
            spec.clone(),
            ctx.clone(),
            jwks_roles.clone(),
            RunInEachDatabase {
                db: db.clone(),
                subphase,
            },
            // Only connect if apply_operation actually wants a connection.
            // It's quite possible this database doesn't need any queries,
            // so by not connecting we save time and effort connecting to
            // that database.
            || async {
                if client_conn.is_none() {
                    let db_client = Self::get_maintenance_client(&conf).await?;
                    client_conn.replace(db_client);
                }
                let client = client_conn.as_ref().unwrap();
                Ok(client)
            },
            lakebase_mode,
        )
        .await?;
    }

    drop(client_conn);

    Ok::<(), anyhow::Error>(())
}
/// Choose how many concurrent connections to use for applying the spec changes.
///
/// In `Init` state this derives the budget from `max_connections` (from the
/// spec settings or, failing that, from the raw `postgresql.conf` text); in
/// `Running` state it stays within the superuser-reserved connection slots.
pub fn max_service_connections(
    &self,
    compute_state: &ComputeState,
    spec: &ComputeSpec,
) -> usize {
    // If the cluster is in Init state we don't have to deal with user connections,
    // and can thus use all `max_connections` connection slots. However, that's generally not
    // very efficient, so we generally still limit it to a smaller number.
    if compute_state.status == ComputeStatus::Init {
        // If the settings contain 'max_connections', use that as template
        if let Some(config) = spec.cluster.settings.find("max_connections") {
            config.parse::<usize>().ok()
        } else {
            // Otherwise, try to find the setting in the postgresql_conf string
            spec.cluster
                .postgresql_conf
                .iter()
                .flat_map(|conf| conf.split("\n"))
                .filter_map(|line| {
                    // Cheap pre-filter before doing the key=value split.
                    if !line.contains("max_connections") {
                        return None;
                    }

                    let (key, value) = line.split_once("=")?;
                    let key = key
                        .trim_start_matches(char::is_whitespace)
                        .trim_end_matches(char::is_whitespace);

                    let value = value
                        .trim_start_matches(char::is_whitespace)
                        .trim_end_matches(char::is_whitespace);

                    // The pre-filter can match e.g. commented-out lines or
                    // similarly-named keys; require an exact key match.
                    if key != "max_connections" {
                        return None;
                    }

                    value.parse::<usize>().ok()
                })
                .next()
        }
        // If max_connections is present, use at most 1/3rd of that.
        // When max_connections is lower than 30, try to use at least 10 connections, but
        // never more than max_connections.
        .map(|limit| match limit {
            0..10 => limit,
            10..30 => 10,
            30..300 => limit / 3,
            300.. => 100,
        })
        // If we didn't find max_connections, default to 10 concurrent connections.
        .unwrap_or(10)
    } else {
        // state == Running
        // Because the cluster is already in the Running state, we should assume users are
        // already connected to the cluster, and high concurrency could negatively
        // impact user connectivity. Therefore, we can limit concurrency to the number of
        // reserved superuser connections, which users wouldn't be able to use anyway.
        spec.cluster
            .settings
            .find("superuser_reserved_connections")
            .iter()
            .filter_map(|val| val.parse::<usize>().ok())
            // Keep one reserved slot free for other superuser work, but
            // always allow at least one connection.
            .map(|val| if val > 1 { val - 1 } else { 1 })
            .next_back()
            .unwrap_or(3)
    }
}
}
/// Target database for a per-database apply phase: either the maintenance
/// (system) database that spec apply connects to by default, or one of the
/// user databases described in the compute spec.
#[derive(Clone)]
pub enum DB {
    SystemDB,
    UserDB(Database),
}
impl DB {
    /// Wraps a spec `Database` entry as a user database target.
    pub fn new(db: Database) -> DB {
        DB::UserDB(db)
    }

    /// Returns whether `role` owns this database. The system database is
    /// never considered owned by any spec role.
    pub fn is_owned_by(&self, role: &PgIdent) -> bool {
        if let DB::UserDB(db) = self {
            db.owner == *role
        } else {
            false
        }
    }
}
impl Debug for DB {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
match self {
DB::SystemDB => f.debug_tuple("SystemDB").finish(),
DB::UserDB(db) => f.debug_tuple("UserDB").field(&db.name).finish(),
}
}
}
/// Sub-phases that [`ApplySpecPhase::RunInEachDatabase`] executes inside an
/// individual database. The concrete SQL for each sub-phase is generated by
/// `get_operations`.
#[derive(Copy, Clone, Debug)]
pub enum PerDatabasePhase {
    // Presumably removes references to dropped roles from objects in the
    // database — confirm against the matching `get_operations` arm.
    DeleteDBRoleReferences,
    // Presumably adjusts schema-level permissions — confirm against the
    // matching `get_operations` arm.
    ChangeSchemaPerms,
    /// This is a shared phase, used for both i) dropping dangling LR subscriptions
    /// before dropping the DB, and ii) dropping all subscriptions after creating
    /// a fresh branch.
    /// N.B. we will skip all DBs that are not present in Postgres, invalid, or
    /// have `datallowconn = false` (`restrict_conn`).
    DropLogicalSubscriptions,
}
/// Top-level phases of applying a compute spec. Phases are executed in the
/// order they are pushed in `apply_spec_sql`; the SQL statements for each
/// phase are produced by `get_operations`.
#[derive(Clone, Debug)]
pub enum ApplySpecPhase {
    CreatePrivilegedRole,
    // BEGIN_HADRON
    CreateDatabricksRoles,
    AlterDatabricksRoles,
    // END_HADRON
    DropInvalidDatabases,
    RenameRoles,
    CreateAndAlterRoles,
    RenameAndDeleteDatabases,
    CreateAndAlterDatabases,
    CreateSchemaNeon,
    /// Run `subphase` against the specific database `db` (or the system DB);
    /// see [`PerDatabasePhase`].
    RunInEachDatabase { db: DB, subphase: PerDatabasePhase },
    CreatePgauditExtension,
    CreatePgauditlogtofileExtension,
    DisablePostgresDBPgAudit,
    HandleOtherExtensions,
    HandleNeonExtension,
    // BEGIN_HADRON
    HandleDatabricksAuthExtension,
    // END_HADRON
    CreateAvailabilityCheck,
    // BEGIN_HADRON
    AddDatabricksGrants,
    CreateDatabricksMisc,
    // END_HADRON
    DropRoles,
    FinalizeDropLogicalSubscriptions,
}
/// A single SQL statement scheduled for execution in some apply phase.
pub struct Operation {
    // SQL text, executed verbatim via `simple_query` in `apply_operations`.
    pub query: String,
    // Optional human-readable description; when present it is used when
    // constructing the tracing span for this operation.
    pub comment: Option<String>,
}
/// Mutable view of the cluster's existing roles and databases, shared behind
/// an `RwLock` across apply phases so later phases observe earlier changes.
pub struct MutableApplyContext {
    // Existing roles, keyed by role name (see population in `apply_spec_sql`).
    pub roles: HashMap<String, Role>,
    // Existing databases; populated from `get_existing_dbs_async` —
    // presumably keyed by database name (confirm in `pg_helpers`).
    pub dbs: HashMap<String, Database>,
}
/// Apply the operations that belong to the given spec apply phase.
///
/// Commands within a single phase are executed in order of Iterator yield.
/// Commands of ApplySpecPhase::RunInEachDatabase will execute in the database
/// indicated by its `db` field, and can share a single client for all changes
/// to that database.
///
/// Notes:
/// - Commands are pipelined, and thus may cause incomplete apply if one
///   command of many fails.
/// - Failing commands will fail the phase's apply step once the return value
///   is processed.
/// - No timeouts have (yet) been implemented.
/// - The caller is responsible for limiting and/or applying concurrency.
pub async fn apply_operations<'a, Fut, F>(
    params: Arc<ComputeNodeParams>,
    spec: Arc<ComputeSpec>,
    ctx: Arc<RwLock<MutableApplyContext>>,
    jwks_roles: Arc<HashSet<String>>,
    apply_spec_phase: ApplySpecPhase,
    client: F,
    lakebase_mode: bool,
) -> Result<()>
where
    F: FnOnce() -> Fut,
    Fut: Future<Output = Result<&'a Client>>,
{
    debug!("Starting phase {:?}", &apply_spec_phase);
    let span = info_span!("db_apply_changes", phase=?apply_spec_phase);
    let span2 = span.clone();
    async move {
        debug!("Processing phase {:?}", &apply_spec_phase);
        let ctx = ctx;

        let mut ops = get_operations(&params, &spec, &ctx, &jwks_roles, &apply_spec_phase)
            .await?
            .peekable();

        // Return (and by doing so, skip requesting the PostgreSQL client) if
        // we don't have any operations scheduled.
        if ops.peek().is_none() {
            return Ok(());
        }

        let client = client().await?;

        debug!("Applying phase {:?}", &apply_spec_phase);

        let active_queries = ops
            .map(|op| {
                let Operation { comment, query } = op;
                let inspan = match comment {
                    None => span.clone(),
                    // NOTE(review): `info_span!`'s first argument is the span
                    // *name* (a literal), so the "{}" placeholders are not
                    // interpolated here; `comment` is recorded as a span
                    // field instead — confirm this is the intended naming.
                    Some(comment) => info_span!("phase {}: {}", comment),
                };
                async {
                    let query = query;
                    let res = client.simple_query(&query).await;
                    debug!(
                        "{} {}",
                        if res.is_ok() {
                            "successfully executed"
                        } else {
                            "failed to execute"
                        },
                        query
                    );
                    if !lakebase_mode {
                        return res;
                    }
                    // BEGIN HADRON
                    if let Err(e) = res.as_ref() {
                        if let Some(sql_state) = e.code() {
                            if sql_state.code() == "57014" {
                                // SQL State 57014 (ERRCODE_QUERY_CANCELED) is used for statement timeouts.
                                // Increment the counter whenever a statement timeout occurs. Timeouts on
                                // this configuration path can only occur due to PS connectivity problems that
                                // Postgres failed to recover from.
                                COMPUTE_CONFIGURE_STATEMENT_TIMEOUT_ERRORS.inc();
                            }
                        }
                    }
                    // END HADRON
                    res
                }
                .instrument(inspan)
            })
            .collect::<Vec<_>>();

        drop(ctx);

        // All per-operation futures share one client, so the statements are
        // pipelined; the first error aborts the phase here.
        for it in join_all(active_queries).await {
            drop(it?);
        }

        debug!("Completed phase {:?}", &apply_spec_phase);
        Ok(())
    }
    .instrument(span2)
    .await
}
/// Create a stream of operations to be executed for that phase of applying
/// changes.
///
/// In the future we may generate a single stream of changes and then
/// sort/merge/batch execution, but for now this is a nice way to improve
/// batching behavior of the commands.
async fn get_operations<'a>(
params: &'a ComputeNodeParams,
spec: &'a ComputeSpec,
ctx: &'a RwLock<MutableApplyContext>,
jwks_roles: &'a HashSet<String>,
apply_spec_phase: &'a ApplySpecPhase,
) -> Result<Box<dyn Iterator<Item = Operation> + 'a + Send>> {
match apply_spec_phase {
ApplySpecPhase::CreatePrivilegedRole => Ok(Box::new(once(Operation {
query: format!(
include_str!("sql/create_privileged_role.sql"),
privileged_role_name = params.privileged_role_name,
privileges = if params.lakebase_mode {
"CREATEDB CREATEROLE NOLOGIN BYPASSRLS"
} else {
"CREATEDB CREATEROLE NOLOGIN REPLICATION BYPASSRLS"
}
),
comment: None,
}))),
// BEGIN_HADRON
// New Hadron phase
ApplySpecPhase::CreateDatabricksRoles => {
let queries = create_databricks_roles();
let operations = queries.into_iter().map(|query| Operation {
query,
comment: None,
});
Ok(Box::new(operations))
}
// Backfill existing databricks_reader_* roles with statement timeout from GUC
ApplySpecPhase::AlterDatabricksRoles => {
let query = String::from(include_str!(
"sql/alter_databricks_reader_roles_timeout.sql"
));
let operations = once(Operation {
query,
comment: Some(
"Backfill existing databricks_reader_* roles with statement timeout"
.to_string(),
),
});
Ok(Box::new(operations))
}
// End of new Hadron Phase
// END_HADRON
ApplySpecPhase::DropInvalidDatabases => {
let mut ctx = ctx.write().await;
let databases = &mut ctx.dbs;
let keys: Vec<_> = databases
.iter()
.filter(|(_, db)| db.invalid)
.map(|(dbname, _)| dbname.clone())
.collect();
// After recent commit in Postgres, interrupted DROP DATABASE
// leaves the database in the invalid state. According to the
// commit message, the only option for user is to drop it again.
// See:
// https://github.com/postgres/postgres/commit/a4b4cc1d60f7e8ccfcc8ff8cb80c28ee411ad9a9
//
// Postgres Neon extension is done the way, that db is de-registered
// in the control plane metadata only after it is dropped. So there is
// a chance that it still thinks that the db should exist. This means
// that it will be re-created by the `CreateDatabases` phase. This
// is fine, as user can just drop the table again (in vanilla
// Postgres they would need to do the same).
let operations = keys
.into_iter()
.filter_map(move |dbname| ctx.dbs.remove(&dbname))
.map(|db| Operation {
query: format!("DROP DATABASE IF EXISTS {}", db.name.pg_quote()),
comment: Some(format!("Dropping invalid database {}", db.name)),
});
Ok(Box::new(operations))
}
ApplySpecPhase::RenameRoles => {
let mut ctx = ctx.write().await;
let operations = spec
.delta_operations
.iter()
.flatten()
.filter(|op| op.action == "rename_role")
.filter_map(move |op| {
let roles = &mut ctx.roles;
if roles.contains_key(op.name.as_str()) {
None
} else {
let new_name = op.new_name.as_ref().unwrap();
let mut role = roles.remove(op.name.as_str()).unwrap();
role.name = new_name.clone();
role.encrypted_password = None;
roles.insert(role.name.clone(), role);
Some(Operation {
query: format!(
"ALTER ROLE {} RENAME TO {}",
op.name.pg_quote(),
new_name.pg_quote()
),
comment: Some(format!("renaming role '{}' to '{}'", op.name, new_name)),
})
}
});
Ok(Box::new(operations))
}
ApplySpecPhase::CreateAndAlterRoles => {
let mut ctx = ctx.write().await;
let operations = spec.cluster.roles
.iter()
.filter_map(move |role| {
let roles = &mut ctx.roles;
let db_role = roles.get(&role.name);
match db_role {
Some(db_role) => {
if db_role.encrypted_password != role.encrypted_password {
// This can be run on /every/ role! Not just ones created through the console.
// This means that if you add some funny ALTER here that adds a permission,
// this will get run even on user-created roles! This will result in different
// behavior before and after a spec gets reapplied. The below ALTER as it stands
// now only grants LOGIN and changes the password. Please do not allow this branch
// to do anything silly.
Some(Operation {
query: format!(
"ALTER ROLE {} {}",
role.name.pg_quote(),
role.to_pg_options(),
),
comment: None,
})
} else {
None
}
}
None => {
let query = if !jwks_roles.contains(role.name.as_str()) {
format!(
"CREATE ROLE {} INHERIT CREATEROLE CREATEDB BYPASSRLS REPLICATION IN ROLE {} {}",
role.name.pg_quote(),
params.privileged_role_name,
role.to_pg_options(),
)
} else {
format!(
"CREATE ROLE {} {}",
role.name.pg_quote(),
role.to_pg_options(),
)
};
Some(Operation {
query,
comment: Some(format!("creating role {}", role.name)),
})
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | true |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/compute_tools/src/pg_isready.rs | compute_tools/src/pg_isready.rs | use anyhow::{Context, anyhow};
// Run `/usr/local/bin/pg_isready -p {port}`
// Check the connectivity of PG
// Success means PG is listening on the port and accepting connections
// Note that PG does not need to authenticate the connection, nor reserve a connection quota for it.
// See https://www.postgresql.org/docs/current/app-pg-isready.html
/// Runs `pg_isready -p {port}` and returns `Ok(())` iff it exits successfully,
/// i.e. Postgres is accepting connections on that port.
pub fn pg_isready(bin: &str, port: u16) -> anyhow::Result<()> {
    // Do the actual spawn/wait/status dance in a closure so that a single
    // `with_context` below can wrap every failure mode uniformly.
    let run = || -> anyhow::Result<()> {
        let mut child = std::process::Command::new(bin)
            .arg("-p")
            .arg(port.to_string())
            .spawn()
            .context("spawn() failed")?;
        let status = child.wait().context("wait() failed")?;
        if status.success() {
            Ok(())
        } else {
            Err(anyhow!("process exited with {status}"))
        }
    };
    // wrap any prior error with the overall context that we couldn't run the command
    run().with_context(|| format!("could not run `{bin} --port {port}`"))
}
/// Derive the path of the `pg_isready` binary from the path of `postgres`.
///
/// It's safe to assume pg_isready is under the same directory with postgres,
/// because it is a PG util bin installed along with postgres.
pub fn get_pg_isready_bin(pgbin: &str) -> String {
    // Replace the final path component ("postgres") with "pg_isready".
    // If `pgbin` has no directory component at all, return a bare
    // "pg_isready": the previous slice-and-join implementation produced the
    // bogus absolute path "/pg_isready" in that case.
    match pgbin.rsplit_once('/') {
        Some((dir, _)) => format!("{dir}/pg_isready"),
        None => "pg_isready".to_string(),
    }
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/compute_tools/src/pgbouncer.rs | compute_tools/src/pgbouncer.rs | pub const PGBOUNCER_PIDFILE: &str = "/tmp/pgbouncer.pid";
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/compute_tools/src/local_proxy.rs | compute_tools/src/local_proxy.rs | //! Local Proxy is a feature of our BaaS Neon Authorize project.
//!
//! Local Proxy validates JWTs and manages the pg_session_jwt extension.
//! It also maintains a connection pool to postgres.
use anyhow::{Context, Result};
use camino::Utf8Path;
use compute_api::spec::LocalProxySpec;
use nix::sys::signal::Signal;
use utils::pid_file::{self, PidFileRead};
pub fn configure(local_proxy: &LocalProxySpec) -> Result<()> {
write_local_proxy_conf("/etc/local_proxy/config.json".as_ref(), local_proxy)?;
notify_local_proxy("/etc/local_proxy/pid".as_ref())?;
Ok(())
}
/// Create or completely rewrite configuration file specified by `path`
fn write_local_proxy_conf(path: &Utf8Path, local_proxy: &LocalProxySpec) -> Result<()> {
    // Serialize first so a serialization error never leaves a half-written file.
    let json = serde_json::to_string_pretty(local_proxy)
        .context("serializing LocalProxySpec to json")?;
    std::fs::write(path, json).with_context(|| format!("writing {path}"))?;
    Ok(())
}
/// Notify local proxy about a new config file.
fn notify_local_proxy(path: &Utf8Path) -> Result<()> {
    // If the pidfile doesn't exist or isn't locked, local_proxy isn't
    // running; it will naturally pick up the new config when it starts, so
    // there is nothing to signal.
    if let PidFileRead::LockedByOtherProcess(pid) = pid_file::read(path)? {
        // Per the `pid_file` docs the PID can be stale: the owner may exit at
        // any moment, and a freshly-claimed pidfile can briefly still hold an
        // old PID. Acting on a stale PID risks signalling an unrelated
        // process; the only race-free alternative is a supervisor process
        // whose lifetime exceeds all children (e.g. `runit`, `supervisord`).
        // We accept that small risk here because we only send SIGHUP, which
        // asks the target to reload its config rather than killing it.
        nix::sys::signal::kill(pid, Signal::SIGHUP).context("sending signal to local_proxy")?;
    }
    Ok(())
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/compute_tools/src/compute_promote.rs | compute_tools/src/compute_promote.rs | use crate::compute::ComputeNode;
use anyhow::{Context, bail};
use compute_api::responses::{LfcPrewarmState, PromoteConfig, PromoteState};
use std::time::Instant;
use tracing::info;
impl ComputeNode {
/// Returns only when promote fails or succeeds. If http client calling this function
/// disconnects, this does not stop promotion, and subsequent calls block until promote finishes.
/// Called by control plane on secondary after primary endpoint is terminated
/// Has a failpoint "compute-promotion"
pub async fn promote(self: &std::sync::Arc<Self>, cfg: PromoteConfig) -> PromoteState {
    let this = self.clone();
    // Fold any error into `PromoteState::Failed` instead of propagating it,
    // so every waiter observes the same terminal state via the watch channel.
    let promote_fn = async move || match this.promote_impl(cfg).await {
        Ok(state) => state,
        Err(err) => {
            tracing::error!(%err, "promoting replica");
            let error = format!("{err:#}");
            PromoteState::Failed { error }
        }
    };

    // Spawn the promotion task exactly once and publish its result through a
    // watch channel; later callers only receive a clone of the receiver.
    let start_promotion = || {
        let (tx, rx) = tokio::sync::watch::channel(PromoteState::NotPromoted);
        tokio::spawn(async move { tx.send(promote_fn().await) });
        rx
    };

    let mut task;
    // promote_impl locks self.state so we need to unlock it before calling task.changed()
    {
        let promote_state = &mut self.state.lock().unwrap().promote_state;
        task = promote_state.get_or_insert_with(start_promotion).clone()
    }
    // `changed()` errors only if the sender was dropped before publishing a
    // result (i.e. the spawned task died unexpectedly).
    if task.changed().await.is_err() {
        let error = "promote sender dropped".to_string();
        return PromoteState::Failed { error };
    }
    task.borrow().clone()
}
    /// Actual promotion logic: verify preconditions, wait for the replica to
    /// replay up to the primary's flushed WAL position, point Postgres at the
    /// safekeepers, call `pg_promote()`, and reconfigure this compute as a
    /// primary. Returns `PromoteState::Completed` with per-phase timings.
    async fn promote_impl(&self, cfg: PromoteConfig) -> anyhow::Result<PromoteState> {
        // Precondition checks under the state lock: must currently be a
        // replica, and LFC prewarm must not still be pending.
        {
            let state = self.state.lock().unwrap();
            let mode = &state.pspec.as_ref().unwrap().spec.mode;
            if *mode != compute_api::spec::ComputeMode::Replica {
                bail!("compute mode \"{}\" is not replica", mode.to_type_str());
            }
            match &state.lfc_prewarm_state {
                // Promoting before/while prewarming would defeat the prewarm.
                status @ (LfcPrewarmState::NotPrewarmed | LfcPrewarmState::Prewarming) => {
                    bail!("compute {status}")
                }
                // A failed prewarm is only a warning; promotion proceeds.
                LfcPrewarmState::Failed { error } => {
                    tracing::warn!(%error, "compute prewarm failed")
                }
                _ => {}
            }
        }
        let client = ComputeNode::get_maintenance_client(&self.tokio_conn_conf)
            .await
            .context("connecting to postgres")?;
        let mut now = Instant::now();
        // Phase 1: wait (bounded, 1s per try) until the standby has replayed
        // up to the primary's flush LSN supplied in the promote request.
        let primary_lsn = cfg.wal_flush_lsn;
        let mut standby_lsn = utils::lsn::Lsn::INVALID;
        const RETRIES: i32 = 20;
        for i in 0..=RETRIES {
            let row = client
                .query_one("SELECT pg_catalog.pg_last_wal_replay_lsn()", &[])
                .await
                .context("getting last replay lsn")?;
            let lsn: u64 = row.get::<usize, postgres_types::PgLsn>(0).into();
            standby_lsn = lsn.into();
            if standby_lsn >= primary_lsn {
                break;
            }
            info!(%standby_lsn, %primary_lsn, "catching up, try {i}");
            tokio::time::sleep(std::time::Duration::from_secs(1)).await;
        }
        if standby_lsn < primary_lsn {
            bail!("didn't catch up with primary in {RETRIES} retries");
        }
        let lsn_wait_time_ms = now.elapsed().as_millis() as u32;
        now = Instant::now();
        // Phase 2: configure safekeepers and synchronous replication, then
        // reload. Note: using $1 doesn't work with ALTER SYSTEM SET, hence
        // the string formatting below.
        let safekeepers_sql = format!(
            "ALTER SYSTEM SET neon.safekeepers='{}'",
            cfg.spec.safekeeper_connstrings.join(",")
        );
        client
            .query(&safekeepers_sql, &[])
            .await
            .context("setting safekeepers")?;
        client
            .query(
                "ALTER SYSTEM SET synchronous_standby_names=walproposer",
                &[],
            )
            .await
            .context("setting synchronous_standby_names")?;
        client
            .query("SELECT pg_catalog.pg_reload_conf()", &[])
            .await
            .context("reloading postgres config")?;
        // Test-only failure injection point, used to exercise error paths.
        #[cfg(feature = "testing")]
        fail::fail_point!("compute-promotion", |_| bail!(
            "compute-promotion failpoint"
        ));
        // Phase 3: actual promotion; pg_promote() returns false on refusal.
        let row = client
            .query_one("SELECT * FROM pg_catalog.pg_promote()", &[])
            .await
            .context("pg_promote")?;
        if !row.get::<usize, bool>(0) {
            bail!("pg_promote() failed");
        }
        let pg_promote_time_ms = now.elapsed().as_millis() as u32;
        let now = Instant::now();
        // Sanity check: after promotion the server must accept writes.
        let row = client
            .query_one("SHOW transaction_read_only", &[])
            .await
            .context("getting transaction_read_only")?;
        if row.get::<usize, &str>(0) == "on" {
            bail!("replica in read only mode after promotion");
        }
        // Already checked validity in http handler
        #[allow(unused_mut)]
        let mut new_pspec = crate::compute::ParsedSpec::try_from(cfg.spec).expect("invalid spec");
        {
            let mut state = self.state.lock().unwrap();
            // Local setup has different ports for pg process (port=) for primary and secondary.
            // Primary is stopped so we need secondary's "port" value
            #[cfg(feature = "testing")]
            {
                let old_spec = &state.pspec.as_ref().unwrap().spec;
                let Some(old_conf) = old_spec.cluster.postgresql_conf.as_ref() else {
                    bail!("pspec.spec.cluster.postgresql_conf missing for endpoint");
                };
                // Parse "key=value" lines of the old config to recover "port".
                let set: std::collections::HashMap<&str, &str> = old_conf
                    .split_terminator('\n')
                    .map(|e| e.split_once("=").expect("invalid item"))
                    .collect();
                let Some(new_conf) = new_pspec.spec.cluster.postgresql_conf.as_mut() else {
                    bail!("pspec.spec.cluster.postgresql_conf missing for supplied config");
                };
                new_conf.push_str(&format!("port={}\n", set["port"]));
            }
            tracing::debug!("applied spec: {:#?}", new_pspec.spec);
            if self.params.lakebase_mode {
                ComputeNode::set_spec(&self.params, &mut state, new_pspec);
            } else {
                state.pspec = Some(new_pspec);
            }
        }
        info!("applied new spec, reconfiguring as primary");
        // Phase 4: apply the new spec to the running instance.
        self.reconfigure()?;
        let reconfigure_time_ms = now.elapsed().as_millis() as u32;
        Ok(PromoteState::Completed {
            lsn_wait_time_ms,
            pg_promote_time_ms,
            reconfigure_time_ms,
        })
    }
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/compute_tools/src/sync_sk.rs | compute_tools/src/sync_sk.rs | // Utils for running sync_safekeepers
use anyhow::Result;
use tracing::info;
use utils::lsn::Lsn;
/// Outcome of asking one safekeeper about our timeline.
#[derive(Copy, Clone, Debug)]
pub enum TimelineStatusResponse {
    /// The safekeeper does not know about this timeline.
    NotFound,
    /// The safekeeper hosts the timeline; payload carries its LSNs.
    Ok(TimelineStatusOkResponse),
}
/// LSN pair reported by a safekeeper for a timeline it knows about.
#[derive(Copy, Clone, Debug)]
pub struct TimelineStatusOkResponse {
    // Highest LSN flushed to disk on that safekeeper.
    flush_lsn: Lsn,
    // Highest LSN known to be committed by a quorum.
    commit_lsn: Lsn,
}
/// Get a safekeeper's metadata for our timeline. The id is only used for logging
pub async fn ping_safekeeper(
    id: String,
    config: tokio_postgres::Config,
) -> Result<TimelineStatusResponse> {
    // TODO add retries

    // Connect
    info!("connecting to {}", id);
    let (client, conn) = config.connect(tokio_postgres::NoTls).await?;
    // The connection future drives the wire protocol; run it in the background.
    tokio::spawn(async move {
        if let Err(e) = conn.await {
            eprintln!("connection error: {e}");
        }
    });

    // Query
    info!("querying {}", id);
    let result = client.simple_query("TIMELINE_STATUS").await?;

    // Parse result
    info!("done with {}", id);
    // NOTE(review): assumes the reply contains at least one message —
    // `result[0]` would panic on an empty reply; confirm protocol guarantees.
    if let postgres::SimpleQueryMessage::Row(row) = &result[0] {
        use std::str::FromStr;
        // LSNs come back as text columns; parse failures propagate via `?`.
        let response = TimelineStatusResponse::Ok(TimelineStatusOkResponse {
            flush_lsn: Lsn::from_str(row.get("flush_lsn").unwrap())?,
            commit_lsn: Lsn::from_str(row.get("commit_lsn").unwrap())?,
        });
        Ok(response)
    } else {
        // Timeline doesn't exist
        Ok(TimelineStatusResponse::NotFound)
    }
}
/// Given a quorum of responses, check if safekeepers are synced at some Lsn.
///
/// Returns `Some(lsn)` only when every response is `Ok` and all reported
/// commit/flush LSNs are identical; otherwise (including an empty response
/// set) returns `None`.
pub fn check_if_synced(responses: Vec<TimelineStatusResponse>) -> Option<Lsn> {
    // Check if all responses are ok
    let ok_responses: Vec<TimelineStatusOkResponse> = responses
        .iter()
        .filter_map(|r| match r {
            TimelineStatusResponse::Ok(ok_response) => Some(ok_response),
            _ => None,
        })
        .copied() // TimelineStatusOkResponse is Copy; no need for clone
        .collect();
    if ok_responses.len() < responses.len() {
        info!(
            "not synced. Only {} out of {} know about this timeline",
            ok_responses.len(),
            responses.len()
        );
        return None;
    }

    // Robustness fix: with no responses at all there is no LSN to agree on.
    // Previously the `unwrap()`s below would panic on an empty input.
    if ok_responses.is_empty() {
        info!("not synced. No safekeeper responses");
        return None;
    }

    // Get the min and the max of everything
    let commit: Vec<Lsn> = ok_responses.iter().map(|r| r.commit_lsn).collect();
    let flush: Vec<Lsn> = ok_responses.iter().map(|r| r.flush_lsn).collect();
    // Safe: ok_responses is non-empty, checked above.
    let commit_max = commit.iter().max().unwrap();
    let commit_min = commit.iter().min().unwrap();
    let flush_max = flush.iter().max().unwrap();
    let flush_min = flush.iter().min().unwrap();

    // Check that all values are equal
    if commit_min != commit_max {
        info!("not synced. {:?} {:?}", commit_min, commit_max);
        return None;
    }
    if flush_min != flush_max {
        info!("not synced. {:?} {:?}", flush_min, flush_max);
        return None;
    }

    // Check that commit == flush
    if commit_max != flush_max {
        info!("not synced. {:?} {:?}", commit_max, flush_max);
        return None;
    }

    Some(*commit_max)
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/compute_tools/src/checker.rs | compute_tools/src/checker.rs | use anyhow::{Ok, Result, anyhow};
use tokio_postgres::NoTls;
use tracing::{error, instrument, warn};
use crate::compute::ComputeNode;
/// Update timestamp in a row in a special service table to check
/// that we can actually write some data in this particular timeline.
#[instrument(skip_all)]
pub async fn check_writability(compute: &ComputeNode) -> Result<()> {
    // Connect to the database.
    let conf = compute.get_tokio_conn_conf(Some("compute_ctl:availability_checker"));
    let (client, connection) = conf.connect(NoTls).await?;
    if client.is_closed() {
        return Err(anyhow!("connection to postgres closed"));
    }

    // The connection object performs the actual communication with the database,
    // so spawn it off to run on its own.
    tokio::spawn(async move {
        if let Err(e) = connection.await {
            error!("connection error: {}", e);
        }
    });

    // Upsert keeps public.health_check at a single row (id=1); only the
    // timestamp moves, so the write is cheap and idempotent.
    let query = "
    INSERT INTO public.health_check VALUES (1, pg_catalog.now())
    ON CONFLICT (id) DO UPDATE
     SET updated_at = pg_catalog.now();";

    match client.simple_query(query).await {
        // `Result::Ok` is spelled out because `anyhow::Ok` is imported above.
        Result::Ok(result) => {
            // A single statement should produce exactly one result message.
            if result.len() != 1 {
                return Err(anyhow::anyhow!(
                    "expected 1 query results, but got {}",
                    result.len()
                ));
            }
        }
        Err(err) => {
            // A full tenant disk is an expected condition, not a writability
            // failure of the timeline itself: log and report success.
            if let Some(state) = err.code() {
                if state == &tokio_postgres::error::SqlState::DISK_FULL {
                    warn!("Tenant disk is full");
                    return Ok(());
                }
            }
            return Err(err.into());
        }
    }

    Ok(())
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/compute_tools/src/monitor.rs | compute_tools/src/monitor.rs | use std::sync::Arc;
use std::thread;
use std::time::Duration;
use chrono::{DateTime, Utc};
use compute_api::responses::ComputeStatus;
use compute_api::spec::ComputeFeature;
use postgres::{Client, NoTls};
use tracing::{Level, error, info, instrument, span};
use crate::compute::ComputeNode;
use crate::metrics::{PG_CURR_DOWNTIME_MS, PG_TOTAL_DOWNTIME_MS};
// Default deadline for Postgres to reach Running state (lakebase mode only).
// NOTE(review): "TIMEOUIT" is a typo for "TIMEOUT"; kept as-is because the
// constant is referenced elsewhere in this file.
const PG_DEFAULT_INIT_TIMEOUIT: Duration = Duration::from_secs(60);
// How often the monitor loop polls Postgres for activity.
const MONITOR_CHECK_INTERVAL: Duration = Duration::from_millis(500);
/// Struct to store runtime state of the compute monitor thread.
/// In theory, this could be a part of `Compute`, but i)
/// this state is expected to be accessed only by single thread,
/// so we don't need to care about locking; ii) `Compute` is
/// already quite big. Thus, it seems to be a good idea to keep
/// all the activity/health monitoring parts here.
struct ComputeMonitor {
    // Shared handle to the compute whose Postgres we are monitoring.
    compute: Arc<ComputeNode>,

    /// The moment when Postgres had some activity,
    /// that should prevent compute from being suspended.
    last_active: Option<DateTime<Utc>>,

    /// The moment when we last tried to check Postgres.
    last_checked: DateTime<Utc>,
    /// The last moment we did a successful Postgres check.
    last_up: DateTime<Utc>,

    /// Only used for internal statistics change tracking
    /// between monitor runs and can be outdated.
    active_time: Option<f64>,
    /// Only used for internal statistics change tracking
    /// between monitor runs and can be outdated.
    sessions: Option<i64>,

    /// Use experimental statistics-based activity monitor. It's no longer
    /// 'experimental' per se, as it's enabled for everyone, but we still
    /// keep the flag as an option to turn it off in some cases if it will
    /// misbehave.
    experimental: bool,
}
impl ComputeMonitor {
fn report_down(&self) {
let now = Utc::now();
// Calculate and report current downtime
// (since the last time Postgres was up)
let downtime = now.signed_duration_since(self.last_up);
PG_CURR_DOWNTIME_MS.set(downtime.num_milliseconds() as f64);
// Calculate and update total downtime
// (cumulative duration of Postgres downtime in ms)
let inc = now
.signed_duration_since(self.last_checked)
.num_milliseconds();
PG_TOTAL_DOWNTIME_MS.inc_by(inc as u64);
}
fn report_up(&mut self) {
self.last_up = Utc::now();
PG_CURR_DOWNTIME_MS.set(0.0);
}
fn downtime_info(&self) -> String {
format!(
"total_ms: {}, current_ms: {}, last_up: {}",
PG_TOTAL_DOWNTIME_MS.get(),
PG_CURR_DOWNTIME_MS.get(),
self.last_up
)
}
/// Check if compute is in some terminal or soon-to-be-terminal
/// state, then return `true`, signalling the caller that it
/// should exit gracefully. Otherwise, return `false`.
fn check_interrupts(&mut self) -> bool {
let compute_status = self.compute.get_status();
if matches!(
compute_status,
ComputeStatus::Terminated
| ComputeStatus::TerminationPendingFast
| ComputeStatus::TerminationPendingImmediate
| ComputeStatus::Failed
) {
info!(
"compute is in {} status, stopping compute monitor",
compute_status
);
return true;
}
false
}
    /// Spin in a loop and figure out the last activity time in the Postgres.
    /// Then update it in the shared state. This function currently never
    /// errors out explicitly, but there is a graceful termination path.
    /// Every time we receive an error trying to check Postgres, we use
    /// [`ComputeMonitor::check_interrupts()`] because it could be that
    /// compute is being terminated already, then we can exit gracefully
    /// to not produce errors' noise in the log.
    /// NB: the only expected panic is at `Mutex` unwrap(), all other errors
    /// should be handled gracefully.
    #[instrument(skip_all)]
    pub fn run(&mut self) -> anyhow::Result<()> {
        // Suppose that `connstr` doesn't change
        let connstr = self.compute.params.connstr.clone();
        let conf = self
            .compute
            .get_conn_conf(Some("compute_ctl:compute_monitor"));

        // During startup and configuration we connect to every Postgres database,
        // but we don't want to count this as some user activity. So wait until
        // the compute fully started before monitoring activity.
        wait_for_postgres_start(&self.compute);

        // Define `client` outside of the loop to reuse existing connection if it's active.
        let mut client = conf.connect(NoTls);

        info!("starting compute monitor for {}", connstr);

        // Main poll loop: every iteration either performs a check over a live
        // connection or tries to (re)establish one; downtime is reported on
        // every failed path.
        loop {
            if self.check_interrupts() {
                break;
            }
            match &mut client {
                Ok(cli) => {
                    if cli.is_closed() {
                        info!(
                            downtime_info = self.downtime_info(),
                            "connection to Postgres is closed, trying to reconnect"
                        );
                        if self.check_interrupts() {
                            break;
                        }
                        self.report_down();
                        // Connection is closed, reconnect and try again.
                        client = conf.connect(NoTls);
                    } else {
                        match self.check(cli) {
                            Ok(_) => {
                                self.report_up();
                                self.compute.update_last_active(self.last_active);
                            }
                            Err(e) => {
                                error!(
                                    downtime_info = self.downtime_info(),
                                    "could not check Postgres: {}", e
                                );
                                if self.check_interrupts() {
                                    break;
                                }
                                // Although we have many places where we can return errors in `check()`,
                                // normally it shouldn't happen. I.e., we will likely return error if
                                // connection got broken, query timed out, Postgres returned invalid data, etc.
                                // In all such cases it's suspicious, so let's report this as downtime.
                                self.report_down();
                                // Reconnect to Postgres just in case. During tests, I noticed
                                // that queries in `check()` can fail with `connection closed`,
                                // but `cli.is_closed()` above doesn't detect it. Even if old
                                // connection is still alive, it will be dropped when we reassign
                                // `client` to a new connection.
                                client = conf.connect(NoTls);
                            }
                        }
                    }
                }
                Err(e) => {
                    info!(
                        downtime_info = self.downtime_info(),
                        "could not connect to Postgres: {}, retrying", e
                    );
                    if self.check_interrupts() {
                        break;
                    }
                    self.report_down();
                    // Establish a new connection and try again.
                    client = conf.connect(NoTls);
                }
            }

            // Reset the `last_checked` timestamp and sleep before the next iteration.
            self.last_checked = Utc::now();
            thread::sleep(MONITOR_CHECK_INTERVAL);
        }

        // Graceful termination path
        Ok(())
    }
    /// Perform one round of activity checks against Postgres, updating
    /// `self.last_active` as soon as any user activity is detected (each
    /// detection short-circuits with an early `return Ok(())`). Returns an
    /// error if any of the underlying queries fail.
    #[instrument(skip_all)]
    fn check(&mut self, cli: &mut Client) -> anyhow::Result<()> {
        // This is new logic, only enable if the feature flag is set.
        // TODO: remove this once we are sure that it works OR drop it altogether.
        if self.experimental {
            // Check if the total active time or sessions across all databases has changed.
            // If it did, it means that user executed some queries. In theory, it can even go down if
            // some databases were dropped, but it's still user activity.
            match get_database_stats(cli) {
                Ok((active_time, sessions)) => {
                    let mut detected_activity = false;

                    // Any change (up or down) in cumulative active time counts.
                    if let Some(prev_active_time) = self.active_time {
                        if active_time != prev_active_time {
                            detected_activity = true;
                        }
                    }
                    self.active_time = Some(active_time);

                    // Same for the cumulative session count.
                    if let Some(prev_sessions) = self.sessions {
                        if sessions != prev_sessions {
                            detected_activity = true;
                        }
                    }
                    self.sessions = Some(sessions);

                    if detected_activity {
                        // Update the last active time and continue, we don't need to
                        // check backends state change.
                        self.last_active = Some(Utc::now());
                        return Ok(());
                    }
                }
                Err(e) => {
                    return Err(anyhow::anyhow!("could not get database statistics: {}", e));
                }
            }
        }

        // If database statistics are the same, check all backends for state changes.
        // Maybe there are some with more recent activity. `get_backends_state_change()`
        // can return None or stale timestamp, so it's `compute.update_last_active()`
        // responsibility to check if the new timestamp is more recent than the current one.
        // This helps us to discover new sessions that have not done anything yet.
        match get_backends_state_change(cli) {
            Ok(last_active) => match (last_active, self.last_active) {
                (Some(last_active), Some(prev_last_active)) => {
                    // Only advance, never move the activity timestamp back.
                    if last_active > prev_last_active {
                        self.last_active = Some(last_active);
                        return Ok(());
                    }
                }
                (Some(last_active), None) => {
                    self.last_active = Some(last_active);
                    return Ok(());
                }
                _ => {}
            },
            Err(e) => {
                return Err(anyhow::anyhow!(
                    "could not get backends state change: {}",
                    e
                ));
            }
        }

        // If there are existing (logical) walsenders, do not suspend.
        //
        // N.B. walproposer doesn't currently show up in pg_stat_replication,
        // but protect if it will.
        const WS_COUNT_QUERY: &str =
            "select count(*) from pg_stat_replication where application_name != 'walproposer';";
        match cli.query_one(WS_COUNT_QUERY, &[]) {
            Ok(r) => match r.try_get::<&str, i64>("count") {
                Ok(num_ws) => {
                    if num_ws > 0 {
                        self.last_active = Some(Utc::now());
                        return Ok(());
                    }
                }
                Err(e) => {
                    let err: anyhow::Error = e.into();
                    return Err(err.context("failed to parse walsenders count"));
                }
            },
            Err(e) => {
                return Err(anyhow::anyhow!("failed to get list of walsenders: {}", e));
            }
        }

        // Don't suspend compute if there is an active logical replication subscription
        //
        // `where pid is not null` – to filter out read only computes and subscription on branches
        const LOGICAL_SUBSCRIPTIONS_QUERY: &str =
            "select count(*) from pg_stat_subscription where pid is not null;";
        match cli.query_one(LOGICAL_SUBSCRIPTIONS_QUERY, &[]) {
            Ok(row) => match row.try_get::<&str, i64>("count") {
                Ok(num_subscribers) => {
                    if num_subscribers > 0 {
                        self.last_active = Some(Utc::now());
                        return Ok(());
                    }
                }
                Err(e) => {
                    return Err(anyhow::anyhow!(
                        "failed to parse 'pg_stat_subscription' count: {}",
                        e
                    ));
                }
            },
            Err(e) => {
                return Err(anyhow::anyhow!(
                    "failed to get list of active logical replication subscriptions: {}",
                    e
                ));
            }
        }

        // Do not suspend compute if autovacuum is running
        const AUTOVACUUM_COUNT_QUERY: &str =
            "select count(*) from pg_stat_activity where backend_type = 'autovacuum worker'";
        match cli.query_one(AUTOVACUUM_COUNT_QUERY, &[]) {
            Ok(r) => match r.try_get::<&str, i64>("count") {
                Ok(num_workers) => {
                    if num_workers > 0 {
                        self.last_active = Some(Utc::now());
                        return Ok(());
                    };
                }
                Err(e) => {
                    return Err(anyhow::anyhow!(
                        "failed to parse autovacuum workers count: {}",
                        e
                    ));
                }
            },
            Err(e) => {
                return Err(anyhow::anyhow!(
                    "failed to get list of autovacuum workers: {}",
                    e
                ));
            }
        }

        // No activity detected this round.
        Ok(())
    }
}
// Hang on condition variable waiting until the compute status is `Running`.
//
// In lakebase mode this additionally enforces a startup deadline: if Postgres
// was started but did not reach `Running` within `pg_init_timeout`, the whole
// process exits so the compute can restart with a fresh spec.
fn wait_for_postgres_start(compute: &ComputeNode) {
    let mut state = compute.state.lock().unwrap();
    // NOTE(review): "TIMEOUIT" is an existing typo in the constant's name.
    let pg_init_timeout = compute
        .params
        .pg_init_timeout
        .unwrap_or(PG_DEFAULT_INIT_TIMEOUIT);
    while state.status != ComputeStatus::Running {
        info!("compute is not running, waiting before monitoring activity");
        // Outside lakebase mode: plain condvar wait with no deadline.
        if !compute.params.lakebase_mode {
            state = compute.state_changed.wait(state).unwrap();

            if state.status == ComputeStatus::Running {
                break;
            }
            continue;
        }

        if state.pg_start_time.is_some()
            && Utc::now()
                .signed_duration_since(state.pg_start_time.unwrap())
                .to_std()
                .unwrap_or_default()
                > pg_init_timeout
        {
            // If Postgres isn't up and running with working PS/SK connections within POSTGRES_STARTUP_TIMEOUT, it is
            // possible that we started Postgres with a wrong spec (so it is talking to the wrong PS/SK nodes). To prevent
            // deadends we simply exit (panic) the compute node so it can restart with the latest spec.
            //
            // NB: We skip this check if we have not attempted to start PG yet (indicated by state.pg_start_up == None).
            // This is to make sure the more appropriate errors are surfaced if we encounter issues before we even attempt
            // to start PG (e.g., if we can't pull the spec, can't sync safekeepers, or can't get the basebackup).
            error!(
                "compute did not enter Running state in {} seconds, exiting",
                pg_init_timeout.as_secs()
            );
            std::process::exit(1);
        }
        // Bounded wait so the deadline above is re-evaluated periodically.
        state = compute
            .state_changed
            .wait_timeout(state, Duration::from_secs(5))
            .unwrap()
            .0;
    }
}
// Figure out the total active time and sessions across all non-system databases.
// Returned tuple is `(active_time, sessions)`.
// It can return `0.0` active time or `0` sessions, which means no user databases exist OR
// it was a start with skipped `pg_catalog` updates and user didn't do any queries
// (or open any sessions) yet.
fn get_database_stats(cli: &mut Client) -> anyhow::Result<(f64, i64)> {
    // Filter out `postgres` database as `compute_ctl` and other monitoring tools
    // like `postgres_exporter` use it to query Postgres statistics.
    // Use explicit 8 bytes type casts to match Rust types.
    //
    // Idiom cleanup: errors are propagated with `map_err` + `?` instead of
    // the previous match-and-rewrap blocks; messages are unchanged.
    let stats = cli
        .query_one(
            "SELECT pg_catalog.coalesce(pg_catalog.sum(active_time), 0.0)::pg_catalog.float8 AS total_active_time,
            pg_catalog.coalesce(pg_catalog.sum(sessions), 0)::pg_catalog.bigint AS total_sessions
        FROM pg_catalog.pg_stat_database
        WHERE datname NOT IN (
            'postgres',
            'template0',
            'template1'
        );",
            &[],
        )
        .map_err(|e| anyhow::anyhow!("could not query active_time: {}", e))?;

    let active_time: f64 = stats
        .try_get("total_active_time")
        .map_err(|e| anyhow::anyhow!("could not get total_active_time: {}", e))?;
    let sessions: i64 = stats
        .try_get("total_sessions")
        .map_err(|e| anyhow::anyhow!("could not get total_sessions: {}", e))?;

    Ok((active_time, sessions))
}
// Figure out the most recent state change time across all client backends.
// If there is currently active backend, timestamp will be `Utc::now()`.
// It can return `None`, which means no client backends exist or we were
// unable to parse the timestamp.
fn get_backends_state_change(cli: &mut Client) -> anyhow::Result<Option<DateTime<Utc>>> {
    let mut last_active: Option<DateTime<Utc>> = None;
    // Get all running client backends except ourself, use RFC3339 DateTime format.
    let backends = cli.query(
        "SELECT state, pg_catalog.to_char(state_change, 'YYYY-MM-DD\"T\"HH24:MI:SS.US\"Z\"'::pg_catalog.text) AS state_change
        FROM pg_stat_activity
        WHERE backend_type OPERATOR(pg_catalog.=) 'client backend'::pg_catalog.text
            AND pid OPERATOR(pg_catalog.!=) pg_catalog.pg_backend_pid()
            AND usename OPERATOR(pg_catalog.!=) 'cloud_admin'::pg_catalog.name;", // XXX: find a better way to filter other monitors?
        &[],
    );

    match backends {
        Ok(backs) => {
            let mut idle_backs: Vec<DateTime<Utc>> = vec![];

            for b in backs.into_iter() {
                // Rows with unreadable columns are skipped, not treated as errors.
                let state: String = match b.try_get("state") {
                    Ok(state) => state,
                    Err(_) => continue,
                };

                if state == "idle" {
                    let change: String = match b.try_get("state_change") {
                        Ok(state_change) => state_change,
                        Err(_) => continue,
                    };
                    let change = DateTime::parse_from_rfc3339(&change);
                    match change {
                        Ok(t) => idle_backs.push(t.with_timezone(&Utc)),
                        Err(e) => {
                            info!("cannot parse backend state_change DateTime: {}", e);
                            continue;
                        }
                    }
                } else {
                    // Found non-idle backend, so the last activity is NOW.
                    // Return immediately, no need to check other backends.
                    return Ok(Some(Utc::now()));
                }
            }

            // Get idle backend `state_change` with the max timestamp.
            if let Some(last) = idle_backs.iter().max() {
                last_active = Some(*last);
            }
        }
        Err(e) => {
            return Err(anyhow::anyhow!("could not query backends: {}", e));
        }
    }
    Ok(last_active)
}
/// Launch a separate compute monitor thread and return its `JoinHandle`.
pub fn launch_monitor(compute: &Arc<ComputeNode>) -> thread::JoinHandle<()> {
let compute = Arc::clone(compute);
let experimental = compute.has_feature(ComputeFeature::ActivityMonitorExperimental);
let now = Utc::now();
let mut monitor = ComputeMonitor {
compute,
last_active: None,
last_checked: now,
last_up: now,
active_time: None,
sessions: None,
experimental,
};
thread::Builder::new()
.name("compute-monitor".into())
.spawn(move || {
let span = span!(Level::INFO, "compute_monitor");
let _enter = span.enter();
match monitor.run() {
Ok(_) => info!("compute monitor thread terminated gracefully"),
Err(err) => error!("compute monitor thread terminated abnormally {:?}", err),
}
})
.expect("cannot launch compute monitor thread")
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/compute_tools/src/rsyslog.rs | compute_tools/src/rsyslog.rs | use std::fs;
use std::io::ErrorKind;
use std::path::Path;
use std::process::Command;
use std::time::Duration;
use std::{fs::OpenOptions, io::Write};
use url::{Host, Url};
use anyhow::{Context, Result, anyhow};
use hostname_validator;
use tracing::{error, info, instrument, warn};
const POSTGRES_LOGS_CONF_PATH: &str = "/etc/rsyslog.d/postgres_logs.conf";
/// Return the PID of a running `rsyslogd` process (via `pgrep`), or `None`
/// when no such process exists.
fn get_rsyslog_pid() -> Option<String> {
    // `pgrep` prints nothing on stdout when no process matches.
    let output = Command::new("pgrep")
        .arg("rsyslogd")
        .output()
        .expect("Failed to execute pgrep");

    if output.stdout.is_empty() {
        return None;
    }

    let pid = std::str::from_utf8(&output.stdout)
        .expect("Invalid UTF-8 in process output")
        .trim()
        .to_string();
    Some(pid)
}
/// Poll for a running `rsyslogd`, with exponential backoff, for up to
/// `MAX_WAIT`. Returns its PID on success.
///
/// Bug fix: the final error message previously had its format arguments
/// swapped (attempt count was printed as "seconds" and vice versa).
fn wait_for_rsyslog_pid() -> Result<String, anyhow::Error> {
    const MAX_WAIT: Duration = Duration::from_secs(5);
    const INITIAL_SLEEP: Duration = Duration::from_millis(2);

    let mut sleep_duration = INITIAL_SLEEP;
    let start = std::time::Instant::now();
    let mut attempts: u32 = 0;

    loop {
        attempts += 1;
        if let Some(pid) = get_rsyslog_pid() {
            return Ok(pid);
        }
        // Give up once the overall deadline has passed.
        if start.elapsed() >= MAX_WAIT {
            break;
        }
        info!(
            "rsyslogd is not running, attempt {}. Sleeping for {} ms",
            attempts,
            sleep_duration.as_millis()
        );
        std::thread::sleep(sleep_duration);
        // Exponential backoff: 2ms, 4ms, 8ms, ...
        sleep_duration *= 2;
    }

    Err(anyhow::anyhow!(
        "rsyslogd is not running after waiting for {} seconds and {} attempts",
        start.elapsed().as_secs(),
        attempts
    ))
}
// Restart rsyslogd to apply the new configuration.
// This is necessary, because there is no other way to reload the rsyslog configuration.
//
// Rsyslogd shouldn't lose any messages, because of the restart,
// because it tracks the last read position in the log files
// and will continue reading from that position.
// TODO: test it properly
//
fn restart_rsyslog() -> Result<()> {
    // kill it to restart
    // NOTE(review): assumes an external supervisor respawns rsyslogd after
    // the pkill — confirm; otherwise the wait below can only time out.
    let _ = Command::new("pkill")
        .arg("rsyslogd")
        .output()
        .context("Failed to restart rsyslogd")?;

    // ensure rsyslogd is running
    wait_for_rsyslog_pid()?;

    Ok(())
}
/// Parse an audit syslog endpoint into `(host, port, tls)`.
///
/// The TLS endpoint takes precedence whenever it is non-empty; the third
/// tuple element is the string `"true"`/`"false"` indicating which one was
/// chosen. Accepts only a bare `host:port` (no path, query, fragment or
/// credentials); the host may be a valid hostname, IPv4 or IPv6 literal.
fn parse_audit_syslog_address(
    remote_plain_endpoint: &str,
    remote_tls_endpoint: &str,
) -> Result<(String, u16, String)> {
    let (remote_endpoint, tls) = if remote_tls_endpoint.is_empty() {
        (remote_plain_endpoint, "false".to_string())
    } else {
        (remote_tls_endpoint, "true".to_string())
    };

    // Prepend a scheme so the host:port pair can be parsed with url::Url.
    let url = Url::parse(&format!("http://{remote_endpoint}")).map_err(|err| {
        anyhow!("Error parsing {remote_endpoint}, expected host:port, got {err:?}")
    })?;

    // Reject anything beyond a bare authority (path, query, user info, ...).
    if url.scheme() != "http"
        || url.path() != "/"
        || url.query().is_some()
        || url.fragment().is_some()
        || !url.username().is_empty()
        || url.password().is_some()
    {
        return Err(anyhow!(
            "Invalid address format {remote_endpoint}, expected host:port"
        ));
    }

    let host = match url.host() {
        Some(Host::Domain(h)) if hostname_validator::is_valid(h) => h.to_string(),
        Some(Host::Ipv4(ip4)) => ip4.to_string(),
        Some(Host::Ipv6(ip6)) => ip6.to_string(),
        _ => return Err(anyhow!("Invalid host")),
    };
    let port = url
        .port()
        .ok_or_else(|| anyhow!("Invalid port in {remote_endpoint}"))?;

    Ok((host, port, tls))
}
/// Render the audit rsyslog configuration from the bundled template,
/// substituting the log directory, endpoint/project identifiers and the
/// remote syslog destination (host, port, and "true"/"false" TLS flag).
fn generate_audit_rsyslog_config(
    log_directory: String,
    endpoint_id: &str,
    project_id: &str,
    remote_syslog_host: &str,
    remote_syslog_port: u16,
    remote_syslog_tls: &str,
) -> String {
    format!(
        include_str!("config_template/compute_audit_rsyslog_template.conf"),
        log_directory = log_directory,
        endpoint_id = endpoint_id,
        project_id = project_id,
        remote_syslog_host = remote_syslog_host,
        remote_syslog_port = remote_syslog_port,
        remote_syslog_tls = remote_syslog_tls
    )
}
/// Render and install the audit rsyslog configuration, then restart
/// rsyslogd so it picks the file up.
///
/// Returns an error if the remote endpoint cannot be parsed, if the config
/// file cannot be written, or if rsyslogd does not come back after restart.
pub fn configure_audit_rsyslog(
    log_directory: String,
    endpoint_id: &str,
    project_id: &str,
    remote_endpoint: &str,
    remote_tls_endpoint: &str,
) -> Result<()> {
    // Bug fix: propagate an unparsable endpoint as an error instead of
    // panicking the process (this was previously `.unwrap()`).
    let (remote_syslog_host, remote_syslog_port, remote_syslog_tls) =
        parse_audit_syslog_address(remote_endpoint, remote_tls_endpoint)?;

    let config_content = generate_audit_rsyslog_config(
        log_directory,
        endpoint_id,
        project_id,
        &remote_syslog_host,
        remote_syslog_port,
        &remote_syslog_tls,
    );

    info!("rsyslog config_content: {}", config_content);

    let rsyslog_conf_path = "/etc/rsyslog.d/compute_audit_rsyslog.conf";
    // Create or truncate the config file and write the rendered content.
    let mut file = OpenOptions::new()
        .create(true)
        .write(true)
        .truncate(true)
        .open(rsyslog_conf_path)?;

    file.write_all(config_content.as_bytes())?;

    info!(
        "rsyslog configuration file {} added successfully. Starting rsyslogd",
        rsyslog_conf_path
    );

    // start the service, using the configuration
    restart_rsyslog()?;

    Ok(())
}
/// Configuration for enabling Postgres logs forwarding from rsyslogd
pub struct PostgresLogsRsyslogConfig<'a> {
    // Forwarding target as "host:port"; `None` disables forwarding.
    pub host: Option<&'a str>,
}
impl<'a> PostgresLogsRsyslogConfig<'a> {
    /// Create a config for the given optional "host:port" forwarding target.
    pub fn new(host: Option<&'a str>) -> Self {
        Self { host }
    }

    /// Render the rsyslog config snippet for Postgres log forwarding.
    /// Returns an empty string when no host is configured, and an error
    /// when the host is not in "host:port" form.
    pub fn build(&self) -> Result<String> {
        match self.host {
            Some(host) => {
                if let Some((target, port)) = host.split_once(":") {
                    Ok(format!(
                        include_str!(
                            "config_template/compute_rsyslog_postgres_export_template.conf"
                        ),
                        logs_export_target = target,
                        logs_export_port = port,
                    ))
                } else {
                    Err(anyhow!("Invalid host format for Postgres logs export"))
                }
            }
            None => Ok("".to_string()),
        }
    }

    /// Read the currently installed config file; a missing file is treated
    /// as an empty configuration rather than an error.
    fn current_config() -> Result<String> {
        let config_content = match std::fs::read_to_string(POSTGRES_LOGS_CONF_PATH) {
            Ok(c) => c,
            Err(err) if err.kind() == ErrorKind::NotFound => String::new(),
            Err(err) => return Err(err.into()),
        };
        Ok(config_content)
    }
}
/// Writes rsyslogd configuration for Postgres logs export and restarts rsyslog.
pub fn configure_postgres_logs_export(conf: PostgresLogsRsyslogConfig) -> Result<()> {
let new_config = conf.build()?;
let current_config = PostgresLogsRsyslogConfig::current_config()?;
if new_config == current_config {
info!("postgres logs rsyslog configuration is up-to-date");
return Ok(());
}
// Nothing to configure
if new_config.is_empty() {
// When the configuration is removed, PostgreSQL will stop sending data
// to the files watched by rsyslog, so restarting rsyslog is more effort
// than just ignoring this change.
return Ok(());
}
info!(
"configuring rsyslog for postgres logs export to: {:?}",
conf.host
);
let mut file = OpenOptions::new()
.create(true)
.write(true)
.truncate(true)
.open(POSTGRES_LOGS_CONF_PATH)?;
file.write_all(new_config.as_bytes())?;
info!(
"rsyslog configuration file {} added successfully. Starting rsyslogd",
POSTGRES_LOGS_CONF_PATH
);
restart_rsyslog()?;
Ok(())
}
/// Periodic (once a minute) garbage collection of old pgaudit log files in
/// `log_directory`, plus a gauge of the directory's remaining log size.
/// Runs forever; only returns on an error from the `find` invocation.
#[instrument(skip_all)]
async fn pgaudit_gc_main_loop(log_directory: String) -> Result<()> {
    info!("running pgaudit GC main loop");
    loop {
        // Check log_directory for old pgaudit logs and delete them.
        // New log files are checked every 5 minutes, as set in pgaudit.log_rotation_age
        // Find files that were not modified in the last 15 minutes and delete them.
        // This should be enough time for rsyslog to process the logs and for us to catch the alerts.
        //
        // In case of a very high load, we might need to adjust this value and pgaudit.log_rotation_age.
        //
        // TODO: add some smarter logic to delete the files that are fully streamed according to rsyslog
        // imfile-state files, but for now just do a simple GC to avoid filling up the disk.
        let _ = Command::new("find")
            .arg(&log_directory)
            .arg("-name")
            .arg("audit*.log")
            .arg("-mmin")
            .arg("+15")
            .arg("-delete")
            .output()?;

        // also collect the metric for the size of the log directory
        // Helper: total size of regular files whose path ends with "log".
        async fn get_log_files_size(path: &Path) -> Result<u64> {
            let mut total_size = 0;

            for entry in fs::read_dir(path)? {
                let entry = entry?;
                let entry_path = entry.path();

                if entry_path.is_file() && entry_path.to_string_lossy().ends_with("log") {
                    total_size += entry.metadata()?.len();
                }
            }

            Ok(total_size)
        }

        // Metric collection is best-effort: failures are logged and reported as 0.
        let log_directory_size = get_log_files_size(Path::new(&log_directory))
            .await
            .unwrap_or_else(|e| {
                warn!("Failed to get log directory size: {}", e);
                0
            });
        crate::metrics::AUDIT_LOG_DIR_SIZE.set(log_directory_size as f64);
        tokio::time::sleep(Duration::from_secs(60)).await;
    }
}
// launch pgaudit GC thread to clean up the old pgaudit logs stored in the log_directory
pub fn launch_pgaudit_gc(log_directory: String) {
tokio::spawn(async move {
if let Err(e) = pgaudit_gc_main_loop(log_directory).await {
error!("pgaudit GC main loop failed: {}", e);
}
});
}
#[cfg(test)]
mod tests {
    use super::{generate_audit_rsyslog_config, parse_audit_syslog_address};
    use crate::rsyslog::PostgresLogsRsyslogConfig;

    #[test]
    fn test_postgres_logs_config() {
        // No host configured: the generated snippet is empty.
        let built = PostgresLogsRsyslogConfig::new(None).build();
        assert!(built.is_ok());
        assert_eq!(&built.unwrap(), "");

        // A valid host:port produces an omfwd forwarding stanza.
        let built = PostgresLogsRsyslogConfig::new(Some("collector.cvc.local:514")).build();
        assert!(built.is_ok());
        let snippet = built.unwrap();
        assert!(snippet.contains("omfwd"));
        assert!(snippet.contains(r#"target="collector.cvc.local""#));
        assert!(snippet.contains(r#"port="514""#));

        // An address without a port is rejected.
        let built = PostgresLogsRsyslogConfig::new(Some("invalid")).build();
        assert!(built.is_err());
    }

    #[test]
    fn test_parse_audit_syslog_address() {
        // host:port format (plaintext)
        let result = parse_audit_syslog_address("collector.host.tld:5555", "");
        assert!(result.is_ok());
        assert_eq!(
            result.unwrap(),
            (
                String::from("collector.host.tld"),
                5555,
                String::from("false")
            )
        );

        // host:port format with an IPv4 address (plaintext)
        let result = parse_audit_syslog_address("10.0.0.1:5555", "");
        assert!(result.is_ok());
        assert_eq!(
            result.unwrap(),
            (String::from("10.0.0.1"), 5555, String::from("false"))
        );

        // host:port format with a bracketed IPv6 address (plaintext)
        let result =
            parse_audit_syslog_address("[7e60:82ed:cb2e:d617:f904:f395:aaca:e252]:5555", "");
        assert_eq!(
            result.unwrap(),
            (
                String::from("7e60:82ed:cb2e:d617:f904:f395:aaca:e252"),
                5555,
                String::from("false")
            )
        );

        // Only the TLS host:port is defined.
        let result = parse_audit_syslog_address("", "tls.host.tld:5556");
        assert_eq!(
            result.unwrap(),
            (String::from("tls.host.tld"), 5556, String::from("true"))
        );

        // The TLS host takes precedence when both are defined.
        let result = parse_audit_syslog_address("plaintext.host.tld:5555", "tls.host.tld:5556");
        assert_eq!(
            result.unwrap(),
            (String::from("tls.host.tld"), 5556, String::from("true"))
        );

        // host without port (plaintext)
        assert!(parse_audit_syslog_address("collector.host.tld", "").is_err());
        // port without host
        assert!(parse_audit_syslog_address(":5555", "").is_err());
        // valid host with an out-of-range port
        assert!(parse_audit_syslog_address("collector.host.tld:90001", "").is_err());
        // invalid hostname with a valid port
        assert!(parse_audit_syslog_address("-collector.host.tld:5555", "").is_err());
        // unparseable address
        assert!(parse_audit_syslog_address("collector.host.tld:::5555", "").is_err());
    }

    #[test]
    fn test_generate_audit_rsyslog_config() {
        // Plaintext variant.
        let conf_str = generate_audit_rsyslog_config(
            "/tmp/log".to_string(),
            "ep-test-endpoint-id",
            "test-project-id",
            "collector.host.tld",
            5555,
            "false",
        );
        assert!(conf_str.contains(r#"set $.remote_syslog_tls = "false";"#));
        assert!(conf_str.contains(r#"type="omfwd""#));
        assert!(conf_str.contains(r#"target="collector.host.tld""#));
        assert!(conf_str.contains(r#"port="5555""#));
        assert!(conf_str.contains(r#"StreamDriverPermittedPeers="collector.host.tld""#));

        // TLS variant.
        let conf_str = generate_audit_rsyslog_config(
            "/tmp/log".to_string(),
            "ep-test-endpoint-id",
            "test-project-id",
            "collector.host.tld",
            5556,
            "true",
        );
        assert!(conf_str.contains(r#"set $.remote_syslog_tls = "true";"#));
        assert!(conf_str.contains(r#"type="omfwd""#));
        assert!(conf_str.contains(r#"target="collector.host.tld""#));
        assert!(conf_str.contains(r#"port="5556""#));
        assert!(conf_str.contains(r#"StreamDriverPermittedPeers="collector.host.tld""#));
    }
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/compute_tools/src/communicator_socket_client.rs | compute_tools/src/communicator_socket_client.rs | //! Client for making request to a running Postgres server's communicator control socket.
//!
//! The storage communicator process that runs inside Postgres exposes an HTTP endpoint in
//! a Unix Domain Socket in the Postgres data directory. This provides access to it.
use std::path::Path;
use anyhow::Context;
use hyper::client::conn::http1::SendRequest;
use hyper_util::rt::TokioIo;
/// Name of the socket within the Postgres data directory. This must match the name
/// used in `pgxn/neon/communicator/src/lib.rs`.
const NEON_COMMUNICATOR_SOCKET_NAME: &str = "neon-communicator.socket";
/// Open a connection to the communicator's control socket, prepare to send requests to it
/// with hyper.
///
/// `pgdata` is the Postgres data directory containing the Unix domain socket.
/// Returns an HTTP/1 `SendRequest` handle; the connection itself is driven by a
/// background task spawned here.
pub async fn connect_communicator_socket<B>(pgdata: &Path) -> anyhow::Result<SendRequest<B>>
where
    B: hyper::body::Body + 'static + Send,
    B::Data: Send,
    B::Error: Into<Box<dyn std::error::Error + Send + Sync>>,
{
    let socket_path = pgdata.join(NEON_COMMUNICATOR_SOCKET_NAME);
    // NB: length of the *displayed* path, used only to decide whether the
    // connect(2) path-length workaround below is needed.
    let socket_path_len = socket_path.display().to_string().len();

    // There is a limit of around 100 bytes (108 on Linux?) on the length of the path to a
    // Unix Domain socket. The limit is on the connect(2) function used to open the
    // socket, not on the absolute path itself. Postgres changes the current directory to
    // the data directory and uses a relative path to bind to the socket, and the relative
    // path "./neon-communicator.socket" is always short, but when compute_ctl needs to
    // open the socket, we need to use a full path, which can be arbitrarily long.
    //
    // There are a few ways we could work around this:
    //
    // 1. Change the current directory to the Postgres data directory and use a relative
    //    path in the connect(2) call. That's problematic because the current directory
    //    applies to the whole process. We could change the current directory early in
    //    compute_ctl startup, and that might be a good idea anyway for other reasons too:
    //    it would be more robust if the data directory is moved around or unlinked for
    //    some reason, and you would be less likely to accidentally litter other parts of
    //    the filesystem with e.g. temporary files. However, that's a pretty invasive
    //    change.
    //
    // 2. On Linux, you could open() the data directory, and refer to the the socket
    //    inside it as "/proc/self/fd/<fd>/neon-communicator.socket". But that's
    //    Linux-only.
    //
    // 3. Create a symbolic link to the socket with a shorter path, and use that.
    //
    // We use the symbolic link approach here. Hopefully the paths we use in production
    // are shorter, so that we can open the socket directly, so that this hack is needed
    // only in development.
    let connect_result = if socket_path_len < 100 {
        // We can open the path directly with no hacks.
        tokio::net::UnixStream::connect(socket_path).await
    } else {
        // The path to the socket is too long. Create a symlink to it with a shorter path.
        // The pid + tokio task id make the name unique across concurrent callers.
        let short_path = std::env::temp_dir().join(format!(
            "compute_ctl.short-socket.{}.{}",
            std::process::id(),
            tokio::task::id()
        ));
        std::os::unix::fs::symlink(&socket_path, &short_path)?;

        // Delete the symlink as soon as we have connected to it. There's a small chance
        // of leaking if the process dies before we remove it, so try to keep that window
        // as small as possible. (The guard runs at the end of this `else` block.)
        scopeguard::defer! {
            if let Err(err) = std::fs::remove_file(&short_path) {
                tracing::warn!("could not remove symlink \"{}\" created for socket: {}",
                               short_path.display(), err);
            }
        }
        tracing::info!(
            "created symlink \"{}\" for socket \"{}\", opening it now",
            short_path.display(),
            socket_path.display()
        );
        tokio::net::UnixStream::connect(&short_path).await
    };
    let stream = connect_result.context("connecting to communicator control socket")?;
    let io = TokioIo::new(stream);

    let (request_sender, connection) = hyper::client::conn::http1::handshake(io).await?;

    // spawn a task to poll the connection and drive the HTTP state
    tokio::spawn(async move {
        if let Err(err) = connection.await {
            eprintln!("Error in connection: {err}");
        }
    });

    Ok(request_sender)
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/compute_tools/src/installed_extensions.rs | compute_tools/src/installed_extensions.rs | use std::collections::HashMap;
use anyhow::Result;
use compute_api::responses::{InstalledExtension, InstalledExtensions};
use once_cell::sync::Lazy;
use tokio_postgres::error::Error as PostgresError;
use tokio_postgres::{Client, Config, NoTls};
use crate::metrics::INSTALLED_EXTENSIONS;
/// We don't reuse get_existing_dbs() just for code clarity
/// and to make database listing query here more explicit.
///
/// Limit the number of databases to 500 to avoid excessive load.
async fn list_dbs(client: &mut Client) -> Result<Vec<String>, PostgresError> {
    // `pg_database.datconnlimit = -2` means that the database is in the
    // invalid state
    let rows = client
        .query(
            "SELECT datname FROM pg_catalog.pg_database
            WHERE datallowconn
            AND datconnlimit OPERATOR(pg_catalog.<>) (OPERATOR(pg_catalog.-) 2::pg_catalog.int4)
            LIMIT 500",
            &[],
        )
        .await?;

    Ok(rows
        .iter()
        .map(|row| row.get::<_, String>("datname"))
        .collect())
}
/// Connect to every database (see list_dbs above) and get the list of installed extensions.
///
/// Same extension can be installed in multiple databases with different versions,
/// so we report a separate metric (number of databases where it is installed)
/// for each extension version.
pub async fn get_installed_extensions(
    mut conf: Config,
) -> Result<InstalledExtensions, PostgresError> {
    conf.application_name("compute_ctl:get_installed_extensions");

    // First, list the databases using a short-lived maintenance connection.
    let databases: Vec<String> = {
        let (mut client, connection) = conf.connect(NoTls).await?;
        // Drive the connection state machine in the background; queries on
        // `client` make no progress unless `connection` is polled.
        tokio::spawn(async move {
            if let Err(e) = connection.await {
                eprintln!("connection error: {e}");
            }
        });
        list_dbs(&mut client).await?
    };

    // Aggregate by (extname, version, owned_by_superuser): the value counts in
    // how many databases that exact combination is installed.
    let mut extensions_map: HashMap<(String, String, String), InstalledExtension> = HashMap::new();
    for db in databases.iter() {
        // Reuse the same Config, only switching the target database.
        conf.dbname(db);
        let (client, connection) = conf.connect(NoTls).await?;
        tokio::spawn(async move {
            if let Err(e) = connection.await {
                eprintln!("connection error: {e}");
            }
        });
        let extensions: Vec<(String, String, i32)> = client
            .query(
                "SELECT extname, extversion, extowner::pg_catalog.int4 FROM pg_catalog.pg_extension",
                &[],
            )
            .await?
            .iter()
            .map(|row| {
                (
                    row.get("extname"),
                    row.get("extversion"),
                    row.get("extowner"),
                )
            })
            .collect();

        for (extname, v, extowner) in extensions.iter() {
            let version = v.to_string();
            // check if the extension is owned by superuser
            // 10 is the oid of superuser
            let owned_by_superuser = if *extowner == 10 { "1" } else { "0" };
            extensions_map
                .entry((
                    extname.to_string(),
                    version.clone(),
                    owned_by_superuser.to_string(),
                ))
                .and_modify(|e| {
                    // count the number of databases where the extension is installed
                    e.n_databases += 1;
                })
                .or_insert(InstalledExtension {
                    extname: extname.to_string(),
                    version: version.clone(),
                    n_databases: 1,
                    owned_by_superuser: owned_by_superuser.to_string(),
                });
        }
    }

    // Mirror the aggregated counts into the Prometheus gauge.
    for (key, ext) in extensions_map.iter() {
        let (extname, version, owned_by_superuser) = key;
        let n_databases = ext.n_databases as u64;
        INSTALLED_EXTENSIONS
            .with_label_values(&[extname, version, owned_by_superuser])
            .set(n_databases);
    }

    Ok(InstalledExtensions {
        extensions: extensions_map.into_values().collect(),
    })
}
/// Force-initialize the lazy INSTALLED_EXTENSIONS metric so it is registered
/// up front rather than on first use.
pub fn initialize_metrics() {
    Lazy::force(&INSTALLED_EXTENSIONS);
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/compute_tools/src/disk_quota.rs | compute_tools/src/disk_quota.rs | use anyhow::Context;
use tracing::instrument;
pub const DISK_QUOTA_BIN: &str = "/neonvm/bin/set-disk-quota";

/// If size_bytes is 0, it disables the quota. Otherwise, it sets filesystem quota to size_bytes.
/// `fs_mountpoint` should point to the mountpoint of the filesystem where the quota should be set.
#[instrument]
pub fn set_disk_quota(size_bytes: u64, fs_mountpoint: &str) -> anyhow::Result<()> {
    // The helper binary expects the size in kilobytes.
    let size_kb = size_bytes / 1024;

    // Runs `/usr/bin/sudo {DISK_QUOTA_BIN} {size_kb} {mountpoint}` and checks
    // the exit status.
    let run = || -> anyhow::Result<()> {
        let mut child = std::process::Command::new("/usr/bin/sudo")
            .arg(DISK_QUOTA_BIN)
            .arg(size_kb.to_string())
            .arg(fs_mountpoint)
            .spawn()
            .context("spawn() failed")?;
        let status = child.wait().context("wait() failed")?;
        if status.success() {
            Ok(())
        } else {
            Err(anyhow::anyhow!("process exited with {status}"))
        }
    };

    // Wrap any prior error with the overall context that we couldn't run the command.
    run().with_context(|| format!("could not run `/usr/bin/sudo {DISK_QUOTA_BIN}`"))
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/compute_tools/src/hadron_metrics.rs | compute_tools/src/hadron_metrics.rs | use metrics::{
IntCounter, IntGaugeVec, core::Collector, proto::MetricFamily, register_int_counter,
register_int_gauge_vec,
};
use once_cell::sync::Lazy;
// Counter keeping track of the number of PageStream request errors reported by Postgres.
// An error is registered every time Postgres calls compute_ctl's /refresh_configuration API.
// Postgres will invoke this API if it detected trouble with PageStream requests (get_page@lsn,
// get_base_backup, etc.) it sends to any pageserver. An increase in this counter value typically
// indicates Postgres downtime, as PageStream requests are critical for Postgres to function.
pub static POSTGRES_PAGESTREAM_REQUEST_ERRORS: Lazy<IntCounter> = Lazy::new(|| {
    register_int_counter!(
        "pg_cctl_pagestream_request_errors_total",
        "Number of PageStream request errors reported by the postgres process"
    )
    .expect("failed to define a metric")
});

// Counter keeping track of the number of compute configuration errors due to Postgres statement
// timeouts. An error is registered every time `ComputeNode::reconfigure()` fails due to Postgres
// error code 57014 (query cancelled). This statement timeout typically occurs when postgres is
// stuck in a problematic retry loop while the PS is rejecting its connection requests (usually
// due to PG pointing at the wrong PS). We should investigate the root cause when this counter
// value increases by checking PG and PS logs.
pub static COMPUTE_CONFIGURE_STATEMENT_TIMEOUT_ERRORS: Lazy<IntCounter> = Lazy::new(|| {
    register_int_counter!(
        "pg_cctl_configure_statement_timeout_errors_total",
        "Number of compute configuration errors due to Postgres statement timeouts."
    )
    .expect("failed to define a metric")
});

// Gauge reporting the compute node's attachment status (1 if attached), labeled
// by compute/instance/tenant/timeline identifiers.
pub static COMPUTE_ATTACHED: Lazy<IntGaugeVec> = Lazy::new(|| {
    register_int_gauge_vec!(
        "pg_cctl_attached",
        "Compute node attached status (1 if attached)",
        &[
            "pg_compute_id",
            "pg_instance_id",
            "tenant_id",
            "timeline_id"
        ]
    )
    .expect("failed to define a metric")
});
/// Gather all hadron-specific metric families for the exporter.
pub fn collect() -> Vec<MetricFamily> {
    let mut families = POSTGRES_PAGESTREAM_REQUEST_ERRORS.collect();
    families.extend(COMPUTE_CONFIGURE_STATEMENT_TIMEOUT_ERRORS.collect());
    families.extend(COMPUTE_ATTACHED.collect());
    families
}
/// Force-initialize the lazy metric statics so they are registered up front
/// rather than on first use.
pub fn initialize_metrics() {
    Lazy::force(&POSTGRES_PAGESTREAM_REQUEST_ERRORS);
    Lazy::force(&COMPUTE_CONFIGURE_STATEMENT_TIMEOUT_ERRORS);
    Lazy::force(&COMPUTE_ATTACHED);
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/compute_tools/src/metrics.rs | compute_tools/src/metrics.rs | use metrics::core::{AtomicF64, AtomicU64, Collector, GenericCounter, GenericGauge};
use metrics::proto::MetricFamily;
use metrics::{
IntCounter, IntCounterVec, IntGaugeVec, UIntGaugeVec, register_gauge, register_int_counter,
register_int_counter_vec, register_int_gauge_vec, register_uint_gauge_vec,
};
use once_cell::sync::Lazy;
// Per (extension, version, superuser-owned) gauge: the number of databases
// where that exact combination is installed.
pub(crate) static INSTALLED_EXTENSIONS: Lazy<UIntGaugeVec> = Lazy::new(|| {
    register_uint_gauge_vec!(
        "compute_installed_extensions",
        "Number of databases where the version of extension is installed",
        &["extension_name", "version", "owned_by_superuser"]
    )
    .expect("failed to define a metric")
});
// Normally, any HTTP API request is described by METHOD (e.g. GET, POST, etc.) + PATH,
// but for all our APIs we defined a 'slug'/method/operationId in the OpenAPI spec.
// And it's fair to call it a 'RPC' (Remote Procedure Call).
pub enum CPlaneRequestRPC {
    GetConfig,
}

impl CPlaneRequestRPC {
    /// Stable name of the RPC, used as a metric label value.
    pub fn as_str(&self) -> &str {
        match self {
            Self::GetConfig => "GetConfig",
        }
    }
}
/// Label value used when the HTTP status of a response is not known.
pub const UNKNOWN_HTTP_STATUS: &str = "unknown";

// Control plane requests made by compute_ctl, labeled by RPC name and HTTP status.
pub(crate) static CPLANE_REQUESTS_TOTAL: Lazy<IntCounterVec> = Lazy::new(|| {
    register_int_counter_vec!(
        "compute_ctl_cplane_requests_total",
        "Total number of control plane requests made by compute_ctl by status",
        &["rpc", "http_status"]
    )
    .expect("failed to define a metric")
});

/// Total number of failed database migrations. Per-compute, this is actually a boolean metric,
/// either empty or with a single value (1, migration_id) because we stop at the first failure.
/// Yet, the sum over the fleet will provide the total number of failures.
pub(crate) static DB_MIGRATION_FAILED: Lazy<IntCounterVec> = Lazy::new(|| {
    register_int_counter_vec!(
        "compute_ctl_db_migration_failed_total",
        "Total number of failed database migrations",
        &["migration_id"]
    )
    .expect("failed to define a metric")
});

// Extension-download requests to the S3 proxy, labeled by status and file name.
pub(crate) static REMOTE_EXT_REQUESTS_TOTAL: Lazy<IntCounterVec> = Lazy::new(|| {
    register_int_counter_vec!(
        "compute_ctl_remote_ext_requests_total",
        "Total number of requests made by compute_ctl to download extensions from S3 proxy by status",
        &["http_status", "filename"]
    )
    .expect("failed to define a metric")
});

// Size of audit log directory in bytes
pub(crate) static AUDIT_LOG_DIR_SIZE: Lazy<GenericGauge<AtomicF64>> = Lazy::new(|| {
    register_gauge!(
        "compute_audit_log_dir_size",
        "Size of audit log directory in bytes",
    )
    .expect("failed to define a metric")
});

// Report that `compute_ctl` is up and what's the current compute status.
pub(crate) static COMPUTE_CTL_UP: Lazy<IntGaugeVec> = Lazy::new(|| {
    register_int_gauge_vec!(
        "compute_ctl_up",
        "Whether compute_ctl is running",
        &["build_tag", "status"]
    )
    .expect("failed to define a metric")
});

// Current (non-cumulative) Postgres downtime; resets after a successful check.
pub(crate) static PG_CURR_DOWNTIME_MS: Lazy<GenericGauge<AtomicF64>> = Lazy::new(|| {
    register_gauge!(
        "compute_pg_current_downtime_ms",
        "Non-cumulative duration of Postgres downtime in ms; resets after successful check",
    )
    .expect("failed to define a metric")
});

// Cumulative Postgres downtime counter.
pub(crate) static PG_TOTAL_DOWNTIME_MS: Lazy<GenericCounter<AtomicU64>> = Lazy::new(|| {
    register_int_counter!(
        "compute_pg_downtime_ms_total",
        "Cumulative duration of Postgres downtime in ms",
    )
    .expect("failed to define a metric")
});

// LFC prewarm attempts (requested via API or autoprewarm).
pub(crate) static LFC_PREWARMS: Lazy<IntCounter> = Lazy::new(|| {
    register_int_counter!(
        "compute_ctl_lfc_prewarms_total",
        "Total number of LFC prewarms requested by compute_ctl or autoprewarm option",
    )
    .expect("failed to define a metric")
});

// LFC prewarm failures.
pub(crate) static LFC_PREWARM_ERRORS: Lazy<IntCounter> = Lazy::new(|| {
    register_int_counter!(
        "compute_ctl_lfc_prewarm_errors_total",
        "Total number of LFC prewarm errors",
    )
    .expect("failed to define a metric")
});

// LFC offload attempts (requested via API or periodic offload).
pub(crate) static LFC_OFFLOADS: Lazy<IntCounter> = Lazy::new(|| {
    register_int_counter!(
        "compute_ctl_lfc_offloads_total",
        "Total number of LFC offloads requested by compute_ctl or lfc_offload_period_seconds option",
    )
    .expect("failed to define a metric")
});

// LFC offload failures.
pub(crate) static LFC_OFFLOAD_ERRORS: Lazy<IntCounter> = Lazy::new(|| {
    register_int_counter!(
        "compute_ctl_lfc_offload_errors_total",
        "Total number of LFC offload errors",
    )
    .expect("failed to define a metric")
});
/// Gather all compute_ctl metric families for the exporter.
pub fn collect() -> Vec<MetricFamily> {
    let mut families = COMPUTE_CTL_UP.collect();
    for extra in [
        INSTALLED_EXTENSIONS.collect(),
        CPLANE_REQUESTS_TOTAL.collect(),
        REMOTE_EXT_REQUESTS_TOTAL.collect(),
        DB_MIGRATION_FAILED.collect(),
        AUDIT_LOG_DIR_SIZE.collect(),
        PG_CURR_DOWNTIME_MS.collect(),
        PG_TOTAL_DOWNTIME_MS.collect(),
        LFC_PREWARMS.collect(),
        LFC_PREWARM_ERRORS.collect(),
        LFC_OFFLOADS.collect(),
        LFC_OFFLOAD_ERRORS.collect(),
    ] {
        families.extend(extra);
    }
    families
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/compute_tools/src/compute_prewarm.rs | compute_tools/src/compute_prewarm.rs | use crate::compute::ComputeNode;
use anyhow::{Context, Result, bail};
use async_compression::tokio::bufread::{ZstdDecoder, ZstdEncoder};
use compute_api::responses::LfcOffloadState;
use compute_api::responses::LfcPrewarmState;
use http::StatusCode;
use reqwest::Client;
use std::mem::replace;
use std::sync::Arc;
use std::time::Instant;
use tokio::{io::AsyncReadExt, select, spawn};
use tokio_util::sync::CancellationToken;
use tracing::{error, info};
/// A pair of url and a token to query endpoint storage for LFC prewarm-related tasks
struct EndpointStoragePair {
    // Fully-formed URL of this endpoint's LFC state object in endpoint storage.
    url: String,
    // Bearer token used to authenticate against endpoint storage.
    token: String,
}
const KEY: &str = "lfc_state";
impl EndpointStoragePair {
    /// endpoint_id is set to None while prewarming from other endpoint, see compute_promote.rs
    /// If not None, takes precedence over pspec.spec.endpoint_id
    fn from_spec_and_endpoint(
        pspec: &crate::compute::ParsedSpec,
        endpoint_id: Option<String>,
    ) -> Result<Self> {
        // The explicitly supplied endpoint id wins over the one from the spec.
        let Some(endpoint_id) = endpoint_id.as_ref().or(pspec.spec.endpoint_id.as_ref()) else {
            bail!("pspec.endpoint_id missing, other endpoint_id not provided")
        };
        let base_uri = match pspec.endpoint_storage_addr {
            Some(ref addr) => addr,
            None => bail!("pspec.endpoint_storage_addr missing"),
        };
        let tenant_id = pspec.tenant_id;
        let timeline_id = pspec.timeline_id;
        let url = format!("http://{base_uri}/{tenant_id}/{timeline_id}/{endpoint_id}/{KEY}");
        let token = match pspec.endpoint_storage_token {
            Some(ref token) => token.clone(),
            None => bail!("pspec.endpoint_storage_token missing"),
        };
        Ok(EndpointStoragePair { url, token })
    }
}
impl ComputeNode {
    /// Snapshot of the current LFC prewarm state.
    pub async fn lfc_prewarm_state(&self) -> LfcPrewarmState {
        self.state.lock().unwrap().lfc_prewarm_state.clone()
    }

    /// Snapshot of the current LFC offload state.
    pub fn lfc_offload_state(&self) -> LfcOffloadState {
        self.state.lock().unwrap().lfc_offload_state.clone()
    }

    /// If there is a prewarm request ongoing, return `false`, `true` otherwise.
    /// Has a failpoint "compute-prewarm"
    pub fn prewarm_lfc(self: &Arc<Self>, from_endpoint: Option<String>) -> bool {
        let token: CancellationToken;
        {
            let state = &mut self.state.lock().unwrap();
            token = state.lfc_prewarm_token.clone();
            // Atomically mark the state as Prewarming; if it already was
            // Prewarming, another prewarm is in flight and we bail out.
            if let LfcPrewarmState::Prewarming =
                replace(&mut state.lfc_prewarm_state, LfcPrewarmState::Prewarming)
            {
                return false;
            }
        }
        crate::metrics::LFC_PREWARMS.inc();

        let this = self.clone();
        // Run the actual prewarm in the background; the caller only learns
        // whether a new prewarm was started.
        spawn(async move {
            let prewarm_state = match this.prewarm_impl(from_endpoint, token).await {
                Ok(state) => state,
                Err(err) => {
                    crate::metrics::LFC_PREWARM_ERRORS.inc();
                    error!(%err, "could not prewarm LFC");
                    let error = format!("{err:#}");
                    LfcPrewarmState::Failed { error }
                }
            };

            let state = &mut this.state.lock().unwrap();
            // A cancelled token is single-use: install a fresh one for the
            // next prewarm request.
            if let LfcPrewarmState::Cancelled = prewarm_state {
                state.lfc_prewarm_token = CancellationToken::new();
            }
            state.lfc_prewarm_state = prewarm_state;
        });
        true
    }

    /// from_endpoint: None for endpoint managed by this compute_ctl
    fn endpoint_storage_pair(&self, from_endpoint: Option<String>) -> Result<EndpointStoragePair> {
        let state = self.state.lock().unwrap();
        EndpointStoragePair::from_spec_and_endpoint(state.pspec.as_ref().unwrap(), from_endpoint)
    }

    /// Request LFC state from endpoint storage and load corresponding pages into Postgres.
    async fn prewarm_impl(
        &self,
        from_endpoint: Option<String>,
        token: CancellationToken,
    ) -> Result<LfcPrewarmState> {
        let EndpointStoragePair {
            url,
            token: storage_token,
        } = self.endpoint_storage_pair(from_endpoint)?;

        #[cfg(feature = "testing")]
        fail::fail_point!("compute-prewarm", |_| bail!("compute-prewarm failpoint"));

        info!(%url, "requesting LFC state from endpoint storage");
        let mut now = Instant::now();
        let request = Client::new().get(&url).bearer_auth(storage_token);
        // Each long-running step below races against cancellation.
        let response = select! {
            _ = token.cancelled() => return Ok(LfcPrewarmState::Cancelled),
            response = request.send() => response
        }
        .context("querying endpoint storage")?;
        match response.status() {
            StatusCode::OK => (),
            // No stored LFC state for this endpoint: nothing to prewarm.
            StatusCode::NOT_FOUND => return Ok(LfcPrewarmState::Skipped),
            status => bail!("{status} querying endpoint storage"),
        }
        let state_download_time_ms = now.elapsed().as_millis() as u32;
        now = Instant::now();

        let mut uncompressed = Vec::new();
        let lfc_state = select! {
            _ = token.cancelled() => return Ok(LfcPrewarmState::Cancelled),
            lfc_state = response.bytes() => lfc_state
        }
        .context("getting request body from endpoint storage")?;
        let mut decoder = ZstdDecoder::new(lfc_state.iter().as_slice());
        select! {
            _ = token.cancelled() => return Ok(LfcPrewarmState::Cancelled),
            read = decoder.read_to_end(&mut uncompressed) => read
        }
        .context("decoding LFC state")?;
        let uncompress_time_ms = now.elapsed().as_millis() as u32;
        now = Instant::now();

        let uncompressed_len = uncompressed.len();
        info!(%url, "downloaded LFC state, uncompressed size {uncompressed_len}");

        // Client connection and prewarm info querying are fast and therefore don't need
        // cancellation
        let client = ComputeNode::get_maintenance_client(&self.tokio_conn_conf)
            .await
            .context("connecting to postgres")?;
        let pg_token = client.cancel_token();

        let params: Vec<&(dyn postgres_types::ToSql + Sync)> = vec![&uncompressed];
        // If cancelled mid-query, also cancel the server-side query before returning.
        select! {
            res = client.query_one("select neon.prewarm_local_cache($1)", &params) => res,
            _ = token.cancelled() => {
                pg_token.cancel_query(postgres::NoTls).await
                    .context("cancelling neon.prewarm_local_cache()")?;
                return Ok(LfcPrewarmState::Cancelled)
            }
        }
        .context("loading LFC state into postgres")
        .map(|_| ())?;
        let prewarm_time_ms = now.elapsed().as_millis() as u32;

        let row = client
            .query_one("select * from neon.get_prewarm_info()", &[])
            .await
            .context("querying prewarm info")?;
        // Missing/NULL columns fall back to 0 rather than failing the prewarm.
        let total = row.try_get(0).unwrap_or_default();
        let prewarmed = row.try_get(1).unwrap_or_default();
        let skipped = row.try_get(2).unwrap_or_default();

        Ok(LfcPrewarmState::Completed {
            total,
            prewarmed,
            skipped,
            state_download_time_ms,
            uncompress_time_ms,
            prewarm_time_ms,
        })
    }

    /// If offload request is ongoing, return false, true otherwise
    pub fn offload_lfc(self: &Arc<Self>) -> bool {
        {
            let state = &mut self.state.lock().unwrap().lfc_offload_state;
            if matches!(
                replace(state, LfcOffloadState::Offloading),
                LfcOffloadState::Offloading
            ) {
                return false;
            }
        }
        let cloned = self.clone();
        spawn(async move { cloned.offload_lfc_with_state_update().await });
        true
    }

    /// Same as [`ComputeNode::offload_lfc`] but runs the offload inline instead
    /// of spawning a background task; a no-op if an offload is already ongoing.
    pub async fn offload_lfc_async(self: &Arc<Self>) {
        {
            let state = &mut self.state.lock().unwrap().lfc_offload_state;
            if matches!(
                replace(state, LfcOffloadState::Offloading),
                LfcOffloadState::Offloading
            ) {
                return;
            }
        }
        self.offload_lfc_with_state_update().await
    }

    /// Run the offload and record its outcome (success or failure) in the
    /// shared state.
    async fn offload_lfc_with_state_update(&self) {
        crate::metrics::LFC_OFFLOADS.inc();
        let state = match self.offload_lfc_impl().await {
            Ok(state) => state,
            Err(err) => {
                crate::metrics::LFC_OFFLOAD_ERRORS.inc();
                error!(%err, "could not offload LFC");
                let error = format!("{err:#}");
                LfcOffloadState::Failed { error }
            }
        };
        self.state.lock().unwrap().lfc_offload_state = state;
    }

    /// Read the LFC state from Postgres, zstd-compress it and upload it to
    /// endpoint storage.
    async fn offload_lfc_impl(&self) -> Result<LfcOffloadState> {
        let EndpointStoragePair { url, token } = self.endpoint_storage_pair(None)?;
        info!(%url, "requesting LFC state from Postgres");
        let mut now = Instant::now();

        let row = ComputeNode::get_maintenance_client(&self.tokio_conn_conf)
            .await
            .context("connecting to postgres")?
            .query_one("select neon.get_local_cache_state()", &[])
            .await
            .context("querying LFC state")?;
        let state = row
            .try_get::<usize, Option<&[u8]>>(0)
            .context("deserializing LFC state")?;
        // A NULL state means there is nothing worth uploading.
        let Some(state) = state else {
            info!(%url, "empty LFC state, not exporting");
            return Ok(LfcOffloadState::Skipped);
        };
        let state_query_time_ms = now.elapsed().as_millis() as u32;
        now = Instant::now();

        let mut compressed = Vec::new();
        ZstdEncoder::new(state)
            .read_to_end(&mut compressed)
            .await
            .context("compressing LFC state")?;
        let compress_time_ms = now.elapsed().as_millis() as u32;
        now = Instant::now();

        let compressed_len = compressed.len();
        info!(%url, "downloaded LFC state, compressed size {compressed_len}");
        let request = Client::new().put(url).bearer_auth(token).body(compressed);
        let response = request
            .send()
            .await
            .context("writing to endpoint storage")?;
        let state_upload_time_ms = now.elapsed().as_millis() as u32;
        let status = response.status();
        if status != StatusCode::OK {
            bail!("request to endpoint storage failed: {status}");
        }

        Ok(LfcOffloadState::Completed {
            compress_time_ms,
            state_query_time_ms,
            state_upload_time_ms,
        })
    }

    /// Cancel an in-flight prewarm, if any; the background task resets the
    /// token when it observes the cancellation.
    pub fn cancel_prewarm(self: &Arc<Self>) {
        self.state.lock().unwrap().lfc_prewarm_token.cancel();
    }
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/compute_tools/src/pg_helpers.rs | compute_tools/src/pg_helpers.rs | use std::collections::HashMap;
use std::fmt::Write;
use std::fs;
use std::fs::File;
use std::io::{BufRead, BufReader};
use std::os::unix::fs::PermissionsExt;
use std::path::Path;
use std::process::Child;
use std::str::FromStr;
use std::time::{Duration, Instant};
use anyhow::{Result, bail};
use compute_api::responses::TlsConfig;
use compute_api::spec::{
Database, DatabricksSettings, GenericOption, GenericOptions, PgIdent, Role,
};
use futures::StreamExt;
use indexmap::IndexMap;
use ini::Ini;
use notify::{RecursiveMode, Watcher};
use postgres::config::Config;
use tokio::io::AsyncBufReadExt;
use tokio::task::JoinHandle;
use tokio::time::timeout;
use tokio_postgres;
use tokio_postgres::NoTls;
use tracing::{debug, error, info, instrument};
const POSTGRES_WAIT_TIMEOUT: Duration = Duration::from_millis(60 * 1000); // milliseconds
/// Escape a string for including it in a SQL literal.
///
/// Wrapping the result with `E'{}'` or `'{}'` is not required,
/// as it returns a ready-to-use SQL string literal, e.g. `'db'''` or `E'db\\'`.
/// See <https://github.com/postgres/postgres/blob/da98d005cdbcd45af563d0c4ac86d0e9772cd15f/src/backend/utils/adt/quote.c#L47>
/// for the original implementation.
pub fn escape_literal(s: &str) -> String {
    let escaped = s.replace('\'', "''").replace('\\', "\\\\");
    // Backslash escapes only have meaning inside an E'...' literal, so switch
    // to that form whenever the escaped string contains one.
    match escaped.contains('\\') {
        true => format!("E'{escaped}'"),
        false => format!("'{escaped}'"),
    }
}
/// Escape a string so that it can be used in postgresql.conf. Wrapping the result
/// with `'{}'` is not required, as it returns a ready-to-use config string.
pub fn escape_conf_value(s: &str) -> String {
    let mut out = String::with_capacity(s.len() + 2);
    out.push('\'');
    for ch in s.chars() {
        match ch {
            // Single quotes and backslashes are doubled.
            '\'' => out.push_str("''"),
            '\\' => out.push_str("\\\\"),
            other => out.push(other),
        }
    }
    out.push('\'');
    out
}
pub trait GenericOptionExt {
    /// Render the option as a SQL statement parameter.
    fn to_pg_option(&self) -> String;
    /// Render the option as a `postgresql.conf` line.
    fn to_pg_setting(&self) -> String;
}
impl GenericOptionExt for GenericOption {
    /// Represent `GenericOption` as SQL statement parameter.
    /// String-typed values are SQL-escaped; valueless options render as the bare name.
    fn to_pg_option(&self) -> String {
        match &self.value {
            Some(val) if self.vartype == "string" => {
                format!("{} {}", self.name, escape_literal(val))
            }
            Some(val) => format!("{} {}", self.name, val),
            None => self.name.to_owned(),
        }
    }

    /// Represent `GenericOption` as configuration option.
    /// String-typed values are conf-escaped; valueless options render as the bare name.
    fn to_pg_setting(&self) -> String {
        match &self.value {
            Some(val) if self.vartype == "string" => {
                format!("{} = {}", self.name, escape_conf_value(val))
            }
            Some(val) => format!("{} = {}", self.name, val),
            None => self.name.to_owned(),
        }
    }
}
pub trait PgOptionsSerialize {
    /// Serialize the options as space-separated SQL statement arguments.
    fn as_pg_options(&self) -> String;
    /// Serialize the options as `postgresql.conf`-style lines.
    fn as_pg_settings(&self) -> String;
}
impl PgOptionsSerialize for GenericOptions {
    /// Serialize an optional collection of `GenericOption`'s to
    /// Postgres SQL statement arguments.
    fn as_pg_options(&self) -> String {
        match self {
            Some(ops) => {
                let rendered: Vec<String> = ops.iter().map(|op| op.to_pg_option()).collect();
                rendered.join(" ")
            }
            None => String::new(),
        }
    }

    /// Serialize an optional collection of `GenericOption`'s to
    /// `postgresql.conf` compatible format.
    fn as_pg_settings(&self) -> String {
        match self {
            Some(ops) => {
                let mut settings = ops
                    .iter()
                    .map(|op| op.to_pg_setting())
                    .collect::<Vec<String>>()
                    .join("\n");
                settings.push('\n'); // newline after last setting
                settings
            }
            None => String::new(),
        }
    }
}
pub trait GenericOptionsSearch {
    /// Look up an option by name, returning a clone of its value.
    fn find(&self, name: &str) -> Option<String>;
    /// Look up an option by name, returning a reference to the option itself.
    fn find_ref(&self, name: &str) -> Option<&GenericOption>;
}
impl GenericOptionsSearch for GenericOptions {
    /// Lookup option by name
    fn find(&self, name: &str) -> Option<String> {
        let ops = self.as_ref()?;
        ops.iter().find(|opt| opt.name == name)?.value.clone()
    }

    /// Lookup option by name, returning ref
    fn find_ref(&self, name: &str) -> Option<&GenericOption> {
        self.as_ref()
            .and_then(|ops| ops.iter().find(|opt| opt.name == name))
    }
}
/// Rendering of a role into Postgres statement options.
pub trait RoleExt {
    /// Render the role's options (plus LOGIN and PASSWORD) as SQL arguments.
    fn to_pg_options(&self) -> String;
}
impl RoleExt for Role {
    /// Serialize a list of role parameters into a Postgres-acceptable
    /// string of arguments.
    fn to_pg_options(&self) -> String {
        // XXX: consider putting LOGIN as a default option somewhere higher, e.g. in control-plane.
        let mut params: String = self.options.as_pg_options();
        params.push_str(" LOGIN");

        match &self.encrypted_password {
            Some(pass) => {
                // Some time ago we supported only md5 and treated all encrypted_password as md5.
                // Now we also support SCRAM-SHA-256 and to preserve compatibility
                // we treat all encrypted_password as md5 unless they start with SCRAM-SHA-256.
                let hash_prefix = if pass.starts_with("SCRAM-SHA-256") {
                    ""
                } else {
                    "md5"
                };
                write!(params, " PASSWORD '{hash_prefix}{pass}'")
                    .expect("String is documented to not to error during write operations");
            }
            None => params.push_str(" PASSWORD NULL"),
        }

        params
    }
}
/// Rendering of a database into Postgres statement options.
pub trait DatabaseExt {
    /// Render the database's options (plus OWNER) as SQL arguments.
    fn to_pg_options(&self) -> String;
}
impl DatabaseExt for Database {
    /// Serialize a list of database parameters into a Postgres-acceptable
    /// string of arguments.
    /// NB: `TEMPLATE` is actually also an identifier, but so far we only need
    /// to use `template0` and `template1`, so it is not a problem. Yet in the future
    /// it may require a proper quoting too.
    fn to_pg_options(&self) -> String {
        let mut rendered: String = self.options.as_pg_options();
        // Owner is an identifier and must be quoted.
        let owner = self.owner.pg_quote();
        write!(rendered, " OWNER {owner}")
            .expect("String is documented to not to error during write operations");
        rendered
    }
}
/// Rendering of Databricks-specific settings into `postgresql.conf` text.
pub trait DatabricksSettingsExt {
    /// Render the settings as newline-separated, newline-terminated GUC lines.
    fn as_pg_settings(&self) -> String;
}
impl DatabricksSettingsExt for DatabricksSettings {
    /// Render these settings as `postgresql.conf` lines, newline-terminated.
    fn as_pg_settings(&self) -> String {
        // Postgres GUCs rendered from DatabricksSettings.
        //
        // Note: every entry here is unconditional, so the previous
        // `Vec<Option<String>>` + `.flatten()` wrapping was dead code and has
        // been removed; the output is byte-for-byte identical.
        [
            // ssl_ca_file
            format!(
                "ssl_ca_file = '{}'",
                self.pg_compute_tls_settings.ca_file
            ),
            // databricks.workspace_url
            format!(
                "databricks.workspace_url = '{}'",
                &self.databricks_workspace_host
            ),
            // todo(vikas.jain): these are not required anymore as they are moved to static
            // conf but keeping these to avoid image mismatch between hcc and pg.
            // Once hcc and pg are in sync, we can remove these.
            //
            // databricks.enable_databricks_identity_login
            "databricks.enable_databricks_identity_login = true".to_string(),
            // databricks.enable_sql_restrictions
            "databricks.enable_sql_restrictions = true".to_string(),
        ]
        .join("\n")
            + "\n"
    }
}
/// Generic trait used to provide quoting / encoding for strings used in the
/// Postgres SQL queries and DATABASE_URL.
pub trait Escaping {
    /// Double-quote an identifier, escaping embedded `"` characters.
    fn pg_quote(&self) -> String;
    /// Dollar-quote a string for PL/pgSQL use; also returns an outer tag.
    fn pg_quote_dollar(&self) -> (String, String);
}
impl Escaping for PgIdent {
    /// This is intended to mimic Postgres quote_ident(), but for simplicity it
    /// always quotes provided string with `""` and escapes every `"`.
    /// **Not idempotent**, i.e. if string is already escaped it will be escaped again.
    /// N.B. it's not useful for escaping identifiers that are used inside WHERE
    /// clause, use `escape_literal()` instead.
    fn pg_quote(&self) -> String {
        format!("\"{}\"", self.replace('"', "\"\""))
    }

    /// This helper is intended to be used for dollar-escaping strings for usage
    /// inside PL/pgSQL procedures. In addition to dollar-escaping the string,
    /// it also returns a tag that is intended to be used inside the outer
    /// PL/pgSQL procedure. If you do not need an outer tag, just discard it.
    /// Here we somewhat mimic the logic of Postgres' `pg_get_functiondef()`,
    /// <https://github.com/postgres/postgres/blob/8b49392b270b4ac0b9f5c210e2a503546841e832/src/backend/utils/adt/ruleutils.c#L2924>
    fn pg_quote_dollar(&self) -> (String, String) {
        let mut tag: String = "x".to_string();
        let mut outer_tag = "xx".to_string();

        // Find the first suitable tag that is not present in the string.
        // Postgres' max role/DB name length is 63 bytes, so even in the
        // worst case it won't take long. Outer tag is always `tag + "x"`,
        // so if `tag` is not present in the string, `outer_tag` is not
        // present in the string either.
        //
        // `tag.as_str()` instead of the previous `&tag.to_string()`: the
        // latter cloned the tag on every loop iteration for no reason
        // (clippy: unnecessary_to_owned).
        while self.contains(tag.as_str()) {
            tag += "x";
            outer_tag = tag.clone() + "x";
        }

        let escaped = format!("${tag}${self}${tag}$");

        (escaped, outer_tag)
    }
}
/// Build a list of existing Postgres roles
pub async fn get_existing_roles_async(client: &tokio_postgres::Client) -> Result<Vec<Role>> {
    // Query pg_authid directly; rows that fail to decode are silently skipped
    // by the `filter_map` below, mirroring best-effort enumeration.
    let rows = client
        .query_raw::<str, &String, &[String; 0]>(
            "SELECT rolname, rolpassword FROM pg_catalog.pg_authid",
            &[],
        )
        .await?;

    let roles: Vec<Role> = rows
        .filter_map(|row| async { row.ok() })
        .map(|row| Role {
            name: row.get("rolname"),
            encrypted_password: row.get("rolpassword"),
            options: None,
        })
        .collect()
        .await;

    Ok(roles)
}
/// Build a list of existing Postgres databases, keyed by database name.
pub async fn get_existing_dbs_async(
    client: &tokio_postgres::Client,
) -> Result<HashMap<String, Database>> {
    // `pg_database.datconnlimit = -2` means that the database is in the
    // invalid state. See:
    // https://github.com/postgres/postgres/commit/a4b4cc1d60f7e8ccfcc8ff8cb80c28ee411ad9a9
    let rowstream = client
        // We use a subquery instead of a fancy `datdba::regrole::text AS owner`,
        // because the latter automatically wraps the result in double quotes,
        // if the role name contains special characters.
        .query_raw::<str, &String, &[String; 0]>(
            "SELECT
datname AS name,
(SELECT rolname FROM pg_catalog.pg_roles WHERE oid OPERATOR(pg_catalog.=) datdba) AS owner,
NOT datallowconn AS restrict_conn,
datconnlimit OPERATOR(pg_catalog.=) (OPERATOR(pg_catalog.-) 2) AS invalid
FROM
pg_catalog.pg_database;",
            &[],
        )
        .await?;

    let dbs_map = rowstream
        // Rows that fail to decode are dropped silently (best-effort listing).
        .filter_map(|r| async { r.ok() })
        .map(|row| Database {
            name: row.get("name"),
            owner: row.get("owner"),
            restrict_conn: row.get("restrict_conn"),
            invalid: row.get("invalid"),
            options: None,
        })
        // Key by name; the Database is cloned because the name is moved into the key.
        .map(|db| (db.name.clone(), db.clone()))
        .collect::<HashMap<_, _>>()
        .await;

    Ok(dbs_map)
}
/// Wait for Postgres to become ready to accept connections. It's ready to
/// accept connections when the state-field in `pgdata/postmaster.pid` says
/// 'ready'.
///
/// Returns an error if Postgres exits before becoming ready, or if it does
/// not become ready within `POSTGRES_WAIT_TIMEOUT`.
#[instrument(skip_all, fields(pgdata = %pgdata.display()))]
pub fn wait_for_postgres(pg: &mut Child, pgdata: &Path) -> Result<()> {
    let pid_path = pgdata.join("postmaster.pid");

    // PostgreSQL writes line "ready" to the postmaster.pid file, when it has
    // completed initialization and is ready to accept connections. We want to
    // react quickly and perform the rest of our initialization as soon as
    // PostgreSQL starts accepting connections. Use 'notify' to be notified
    // whenever the PID file is changed, and whenever it changes, read it to
    // check if it's now "ready".
    //
    // You cannot actually watch a file before it exists, so we first watch the
    // data directory, and once the postmaster.pid file appears, we switch to
    // watch the file instead. We also wake up every 100 ms to poll, just in
    // case we miss some events for some reason. Not strictly necessary, but
    // better safe than sorry.
    let (tx, rx) = std::sync::mpsc::channel();
    let watcher_res = notify::recommended_watcher(move |res| {
        let _ = tx.send(res);
    });
    let (mut watcher, rx): (Box<dyn Watcher>, _) = match watcher_res {
        Ok(watcher) => (Box::new(watcher), rx),
        Err(e) => {
            match e.kind {
                notify::ErrorKind::Io(os) if os.raw_os_error() == Some(38) => {
                    // docker on m1 macs does not support recommended_watcher
                    // but return "Function not implemented (os error 38)"
                    // see https://github.com/notify-rs/notify/issues/423
                    let (tx, rx) = std::sync::mpsc::channel();
                    // let's poll it faster than what we check the results for (100ms)
                    let config =
                        notify::Config::default().with_poll_interval(Duration::from_millis(50));
                    let watcher = notify::PollWatcher::new(
                        move |res| {
                            let _ = tx.send(res);
                        },
                        config,
                    )?;
                    (Box::new(watcher), rx)
                }
                _ => return Err(e.into()),
            }
        }
    };
    watcher.watch(pgdata, RecursiveMode::NonRecursive)?;

    let started_at = Instant::now();
    let mut postmaster_pid_seen = false;
    loop {
        // Bail out early if the child process died instead of becoming ready.
        if let Ok(Some(status)) = pg.try_wait() {
            // Postgres exited, that is not what we expected, bail out earlier.
            let code = status.code().unwrap_or(-1);
            bail!("Postgres exited unexpectedly with code {}", code);
        }

        // Block for up to 100ms waiting for a filesystem event (or just poll).
        let res = rx.recv_timeout(Duration::from_millis(100));
        debug!("woken up by notify: {res:?}");
        // If there are multiple events in the channel already, we only need to
        // check once. Swallow the extra events before we go ahead to check the
        // pid file.
        while let Ok(res) = rx.try_recv() {
            debug!("swallowing extra event: {res:?}");
        }

        // Check that we can open pid file first.
        if let Ok(file) = File::open(&pid_path) {
            // First sighting of postmaster.pid: re-target the watcher from the
            // directory to the file itself.
            if !postmaster_pid_seen {
                debug!("postmaster.pid appeared");
                watcher
                    .unwatch(pgdata)
                    .expect("Failed to remove pgdata dir watch");
                watcher
                    .watch(&pid_path, RecursiveMode::NonRecursive)
                    .expect("Failed to add postmaster.pid file watch");
                postmaster_pid_seen = true;
            }

            // The status line is the last line of postmaster.pid.
            let file = BufReader::new(file);
            let last_line = file.lines().last();

            // Pid file could be there and we could read it, but it could be empty, for example.
            if let Some(Ok(line)) = last_line {
                let status = line.trim();
                debug!("last line of postmaster.pid: {status:?}");

                // Now Postgres is ready to accept connections
                if status == "ready" {
                    break;
                }
            }
        }

        // Give up after POSTGRES_WAIT_TIMEOUT.
        let duration = started_at.elapsed();
        if duration >= POSTGRES_WAIT_TIMEOUT {
            bail!("timed out while waiting for Postgres to start");
        }
    }
    tracing::info!("PostgreSQL is now running, continuing to configure it");

    Ok(())
}
/// Remove `pgdata` directory and create it again with right permissions.
pub fn create_pgdata(pgdata: &str) -> Result<()> {
    // Best-effort removal: a missing directory is expected on first start,
    // and any other failure will surface from create_dir() below anyway.
    let _ = fs::remove_dir_all(pgdata);
    fs::create_dir(pgdata)?;
    // Restrict the fresh directory to owner-only access (0700).
    fs::set_permissions(pgdata, fs::Permissions::from_mode(0o700))?;

    Ok(())
}
/// Update pgbouncer.ini with provided options
fn update_pgbouncer_ini(
    pgbouncer_config: IndexMap<String, String>,
    pgbouncer_ini_path: &str,
) -> Result<()> {
    // Load, mutate the [pgbouncer] section in place, then write back.
    let mut ini = Ini::load_from_file(pgbouncer_ini_path)?;
    let pgbouncer_section = ini.section_mut(Some("pgbouncer")).unwrap();

    for (key, value) in &pgbouncer_config {
        pgbouncer_section.insert(key, value);
        debug!("Updating pgbouncer.ini with new values {}={}", key, value);
    }

    ini.write_to_file(pgbouncer_ini_path)?;
    Ok(())
}
/// Tune pgbouncer.
/// 1. Apply new config using pgbouncer admin console
/// 2. Add new values to pgbouncer.ini to preserve them after restart
pub async fn tune_pgbouncer(
    mut pgbouncer_config: IndexMap<String, String>,
    tls_config: Option<TlsConfig>,
) -> Result<()> {
    let pgbouncer_connstr = if std::env::var_os("AUTOSCALING").is_some() {
        // for VMs use pgbouncer specific way to connect to
        // pgbouncer admin console without password
        // when pgbouncer is running under the same user.
        "host=/tmp port=6432 dbname=pgbouncer user=pgbouncer".to_string()
    } else {
        // for k8s use normal connection string with password
        // to connect to pgbouncer admin console
        let mut pgbouncer_connstr =
            "host=localhost port=6432 dbname=pgbouncer user=postgres sslmode=disable".to_string();
        if let Ok(pass) = std::env::var("PGBOUNCER_PASSWORD") {
            pgbouncer_connstr.push_str(format!(" password={pass}").as_str());
        }
        pgbouncer_connstr
    };
    info!(
        "Connecting to pgbouncer with connection string: {}",
        pgbouncer_connstr
    );

    // connect to pgbouncer, retrying several times
    // because pgbouncer may not be ready yet
    let mut retries = 3;
    let client = loop {
        match tokio_postgres::connect(&pgbouncer_connstr, NoTls).await {
            Ok((client, connection)) => {
                // Drive the connection on a background task, as required by
                // tokio_postgres.
                tokio::spawn(async move {
                    if let Err(e) = connection.await {
                        eprintln!("connection error: {e}");
                    }
                });
                break client;
            }
            Err(e) => {
                if retries == 0 {
                    return Err(e.into());
                }
                // NOTE(review): the message says "pgbouncer_connstr" but
                // formats the error value — consider rewording.
                error!("Failed to connect to pgbouncer: pgbouncer_connstr {}", e);
                retries -= 1;
                tokio::time::sleep(Duration::from_secs(1)).await;
            }
        }
    };

    if let Some(tls_config) = tls_config {
        // pgbouncer starts in a half-ok state if it cannot find these files.
        // It will default to client_tls_sslmode=deny, which causes proxy to error.
        // There is a small window at startup where these files don't yet exist in the VM.
        // Best to wait until it exists.
        loop {
            if let Ok(true) = tokio::fs::try_exists(&tls_config.key_path).await {
                break;
            }
            tokio::time::sleep(Duration::from_millis(500)).await
        }

        pgbouncer_config.insert("client_tls_cert_file".to_string(), tls_config.cert_path);
        pgbouncer_config.insert("client_tls_key_file".to_string(), tls_config.key_path);
        pgbouncer_config.insert("client_tls_sslmode".to_string(), "allow".to_string());
    }

    // save values to pgbouncer.ini
    // so that they are preserved after pgbouncer restart
    let pgbouncer_ini_path = if std::env::var_os("AUTOSCALING").is_some() {
        // in VMs we use /etc/pgbouncer.ini
        "/etc/pgbouncer.ini".to_string()
    } else {
        // in pods we use /var/db/postgres/pgbouncer/pgbouncer.ini
        // this is a shared volume between pgbouncer and postgres containers
        // FIXME: fix permissions for this file
        "/var/db/postgres/pgbouncer/pgbouncer.ini".to_string()
    };
    update_pgbouncer_ini(pgbouncer_config, &pgbouncer_ini_path)?;

    info!("Applying pgbouncer setting change");
    if let Err(err) = client.simple_query("RELOAD").await {
        // Don't fail on error, just print it into log
        error!("Failed to apply pgbouncer setting change, {err}",);
    };

    Ok(())
}
/// Spawn a task that will read Postgres logs from `stderr`, join multiline logs
/// and send them to the logger. In the future we may also want to add context to
/// these logs.
pub fn handle_postgres_logs(stderr: std::process::ChildStderr) -> JoinHandle<Result<()>> {
    tokio::spawn(async move {
        // Convert the blocking std handle into an async one before reading.
        let async_stderr = tokio::process::ChildStderr::from_std(stderr)?;
        handle_postgres_logs_async(async_stderr).await
    })
}
/// Read Postgres logs from `stderr` until EOF. Buffer is flushed on one of the following conditions:
/// - next line starts with timestamp
/// - EOF
/// - no new lines were written for the last 100 milliseconds
async fn handle_postgres_logs_async(stderr: tokio::process::ChildStderr) -> Result<()> {
    let mut lines = tokio::io::BufReader::new(stderr).lines();
    let timeout_duration = Duration::from_millis(100);
    // Matches a leading timestamp like "2024-01-01 12:34:56". A line starting
    // with one is the beginning of a new log record; continuation lines don't.
    let ts_regex =
        regex::Regex::new(r"^\d+-\d{2}-\d{2} \d{2}:\d{2}:\d{2}").expect("regex is valid");

    let mut buf = vec![];
    loop {
        let next_line = timeout(timeout_duration, lines.next_line()).await;

        // we should flush lines from the buffer if we cannot continue reading multiline message
        let should_flush_buf = match next_line {
            // Flushing if new line starts with timestamp
            Ok(Ok(Some(ref line))) => ts_regex.is_match(line),
            // Flushing on EOF, timeout or error
            _ => true,
        };

        if !buf.is_empty() && should_flush_buf {
            // join multiline message into a single line, separated by unicode Zero Width Space.
            // The "PG:" prefix is used to distinguish postgres logs from other logs.
            let combined = format!("PG:{}\n", buf.join("\u{200B}"));
            buf.clear();

            // sync write to stderr to avoid interleaving with other logs
            use std::io::Write;
            let res = std::io::stderr().lock().write_all(combined.as_bytes());
            if let Err(e) = res {
                tracing::error!("error while writing to stderr: {}", e);
            }
        }

        // if not timeout, append line to the buffer
        if next_line.is_ok() {
            match next_line?? {
                Some(line) => buf.push(line),
                // EOF
                None => break,
            };
        }
    }

    Ok(())
}
/// `Postgres::config::Config` handles database names with whitespaces
/// and special characters properly.
pub fn postgres_conf_for_db(connstr: &url::Url, dbname: &str) -> Result<Config> {
    // Parse the base connection string, then override only the database name.
    let mut config = Config::from_str(connstr.as_str())?;
    config.dbname(dbname);
    Ok(config)
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/compute_tools/src/lsn_lease.rs | compute_tools/src/lsn_lease.rs | use std::str::FromStr;
use std::sync::Arc;
use std::thread;
use std::time::{Duration, SystemTime};
use anyhow::{Result, bail};
use compute_api::spec::{ComputeMode, PageserverConnectionInfo, PageserverProtocol};
use pageserver_page_api as page_api;
use postgres::{NoTls, SimpleQueryMessage};
use tracing::{info, warn};
use utils::id::{TenantId, TimelineId};
use utils::lsn::Lsn;
use utils::shard::TenantShardId;
use crate::compute::ComputeNode;
/// Spawns a background thread to periodically renew LSN leases for static compute.
/// Do nothing if the compute is not in static mode.
pub fn launch_lsn_lease_bg_task_for_static(compute: &Arc<ComputeNode>) {
    // Only static computes (pinned at a fixed LSN) need a lease; for any
    // other mode there is nothing to do.
    let (tenant_id, timeline_id, lsn) = {
        let state = compute.state.lock().unwrap();
        let pspec = state.pspec.as_ref().expect("Spec must be set");
        let ComputeMode::Static(lsn) = pspec.spec.mode else {
            return;
        };
        (pspec.tenant_id, pspec.timeline_id, lsn)
    };

    let compute = compute.clone();
    let span = tracing::info_span!("lsn_lease_bg_task", %tenant_id, %timeline_id, %lsn);
    thread::spawn(move || {
        let _entered = span.entered();
        if let Err(e) = lsn_lease_bg_task(compute, tenant_id, timeline_id, lsn) {
            // TODO: might need stronger error feedback than logging an warning.
            warn!("Exited with error: {e}");
        }
    });
}
/// Renews lsn lease periodically so static compute are not affected by GC.
fn lsn_lease_bg_task(
    compute: Arc<ComputeNode>,
    tenant_id: TenantId,
    timeline_id: TimelineId,
    lsn: Lsn,
) -> Result<()> {
    loop {
        let valid_until = acquire_lsn_lease_with_retry(&compute, tenant_id, timeline_id, lsn)?;
        let valid_duration = valid_until
            .duration_since(SystemTime::now())
            .unwrap_or(Duration::ZERO);

        // Sleep for 60 seconds less than the valid duration, but at least half
        // of the valid duration — the `.max()` below enforces the lower bound,
        // so short-lived leases still get renewed with some margin.
        let sleep_duration = valid_duration
            .saturating_sub(Duration::from_secs(60))
            .max(valid_duration / 2);

        info!(
            "Request succeeded, sleeping for {} seconds",
            sleep_duration.as_secs()
        );
        // Wake up early if the pageserver connection info changes, so the
        // next iteration re-acquires against the current pageserver(s).
        compute.wait_timeout_while_pageserver_connstr_unchanged(sleep_duration);
    }
}
/// Acquires lsn lease in a retry loop. Returns the expiration time if a lease is granted.
/// Returns an error if a lease is explicitly not granted. Otherwise, we keep sending requests.
fn acquire_lsn_lease_with_retry(
    compute: &Arc<ComputeNode>,
    tenant_id: TenantId,
    timeline_id: TimelineId,
    lsn: Lsn,
) -> Result<SystemTime> {
    let mut attempts = 0usize;
    // Exponential backoff: start at 500ms, grow by 1.5x, cap at 60s.
    let mut retry_period_ms: f64 = 500.0;
    const MAX_RETRY_PERIOD_MS: f64 = 60.0 * 1000.0;

    loop {
        // Note: List of pageservers is dynamic, need to re-read configs before each attempt.
        let (conninfo, auth) = {
            let state = compute.state.lock().unwrap();
            let spec = state.pspec.as_ref().expect("spec must be set");
            (
                spec.pageserver_conninfo.clone(),
                spec.storage_auth_token.clone(),
            )
        };

        let result = try_acquire_lsn_lease(conninfo, auth.as_deref(), tenant_id, timeline_id, lsn);
        match result {
            Ok(Some(res)) => {
                return Ok(res);
            }
            // An explicit refusal (LSN already garbage-collected) is permanent.
            Ok(None) => {
                bail!("Permanent error: lease could not be obtained, LSN is behind the GC cutoff");
            }
            Err(e) => {
                warn!("Failed to acquire lsn lease: {e} (attempt {attempts})");

                // Back off, but wake up early if the pageserver config changes.
                compute.wait_timeout_while_pageserver_connstr_unchanged(Duration::from_millis(
                    retry_period_ms as u64,
                ));
                retry_period_ms *= 1.5;
                retry_period_ms = retry_period_ms.min(MAX_RETRY_PERIOD_MS);
            }
        }

        attempts += 1;
    }
}
/// Tries to acquire LSN leases on all Pageserver shards.
fn try_acquire_lsn_lease(
    conninfo: PageserverConnectionInfo,
    auth: Option<&str>,
    tenant_id: TenantId,
    timeline_id: TimelineId,
    lsn: Lsn,
) -> Result<Option<SystemTime>> {
    let mut leases = Vec::new();
    for (shard_index, shard) in conninfo.shards.into_iter() {
        let tenant_shard_id = TenantShardId {
            tenant_id,
            shard_number: shard_index.shard_number,
            shard_count: shard_index.shard_count,
        };
        // XXX: If there is more than one pageserver for a shard, do we need
        // to get a lease on all of them? Currently, that's what we assume,
        // but this is hypothetical as of this writing, as we never pass the
        // info for more than one pageserver per shard.
        for pageserver in shard.pageservers {
            let lease = match conninfo.prefer_protocol {
                PageserverProtocol::Grpc => acquire_lsn_lease_grpc(
                    &pageserver.grpc_url.unwrap(),
                    auth,
                    tenant_shard_id,
                    timeline_id,
                    lsn,
                )?,
                PageserverProtocol::Libpq => acquire_lsn_lease_libpq(
                    &pageserver.libpq_url.unwrap(),
                    auth,
                    tenant_shard_id,
                    timeline_id,
                    lsn,
                )?,
            };
            leases.push(lease);
        }
    }
    // `min()` over Options treats None as smallest: if any shard refused the
    // lease the result is None, otherwise it's the earliest expiration time.
    Ok(leases.into_iter().min().flatten())
}
/// Acquires an LSN lease on a single shard, using the libpq API. The connstring must use a
/// postgresql:// scheme.
///
/// Returns `Ok(None)` if the pageserver explicitly refused the lease (no
/// `valid_until` column value in the response row).
fn acquire_lsn_lease_libpq(
    connstring: &str,
    auth: Option<&str>,
    tenant_shard_id: TenantShardId,
    timeline_id: TimelineId,
    lsn: Lsn,
) -> Result<Option<SystemTime>> {
    let mut config = postgres::Config::from_str(connstring)?;
    if let Some(auth) = auth {
        config.password(auth);
    }
    let mut client = config.connect(NoTls)?;

    let cmd = format!("lease lsn {tenant_shard_id} {timeline_id} {lsn} ");
    let res = client.simple_query(&cmd)?;
    let msg = match res.first() {
        Some(msg) => msg,
        None => bail!("empty response"),
    };
    let row = match msg {
        SimpleQueryMessage::Row(row) => row,
        _ => bail!("error parsing lsn lease response"),
    };

    // Note: this will be None if a lease is explicitly not granted.
    let valid_until_str = row.get("valid_until");
    let valid_until = valid_until_str.map(|s| {
        // The expiration is milliseconds since the Unix epoch. Parse directly
        // as u64: the previous `u128::from_str(..) as u64` silently truncated
        // out-of-range values instead of failing loudly.
        let millis = u64::from_str(s).expect("valid_until must be a u64 millisecond timestamp");
        SystemTime::UNIX_EPOCH
            .checked_add(Duration::from_millis(millis))
            .expect("Time larger than max SystemTime could handle")
    });
    Ok(valid_until)
}
/// Acquires an LSN lease on a single shard, using the gRPC API. The connstring must use a
/// grpc:// scheme.
///
/// NOTE(review): this blocks on the current Tokio runtime handle, so it must
/// be called from a thread where `Handle::current()` resolves but that is not
/// itself running inside an async task — confirm against the lease background
/// thread call site.
fn acquire_lsn_lease_grpc(
    connstring: &str,
    auth: Option<&str>,
    tenant_shard_id: TenantShardId,
    timeline_id: TimelineId,
    lsn: Lsn,
) -> Result<Option<SystemTime>> {
    tokio::runtime::Handle::current().block_on(async move {
        let mut client = page_api::Client::connect(
            connstring.to_string(),
            tenant_shard_id.tenant_id,
            timeline_id,
            tenant_shard_id.to_index(),
            auth.map(String::from),
            None,
        )
        .await?;

        let req = page_api::LeaseLsnRequest { lsn };
        match client.lease_lsn(req).await {
            Ok(expires) => Ok(Some(expires)),
            // Lease couldn't be acquired because the LSN has been garbage collected.
            Err(err) if err.code() == tonic::Code::FailedPrecondition => Ok(None),
            Err(err) => Err(err.into()),
        }
    })
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/compute_tools/src/migration.rs | compute_tools/src/migration.rs | use anyhow::{Context, Result};
use fail::fail_point;
use tokio_postgres::{Client, Transaction};
use tracing::{error, info};
use crate::metrics::DB_MIGRATION_FAILED;
/// Runs a series of migrations on a target database
pub(crate) struct MigrationRunner<'m> {
    // Connection on which both the migrations and the bookkeeping queries run.
    client: &'m mut Client,
    // Ordered migration SQL texts; index N corresponds to migration id N+1.
    migrations: &'m [&'m str],
    // When set, "neon_superuser" is rewritten to "databricks_superuser" in
    // each migration before it is applied (see run_migrations).
    lakebase_mode: bool,
}
impl<'m> MigrationRunner<'m> {
    /// Create a new migration runner
    pub fn new(client: &'m mut Client, migrations: &'m [&'m str], lakebase_mode: bool) -> Self {
        // The neon_migration.migration_id::id column is a bigint, which is equivalent to an i64
        assert!(migrations.len() + 1 < i64::MAX as usize);

        Self {
            client,
            migrations,
            lakebase_mode,
        }
    }

    /// Get the current value neon_migration.migration_id
    async fn get_migration_id(&mut self) -> Result<i64> {
        let row = self
            .client
            .query_one("SELECT id FROM neon_migration.migration_id", &[])
            .await?;

        Ok(row.get::<&str, i64>("id"))
    }

    /// Update the neon_migration.migration_id value
    ///
    /// This function has a fail point called compute-migration, which can be
    /// used if you would like to fail the application of a series of migrations
    /// at some point.
    async fn update_migration_id(txn: &mut Transaction<'_>, migration_id: i64) -> Result<()> {
        // We use this fail point in order to check that failing in the
        // middle of applying a series of migrations fails in an expected
        // manner
        if cfg!(feature = "testing") {
            // The failpoint fires only when its configured id matches the
            // migration currently being applied.
            let fail = (|| {
                fail_point!("compute-migration", |fail_migration_id| {
                    migration_id == fail_migration_id.unwrap().parse::<i64>().unwrap()
                });

                false
            })();

            if fail {
                return Err(anyhow::anyhow!(format!(
                    "migration {} was configured to fail because of a failpoint",
                    migration_id
                )));
            }
        }

        txn.query(
            "UPDATE neon_migration.migration_id SET id = $1",
            &[&migration_id],
        )
        .await
        .with_context(|| format!("update neon_migration.migration_id to {migration_id}"))?;

        Ok(())
    }

    /// Prepare the target database for handling migrations.
    ///
    /// Every statement here is idempotent (IF NOT EXISTS / ON CONFLICT DO
    /// NOTHING), so this is safe to run on every startup.
    async fn prepare_database(&mut self) -> Result<()> {
        self.client
            .simple_query("CREATE SCHEMA IF NOT EXISTS neon_migration")
            .await?;
        self.client.simple_query("CREATE TABLE IF NOT EXISTS neon_migration.migration_id (key pg_catalog.int4 NOT NULL PRIMARY KEY, id pg_catalog.int8 NOT NULL DEFAULT 0)").await?;
        self.client
            .simple_query(
                "INSERT INTO neon_migration.migration_id VALUES (0, 0) ON CONFLICT DO NOTHING",
            )
            .await?;
        self.client
            .simple_query("ALTER SCHEMA neon_migration OWNER TO cloud_admin")
            .await?;
        self.client
            .simple_query("REVOKE ALL ON SCHEMA neon_migration FROM PUBLIC")
            .await?;

        Ok(())
    }

    /// Run an individual migration in a separate transaction block.
    async fn run_migration(client: &mut Client, migration_id: i64, migration: &str) -> Result<()> {
        let mut txn = client
            .transaction()
            .await
            .with_context(|| format!("begin transaction for migration {migration_id}"))?;

        // Migrations whose text starts with "-- SKIP" are recorded but not executed.
        if migration.starts_with("-- SKIP") {
            info!("Skipping migration id={}", migration_id);

            // Even though we are skipping the migration, updating the
            // migration ID should help keep logic easy to understand when
            // trying to understand the state of a cluster.
            Self::update_migration_id(&mut txn, migration_id).await?;
        } else {
            info!("Running migration id={}:\n{}\n", migration_id, migration);

            txn.simple_query(migration)
                .await
                .with_context(|| format!("apply migration {migration_id}"))?;

            Self::update_migration_id(&mut txn, migration_id).await?;
        }

        // Migration body and id bump commit atomically.
        txn.commit()
            .await
            .with_context(|| format!("commit transaction for migration {migration_id}"))?;

        Ok(())
    }

    /// Run the configured set of migrations
    pub async fn run_migrations(mut self) -> Result<()> {
        self.prepare_database()
            .await
            .context("prepare database to handle migrations")?;

        let mut current_migration = self.get_migration_id().await? as usize;
        while current_migration < self.migrations.len() {
            // The index lags the migration ID by 1, so the current migration
            // ID is also the next index
            let migration_id = (current_migration + 1) as i64;
            let migration = self.migrations[current_migration];
            // In lakebase mode, migrations target databricks_superuser instead.
            let migration = if self.lakebase_mode {
                migration.replace("neon_superuser", "databricks_superuser")
            } else {
                migration.to_string()
            };

            match Self::run_migration(self.client, migration_id, &migration).await {
                Ok(_) => {
                    info!("Finished migration id={}", migration_id);
                }
                Err(e) => {
                    // Record the failure in metrics, then stop: later
                    // migrations may depend on this one.
                    error!("Failed to run migration id={}: {:?}", migration_id, e);
                    DB_MIGRATION_FAILED
                        .with_label_values(&[migration_id.to_string().as_str()])
                        .inc();
                    return Err(e);
                }
            }

            current_migration += 1;
        }

        Ok(())
    }
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/compute_tools/src/compute.rs | compute_tools/src/compute.rs | use anyhow::{Context, Result};
use chrono::{DateTime, Utc};
use compute_api::privilege::Privilege;
use compute_api::responses::{
ComputeConfig, ComputeCtlConfig, ComputeMetrics, ComputeStatus, LfcOffloadState,
LfcPrewarmState, PromoteState, TlsConfig,
};
use compute_api::spec::{
ComputeAudit, ComputeFeature, ComputeMode, ComputeSpec, ExtVersion, GenericOption,
PageserverConnectionInfo, PageserverProtocol, PgIdent, Role,
};
use futures::StreamExt;
use futures::future::join_all;
use futures::stream::FuturesUnordered;
use itertools::Itertools;
use nix::sys::signal::{Signal, kill};
use nix::unistd::Pid;
use once_cell::sync::Lazy;
use pageserver_page_api::{self as page_api, BaseBackupCompression};
use postgres;
use postgres::NoTls;
use postgres::error::SqlState;
use remote_storage::{DownloadError, RemotePath};
use std::collections::{HashMap, HashSet};
use std::ffi::OsString;
use std::os::unix::fs::{PermissionsExt, symlink};
use std::path::Path;
use std::process::{Command, Stdio};
use std::str::FromStr;
use std::sync::atomic::{AtomicU32, AtomicU64, Ordering};
use std::sync::{Arc, Condvar, Mutex, RwLock};
use std::time::{Duration, Instant};
use std::{env, fs};
use tokio::{spawn, sync::watch, task::JoinHandle, time};
use tokio_util::sync::CancellationToken;
use tracing::{Instrument, debug, error, info, instrument, warn};
use url::Url;
use utils::backoff::{
DEFAULT_BASE_BACKOFF_SECONDS, DEFAULT_MAX_BACKOFF_SECONDS, exponential_backoff_duration,
};
use utils::id::{TenantId, TimelineId};
use utils::lsn::Lsn;
use utils::measured_stream::MeasuredReader;
use utils::pid_file;
use utils::shard::{ShardIndex, ShardNumber, ShardStripeSize};
use crate::configurator::launch_configurator;
use crate::disk_quota::set_disk_quota;
use crate::hadron_metrics::COMPUTE_ATTACHED;
use crate::installed_extensions::get_installed_extensions;
use crate::logger::{self, startup_context_from_env};
use crate::lsn_lease::launch_lsn_lease_bg_task_for_static;
use crate::metrics::COMPUTE_CTL_UP;
use crate::monitor::launch_monitor;
use crate::pg_helpers::*;
use crate::pgbouncer::*;
use crate::rsyslog::{
PostgresLogsRsyslogConfig, configure_audit_rsyslog, configure_postgres_logs_export,
launch_pgaudit_gc,
};
use crate::spec::*;
use crate::swap::resize_swap;
use crate::sync_sk::{check_if_synced, ping_safekeeper};
use crate::tls::watch_cert_for_changes;
use crate::{config, extension_server, local_proxy};
// PIDs of the `postgres --sync-safekeepers` process and the main postgres
// process. Zero means "not running".
// NOTE(review): readers/writers are elsewhere in this crate — confirm usage
// before changing the encoding.
pub static SYNC_SAFEKEEPERS_PID: AtomicU32 = AtomicU32::new(0);
pub static PG_PID: AtomicU32 = AtomicU32::new(0);

// This is an arbitrary build tag. Fine as a default / for testing purposes
// in-case of not-set environment var
const BUILD_TAG_DEFAULT: &str = "latest";

/// Build tag/version of the compute node binaries/image. It's tricky and ugly
/// to pass it everywhere as a part of `ComputeNodeParams`, so we use a
/// global static variable.
pub static BUILD_TAG: Lazy<String> = Lazy::new(|| {
    // BUILD_TAG is read at compile time (option_env!), falling back to
    // "latest" when the env var was unset during the build.
    option_env!("BUILD_TAG")
        .unwrap_or(BUILD_TAG_DEFAULT)
        .to_string()
});

// Default interval between installed-extensions collection runs
// (3600 — presumably seconds; confirm at the consumer).
const DEFAULT_INSTALLED_EXTENSIONS_COLLECTION_INTERVAL: u64 = 3600;
/// Static configuration params that don't change after startup. These mostly
/// come from the CLI args, or are derived from them.
#[derive(Clone, Debug)]
pub struct ComputeNodeParams {
    /// The ID of the compute
    pub compute_id: String,

    /// Url type maintains proper escaping
    pub connstr: url::Url,

    /// The name of the 'weak' superuser role, which we give to the users.
    /// It follows the allow list approach, i.e., we take a standard role
    /// and grant it extra permissions with explicit GRANTs here and there,
    /// and core patches.
    pub privileged_role_name: String,

    /// Whether to resize swap during bind (consumed elsewhere; see
    /// `crate::swap::resize_swap`).
    pub resize_swap_on_bind: bool,
    /// Filesystem to apply a disk quota to, if any (see `crate::disk_quota`).
    pub set_disk_quota_for_fs: Option<String>,

    // VM monitor parameters
    #[cfg(target_os = "linux")]
    pub filecache_connstr: String,
    #[cfg(target_os = "linux")]
    pub cgroup: String,
    #[cfg(target_os = "linux")]
    pub vm_monitor_addr: String,

    /// Path to the Postgres data directory.
    pub pgdata: String,
    /// Path to the `postgres` binary.
    pub pgbin: String,
    /// Postgres version — NOTE(review): exact format (major vs full version)
    /// not visible in this chunk; confirm at call sites.
    pub pgversion: String,

    /// The port that the compute's external HTTP server listens on
    pub external_http_port: u16,
    /// The port that the compute's internal HTTP server listens on
    pub internal_http_port: u16,

    /// the address of extension storage proxy gateway
    pub remote_ext_base_url: Option<Url>,

    /// Interval for installed extensions collection
    pub installed_extensions_collection_interval: Arc<AtomicU64>,

    /// Hadron instance ID of the compute node.
    pub instance_id: Option<String>,

    /// Timeout of PG compute startup in the Init state.
    pub pg_init_timeout: Option<Duration>,

    // Path to the `pg_isready` binary.
    pub pg_isready_bin: String,

    /// Enables Databricks-specific behavior (e.g. migrations rewrite
    /// neon_superuser to databricks_superuser).
    pub lakebase_mode: bool,

    /// Build tag of this compute_ctl binary/image.
    pub build_tag: String,

    /// Control plane URI, if configured.
    pub control_plane_uri: Option<String>,
    /// Test-only override of the config file path.
    pub config_path_test_only: Option<OsString>,
}
/// Handle to a spawned background task; `None` when the task is not running.
type TaskHandle = Mutex<Option<JoinHandle<()>>>;

/// Compute node info shared across several `compute_ctl` threads.
pub struct ComputeNode {
    pub params: ComputeNodeParams,

    // We connect to Postgres from many different places, so build configs once
    // and reuse them where needed. These are derived from 'params.connstr'
    pub conn_conf: postgres::config::Config,
    pub tokio_conn_conf: tokio_postgres::config::Config,

    /// Volatile part of the `ComputeNode`, which should be used under `Mutex`.
    /// To allow the HTTP API server to serve status requests while
    /// configuration is in progress, the lock should be held only for short
    /// periods of time to do read/write, not for the whole configuration
    /// process.
    pub state: Mutex<ComputeState>,
    /// `Condvar` to allow notifying waiters about state changes.
    pub state_changed: Condvar,

    // key: ext_archive_name, value: started download time, download_completed?
    pub ext_download_progress: RwLock<HashMap<String, (DateTime<Utc>, bool)>>,
    pub compute_ctl_config: ComputeCtlConfig,

    /// Handle to the extension stats collection task
    extension_stats_task: TaskHandle,
    /// Handle to the LFC offload background task.
    lfc_offload_task: TaskHandle,
}
// store some metrics about download size that might impact startup time
#[derive(Clone, Debug)]
pub struct RemoteExtensionMetrics {
    /// How many remote extensions were downloaded.
    num_ext_downloaded: u64,
    /// Size of the largest single extension download. (presumably bytes —
    /// TODO confirm against the download code, which is outside this chunk)
    largest_ext_size: u64,
    /// Total downloaded across all extensions (same unit caveat as above).
    total_ext_download_size: u64,
}
/// Volatile per-compute state, shared via `ComputeNode::state` under a mutex.
#[derive(Clone, Debug)]
pub struct ComputeState {
    /// When this process started waiting/working; reset after the spec wait
    /// so later startup-time accounting excludes it (see `wait_spec`).
    pub start_time: DateTime<Utc>,
    /// When Postgres startup was (last) initiated; `None` until then.
    pub pg_start_time: Option<DateTime<Utc>>,
    /// Current lifecycle status; transitions go through `set_status`.
    pub status: ComputeStatus,
    /// Timestamp of the last Postgres activity. It could be `None` if
    /// compute wasn't used since start.
    pub last_active: Option<DateTime<Utc>>,
    /// Last startup/configuration error, exposed via the HTTP status API.
    pub error: Option<String>,
    /// Compute spec. This can be received from the CLI or - more likely -
    /// passed by the control plane with a /configure HTTP request.
    pub pspec: Option<ParsedSpec>,
    /// If the spec is passed by a /configure request, 'startup_span' is the
    /// /configure request's tracing span. The main thread enters it when it
    /// processes the compute startup, so that the compute startup is considered
    /// to be part of the /configure request for tracing purposes.
    ///
    /// If the request handling thread/task called startup_compute() directly,
    /// it would automatically be a child of the request handling span, and we
    /// wouldn't need this. But because we use the main thread to perform the
    /// startup, and the /configure task just waits for it to finish, we need to
    /// set up the span relationship ourselves.
    pub startup_span: Option<tracing::span::Span>,
    /// Progress/result of LFC prewarming.
    pub lfc_prewarm_state: LfcPrewarmState,
    /// Cancels an in-flight LFC prewarm.
    pub lfc_prewarm_token: CancellationToken,
    /// Progress/result of LFC offloading.
    pub lfc_offload_state: LfcOffloadState,
    /// WAL flush LSN that is set after terminating Postgres and syncing safekeepers if
    /// mode == ComputeMode::Primary. None otherwise
    pub terminate_flush_lsn: Option<Lsn>,
    /// Receiver for promotion progress, if a promotion was started.
    pub promote_state: Option<watch::Receiver<PromoteState>>,
    /// Startup timing metrics.
    pub metrics: ComputeMetrics,
}
impl ComputeState {
    /// Fresh state: status `Empty`, clock started now, everything else
    /// unset or defaulted.
    pub fn new() -> Self {
        Self {
            start_time: Utc::now(),
            pg_start_time: None,
            status: ComputeStatus::Empty,
            last_active: None,
            error: None,
            pspec: None,
            startup_span: None,
            lfc_prewarm_state: LfcPrewarmState::default(),
            lfc_prewarm_token: CancellationToken::new(),
            lfc_offload_state: LfcOffloadState::default(),
            terminate_flush_lsn: None,
            promote_state: None,
            metrics: ComputeMetrics::default(),
        }
    }

    /// Transition to `status`, wake all condvar waiters, and refresh the
    /// `compute_ctl_up` gauge so that exactly one (build_tag, status)
    /// label pair is set to 1.
    pub fn set_status(&mut self, status: ComputeStatus, state_changed: &Condvar) {
        info!("Changing compute status from {} to {}", self.status, status);
        self.status = status;
        state_changed.notify_all();

        // Clear every label combination first, then set only the current one.
        COMPUTE_CTL_UP.reset();
        COMPUTE_CTL_UP
            .with_label_values(&[&BUILD_TAG, status.to_string().as_str()])
            .set(1);
    }

    /// Record the full error chain (`{:?}` formatting) and move to `Failed`.
    pub fn set_failed_status(&mut self, err: anyhow::Error, state_changed: &Condvar) {
        self.error = Some(format!("{err:?}"));
        self.set_status(ComputeStatus::Failed, state_changed);
    }
}
impl Default for ComputeState {
fn default() -> Self {
Self::new()
}
}
/// A `ComputeSpec` together with the storage-connection fields extracted
/// and validated from it (see the `TryFrom<ComputeSpec>` impl below).
#[derive(Clone, Debug)]
pub struct ParsedSpec {
    /// The original spec this was parsed from.
    pub spec: ComputeSpec,
    pub tenant_id: TenantId,
    pub timeline_id: TimelineId,
    /// Pageserver connection info, from the spec field, the legacy
    /// connstring field, or GUCs — in that order of preference.
    pub pageserver_conninfo: PageserverConnectionInfo,
    /// Non-empty for Primary computes; may be empty otherwise.
    pub safekeeper_connstrings: Vec<String>,
    pub storage_auth_token: Option<String>,
    /// k8s dns name and port
    pub endpoint_storage_addr: Option<String>,
    pub endpoint_storage_token: Option<String>,
}
impl ParsedSpec {
    /// Validate the parsed spec. Currently only the safekeeper connection
    /// strings are checked, and only for `Primary` computes.
    pub fn validate(&self) -> Result<(), String> {
        // Only Primary nodes are using safekeeper_connstrings, and at the
        // moment this method only validates that part of the specs.
        if self.spec.mode != ComputeMode::Primary {
            return Ok(());
        }

        // While it seems like a good idea to check for an odd number of
        // entries in the safekeepers connection string, changes to the list
        // of safekeepers might incur appending a new server to a list of 3,
        // in which case a list of 4 entries is okay in production.
        //
        // Still we want unique entries, and at least one entry in the vector.
        if self.safekeeper_connstrings.is_empty() {
            return Err(String::from("safekeeper_connstrings is empty"));
        }

        // Sort a copy so that any duplicates become adjacent, then scan
        // neighbouring pairs.
        let mut sorted = self.safekeeper_connstrings.clone();
        sorted.sort();
        if let Some(pair) = sorted.windows(2).find(|w| w[0] == w[1]) {
            return Err(format!(
                "duplicate entry in safekeeper_connstrings: {}!",
                pair[1],
            ));
        }

        Ok(())
    }
}
impl TryFrom<ComputeSpec> for ParsedSpec {
    type Error = anyhow::Error;

    /// Extract storage-connection fields from a `ComputeSpec`, with fallbacks
    /// for specs produced by older control plane versions, then validate the
    /// result.
    fn try_from(spec: ComputeSpec) -> Result<Self, anyhow::Error> {
        // Extract the options from the spec file that are needed to connect to
        // the storage system.
        //
        // In compute specs generated by old control plane versions, the spec file might
        // be missing the `pageserver_connection_info` field. In that case, we need to dig
        // the pageserver connection info from the `pageserver_connstr` field instead, or
        // if that's missing too, from the GUC in the cluster.settings field.
        let mut pageserver_conninfo = spec.pageserver_connection_info.clone();
        if pageserver_conninfo.is_none() {
            if let Some(pageserver_connstr_field) = &spec.pageserver_connstring {
                pageserver_conninfo = Some(PageserverConnectionInfo::from_connstr(
                    pageserver_connstr_field,
                    spec.shard_stripe_size,
                )?);
            }
        }
        if pageserver_conninfo.is_none() {
            if let Some(guc) = spec.cluster.settings.find("neon.pageserver_connstring") {
                // The stripe-size GUC is optional; parse it only when present.
                let stripe_size = match spec.cluster.settings.find("neon.stripe_size") {
                    Some(guc) => Some(ShardStripeSize(u32::from_str(&guc)?)),
                    None => None,
                };
                pageserver_conninfo =
                    Some(PageserverConnectionInfo::from_connstr(&guc, stripe_size)?);
            }
        }
        // NB: `ok_or_else` so the error value is only built on the failure path
        // (clippy::or_fun_call).
        let pageserver_conninfo = pageserver_conninfo.ok_or_else(|| {
            anyhow::anyhow!("pageserver connection information should be provided")
        })?;

        // Similarly for safekeeper connection strings
        let safekeeper_connstrings = if spec.safekeeper_connstrings.is_empty() {
            if matches!(spec.mode, ComputeMode::Primary) {
                spec.cluster
                    .settings
                    .find("neon.safekeepers")
                    .ok_or_else(|| anyhow::anyhow!("safekeeper connstrings should be provided"))?
                    .split(',')
                    .map(|s| s.to_string())
                    .collect()
            } else {
                vec![]
            }
        } else {
            spec.safekeeper_connstrings.clone()
        };

        let storage_auth_token = spec.storage_auth_token.clone();

        // Tenant/timeline ids: prefer the explicit spec fields, fall back to GUCs.
        let tenant_id: TenantId = if let Some(tenant_id) = spec.tenant_id {
            tenant_id
        } else {
            let guc = spec
                .cluster
                .settings
                .find("neon.tenant_id")
                .ok_or_else(|| anyhow::anyhow!("tenant id should be provided"))?;
            TenantId::from_str(&guc).context("invalid tenant id")?
        };
        let timeline_id: TimelineId = if let Some(timeline_id) = spec.timeline_id {
            timeline_id
        } else {
            let guc = spec
                .cluster
                .settings
                .find("neon.timeline_id")
                .ok_or_else(|| anyhow::anyhow!("timeline id should be provided"))?;
            TimelineId::from_str(&guc).context("invalid timeline id")?
        };

        let endpoint_storage_addr: Option<String> = spec
            .endpoint_storage_addr
            .clone()
            .or_else(|| spec.cluster.settings.find("neon.endpoint_storage_addr"));
        let endpoint_storage_token = spec
            .endpoint_storage_token
            .clone()
            .or_else(|| spec.cluster.settings.find("neon.endpoint_storage_token"));

        let res = ParsedSpec {
            spec,
            pageserver_conninfo,
            safekeeper_connstrings,
            storage_auth_token,
            tenant_id,
            timeline_id,
            endpoint_storage_addr,
            endpoint_storage_token,
        };

        // Now check validity of the parsed specification
        res.validate().map_err(anyhow::Error::msg)?;
        Ok(res)
    }
}
/// If we are a VM, returns a [`Command`] that will run in the `neon-postgres`
/// cgroup. Otherwise returns the default `Command::new(cmd)`
///
/// This function should be used to start postgres, as it will start it in the
/// neon-postgres cgroup if we are a VM. This allows autoscaling to control
/// postgres' resource usage. The cgroup will exist in VMs because vm-builder
/// creates it during the sysinit phase of its inittab.
fn maybe_cgexec(cmd: &str) -> Command {
    // The cplane sets this env var for autoscaling computes. `var_os` avoids
    // any concern about the value not being valid unicode — should never
    // happen, but cheap to be safe.
    match env::var_os("AUTOSCALING") {
        Some(_) => {
            let mut wrapped = Command::new("cgexec");
            wrapped.args(["-g", "memory:neon-postgres", cmd]);
            wrapped
        }
        None => Command::new(cmd),
    }
}
/// Handle to a running Postgres process plus the task collecting its logs.
struct PostgresHandle {
    postgres: std::process::Child,
    // Joinable handle for the log-collection task. NOTE(review): not joined
    // anywhere in this chunk — confirm it is awaited or deliberately
    // abandoned at shutdown.
    log_collector: JoinHandle<Result<()>>,
}

impl PostgresHandle {
    /// Return PID of the postgres (postmaster) process
    fn pid(&self) -> Pid {
        // `Child::id()` returns u32; the cast assumes PIDs fit in i32, which
        // holds on the platforms we target.
        Pid::from_raw(self.postgres.id() as i32)
    }
}
/// Result of starting the vm-monitor. Both fields are linux-only because the
/// vm-monitor requires cgroups; on other platforms this struct is empty.
struct StartVmMonitorResult {
    /// Cancellation token used to stop all monitor threads at shutdown.
    #[cfg(target_os = "linux")]
    token: tokio_util::sync::CancellationToken,
    /// Handle of the task running the monitor, if it was started.
    #[cfg(target_os = "linux")]
    vm_monitor: Option<JoinHandle<Result<()>>>,
}
// BEGIN_HADRON
/// This function creates roles that are used by Databricks.
/// These roles do not need to be bootstrapped at PG compute provisioning time.
/// The auth method for these roles is configured in databricks_pg_hba.conf in
/// the universe repository.
pub(crate) fn create_databricks_roles() -> Vec<String> {
    let roles = vec![
        // Role for prometheus_stats_exporter
        Role {
            name: "databricks_monitor".to_string(),
            // This uses "local" connection and auth method for that is "trust", so no password is needed.
            encrypted_password: None,
            options: Some(vec![GenericOption {
                name: "IN ROLE pg_monitor".to_string(),
                value: None,
                vartype: "string".to_string(),
            }]),
        },
        // Role for brickstore control plane
        Role {
            name: "databricks_control_plane".to_string(),
            // Certificate user does not need password.
            encrypted_password: None,
            options: Some(vec![GenericOption {
                name: "SUPERUSER".to_string(),
                value: None,
                vartype: "string".to_string(),
            }]),
        },
        // Role for brickstore httpgateway.
        Role {
            name: "databricks_gateway".to_string(),
            // Certificate user does not need password.
            encrypted_password: None,
            options: None,
        },
    ];
    // Render one idempotent DO-block per role: CREATE ROLE only if it does not
    // already exist. The names interpolated here are the hardcoded constants
    // above, so direct string interpolation into SQL is safe.
    roles
        .into_iter()
        .map(|role| {
            let query = format!(
                r#"
                DO $$
                BEGIN
                    IF NOT EXISTS (
                        SELECT FROM pg_catalog.pg_roles WHERE rolname = '{}')
                    THEN
                        CREATE ROLE {} {};
                    END IF;
                END
                $$;"#,
                role.name,
                role.name.pg_quote(),
                role.to_pg_options(),
            );
            query
        })
        .collect()
}
/// Databricks-specific environment variables to be passed to the `postgres` sub-process.
pub struct DatabricksEnvVars {
    /// The Databricks "endpoint ID" of the compute instance. Used by `postgres` to check
    /// the token scopes of internal auth tokens.
    pub endpoint_id: String,
    /// Hostname of the Databricks workspace URL this compute instance belongs to.
    /// Used by postgres to verify Databricks PAT tokens.
    pub workspace_host: String,
    /// When false (Neon env), `to_env_var_list` returns nothing.
    pub lakebase_mode: bool,
}
impl DatabricksEnvVars {
    /// Build the env vars from the spec and compute identifiers.
    ///
    /// `instance_id` (PuPr model) takes precedence; otherwise the endpoint id
    /// is extracted from `compute_id` (PrPr model), which is formatted as
    /// "{endpoint_id}/{compute_idx}" where endpoint_id is a uuid.
    ///
    /// # Panics
    /// Panics if neither `instance_id` nor `compute_id` is provided.
    pub fn new(
        compute_spec: &ComputeSpec,
        compute_id: Option<&String>,
        instance_id: Option<String>,
        lakebase_mode: bool,
    ) -> Self {
        let endpoint_id = if let Some(instance_id) = instance_id {
            // Use instance_id as endpoint_id if it is set. This code path is for PuPr model.
            instance_id
        } else {
            // Use compute_id as endpoint_id if instance_id is not set. This code path is
            // for PrPr model. We only need to pass down endpoint_id to postgres.
            compute_id
                .expect("either instance_id or compute_id must be set")
                .split('/')
                .next()
                .expect("split always yields at least one element")
                .to_string()
        };
        let workspace_host = compute_spec
            .databricks_settings
            .as_ref()
            .map(|s| s.databricks_workspace_host.clone())
            // clippy: unwrap_or_default() instead of unwrap_or("".to_string())
            .unwrap_or_default();
        Self {
            endpoint_id,
            workspace_host,
            lakebase_mode,
        }
    }

    /// Constants for the names of Databricks-specific postgres environment variables.
    const DATABRICKS_ENDPOINT_ID_ENVVAR: &'static str = "DATABRICKS_ENDPOINT_ID";
    const DATABRICKS_WORKSPACE_HOST_ENVVAR: &'static str = "DATABRICKS_WORKSPACE_HOST";

    /// Convert DatabricksEnvVars to a list of string pairs that can be passed
    /// as env vars. Consumes `self`. Returns an empty list when not in
    /// lakebase mode: in the Neon env we don't pass these down to postgres.
    pub fn to_env_var_list(self) -> Vec<(String, String)> {
        if !self.lakebase_mode {
            return vec![];
        }
        // `self` is consumed, so move the fields instead of cloning them.
        vec![
            (
                Self::DATABRICKS_ENDPOINT_ID_ENVVAR.to_string(),
                self.endpoint_id,
            ),
            (
                Self::DATABRICKS_WORKSPACE_HOST_ENVVAR.to_string(),
                self.workspace_host,
            ),
        ]
    }
}
impl ComputeNode {
    /// Build a `ComputeNode` from static params and the initial config.
    ///
    /// Parses `params.connstr` into both a sync and an async Postgres client
    /// config (built once, reused everywhere we connect), forces a set of
    /// safe session options onto them, and seeds the initial `ComputeState`
    /// from `config.spec` when a spec was provided on the command line.
    pub fn new(params: ComputeNodeParams, config: ComputeConfig) -> Result<Self> {
        let connstr = params.connstr.as_str();
        let mut conn_conf = postgres::config::Config::from_str(connstr)
            .context("cannot build postgres config from connstr")?;
        let mut tokio_conn_conf = tokio_postgres::config::Config::from_str(connstr)
            .context("cannot build tokio postgres config from connstr")?;

        // Users can set some configuration parameters per database with
        //   ALTER DATABASE ... SET ...
        //
        // There are at least these parameters:
        //
        // - role=some_other_role
        // - default_transaction_read_only=on
        // - statement_timeout=1, i.e., 1ms, which will cause most of the queries to fail
        // - search_path=non_public_schema, this should be actually safe because
        //   we don't call any functions in user databases, but better to always reset
        //   it to public.
        //
        // that can affect `compute_ctl` and prevent it from properly configuring the database schema.
        // Unset them via connection string options before connecting to the database.
        // N.B. keep it in sync with `ZENITH_OPTIONS` in `get_maintenance_client()`.
        const EXTRA_OPTIONS: &str = "-c role=cloud_admin -c default_transaction_read_only=off -c search_path='' -c statement_timeout=0 -c pgaudit.log=none";
        let options = match conn_conf.get_options() {
            // Allow the control plane to override any options set by the
            // compute
            Some(options) => format!("{EXTRA_OPTIONS} {options}"),
            None => EXTRA_OPTIONS.to_string(),
        };
        conn_conf.options(&options);
        tokio_conn_conf.options(&options);

        let mut new_state = ComputeState::new();
        if let Some(spec) = config.spec {
            let pspec = ParsedSpec::try_from(spec).map_err(|msg| anyhow::anyhow!(msg))?;
            if params.lakebase_mode {
                // Lakebase computes run extra spec-application logic at set time.
                ComputeNode::set_spec(&params, &mut new_state, pspec);
            } else {
                new_state.pspec = Some(pspec);
            }
        }

        Ok(ComputeNode {
            params,
            conn_conf,
            tokio_conn_conf,
            state: Mutex::new(new_state),
            state_changed: Condvar::new(),
            ext_download_progress: RwLock::new(HashMap::new()),
            compute_ctl_config: config.compute_ctl_config,
            extension_stats_task: Mutex::new(None),
            lfc_offload_task: Mutex::new(None),
        })
    }
    /// Top-level control flow of compute_ctl. Returns a process exit code we should
    /// exit with.
    ///
    /// Sequence: optional VM prewarm, launch HTTP servers, obtain a spec
    /// (CLI or /configure), start compute, wait for Postgres to exit, then
    /// tear down background tasks and optionally linger so the control plane
    /// can collect the error.
    pub fn run(self) -> Result<Option<i32>> {
        let this = Arc::new(self);

        let cli_spec = this.state.lock().unwrap().pspec.clone();

        // If this is a pooled VM, prewarm before starting HTTP server and becoming
        // available for binding. Prewarming helps Postgres start quicker later,
        // because QEMU will already have its memory allocated from the host, and
        // the necessary binaries will already be cached.
        if cli_spec.is_none() {
            this.prewarm_postgres_vm_memory()?;
        }

        // Set the up metric with Empty status before starting the HTTP server.
        // That way on the first metric scrape, an external observer will see us
        // as 'up' and 'empty' (unless the compute was started with a spec or
        // already configured by control plane).
        COMPUTE_CTL_UP
            .with_label_values(&[&BUILD_TAG, ComputeStatus::Empty.to_string().as_str()])
            .set(1);

        // Launch the external HTTP server first, so that we can serve control plane
        // requests while configuration is still in progress.
        crate::http::server::Server::External {
            port: this.params.external_http_port,
            config: this.compute_ctl_config.clone(),
            compute_id: this.params.compute_id.clone(),
            instance_id: this.params.instance_id.clone(),
        }
        .launch(&this);

        // The internal HTTP server could be launched later, but there isn't much
        // sense in waiting.
        crate::http::server::Server::Internal {
            port: this.params.internal_http_port,
        }
        .launch(&this);

        // If we got a spec from the CLI already, use that. Otherwise wait for the
        // control plane to pass it to us with a /configure HTTP request
        let pspec = if let Some(cli_spec) = cli_spec {
            cli_spec
        } else {
            this.wait_spec()?
        };

        launch_lsn_lease_bg_task_for_static(&this);

        // We have a spec, start the compute
        let mut delay_exit = false;
        let mut vm_monitor = None;
        let mut pg_process: Option<PostgresHandle> = None;

        match this.start_compute(&mut pg_process) {
            Ok(()) => {
                // Success! Launch remaining services (just vm-monitor currently)
                vm_monitor =
                    Some(this.start_vm_monitor(pspec.spec.disable_lfc_resizing.unwrap_or(false)));
            }
            Err(err) => {
                // Something went wrong with the startup. Log it and expose the error to
                // HTTP status requests.
                error!("could not start the compute node: {:#}", err);
                this.set_failed_status(err);
                delay_exit = true;

                // If the error happened after starting PostgreSQL, kill it
                if let Some(ref pg_process) = pg_process {
                    kill(pg_process.pid(), Signal::SIGQUIT).ok();
                }
            }
        }

        // If startup was successful, or it failed in the late stages,
        // PostgreSQL is now running. Wait until it exits.
        let exit_code = if let Some(pg_handle) = pg_process {
            let exit_status = this.wait_postgres(pg_handle);
            info!("Postgres exited with code {}, shutting down", exit_status);
            exit_status.code()
        } else {
            None
        };

        this.terminate_extension_stats_task();
        this.terminate_lfc_offload_task();

        // Terminate the vm_monitor so it releases the file watcher on
        // /sys/fs/cgroup/neon-postgres.
        // Note: the vm-monitor only runs on linux because it requires cgroups.
        if let Some(vm_monitor) = vm_monitor {
            cfg_if::cfg_if! {
                if #[cfg(target_os = "linux")] {
                    // Kills all threads spawned by the monitor
                    vm_monitor.token.cancel();
                    if let Some(handle) = vm_monitor.vm_monitor {
                        // Kills the actual task running the monitor
                        handle.abort();
                    }
                } else {
                    _ = vm_monitor; // appease unused lint on macOS
                }
            }
        }

        // Reap the postgres process
        delay_exit |= this.cleanup_after_postgres_exit()?;

        // /terminate returns LSN. If we don't sleep at all, connection will break and we
        // won't get result. If we sleep too much, tests will take significantly longer
        // and Github Action run will error out
        let sleep_duration = if delay_exit {
            Duration::from_secs(30)
        } else {
            Duration::from_millis(300)
        };

        // If launch failed, keep serving HTTP requests for a while, so the cloud
        // control plane can get the actual error.
        if delay_exit {
            info!("giving control plane 30s to collect the error before shutdown");
        }
        std::thread::sleep(sleep_duration);
        Ok(exit_code)
    }
pub fn wait_spec(&self) -> Result<ParsedSpec> {
info!("no compute spec provided, waiting");
let mut state = self.state.lock().unwrap();
while state.status != ComputeStatus::ConfigurationPending {
state = self.state_changed.wait(state).unwrap();
}
info!("got spec, continue configuration");
let spec = state.pspec.as_ref().unwrap().clone();
// Record for how long we slept waiting for the spec.
let now = Utc::now();
state.metrics.wait_for_spec_ms = now
.signed_duration_since(state.start_time)
.to_std()
.unwrap()
.as_millis() as u64;
// Reset start time, so that the total startup time that is calculated later will
// not include the time that we waited for the spec.
state.start_time = now;
Ok(spec)
}
/// Start compute.
///
/// Prerequisites:
/// - the compute spec has been placed in self.state.pspec
///
/// On success:
/// - status is set to ComputeStatus::Running
/// - self.running_postgres is set
///
/// On error:
/// - status is left in ComputeStatus::Init. The caller is responsible for setting it to Failed
/// - if Postgres was started before the fatal error happened, self.running_postgres is
/// set. The caller is responsible for killing it.
///
/// Note that this is in the critical path of a compute cold start. Keep this fast.
/// Try to do things concurrently, to hide the latencies.
fn start_compute(self: &Arc<Self>, pg_handle: &mut Option<PostgresHandle>) -> Result<()> {
let compute_state: ComputeState;
let start_compute_span;
let _this_entered;
{
let mut state_guard = self.state.lock().unwrap();
// Create a tracing span for the startup operation.
//
// We could otherwise just annotate the function with #[instrument], but if
// we're being configured from a /configure HTTP request, we want the
// startup to be considered part of the /configure request.
//
// Similarly, if a trace ID was passed in env variables, attach it to the span.
start_compute_span = {
// Temporarily enter the parent span, so that the new span becomes its child.
if let Some(p) = state_guard.startup_span.take() {
let _parent_entered = p.entered();
tracing::info_span!("start_compute")
} else if let Some(otel_context) = startup_context_from_env() {
use tracing_opentelemetry::OpenTelemetrySpanExt;
let span = tracing::info_span!("start_compute");
span.set_parent(otel_context);
span
} else {
tracing::info_span!("start_compute")
}
};
_this_entered = start_compute_span.enter();
// Hadron: Record postgres start time (used to enforce pg_init_timeout).
state_guard.pg_start_time.replace(Utc::now());
state_guard.set_status(ComputeStatus::Init, &self.state_changed);
compute_state = state_guard.clone()
}
let pspec = compute_state.pspec.as_ref().expect("spec must be set");
info!(
"starting compute for project {}, operation {}, tenant {}, timeline {}, project {}, branch {}, endpoint {}, features {:?}, spec.remote_extensions {:?}",
pspec.spec.cluster.cluster_id.as_deref().unwrap_or("None"),
pspec.spec.operation_uuid.as_deref().unwrap_or("None"),
pspec.tenant_id,
pspec.timeline_id,
pspec.spec.project_id.as_deref().unwrap_or("None"),
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | true |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/compute_tools/src/configurator.rs | compute_tools/src/configurator.rs | use std::fs::File;
use std::thread;
use std::{path::Path, sync::Arc};
use anyhow::Result;
use compute_api::responses::{ComputeConfig, ComputeStatus};
use tracing::{error, info, instrument};
use crate::compute::{ComputeNode, ParsedSpec};
use crate::spec::get_config_from_control_plane;
/// Main loop of the configurator thread.
///
/// Sleeps on the compute state condvar and reacts to status changes:
/// `ConfigurationPending` (apply a newly received spec),
/// `RefreshConfigurationPending` (lakebase only: re-fetch the config and
/// reconfigure), and `Failed` (exit the loop). All other wakeups just loop.
#[instrument(skip_all)]
fn configurator_main_loop(compute: &Arc<ComputeNode>) {
    info!("waiting for reconfiguration requests");
    loop {
        let mut state = compute.state.lock().unwrap();
        /* BEGIN_HADRON */
        // RefreshConfiguration should only be used inside the loop
        assert_ne!(state.status, ComputeStatus::RefreshConfiguration);
        /* END_HADRON */
        if compute.params.lakebase_mode {
            // Sleep until one of the statuses this loop acts on shows up.
            while state.status != ComputeStatus::ConfigurationPending
                && state.status != ComputeStatus::RefreshConfigurationPending
                && state.status != ComputeStatus::Failed
            {
                info!("configurator: compute status: {:?}, sleeping", state.status);
                state = compute.state_changed.wait(state).unwrap();
            }
        } else {
            // We have to re-check the status after re-acquiring the lock because it could be that
            // the status has changed while we were waiting for the lock, and we might not need to
            // wait on the condition variable. Otherwise, we might end up in some soft-/deadlock, i.e.
            // we are waiting for a condition variable that will never be signaled.
            if state.status != ComputeStatus::ConfigurationPending {
                state = compute.state_changed.wait(state).unwrap();
            }
        }

        // Re-check the status after waking up
        if state.status == ComputeStatus::ConfigurationPending {
            info!("got configuration request");
            state.set_status(ComputeStatus::Configuration, &compute.state_changed);
            // Release the lock while reconfiguring; reconfigure() re-acquires it.
            drop(state);

            let mut new_status = ComputeStatus::Failed;
            if let Err(e) = compute.reconfigure() {
                error!("could not configure compute node: {}", e);
            } else {
                new_status = ComputeStatus::Running;
                info!("compute node configured");
            }

            // XXX: used to test that API is blocking
            // std::thread::sleep(std::time::Duration::from_millis(10000));

            compute.set_status(new_status);
        } else if state.status == ComputeStatus::RefreshConfigurationPending {
            info!(
                "compute node suspects its configuration is out of date, now refreshing configuration"
            );
            state.set_status(ComputeStatus::RefreshConfiguration, &compute.state_changed);
            // Drop the lock guard here to avoid holding the lock while downloading config from the control plane / HCC.
            // This is the only thread that can move compute_ctl out of the `RefreshConfiguration` state, so it
            // is safe to drop the lock like this.
            drop(state);

            let get_config_result: anyhow::Result<ComputeConfig> =
                if let Some(config_path) = &compute.params.config_path_test_only {
                    // This path is only to make testing easier. In production we always get the config from the HCC.
                    info!(
                        "reloading config.json from path: {}",
                        config_path.to_string_lossy()
                    );
                    let path = Path::new(config_path);
                    if let Ok(file) = File::open(path) {
                        match serde_json::from_reader::<File, ComputeConfig>(file) {
                            Ok(config) => Ok(config),
                            Err(e) => {
                                error!("could not parse config file: {}", e);
                                Err(anyhow::anyhow!("could not parse config file: {}", e))
                            }
                        }
                    } else {
                        error!(
                            "could not open config file at path: {:?}",
                            config_path.to_string_lossy()
                        );
                        Err(anyhow::anyhow!(
                            "could not open config file at path: {}",
                            config_path.to_string_lossy()
                        ))
                    }
                } else if let Some(control_plane_uri) = &compute.params.control_plane_uri {
                    get_config_from_control_plane(control_plane_uri, &compute.params.compute_id)
                } else {
                    Err(anyhow::anyhow!("config_path_test_only is not set"))
                };

            // Parse any received ComputeSpec and transpose the result into a Result<Option<ParsedSpec>>.
            let parsed_spec_result: Result<Option<ParsedSpec>> =
                get_config_result.and_then(|config| {
                    if let Some(spec) = config.spec {
                        if let Ok(pspec) = ParsedSpec::try_from(spec) {
                            Ok(Some(pspec))
                        } else {
                            Err(anyhow::anyhow!("could not parse spec"))
                        }
                    } else {
                        Ok(None)
                    }
                });

            let new_status: ComputeStatus;
            match parsed_spec_result {
                // Control plane (HCM) returned a spec and we were able to parse it.
                Ok(Some(pspec)) => {
                    {
                        let mut state = compute.state.lock().unwrap();
                        // Defensive programming to make sure this thread is indeed the only one that can move the compute
                        // node out of the `RefreshConfiguration` state. Would be nice if we can encode this invariant
                        // into the type system.
                        assert_eq!(state.status, ComputeStatus::RefreshConfiguration);
                        if state
                            .pspec
                            .as_ref()
                            .map(|ps| ps.pageserver_conninfo.clone())
                            == Some(pspec.pageserver_conninfo.clone())
                        {
                            info!(
                                "Refresh configuration: Retrieved spec is the same as the current spec. Waiting for control plane to update the spec before attempting reconfiguration."
                            );
                            state.status = ComputeStatus::Running;
                            compute.state_changed.notify_all();
                            drop(state);
                            std::thread::sleep(std::time::Duration::from_secs(5));
                            continue;
                        }
                        // state.pspec is consumed by compute.reconfigure() below. Note that compute.reconfigure() will acquire
                        // the compute.state lock again so we need to have the lock guard go out of scope here. We could add a
                        // "locked" variant of compute.reconfigure() that takes the lock guard as an argument to make this cleaner,
                        // but it's not worth forking the codebase too much for this minor point alone right now.
                        state.pspec = Some(pspec);
                    }
                    match compute.reconfigure() {
                        Ok(_) => {
                            info!("Refresh configuration: compute node configured");
                            new_status = ComputeStatus::Running;
                        }
                        Err(e) => {
                            error!(
                                "Refresh configuration: could not configure compute node: {}",
                                e
                            );
                            // Set the compute node back to the `RefreshConfigurationPending` state if the configuration
                            // was not successful. It should be okay to treat this situation the same as if the loop
                            // hasn't executed yet as long as the detection side keeps notifying.
                            new_status = ComputeStatus::RefreshConfigurationPending;
                        }
                    }
                }
                // Control plane (HCM)'s response does not contain a spec. This is the "Empty" attachment case.
                Ok(None) => {
                    info!(
                        "Compute Manager signaled that this compute is no longer attached to any storage. Exiting."
                    );
                    // We just immediately terminate the whole compute_ctl in this case. It's not necessary to attempt a
                    // clean shutdown as Postgres is probably not responding anyway (which is why we are in this refresh
                    // configuration state).
                    std::process::exit(1);
                }
                // Various error cases:
                // - The request to the control plane (HCM) either failed or returned a malformed spec.
                // - compute_ctl itself is configured incorrectly (e.g., compute_id is not set).
                Err(e) => {
                    error!(
                        "Refresh configuration: error getting a parsed spec: {:?}",
                        e
                    );
                    new_status = ComputeStatus::RefreshConfigurationPending;
                    // We may be dealing with an overloaded HCM if we end up in this path. Backoff 5 seconds before
                    // retrying to avoid hammering the HCM.
                    std::thread::sleep(std::time::Duration::from_secs(5));
                }
            }
            compute.set_status(new_status);
        } else if state.status == ComputeStatus::Failed {
            info!("compute node is now in Failed state, exiting");
            break;
        } else {
            info!("woken up for compute status: {:?}, sleeping", state.status);
        }
    }
}
/// Spawn the configurator main loop on a dedicated OS thread.
///
/// Captures a handle to the current tokio runtime and enters it on the new
/// thread, so code reachable from the loop can use runtime-bound facilities
/// even though the loop itself blocks on a condvar.
pub fn launch_configurator(compute: &Arc<ComputeNode>) -> thread::JoinHandle<()> {
    let compute = Arc::clone(compute);
    // Must be called from within a tokio runtime; panics otherwise.
    let runtime = tokio::runtime::Handle::current();

    thread::Builder::new()
        .name("compute-configurator".into())
        .spawn(move || {
            let _rt_guard = runtime.enter();
            configurator_main_loop(&compute);
            info!("configurator thread is exited");
        })
        .expect("cannot launch configurator thread")
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/compute_tools/src/spec.rs | compute_tools/src/spec.rs | use std::fs::File;
use std::fs::{self, Permissions};
use std::os::unix::fs::PermissionsExt;
use std::path::Path;
use anyhow::{Result, anyhow, bail};
use compute_api::responses::{
ComputeConfig, ControlPlaneComputeStatus, ControlPlaneConfigResponse,
};
use reqwest::StatusCode;
use tokio_postgres::Client;
use tracing::{error, info, instrument};
use crate::compute::ComputeNodeParams;
use crate::config;
use crate::metrics::{CPLANE_REQUESTS_TOTAL, CPlaneRequestRPC, UNKNOWN_HTTP_STATUS};
use crate::migration::MigrationRunner;
use crate::params::PG_HBA_ALL_MD5;
// Do control plane request and return response if any. In case of error it
// returns a bool flag indicating whether it makes sense to retry the request
// and a string with error message.
fn do_control_plane_request(
    uri: &str,
    jwt: &str,
) -> Result<ControlPlaneConfigResponse, (bool, String, String)> {
    let resp = reqwest::blocking::Client::new()
        .get(uri)
        .header("Authorization", format!("Bearer {jwt}"))
        .send()
        .map_err(|e| {
            (
                true,
                format!("could not perform request to control plane: {e:?}"),
                UNKNOWN_HTTP_STATUS.to_string(),
            )
        })?;

    let status = resp.status();
    // Helper for the "worth retrying" error shape; every such error carries
    // the HTTP status we observed.
    let retryable = |msg: String| -> Result<ControlPlaneConfigResponse, (bool, String, String)> {
        Err((true, msg, status.to_string()))
    };

    match status {
        StatusCode::OK => resp.json::<ControlPlaneConfigResponse>().map_err(|e| {
            (
                true,
                format!("could not deserialize control plane response: {e:?}"),
                status.to_string(),
            )
        }),
        StatusCode::SERVICE_UNAVAILABLE => {
            retryable("control plane is temporarily unavailable".to_string())
        }
        StatusCode::BAD_GATEWAY => {
            // We have a problem with intermittent 502 errors now
            // https://github.com/neondatabase/cloud/issues/2353
            // It's fine to retry GET request in this case.
            retryable("control plane request failed with 502".to_string())
        }
        // Another code, likely 500 or 404, means that compute is unknown to the control plane
        // or some internal failure happened. Doesn't make much sense to retry in this case.
        _ => Err((
            false,
            format!("unexpected control plane response status code: {status}"),
            status.to_string(),
        )),
    }
}
/// Request config from the control-plane by compute_id. If
/// `NEON_CONTROL_PLANE_TOKEN` env variable is set, it will be used for
/// authorization.
pub fn get_config_from_control_plane(base_uri: &str, compute_id: &str) -> Result<ComputeConfig> {
    let cp_uri = format!("{base_uri}/compute/api/v2/computes/{compute_id}/spec");
    // If the env var is unset an empty token is sent (still as "Bearer ").
    let jwt: String = std::env::var("NEON_CONTROL_PLANE_TOKEN").unwrap_or_default();
    let mut attempt = 1;
    info!("getting config from control plane: {}", cp_uri);
    // Do 3 attempts to get spec from the control plane using the following logic:
    // - network error -> then retry
    // - compute id is unknown or any other error -> bail out
    // - no spec for compute yet (Empty state) -> return Ok(None)
    // - got config -> return Ok(Some(config))
    while attempt < 4 {
        let result = match do_control_plane_request(&cp_uri, &jwt) {
            Ok(config_resp) => {
                // Record the successful request in the cplane request metrics.
                CPLANE_REQUESTS_TOTAL
                    .with_label_values(&[
                        CPlaneRequestRPC::GetConfig.as_str(),
                        &StatusCode::OK.to_string(),
                    ])
                    .inc();
                match config_resp.status {
                    ControlPlaneComputeStatus::Empty => Ok(config_resp.into()),
                    ControlPlaneComputeStatus::Attached => {
                        // An attached compute must carry a spec; anything else
                        // is a control-plane contract violation -> give up.
                        if config_resp.spec.is_some() {
                            Ok(config_resp.into())
                        } else {
                            bail!("compute is attached, but spec is empty")
                        }
                    }
                }
            }
            Err((retry, msg, status)) => {
                CPLANE_REQUESTS_TOTAL
                    .with_label_values(&[CPlaneRequestRPC::GetConfig.as_str(), &status])
                    .inc();
                if retry {
                    // Recoverable: fall through to the retry handling below.
                    Err(anyhow!(msg))
                } else {
                    // Unrecoverable (e.g. unknown compute id): abort immediately.
                    bail!(msg);
                }
            }
        };
        if let Err(e) = &result {
            error!("attempt {} to get config failed with: {}", attempt, e);
        } else {
            return result;
        }
        attempt += 1;
        std::thread::sleep(std::time::Duration::from_millis(100));
    }
    // All attempts failed, return error.
    Err(anyhow::anyhow!(
        "Exhausted all attempts to retrieve the config from the control plane"
    ))
}
/// Check `pg_hba.conf` and update if needed to allow external connections.
pub fn update_pg_hba(pgdata_path: &Path, databricks_pg_hba: Option<&String>) -> Result<()> {
    // XXX: consider making it a part of config.json
    let pghba_path = pgdata_path.join("pg_hba.conf");

    // Databricks-specific rules must come before the default neon ones:
    // Postgres uses the first pg_hba record that matches for authentication.
    // See https://www.postgresql.org/docs/current/auth-pg-hba-conf.html
    if let Some(hba) = databricks_pg_hba {
        let include_line = format!("include_if_exists {hba}\n");
        let added = config::line_in_file(&pghba_path, &include_line)?;
        if added {
            info!("updated pg_hba.conf to include databricks_pg_hba.conf");
        } else {
            info!("pg_hba.conf already included databricks_pg_hba.conf");
        }
    }

    // Append the neon rule allowing md5-authenticated external connections.
    let added = config::line_in_file(&pghba_path, PG_HBA_ALL_MD5)?;
    if added {
        info!("updated pg_hba.conf to allow external connections");
    } else {
        info!("pg_hba.conf is up-to-date");
    }
    Ok(())
}
/// Check `pg_ident.conf` and update if needed to allow databricks config.
pub fn update_pg_ident(pgdata_path: &Path, databricks_pg_ident: Option<&String>) -> Result<()> {
    info!("checking pg_ident.conf");
    // Renamed from the copy-pasted `pghba_path`: this is the ident map file.
    let pg_ident_path = pgdata_path.join("pg_ident.conf");

    // Append the databricks-specific include directive if not present yet.
    if let Some(ident_file) = databricks_pg_ident {
        let directive = format!("include_if_exists {ident_file}\n");
        if config::line_in_file(&pg_ident_path, &directive)? {
            info!("updated pg_ident.conf to include databricks_pg_ident.conf");
        } else {
            info!("pg_ident.conf already included databricks_pg_ident.conf");
        }
    }
    Ok(())
}
/// Copy tls key_file and cert_file from k8s secret mount directory
/// to pgdata and set private key file permissions as expected by Postgres.
/// See this doc for expected permission <https://www.postgresql.org/docs/current/ssl-tcp.html>
/// K8s secrets mount on dblet does not honor permission and ownership
/// specified in the Volume or VolumeMount. So we need to explicitly copy the file and set the permissions.
pub fn copy_tls_certificates(
    key_file: &String,
    cert_file: &String,
    pgdata_path: &Path,
) -> Result<()> {
    for file in [cert_file, key_file] {
        let source = Path::new(file);
        let dest = pgdata_path.join(source.file_name().unwrap());

        // Copy only once; a pre-existing destination file is left untouched.
        if !dest.exists() {
            std::fs::copy(source, &dest)?;
            info!(
                "Copying tls file: {} to {}",
                &source.display(),
                &dest.display()
            );
        }

        // Postgres requires the private key to be readable only by the owner
        // (chmod 600); enforce that on every run.
        if file == key_file {
            fs::set_permissions(&dest, Permissions::from_mode(0o600))?;
            info!("Setting permission on {}.", &dest.display());
        }
    }
    Ok(())
}
/// Create a standby.signal file
pub fn add_standby_signal(pgdata_path: &Path) -> Result<()> {
    // XXX: consider making it a part of config.json
    let signal_path = pgdata_path.join("standby.signal");
    if signal_path.exists() {
        // Idempotent: keep whatever a previous run created.
        info!("reused pre-existing standby.signal");
    } else {
        File::create(signal_path)?;
        info!("created standby.signal");
    }
    Ok(())
}
/// Bring the installed `neon` extension up to its latest available version.
#[instrument(skip_all)]
pub async fn handle_neon_extension_upgrade(client: &mut Client) -> Result<()> {
    const QUERY: &str = "ALTER EXTENSION neon UPDATE";
    info!("update neon extension version with query: {}", QUERY);
    client.simple_query(QUERY).await?;
    Ok(())
}
/// Apply all built-in SQL migrations to the compute's postgres.
///
/// Each migration file is rendered with the configured privileged role name
/// and handed to `MigrationRunner`, which applies them in array order.
#[instrument(skip_all)]
pub async fn handle_migrations(
    params: ComputeNodeParams,
    client: &mut Client,
    lakebase_mode: bool,
) -> Result<()> {
    info!("handle migrations");
    // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    // !BE SURE TO ONLY ADD MIGRATIONS TO THE END OF THIS ARRAY. IF YOU DO NOT, VERY VERY BAD THINGS MAY HAPPEN!
    // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    // Add new migrations in numerical order.
    let migrations = [
        &format!(
            include_str!("./migrations/0001-add_bypass_rls_to_privileged_role.sql"),
            privileged_role_name = params.privileged_role_name
        ),
        &format!(
            include_str!("./migrations/0002-alter_roles.sql"),
            privileged_role_name = params.privileged_role_name
        ),
        &format!(
            include_str!("./migrations/0003-grant_pg_create_subscription_to_privileged_role.sql"),
            privileged_role_name = params.privileged_role_name
        ),
        &format!(
            include_str!("./migrations/0004-grant_pg_monitor_to_privileged_role.sql"),
            privileged_role_name = params.privileged_role_name
        ),
        &format!(
            include_str!("./migrations/0005-grant_all_on_tables_to_privileged_role.sql"),
            privileged_role_name = params.privileged_role_name
        ),
        &format!(
            include_str!("./migrations/0006-grant_all_on_sequences_to_privileged_role.sql"),
            privileged_role_name = params.privileged_role_name
        ),
        &format!(
            include_str!(
                "./migrations/0007-grant_all_on_tables_with_grant_option_to_privileged_role.sql"
            ),
            privileged_role_name = params.privileged_role_name
        ),
        &format!(
            include_str!(
                "./migrations/0008-grant_all_on_sequences_with_grant_option_to_privileged_role.sql"
            ),
            privileged_role_name = params.privileged_role_name
        ),
        // 0009 takes no role-name parameter, hence no format! wrapper.
        include_str!("./migrations/0009-revoke_replication_for_previously_allowed_roles.sql"),
        &format!(
            include_str!(
                "./migrations/0010-grant_snapshot_synchronization_funcs_to_privileged_role.sql"
            ),
            privileged_role_name = params.privileged_role_name
        ),
        &format!(
            include_str!(
                "./migrations/0011-grant_pg_show_replication_origin_status_to_privileged_role.sql"
            ),
            privileged_role_name = params.privileged_role_name
        ),
        &format!(
            include_str!("./migrations/0012-grant_pg_signal_backend_to_privileged_role.sql"),
            privileged_role_name = params.privileged_role_name
        ),
    ];
    MigrationRunner::new(client, &migrations, lakebase_mode)
        .run_migrations()
        .await?;
    Ok(())
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
neondatabase/neon | https://github.com/neondatabase/neon/blob/015b1c7cb3259a6fcd5039bc2bd46a462e163ae8/compute_tools/src/bin/fast_import.rs | compute_tools/src/bin/fast_import.rs | //! This program dumps a remote Postgres database into a local Postgres database
//! and uploads the resulting PGDATA into object storage for import into a Timeline.
//!
//! # Context, Architecture, Design
//!
//! See cloud.git Fast Imports RFC (<https://github.com/neondatabase/cloud/pull/19799>)
//! for the full picture.
//! The RFC describing the storage pieces of importing the PGDATA dump into a Timeline
//! is publicly accessible at <https://github.com/neondatabase/neon/pull/9538>.
//!
//! # This is a Prototype!
//!
//! This program is part of a prototype feature and not yet used in production.
//!
//! The cloud.git RFC contains lots of suggestions for improving e2e throughput
//! of this step of the timeline import process.
//!
//! # Local Testing
//!
//! - Comment out most of the pgxns in compute-node.Dockerfile to speed up the build.
//! - Build the image with the following command:
//!
//! ```bash
//! docker buildx build --platform linux/amd64 --build-arg DEBIAN_VERSION=bullseye --build-arg GIT_VERSION=local --build-arg PG_VERSION=v14 --build-arg BUILD_TAG="$(date --iso-8601=s -u)" -t localhost:3030/localregistry/compute-node-v14:latest -f compute/compute-node.Dockerfile .
//! docker push localhost:3030/localregistry/compute-node-v14:latest
//! ```
use anyhow::{Context, bail};
use aws_config::BehaviorVersion;
use camino::{Utf8Path, Utf8PathBuf};
use clap::{Parser, Subcommand};
use compute_tools::extension_server::get_pg_version;
use nix::unistd::Pid;
use std::ops::Not;
use tracing::{Instrument, error, info, info_span, warn};
use utils::fs_ext::is_directory_empty;
#[path = "fast_import/aws_s3_sync.rs"]
mod aws_s3_sync;
#[path = "fast_import/child_stdio_to_log.rs"]
mod child_stdio_to_log;
#[path = "fast_import/s3_uri.rs"]
mod s3_uri;
const PG_WAIT_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(600);
const PG_WAIT_RETRY_INTERVAL: std::time::Duration = std::time::Duration::from_millis(300);
// CLI subcommands for the fast_import binary. NOTE: the existing `///` doc
// comments on variants and fields double as clap help text, so additional
// documentation here is kept as plain `//` comments to avoid changing --help.
#[derive(Subcommand, Debug, Clone, serde::Serialize)]
enum Command {
    /// Runs local postgres (neon binary), restores into it,
    /// uploads pgdata to s3 to be consumed by pageservers
    Pgdata {
        /// Raw connection string to the source database. Used only in tests,
        /// real scenario uses encrypted connection string in spec.json from s3.
        #[clap(long)]
        source_connection_string: Option<String>,
        /// If specified, will not shut down the local postgres after the import. Used in local testing
        #[clap(short, long)]
        interactive: bool,
        /// Port to run postgres on. Default is 5432.
        #[clap(long, default_value_t = 5432)]
        pg_port: u16, // port to run postgres on, 5432 is default

        /// Number of CPUs in the system. This is used to configure # of
        /// parallel worker processes, for index creation.
        #[clap(long, env = "NEON_IMPORTER_NUM_CPUS")]
        num_cpus: Option<usize>,

        /// Amount of RAM in the system. This is used to configure shared_buffers
        /// and maintenance_work_mem.
        #[clap(long, env = "NEON_IMPORTER_MEMORY_MB")]
        memory_mb: Option<usize>,
    },
    /// Runs pg_dump-pg_restore from source to destination without running local postgres.
    DumpRestore {
        /// Raw connection string to the source database. Used only in tests,
        /// real scenario uses encrypted connection string in spec.json from s3.
        #[clap(long)]
        source_connection_string: Option<String>,
        /// Raw connection string to the destination database. Used only in tests,
        /// real scenario uses encrypted connection string in spec.json from s3.
        #[clap(long)]
        destination_connection_string: Option<String>,
    },
}
impl Command {
    /// Stable short name of the subcommand, used in the status JSON upload.
    fn as_str(&self) -> &'static str {
        if matches!(self, Command::Pgdata { .. }) {
            "pgdata"
        } else {
            "dump-restore"
        }
    }
}
// Top-level CLI arguments. Env-var fallbacks allow configuring the importer
// without a long command line inside the compute image. NOTE: `///` doc
// comments on clap fields would become --help text, hence plain comments.
#[derive(clap::Parser)]
struct Args {
    // Scratch directory; must be absent or empty at startup (checked in main).
    #[clap(long, env = "NEON_IMPORTER_WORKDIR")]
    working_directory: Utf8PathBuf,
    // If set, spec.json is fetched from this S3 prefix and pgdata/status
    // artifacts are uploaded under it; if unset, no AWS clients are created.
    #[clap(long, env = "NEON_IMPORTER_S3_PREFIX")]
    s3_prefix: Option<s3_uri::S3Uri>,
    // Directory containing the postgres binaries (postgres, initdb, pg_dump, ...).
    #[clap(long, env = "NEON_IMPORTER_PG_BIN_DIR")]
    pg_bin_dir: Utf8PathBuf,
    // Directory used as LD_LIBRARY_PATH for all spawned postgres tooling.
    #[clap(long, env = "NEON_IMPORTER_PG_LIB_DIR")]
    pg_lib_dir: Utf8PathBuf,
    #[clap(subcommand)]
    command: Command,
}
/// Import job specification, deserialized from `{s3_prefix}/spec.json`.
///
/// Connection strings are stored KMS-encrypted, base64-encoded in the JSON.
#[serde_with::serde_as]
#[derive(serde::Deserialize)]
struct Spec {
    /// How to decrypt the ciphertext fields below.
    encryption_secret: EncryptionSecret,
    /// Ciphertext of the source database connection string.
    #[serde_as(as = "serde_with::base64::Base64")]
    source_connstring_ciphertext_base64: Vec<u8>,
    /// Ciphertext of the destination connection string; required only for the
    /// dump-restore command.
    #[serde_as(as = "Option<serde_with::base64::Base64>")]
    destination_connstring_ciphertext_base64: Option<Vec<u8>>,
}
/// Secret used to decrypt the connection strings carried by `Spec`.
#[derive(serde::Deserialize)]
enum EncryptionSecret {
    /// AWS KMS key id used for the `kms:Decrypt` call.
    #[allow(clippy::upper_case_acronyms)]
    KMS { key_id: String },
}
// copied from pageserver_api::config::defaults::DEFAULT_LOCALE to avoid dependency just for a constant
// NOTE(review): presumably plain "C" is used on macOS because its libc lacks
// the C.UTF-8 locale — confirm before changing.
const DEFAULT_LOCALE: &str = if cfg!(target_os = "macos") {
    "C"
} else {
    "C.UTF-8"
};
/// Decrypt a KMS-encrypted connection string and return it as UTF-8 text.
async fn decode_connstring(
    kms_client: &aws_sdk_kms::Client,
    key_id: &String,
    connstring_ciphertext_base64: Vec<u8>,
) -> Result<String, anyhow::Error> {
    // The ciphertext bytes were already base64-decoded during deserialization.
    let ciphertext = aws_sdk_s3::primitives::Blob::new(connstring_ciphertext_base64);

    let mut response = kms_client
        .decrypt()
        .key_id(key_id)
        .ciphertext_blob(ciphertext)
        .send()
        .await
        .context("decrypt connection string")?;

    let plaintext_blob = response
        .plaintext
        .take()
        .context("get plaintext connection string")?;

    String::from_utf8(plaintext_blob.into_inner()).context("parse connection string as utf8")
}
/// Handle for the locally spawned postgres instance used as restore target.
struct PostgresProcess {
    // Data directory, created by `prepare` via initdb.
    pgdata_dir: Utf8PathBuf,
    // Directory containing the postgres binaries (initdb, postgres, ...).
    pg_bin_dir: Utf8PathBuf,
    // Convenience path to the `postgres` binary itself.
    pgbin: Utf8PathBuf,
    // Used as LD_LIBRARY_PATH for the spawned server.
    pg_lib_dir: Utf8PathBuf,
    // Set once `start` succeeds; `shutdown` assumes it is present.
    postgres_proc: Option<tokio::process::Child>,
}
impl PostgresProcess {
    /// Build a handle; nothing is created or spawned until `start` is called.
    fn new(pgdata_dir: Utf8PathBuf, pg_bin_dir: Utf8PathBuf, pg_lib_dir: Utf8PathBuf) -> Self {
        Self {
            pgdata_dir,
            pgbin: pg_bin_dir.join("postgres"),
            pg_bin_dir,
            pg_lib_dir,
            postgres_proc: None,
        }
    }

    /// Create the pgdata directory and run initdb in it. Fails if the
    /// directory already exists.
    async fn prepare(&self, initdb_user: &str) -> Result<(), anyhow::Error> {
        tokio::fs::create_dir(&self.pgdata_dir)
            .await
            .context("create pgdata directory")?;
        let pg_version = get_pg_version(self.pgbin.as_ref());
        postgres_initdb::do_run_initdb(postgres_initdb::RunInitdbArgs {
            superuser: initdb_user,
            locale: DEFAULT_LOCALE, // XXX: this shouldn't be hard-coded,
            pg_version,
            initdb_bin: self.pg_bin_dir.join("initdb").as_ref(),
            library_search_path: &self.pg_lib_dir, // TODO: is this right? Prob works in compute image, not sure about neon_local.
            pgdata: &self.pgdata_dir,
        })
        .await
        .context("initdb")
    }

    /// Run initdb and launch the postgres server tuned for a one-shot bulk
    /// import: durability off (fsync/full_page_writes/synchronous_commit),
    /// parallelism sized by `nproc`, and memory split between shared buffers
    /// and maintenance_work_mem. Returns a reference to the child process;
    /// readiness is NOT awaited here (see `wait_until_ready`).
    async fn start(
        &mut self,
        initdb_user: &str,
        port: u16,
        nproc: usize,
        memory_mb: usize,
    ) -> Result<&tokio::process::Child, anyhow::Error> {
        self.prepare(initdb_user).await?;

        // Somewhat arbitrarily, use 10 % of memory for shared buffer cache, 70% for
        // maintenance_work_mem (i.e. for sorting during index creation), and leave the rest
        // available for misc other stuff that PostgreSQL uses memory for.
        let shared_buffers_mb = ((memory_mb as f32) * 0.10) as usize;
        let maintenance_work_mem_mb = ((memory_mb as f32) * 0.70) as usize;
        //
        // Launch postgres process
        //
        let mut proc = tokio::process::Command::new(&self.pgbin)
            .arg("-D")
            .arg(&self.pgdata_dir)
            .args(["-p", &format!("{port}")])
            .args(["-c", "wal_level=minimal"])
            .args(["-c", &format!("shared_buffers={shared_buffers_mb}MB")])
            .args(["-c", "max_wal_senders=0"])
            .args(["-c", "fsync=off"])
            .args(["-c", "full_page_writes=off"])
            .args(["-c", "synchronous_commit=off"])
            .args([
                "-c",
                &format!("maintenance_work_mem={maintenance_work_mem_mb}MB"),
            ])
            .args(["-c", &format!("max_parallel_maintenance_workers={nproc}")])
            .args(["-c", &format!("max_parallel_workers={nproc}")])
            .args(["-c", &format!("max_parallel_workers_per_gather={nproc}")])
            .args(["-c", &format!("max_worker_processes={nproc}")])
            .args(["-c", "effective_io_concurrency=100"])
            // Start from a clean environment; only the library path and
            // sanitizer options are forwarded.
            .env_clear()
            .env("LD_LIBRARY_PATH", &self.pg_lib_dir)
            .env(
                "ASAN_OPTIONS",
                std::env::var("ASAN_OPTIONS").unwrap_or_default(),
            )
            .env(
                "UBSAN_OPTIONS",
                std::env::var("UBSAN_OPTIONS").unwrap_or_default(),
            )
            .stdout(std::process::Stdio::piped())
            .stderr(std::process::Stdio::piped())
            .spawn()
            .context("spawn postgres")?;

        info!("spawned postgres, waiting for it to become ready");
        // Forward the server's stdout/stderr into our structured log.
        tokio::spawn(
            child_stdio_to_log::relay_process_output(proc.stdout.take(), proc.stderr.take())
                .instrument(info_span!("postgres")),
        );
        self.postgres_proc = Some(proc);
        Ok(self.postgres_proc.as_ref().unwrap())
    }

    /// Request a fast-but-clean shutdown via SIGTERM and wait for exit.
    /// Panics if called before a successful `start`.
    async fn shutdown(&mut self) -> Result<(), anyhow::Error> {
        let proc: &mut tokio::process::Child = self.postgres_proc.as_mut().unwrap();
        info!("shutdown postgres");
        nix::sys::signal::kill(
            Pid::from_raw(i32::try_from(proc.id().unwrap()).expect("convert child pid to i32")),
            nix::sys::signal::SIGTERM,
        )
        .context("signal postgres to shut down")?;
        proc.wait()
            .await
            .context("wait for postgres to shut down")
            .map(|_| ())
    }
}
/// Poll the freshly started local postgres until it accepts connections, then
/// create the `create_dbname` database in it.
///
/// `connstring` is the connection string the restore step will use later (it
/// references `create_dbname`); for polling we rewrite it to target the
/// always-present `postgres` maintenance database, because the target database
/// does not exist yet. If postgres does not become ready and the database
/// cannot be created within `PG_WAIT_TIMEOUT`, the whole process exits with
/// status 1.
async fn wait_until_ready(connstring: String, create_dbname: String) {
    let start_time = std::time::Instant::now();

    loop {
        if start_time.elapsed() > PG_WAIT_TIMEOUT {
            error!(
                "timeout exceeded: failed to poll postgres and create database within 10 minutes"
            );
            std::process::exit(1);
        }

        // BUGFIX: this previously replaced the hard-coded "dbname=neondb",
        // which silently broke for any other `create_dbname` value.
        match tokio_postgres::connect(
            &connstring.replace(&format!("dbname={create_dbname}"), "dbname=postgres"),
            tokio_postgres::NoTls,
        )
        .await
        {
            Ok((client, connection)) => {
                // Spawn the connection handling task to maintain the connection
                tokio::spawn(async move {
                    if let Err(e) = connection.await {
                        warn!("connection error: {}", e);
                    }
                });

                match client
                    .simple_query(format!("CREATE DATABASE {create_dbname};").as_str())
                    .await
                {
                    Ok(_) => {
                        info!("created {} database", create_dbname);
                        break;
                    }
                    Err(e) => {
                        // CREATE DATABASE can fail transiently while the server
                        // is still starting up, so retry until the timeout.
                        warn!(
                            "failed to create database: {}, retrying in {}s",
                            e,
                            PG_WAIT_RETRY_INTERVAL.as_secs_f32()
                        );
                        tokio::time::sleep(PG_WAIT_RETRY_INTERVAL).await;
                        continue;
                    }
                }
            }
            Err(_) => {
                info!(
                    "postgres not ready yet, retrying in {}s",
                    PG_WAIT_RETRY_INTERVAL.as_secs_f32()
                );
                tokio::time::sleep(PG_WAIT_RETRY_INTERVAL).await;
                continue;
            }
        }
    }
}
/// Dump the source database into `{workdir}/dumpdir` with `pg_dump` and
/// restore it into the destination with `pg_restore`, both in directory
/// format with one job per CPU. Bails out if either tool exits non-zero.
async fn run_dump_restore(
    workdir: Utf8PathBuf,
    pg_bin_dir: Utf8PathBuf,
    pg_lib_dir: Utf8PathBuf,
    source_connstring: String,
    destination_connstring: String,
) -> Result<(), anyhow::Error> {
    let dumpdir = workdir.join("dumpdir");
    let num_jobs = num_cpus::get().to_string();
    info!("using {num_jobs} jobs for dump/restore");

    // Arguments shared by pg_dump and pg_restore.
    let common_args = [
        // schema mapping (prob suffices to specify them on one side)
        "--no-owner".to_string(),
        "--no-privileges".to_string(),
        "--no-publications".to_string(),
        "--no-security-labels".to_string(),
        "--no-subscriptions".to_string(),
        "--no-tablespaces".to_string(),
        "--no-event-triggers".to_string(),
        // format
        "--format".to_string(),
        "directory".to_string(),
        // concurrency
        "--jobs".to_string(),
        num_jobs,
        // progress updates
        "--verbose".to_string(),
    ];

    info!("dump into the working directory");
    {
        let mut pg_dump = tokio::process::Command::new(pg_bin_dir.join("pg_dump"))
            .args(&common_args)
            .arg("-f")
            .arg(&dumpdir)
            // Dump files need not be durably synced; a crash restarts the job.
            .arg("--no-sync")
            // POSITIONAL args
            // source db (db name included in connection string)
            .arg(&source_connstring)
            // how we run it
            .env_clear()
            .env("LD_LIBRARY_PATH", &pg_lib_dir)
            .env(
                "ASAN_OPTIONS",
                std::env::var("ASAN_OPTIONS").unwrap_or_default(),
            )
            .env(
                "UBSAN_OPTIONS",
                std::env::var("UBSAN_OPTIONS").unwrap_or_default(),
            )
            .kill_on_drop(true)
            .stdout(std::process::Stdio::piped())
            .stderr(std::process::Stdio::piped())
            .spawn()
            .context("spawn pg_dump")?;

        info!(pid=%pg_dump.id().unwrap(), "spawned pg_dump");
        // Forward the tool's output into our structured log.
        tokio::spawn(
            child_stdio_to_log::relay_process_output(pg_dump.stdout.take(), pg_dump.stderr.take())
                .instrument(info_span!("pg_dump")),
        );

        let st = pg_dump.wait().await.context("wait for pg_dump")?;
        info!(status=?st, "pg_dump exited");
        if !st.success() {
            error!(status=%st, "pg_dump failed, restore will likely fail as well");
            bail!("pg_dump failed");
        }
    }

    // TODO: maybe do it in a streaming way, plenty of internal research done on this already
    // TODO: do the unlogged table trick
    {
        let mut pg_restore = tokio::process::Command::new(pg_bin_dir.join("pg_restore"))
            .args(&common_args)
            .arg("-d")
            .arg(&destination_connstring)
            // POSITIONAL args
            .arg(&dumpdir)
            // how we run it
            .env_clear()
            .env("LD_LIBRARY_PATH", &pg_lib_dir)
            .env(
                "ASAN_OPTIONS",
                std::env::var("ASAN_OPTIONS").unwrap_or_default(),
            )
            .env(
                "UBSAN_OPTIONS",
                std::env::var("UBSAN_OPTIONS").unwrap_or_default(),
            )
            .kill_on_drop(true)
            .stdout(std::process::Stdio::piped())
            .stderr(std::process::Stdio::piped())
            .spawn()
            .context("spawn pg_restore")?;

        info!(pid=%pg_restore.id().unwrap(), "spawned pg_restore");
        tokio::spawn(
            child_stdio_to_log::relay_process_output(
                pg_restore.stdout.take(),
                pg_restore.stderr.take(),
            )
            .instrument(info_span!("pg_restore")),
        );
        let st = pg_restore.wait().await.context("wait for pg_restore")?;
        info!(status=?st, "pg_restore exited");
        if !st.success() {
            error!(status=%st, "pg_restore failed, restore will likely fail as well");
            bail!("pg_restore failed");
        }
    }
    Ok(())
}
/// Implementation of the `pgdata` subcommand: resolve the source connection
/// string (from the encrypted spec or from a raw CLI arg), run a local
/// postgres, dump/restore into it, then optionally upload the resulting
/// PGDATA and a status marker to S3.
#[allow(clippy::too_many_arguments)]
async fn cmd_pgdata(
    s3_client: Option<&aws_sdk_s3::Client>,
    kms_client: Option<aws_sdk_kms::Client>,
    maybe_s3_prefix: Option<s3_uri::S3Uri>,
    maybe_spec: Option<Spec>,
    source_connection_string: Option<String>,
    interactive: bool,
    pg_port: u16,
    workdir: Utf8PathBuf,
    pg_bin_dir: Utf8PathBuf,
    pg_lib_dir: Utf8PathBuf,
    num_cpus: Option<usize>,
    memory_mb: Option<usize>,
) -> Result<(), anyhow::Error> {
    // Exactly one source of the connection string must be provided.
    if maybe_spec.is_none() && source_connection_string.is_none() {
        bail!("spec must be provided for pgdata command");
    }
    if maybe_spec.is_some() && source_connection_string.is_some() {
        bail!("only one of spec or source_connection_string can be provided");
    }

    let source_connection_string = if let Some(spec) = maybe_spec {
        match spec.encryption_secret {
            EncryptionSecret::KMS { key_id } => {
                decode_connstring(
                    kms_client.as_ref().unwrap(),
                    &key_id,
                    spec.source_connstring_ciphertext_base64,
                )
                .await?
            }
        }
    } else {
        source_connection_string.unwrap()
    };

    let superuser = "cloud_admin";
    let destination_connstring =
        format!("host=localhost port={pg_port} user={superuser} dbname=neondb");

    let pgdata_dir = workdir.join("pgdata");
    let mut proc = PostgresProcess::new(pgdata_dir.clone(), pg_bin_dir.clone(), pg_lib_dir.clone());
    // Fall back to host CPU count / a small default memory budget when unset.
    let nproc = num_cpus.unwrap_or_else(num_cpus::get);
    let memory_mb = memory_mb.unwrap_or(256);
    proc.start(superuser, pg_port, nproc, memory_mb).await?;
    wait_until_ready(destination_connstring.clone(), "neondb".to_string()).await;

    run_dump_restore(
        workdir.clone(),
        pg_bin_dir,
        pg_lib_dir,
        source_connection_string,
        destination_connstring,
    )
    .await?;

    // If interactive mode, wait for Ctrl+C
    if interactive {
        info!("Running in interactive mode. Press Ctrl+C to shut down.");
        tokio::signal::ctrl_c().await.context("wait for ctrl-c")?;
    }

    proc.shutdown().await?;

    // Only sync if s3_prefix was specified
    if let Some(s3_prefix) = maybe_s3_prefix {
        info!("upload pgdata");
        aws_s3_sync::upload_dir_recursive(
            s3_client.unwrap(),
            Utf8Path::new(&pgdata_dir),
            &s3_prefix.append("/pgdata/"),
        )
        .await
        .context("sync dump directory to destination")?;

        info!("write pgdata status to s3");
        {
            // Marker consumed by downstream import machinery to detect that the
            // pgdata upload finished.
            let status_dir = workdir.join("status");
            std::fs::create_dir(&status_dir).context("create status directory")?;
            let status_file = status_dir.join("pgdata");
            std::fs::write(&status_file, serde_json::json!({"done": true}).to_string())
                .context("write status file")?;
            aws_s3_sync::upload_dir_recursive(
                s3_client.as_ref().unwrap(),
                &status_dir,
                &s3_prefix.append("/status/"),
            )
            .await
            .context("sync status directory to destination")?;
        }
    }
    Ok(())
}
async fn cmd_dumprestore(
kms_client: Option<aws_sdk_kms::Client>,
maybe_spec: Option<Spec>,
source_connection_string: Option<String>,
destination_connection_string: Option<String>,
workdir: Utf8PathBuf,
pg_bin_dir: Utf8PathBuf,
pg_lib_dir: Utf8PathBuf,
) -> Result<(), anyhow::Error> {
let (source_connstring, destination_connstring) = if let Some(spec) = maybe_spec {
match spec.encryption_secret {
EncryptionSecret::KMS { key_id } => {
let source = decode_connstring(
kms_client.as_ref().unwrap(),
&key_id,
spec.source_connstring_ciphertext_base64,
)
.await
.context("decrypt source connection string")?;
let dest = if let Some(dest_ciphertext) =
spec.destination_connstring_ciphertext_base64
{
decode_connstring(kms_client.as_ref().unwrap(), &key_id, dest_ciphertext)
.await
.context("decrypt destination connection string")?
} else {
bail!(
"destination connection string must be provided in spec for dump_restore command"
);
};
(source, dest)
}
}
} else {
(
source_connection_string.unwrap(),
if let Some(val) = destination_connection_string {
val
} else {
bail!("destination connection string must be provided for dump_restore command");
},
)
};
run_dump_restore(
workdir,
pg_bin_dir,
pg_lib_dir,
source_connstring,
destination_connstring,
)
.await
}
/// Entry point: set up logging and (optionally) AWS clients, fetch the spec
/// from S3 when an s3_prefix is configured, prepare the working directory,
/// dispatch to the subcommand, and finally upload a job-status JSON to S3.
#[tokio::main]
pub(crate) async fn main() -> anyhow::Result<()> {
    utils::logging::init(
        utils::logging::LogFormat::Json,
        utils::logging::TracingErrorLayerEnablement::EnableWithRustLogFilter,
        utils::logging::Output::Stdout,
    )?;

    info!("starting");

    let args = Args::parse();

    // Initialize AWS clients only if s3_prefix is specified
    let (s3_client, kms_client) = if args.s3_prefix.is_some() {
        // Create AWS config with enhanced retry settings
        let config = aws_config::defaults(BehaviorVersion::v2024_03_28())
            .retry_config(
                aws_config::retry::RetryConfig::standard()
                    .with_max_attempts(5) // Retry up to 5 times
                    .with_initial_backoff(std::time::Duration::from_millis(200)) // Start with 200ms delay
                    .with_max_backoff(std::time::Duration::from_secs(5)), // Cap at 5 seconds
            )
            .load()
            .await;
        // Create clients from the config with enhanced retry settings
        let s3_client = aws_sdk_s3::Client::new(&config);
        let kms = aws_sdk_kms::Client::new(&config);
        (Some(s3_client), Some(kms))
    } else {
        (None, None)
    };

    // Capture everything from spec assignment onwards to handle errors
    let res = async {
        let spec: Option<Spec> = if let Some(s3_prefix) = &args.s3_prefix {
            let spec_key = s3_prefix.append("/spec.json");
            let object = s3_client
                .as_ref()
                .unwrap()
                .get_object()
                .bucket(&spec_key.bucket)
                .key(spec_key.key)
                .send()
                .await
                .context("get spec from s3")?
                .body
                .collect()
                .await
                .context("download spec body")?;
            serde_json::from_slice(&object.into_bytes()).context("parse spec as json")?
        } else {
            None
        };

        // The working directory must not carry state from a previous run.
        match tokio::fs::create_dir(&args.working_directory).await {
            Ok(()) => {}
            Err(e) if e.kind() == std::io::ErrorKind::AlreadyExists => {
                if !is_directory_empty(&args.working_directory)
                    .await
                    .context("check if working directory is empty")?
                {
                    bail!("working directory is not empty");
                } else {
                    // ok
                }
            }
            Err(e) => return Err(anyhow::Error::new(e).context("create working directory")),
        }

        match args.command.clone() {
            Command::Pgdata {
                source_connection_string,
                interactive,
                pg_port,
                num_cpus,
                memory_mb,
            } => {
                cmd_pgdata(
                    s3_client.as_ref(),
                    kms_client,
                    args.s3_prefix.clone(),
                    spec,
                    source_connection_string,
                    interactive,
                    pg_port,
                    args.working_directory.clone(),
                    args.pg_bin_dir,
                    args.pg_lib_dir,
                    num_cpus,
                    memory_mb,
                )
                .await
            }
            Command::DumpRestore {
                source_connection_string,
                destination_connection_string,
            } => {
                cmd_dumprestore(
                    kms_client,
                    spec,
                    source_connection_string,
                    destination_connection_string,
                    args.working_directory.clone(),
                    args.pg_bin_dir,
                    args.pg_lib_dir,
                )
                .await
            }
        }
    }
    .await;

    // Regardless of success or failure, publish the job outcome to S3 so the
    // caller can observe it without scraping logs.
    if let Some(s3_prefix) = args.s3_prefix {
        info!("write job status to s3");
        {
            let status_dir = args.working_directory.join("status");
            if std::fs::exists(&status_dir)?.not() {
                std::fs::create_dir(&status_dir).context("create status directory")?;
            }
            let status_file = status_dir.join("fast_import");
            let res_obj = match res {
                Ok(_) => serde_json::json!({"command": args.command.as_str(), "done": true}),
                Err(err) => {
                    serde_json::json!({"command": args.command.as_str(), "done": false, "error": err.to_string()})
                }
            };
            std::fs::write(&status_file, res_obj.to_string()).context("write status file")?;
            aws_s3_sync::upload_dir_recursive(
                s3_client.as_ref().unwrap(),
                &status_dir,
                &s3_prefix.append("/status/"),
            )
            .await
            .context("sync status directory to destination")?;
        }
    }

    Ok(())
}
| rust | Apache-2.0 | 015b1c7cb3259a6fcd5039bc2bd46a462e163ae8 | 2026-01-04T15:40:24.223849Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.