repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
azriel91/peace | https://github.com/azriel91/peace/blob/5e2c43f2c0b18672749d0902d2285c703e24de97/items/tar_x/src/tar_x_apply_fns.rs | items/tar_x/src/tar_x_apply_fns.rs | use std::marker::PhantomData;
use peace::cfg::{ApplyCheck, FnCtx};
#[cfg(feature = "output_progress")]
use peace::progress_model::ProgressLimit;
use crate::{FileMetadatas, TarXData, TarXError, TarXParams, TarXStateDiff};
/// ApplyFns for the tar to extract.
///
/// `Debug` is derived for consistency with the other item fn structs
/// (e.g. `TarXStateGoalFn`, `BlankApplyFns`).
#[derive(Debug)]
pub struct TarXApplyFns<Id>(PhantomData<Id>);
impl<Id> TarXApplyFns<Id>
where
Id: Send + Sync + 'static,
{
// Not sure why we can't use this:
//
// #[cfg(not(feature = "output_progress"))] _state_goal: &FileMetadatas,
// #[cfg(feature = "output_progress")] state_goal: &FileMetadatas,
//
// There's an error saying lifetime bounds don't match the trait definition.
//
// Likely an issue with the codegen in `async-trait`.
/// Returns whether the tar needs to be extracted, based on the state diff.
///
/// Returns `ExecNotRequired` when extraction is in sync. Otherwise, with
/// the `output_progress` feature enabled, the progress limit is the number
/// of entries in the goal state.
#[allow(unused_variables)]
pub async fn apply_check(
_params: &TarXParams<Id>,
_data: TarXData<'_, Id>,
_state_current: &FileMetadatas,
state_goal: &FileMetadatas,
diff: &TarXStateDiff,
) -> Result<ApplyCheck, TarXError> {
let apply_check = match diff {
TarXStateDiff::ExtractionInSync => ApplyCheck::ExecNotRequired,
TarXStateDiff::ExtractionOutOfSync {
added: _,
modified: _,
removed: _,
} => {
#[cfg(not(feature = "output_progress"))]
{
ApplyCheck::ExecRequired
}
#[cfg(feature = "output_progress")]
{
// Fall back to `Unknown` rather than erroring if the entry
// count cannot be converted to a step count.
let progress_limit = state_goal
.len()
.try_into()
.map(ProgressLimit::Steps)
.unwrap_or(ProgressLimit::Unknown);
ApplyCheck::ExecRequired { progress_limit }
}
}
};
Ok(apply_check)
}
/// Dry-run apply: returns the goal state without touching the file system.
pub async fn apply_dry(
_fn_ctx: FnCtx<'_>,
_params: &TarXParams<Id>,
_data: TarXData<'_, Id>,
_state_current: &FileMetadatas,
state_goal: &FileMetadatas,
_diff: &TarXStateDiff,
) -> Result<FileMetadatas, TarXError> {
Ok(state_goal.clone())
}
/// Extracts the tar into `dest`, then deletes files listed in the diff's
/// `removed` set -- files present in `dest` but no longer in the tar.
#[cfg(not(target_arch = "wasm32"))]
pub async fn apply(
_fn_ctx: FnCtx<'_>,
params: &TarXParams<Id>,
data: TarXData<'_, Id>,
_state_current: &FileMetadatas,
state_goal: &FileMetadatas,
diff: &TarXStateDiff,
) -> Result<FileMetadatas, TarXError> {
use futures::stream::{StreamExt, TryStreamExt};
let storage = data.storage();
let tar_path = params.tar_path();
let dest = params.dest();
// Ensure the destination directory exists before unpacking into it.
tokio::fs::create_dir_all(dest).await.map_err(
#[cfg_attr(coverage_nightly, coverage(off))]
|error| TarXError::TarDestDirCreate {
dest: dest.to_path_buf(),
error,
},
)?;
// TODO: Optimize by unpacking only the entries that changed.
// Probably store entries in `IndexMap`s, then look them up to determine if they
// need to be unpacked.
//
// Then we can send proper progress updates via `fn_ctx.progress_tx`.
if tar_path.exists() {
storage
.read_with_sync_api(
"TarXApplyFns::exec".to_string(),
tar_path,
|sync_io_bridge| {
tar::Archive::new(sync_io_bridge).unpack(dest).map_err(
#[cfg_attr(coverage_nightly, coverage(off))]
|error| TarXError::TarUnpack {
tar_path: tar_path.to_path_buf(),
dest: dest.to_path_buf(),
error,
},
)?;
Result::<_, TarXError>::Ok(())
},
)
.await?;
}
if let TarXStateDiff::ExtractionOutOfSync {
added: _,
modified: _,
removed,
} = diff
{
// Remove files that are not in the tar, but are in the destination directory.
// Removals are independent of each other, so they run concurrently.
futures::stream::iter(removed.iter())
.map(|file_metadata| Result::<_, TarXError>::Ok(file_metadata.path()))
.try_for_each_concurrent(None, |entry_path| async move {
tokio::fs::remove_file(&dest.join(entry_path))
.await
.map_err(
#[cfg_attr(coverage_nightly, coverage(off))]
|error| TarXError::TarDestFileRemove {
dest: dest.to_path_buf(),
entry_path: entry_path.to_path_buf(),
error,
},
)
})
.await?;
}
Ok(state_goal.clone())
}
/// WASM apply is not yet implemented.
#[cfg(target_arch = "wasm32")]
pub async fn apply(
_fn_ctx: FnCtx<'_>,
_params: &TarXParams<Id>,
_data: TarXData<'_, Id>,
_state_current: &FileMetadatas,
_state_goal: &FileMetadatas,
_diff: &TarXStateDiff,
) -> Result<FileMetadatas, TarXError> {
todo!()
}
}
| rust | Apache-2.0 | 5e2c43f2c0b18672749d0902d2285c703e24de97 | 2026-01-04T20:22:52.922300Z | false |
azriel91/peace | https://github.com/azriel91/peace/blob/5e2c43f2c0b18672749d0902d2285c703e24de97/items/tar_x/src/tar_x_state_goal_fn.rs | items/tar_x/src/tar_x_state_goal_fn.rs | use std::{io::Read, marker::PhantomData, path::Path};
use peace::{cfg::FnCtx, params::Params, rt_model::Storage};
use tar::Archive;
use crate::{FileMetadata, FileMetadatas, TarXData, TarXError, TarXParams};
/// Reads the goal state of the tar to extract.
#[derive(Debug)]
pub struct TarXStateGoalFn<Id>(PhantomData<Id>);

impl<Id> TarXStateGoalFn<Id>
where
    Id: Send + Sync,
{
    /// Returns the goal state -- the file entries within the tar -- when the
    /// `tar_path` parameter is available and the tar file exists.
    ///
    /// Returns `Ok(None)` when either the parameter or the file is absent.
    pub async fn try_state_goal(
        _fn_ctx: FnCtx<'_>,
        params_partial: &<TarXParams<Id> as Params>::Partial,
        data: TarXData<'_, Id>,
    ) -> Result<Option<FileMetadatas>, TarXError> {
        let storage = data.storage();
        if let Some(tar_path) = params_partial.tar_path() {
            #[cfg(not(target_arch = "wasm32"))]
            let tar_file_exists = tar_path.exists();
            #[cfg(target_arch = "wasm32")]
            let tar_file_exists = storage.contains_item(tar_path)?;

            if tar_file_exists {
                #[cfg(not(target_arch = "wasm32"))]
                let files_in_tar = Self::files_in_tar(storage, tar_path).await?;
                #[cfg(target_arch = "wasm32")]
                let files_in_tar = Self::files_in_tar(storage, tar_path)?;

                Ok(Some(FileMetadatas::from(files_in_tar)))
            } else {
                Ok(None)
            }
        } else {
            Ok(None)
        }
    }

    /// Returns the goal state -- the file entries within the tar.
    ///
    /// # Errors
    ///
    /// Returns `TarXError::TarFileNotExists` when the tar file is absent.
    pub async fn state_goal(
        _fn_ctx: FnCtx<'_>,
        params: &TarXParams<Id>,
        data: TarXData<'_, Id>,
    ) -> Result<FileMetadatas, TarXError> {
        let storage = data.storage();
        let tar_path = params.tar_path();
        // Reuse the `tar_path` binding, consistent with `try_state_goal` and
        // the wasm branch below (previously called `params.tar_path()` again).
        #[cfg(not(target_arch = "wasm32"))]
        let tar_file_exists = tar_path.exists();
        #[cfg(target_arch = "wasm32")]
        let tar_file_exists = storage.contains_item(tar_path)?;

        if tar_file_exists {
            #[cfg(not(target_arch = "wasm32"))]
            let files_in_tar = Self::files_in_tar(storage, tar_path).await?;
            #[cfg(target_arch = "wasm32")]
            let files_in_tar = Self::files_in_tar(storage, tar_path)?;

            Ok(FileMetadatas::from(files_in_tar))
        } else {
            let tar_path = tar_path.to_path_buf();
            Err(TarXError::TarFileNotExists { tar_path })
        }
    }

    /// Lists the file entries within the tar, via the native sync IO bridge.
    #[cfg(not(target_arch = "wasm32"))]
    pub async fn files_in_tar(
        storage: &Storage,
        tar_path: &Path,
    ) -> Result<Vec<FileMetadata>, TarXError> {
        let file_metadatas = storage
            .read_with_sync_api(
                "TarXStateGoalFn::files_in_tar".to_string(),
                tar_path,
                |sync_io_bridge| Self::tar_file_metadata(tar_path, Archive::new(sync_io_bridge)),
            )
            .await?;
        Ok(file_metadatas)
    }

    /// Lists the file entries within the tar, reading it from browser storage.
    #[cfg(target_arch = "wasm32")]
    pub fn files_in_tar(
        storage: &Storage,
        tar_path: &Path,
    ) -> Result<Vec<FileMetadata>, TarXError> {
        use std::io::Cursor;

        let bytes = storage.get_item_b64(tar_path)?;
        Self::tar_file_metadata(tar_path, Archive::new(Cursor::new(bytes)))
    }

    /// Collects the path and modification time of each file entry in the tar.
    ///
    /// Directory entries are ignored in tracked `FileMetadata`s, because:
    ///
    /// * mtime of tar entries is the mtime it was created.
    /// * mtime of directories on the file system is always the time it is
    ///   unpacked, even if the unpack is told to `preserve_mtime`.
    fn tar_file_metadata<R>(
        tar_path: &Path,
        mut archive: Archive<R>,
    ) -> Result<Vec<FileMetadata>, TarXError>
    where
        R: Read,
    {
        archive
            .entries()
            .map_err(|error| {
                let tar_path = tar_path.to_path_buf();
                TarXError::TarEntryRead { tar_path, error }
            })?
            .try_fold(Vec::new(), |mut files_in_tar, entry| {
                let entry = entry.map_err(|error| {
                    let tar_path = tar_path.to_path_buf();
                    TarXError::TarEntryRead { tar_path, error }
                })?;
                let entry_path = entry.path().map_err(|error| {
                    let tar_path = tar_path.to_path_buf();
                    TarXError::TarEntryPathRead { tar_path, error }
                })?;
                // See directory rationale in the doc comment above.
                if entry.header().entry_type().is_dir() {
                    return Ok(files_in_tar);
                }
                let modified_time = entry.header().mtime().map_err(|error| {
                    let tar_path = tar_path.to_path_buf();
                    let entry_path = entry_path.to_path_buf();
                    TarXError::TarEntryMTimeRead {
                        tar_path,
                        entry_path,
                        error,
                    }
                })?;
                let file_metadata = FileMetadata::new(entry_path.to_path_buf(), modified_time);
                files_in_tar.push(file_metadata);
                Ok(files_in_tar)
            })
    }
}
| rust | Apache-2.0 | 5e2c43f2c0b18672749d0902d2285c703e24de97 | 2026-01-04T20:22:52.922300Z | false |
azriel91/peace | https://github.com/azriel91/peace/blob/5e2c43f2c0b18672749d0902d2285c703e24de97/items/tar_x/src/tar_x_state_current_fn.rs | items/tar_x/src/tar_x_state_current_fn.rs | use std::{marker::PhantomData, path::Path};
use peace::{cfg::FnCtx, params::Params};
use crate::{FileMetadata, FileMetadatas, TarXData, TarXError, TarXParams};
/// Reads the current state of the tar to extract.
#[derive(Debug)]
pub struct TarXStateCurrentFn<Id>(PhantomData<Id>);
impl<Id> TarXStateCurrentFn<Id>
where
Id: Send + Sync,
{
/// Returns the current state -- the files within the destination
/// directory -- when the `dest` parameter is available.
pub async fn try_state_current(
fn_ctx: FnCtx<'_>,
params_partial: &<TarXParams<Id> as Params>::Partial,
data: TarXData<'_, Id>,
) -> Result<Option<FileMetadatas>, TarXError> {
// `data` is only read on the wasm target; rebind to silence the
// unused-variable warning on native targets.
#[cfg(not(target_arch = "wasm32"))]
let _data = data;
if let Some(dest) = params_partial.dest().as_ref() {
#[cfg(not(target_arch = "wasm32"))]
let files_extracted = Self::files_extracted(fn_ctx, dest).await?;
#[cfg(target_arch = "wasm32")]
let files_extracted = Self::files_extracted(fn_ctx, data.storage(), dest)?;
Ok(Some(FileMetadatas::from(files_extracted)))
} else {
Ok(None)
}
}
/// Returns the current state -- the files within the destination
/// directory.
pub async fn state_current(
fn_ctx: FnCtx<'_>,
params: &TarXParams<Id>,
data: TarXData<'_, Id>,
) -> Result<FileMetadatas, TarXError> {
// `data` is only read on the wasm target; rebind to silence the
// unused-variable warning on native targets.
#[cfg(not(target_arch = "wasm32"))]
let _data = data;
let dest = params.dest();
#[cfg(not(target_arch = "wasm32"))]
let files_extracted = Self::files_extracted(fn_ctx, dest).await?;
#[cfg(target_arch = "wasm32")]
let files_extracted = Self::files_extracted(fn_ctx, data.storage(), dest)?;
Ok(FileMetadatas::from(files_extracted))
}
/// Walks `dest` recursively, returning each file's relative path and
/// modification time in seconds since the Unix epoch.
///
/// Returns an empty list when `dest` does not exist.
#[cfg(not(target_arch = "wasm32"))]
pub async fn files_extracted(
_fn_ctx: FnCtx<'_>,
dest: &Path,
) -> Result<Vec<FileMetadata>, TarXError> {
use std::time::UNIX_EPOCH;
use futures::stream::TryStreamExt;
use crate::native::{DestDirEntry, DirUnfold};
let dest_file_metadatas = if dest.exists() {
DirUnfold::unfold(dest)
.try_fold(
Vec::new(),
|mut dest_file_metadatas, dest_dir_entry| async move {
let DestDirEntry {
dest_dir_relative_path,
dir_entry,
} = dest_dir_entry;
let entry_path = dir_entry.path();
let metadata = dir_entry.metadata().await.map_err(|error| {
Self::dest_metadata_read_error(
dest.to_path_buf(),
entry_path.clone(),
error,
)
})?;
let mtime = metadata
.modified()
.map_err(|error| {
Self::dest_mtime_read_error(
dest.to_path_buf(),
entry_path.clone(),
error,
)
})
.and_then(|system_time| {
let mtime_secs = system_time
.duration_since(UNIX_EPOCH)
.map_err(|error| TarXError::TarDestFileMTimeSystemTimeRead {
dest: dest.to_path_buf(),
entry_path: entry_path.clone(),
error,
})?
.as_secs();
Ok(mtime_secs)
})?;
let file_metadata = FileMetadata::new(dest_dir_relative_path, mtime);
dest_file_metadatas.push(file_metadata);
Ok(dest_file_metadatas)
},
)
.await?
} else {
Vec::new()
};
Ok(dest_file_metadatas)
}
/// Maps an IO error reading a dir entry's metadata to a `TarXError`.
#[cfg(not(target_arch = "wasm32"))]
fn dest_metadata_read_error(
dest: std::path::PathBuf,
entry_path: std::path::PathBuf,
error: std::io::Error,
) -> TarXError {
TarXError::TarDestFileMetadataRead {
dest,
entry_path,
error,
}
}
/// Maps an IO error reading a file's modification time to a `TarXError`.
#[cfg(not(target_arch = "wasm32"))]
fn dest_mtime_read_error(
dest: std::path::PathBuf,
entry_path: std::path::PathBuf,
error: std::io::Error,
) -> TarXError {
TarXError::TarDestFileMTimeRead {
dest,
entry_path,
error,
}
}
/// WASM support is not yet implemented.
#[cfg(target_arch = "wasm32")]
fn files_extracted(
_fn_ctx: FnCtx<'_>,
_storage: &peace::rt_model::Storage,
_dest: &Path,
) -> Result<Vec<FileMetadata>, TarXError> {
todo!()
}
}
| rust | Apache-2.0 | 5e2c43f2c0b18672749d0902d2285c703e24de97 | 2026-01-04T20:22:52.922300Z | false |
azriel91/peace | https://github.com/azriel91/peace/blob/5e2c43f2c0b18672749d0902d2285c703e24de97/items/tar_x/src/native/dest_dir_entry.rs | items/tar_x/src/native/dest_dir_entry.rs | use std::path::PathBuf;
use tokio::fs::DirEntry;
/// Intermediary type while calculating `FileMetadata` for native targets.
///
/// Yielded by `DirUnfold::unfold` for each file found under the extraction
/// directory.
#[derive(Debug)]
pub(crate) struct DestDirEntry {
/// Path relative to the extraction directory.
pub(crate) dest_dir_relative_path: PathBuf,
/// `DirEntry` from `tokio`.
pub(crate) dir_entry: DirEntry,
}
| rust | Apache-2.0 | 5e2c43f2c0b18672749d0902d2285c703e24de97 | 2026-01-04T20:22:52.922300Z | false |
azriel91/peace | https://github.com/azriel91/peace/blob/5e2c43f2c0b18672749d0902d2285c703e24de97/items/tar_x/src/native/dir_unfold.rs | items/tar_x/src/native/dir_unfold.rs | use std::{
collections::VecDeque,
path::{Path, PathBuf},
};
use tokio::fs::ReadDir;
use crate::{native::DestDirEntry, TarXError};
/// Recursively discovers file entries underneath a base directory.
pub(crate) struct DirUnfold;
impl DirUnfold {
/// Provides a function that recursively produces file entries within the
/// original directory.
///
/// Directories are queued (`VecDeque`, pushed back / popped front) and so
/// are visited in FIFO order; directories themselves are not yielded --
/// see the `is_dir` branch below for the rationale.
pub(crate) fn unfold(
base_dir: &Path,
) -> impl futures::TryStream<Ok = DestDirEntry, Error = TarXError> + '_ {
// `ReadDir` doesn't implement `Stream`, this does that mapping.
let dir_context = DirContext {
base_dir,
dir_and_read_dir_opt: None,
dir_to_reads: VecDeque::from([DirToRead {
dir_path: base_dir.to_path_buf(),
dir_path_base_rel: PathBuf::new(),
}]),
};
futures::stream::try_unfold(dir_context, move |dir_context| async move {
let DirContext {
base_dir,
mut dir_and_read_dir_opt,
mut dir_to_reads,
} = dir_context;
loop {
if let Some(dir_and_read_dir) = dir_and_read_dir_opt.take() {
let DirAndReadDir {
dir_path_base_rel,
mut read_dir,
} = dir_and_read_dir;
let dir_entry = read_dir.next_entry().await.map_err(
// We don't cover corrupted tar contents in tests.
#[cfg_attr(coverage_nightly, coverage(off))]
|error| {
let base_dir = base_dir.to_path_buf();
TarXError::TarDestEntryRead {
dest: base_dir,
error,
}
},
)?;
if let Some(dir_entry) = dir_entry {
let entry_path = dir_entry.path();
// Don't include directories as dir entries, but recursively descend
let file_type = dir_entry.file_type().await.map_err(
// We don't cover corrupted tar contents in tests.
#[cfg_attr(coverage_nightly, coverage(off))]
|error| TarXError::TarDestEntryFileTypeRead {
entry_path: entry_path.clone(),
error,
},
)?;
let dest_dir_relative_path = dir_path_base_rel.join(dir_entry.file_name());
// Ignore directories in tracked `FileMetadata`s, because:
//
// * mtime of tar entries is the mtime it was created.
// * mtime of directories on the file system is always the time it is
// unpacked, even if the unpack is told to `preserve_mtime`.
if file_type.is_dir() {
// Queue the subdirectory, then keep reading the
// current directory.
dir_to_reads.push_back(DirToRead {
dir_path: entry_path,
dir_path_base_rel: dest_dir_relative_path,
});
dir_and_read_dir_opt = Some(DirAndReadDir {
dir_path_base_rel,
read_dir,
});
continue;
} else {
// Yield the file entry, along with the state needed
// to resume iteration on the next poll.
break Result::<_, TarXError>::Ok(Some((
DestDirEntry {
dest_dir_relative_path,
dir_entry,
},
DirContext {
base_dir,
dir_and_read_dir_opt: Some(DirAndReadDir {
dir_path_base_rel,
read_dir,
}),
dir_to_reads,
},
)));
}
} else {
// Current directory is exhausted; fall through to the
// queued directories.
dir_and_read_dir_opt = None;
continue;
}
} else if let Some(dir_to_read) = dir_to_reads.pop_front() {
let DirToRead {
dir_path,
dir_path_base_rel,
} = dir_to_read;
// Process next directory
dir_and_read_dir_opt = Some(
tokio::fs::read_dir(&dir_path)
.await
.map_err(
// We don't cover corrupted tar contents in tests.
#[cfg_attr(coverage_nightly, coverage(off))]
|error| TarXError::TarDestReadDir {
dir: dir_path,
error,
},
)
.map(|read_dir| DirAndReadDir {
dir_path_base_rel,
read_dir,
})?,
);
continue;
} else {
// no more directories to process
break Ok(None);
}
}
})
}
}
/// State carried between iterations of the `try_unfold` loop in
/// `DirUnfold::unfold`.
struct DirContext<'base> {
/// Base directory to recurse through.
base_dir: &'base Path,
/// Current `ReadDir` being iterated through.
dir_and_read_dir_opt: Option<DirAndReadDir>,
/// Remaining directories to process.
dir_to_reads: VecDeque<DirToRead>,
}
/// Tracks a directory's path, and its relative path to the base directory.
///
/// Example values:
///
/// ```yaml
/// base_dir: 'extraction/dir'
/// dir_path: 'extraction/dir/sub/dir'
/// dir_path_base_rel: 'sub/dir'
/// ```
struct DirToRead {
/// Path to the directory to process.
dir_path: PathBuf,
/// Path to the directory to process, relative to the base directory.
dir_path_base_rel: PathBuf,
}
/// Pairs a directory's relative path with its in-progress `ReadDir`.
struct DirAndReadDir {
/// Path to the directory to process, relative to the base directory.
dir_path_base_rel: PathBuf,
/// `ReadDir` for the directory's entries.
read_dir: ReadDir,
}
| rust | Apache-2.0 | 5e2c43f2c0b18672749d0902d2285c703e24de97 | 2026-01-04T20:22:52.922300Z | false |
azriel91/peace | https://github.com/azriel91/peace/blob/5e2c43f2c0b18672749d0902d2285c703e24de97/items/blank/src/blank_apply_fns.rs | items/blank/src/blank_apply_fns.rs | use std::marker::PhantomData;
use peace::cfg::{ApplyCheck, FnCtx};
#[cfg(feature = "output_progress")]
use peace::progress_model::ProgressLimit;
use crate::{BlankData, BlankError, BlankParams, BlankState, BlankStateDiff};
/// ApplyFns for the blank state.
#[derive(Debug)]
pub struct BlankApplyFns<Id>(PhantomData<Id>);
impl<Id> BlankApplyFns<Id>
where
Id: Send + Sync + 'static,
{
/// Returns whether `apply` needs to be executed, based on the state diff.
pub async fn apply_check(
_params: &BlankParams<Id>,
_data: BlankData<'_, Id>,
_state_current: &BlankState,
_state_goal: &BlankState,
diff: &BlankStateDiff,
) -> Result<ApplyCheck, BlankError> {
let apply_check = match *diff {
BlankStateDiff::InSync { .. } => ApplyCheck::ExecNotRequired,
BlankStateDiff::Added { .. } | BlankStateDiff::OutOfSync { .. } => {
#[cfg(not(feature = "output_progress"))]
{
ApplyCheck::ExecRequired
}
#[cfg(feature = "output_progress")]
{
// Copying a single value is a one-step operation.
let progress_limit = ProgressLimit::Steps(1);
ApplyCheck::ExecRequired { progress_limit }
}
}
};
Ok(apply_check)
}
/// Dry-run apply: returns the goal state without mutating the parameters.
pub async fn apply_dry(
_fn_ctx: FnCtx<'_>,
_params: &BlankParams<Id>,
_data: BlankData<'_, Id>,
_state_current: &BlankState,
state_goal: &BlankState,
_diff: &BlankStateDiff,
) -> Result<BlankState, BlankError> {
Ok(*state_goal)
}
/// Copies the `src` parameter value into `dest`, and returns the goal
/// state.
pub async fn apply(
_fn_ctx: FnCtx<'_>,
_params: &BlankParams<Id>,
mut data: BlankData<'_, Id>,
_state_current: &BlankState,
state_goal: &BlankState,
_diff: &BlankStateDiff,
) -> Result<BlankState, BlankError> {
let params = data.params_mut();
params.dest.0 = Some(params.src.0);
Ok(*state_goal)
}
}
| rust | Apache-2.0 | 5e2c43f2c0b18672749d0902d2285c703e24de97 | 2026-01-04T20:22:52.922300Z | false |
azriel91/peace | https://github.com/azriel91/peace/blob/5e2c43f2c0b18672749d0902d2285c703e24de97/items/blank/src/blank_item.rs | items/blank/src/blank_item.rs | use std::marker::PhantomData;
use peace::{
cfg::{async_trait, ApplyCheck, FnCtx, Item},
item_model::ItemId,
params::Params,
resource_rt::{resources::ts::Empty, Resources},
};
use crate::{BlankApplyFns, BlankData, BlankError, BlankParams, BlankState, BlankStateDiff};
/// Item for copying a number.
///
/// The `Id` type parameter is needed for each blank params to be a
/// distinct type.
///
/// # Type Parameters
///
/// * `Id`: A zero-sized type used to distinguish different blank parameters
/// from each other.
#[derive(Debug)]
pub struct BlankItem<Id> {
/// ID of the blank item.
item_id: ItemId,
/// Marker for unique blank parameters type.
marker: PhantomData<Id>,
}
impl<Id> BlankItem<Id> {
/// Returns a new `BlankItem`.
pub fn new(item_id: ItemId) -> Self {
Self {
item_id,
marker: PhantomData,
}
}
}
// `Clone` is implemented manually so that it does not require `Id: Clone`,
// which `#[derive(Clone)]` would add as a bound; only `item_id` needs cloning.
impl<Id> Clone for BlankItem<Id> {
fn clone(&self) -> Self {
Self {
item_id: self.item_id.clone(),
marker: PhantomData,
}
}
}
#[async_trait(?Send)]
impl<Id> Item for BlankItem<Id>
where
    Id: Send + Sync + 'static,
{
    type Data<'exec> = BlankData<'exec, Id>;
    type Error = BlankError;
    type Params<'exec> = BlankParams<Id>;
    type State = BlankState;
    type StateDiff = BlankStateDiff;

    fn id(&self) -> &ItemId {
        &self.item_id
    }

    // No resources need to be inserted for the blank item.
    async fn setup(&self, _resources: &mut Resources<Empty>) -> Result<(), BlankError> {
        Ok(())
    }

    #[cfg(feature = "item_state_example")]
    fn state_example(params: &Self::Params<'_>, _data: Self::Data<'_>) -> Self::State {
        BlankState(params.dest.0)
    }

    // Current state is the `dest` parameter value, when known.
    async fn try_state_current(
        _fn_ctx: FnCtx<'_>,
        params_partial: &<Self::Params<'_> as Params>::Partial,
        _data: BlankData<'_, Id>,
    ) -> Result<Option<Self::State>, BlankError> {
        Ok(params_partial.dest.clone().map(|dest| BlankState(dest.0)))
    }

    async fn state_current(
        _fn_ctx: FnCtx<'_>,
        params: &Self::Params<'_>,
        _data: BlankData<'_, Id>,
    ) -> Result<Self::State, BlankError> {
        Ok(BlankState(params.dest.0))
    }

    // Goal state is the `src` parameter value, when known.
    async fn try_state_goal(
        _fn_ctx: FnCtx<'_>,
        params_partial: &<Self::Params<'_> as Params>::Partial,
        _data: BlankData<'_, Id>,
    ) -> Result<Option<Self::State>, BlankError> {
        Ok(params_partial
            .src
            .clone()
            .map(|src| BlankState(Some(src.0))))
    }

    async fn state_goal(
        _fn_ctx: FnCtx<'_>,
        params: &Self::Params<'_>,
        _data: BlankData<'_, Id>,
    ) -> Result<Self::State, BlankError> {
        Ok(BlankState(Some(params.src.0)))
    }

    async fn state_diff(
        _params_partial: &<Self::Params<'_> as Params>::Partial,
        _data: Self::Data<'_>,
        state_current: &BlankState,
        state_goal: &BlankState,
    ) -> Result<Self::StateDiff, BlankError> {
        let diff = match (state_current, state_goal) {
            (BlankState(Some(current)), BlankState(Some(goal))) if current == goal => {
                BlankStateDiff::InSync { value: *current }
            }
            (BlankState(Some(current)), BlankState(Some(goal))) => BlankStateDiff::OutOfSync {
                // Widen to `i64` before subtracting: `goal - current` in
                // `u32` panics (debug) / wraps (release) when
                // `current > goal`, and `diff` is deliberately signed.
                diff: i64::from(*goal) - i64::from(*current),
            },
            (BlankState(None), BlankState(Some(goal))) => BlankStateDiff::Added { value: *goal },
            (BlankState(_), BlankState(None)) => unreachable!("goal state is always Some"),
        };
        Ok(diff)
    }

    async fn state_clean(
        _params_partial: &<Self::Params<'_> as Params>::Partial,
        _data: Self::Data<'_>,
    ) -> Result<BlankState, BlankError> {
        Ok(BlankState(None))
    }

    // The `apply*` functions delegate to `BlankApplyFns`.
    async fn apply_check(
        params: &Self::Params<'_>,
        data: Self::Data<'_>,
        state_current: &Self::State,
        state_target: &Self::State,
        diff: &Self::StateDiff,
    ) -> Result<ApplyCheck, Self::Error> {
        BlankApplyFns::<Id>::apply_check(params, data, state_current, state_target, diff).await
    }

    async fn apply_dry(
        fn_ctx: FnCtx<'_>,
        params: &Self::Params<'_>,
        data: Self::Data<'_>,
        state_current: &Self::State,
        state_target: &Self::State,
        diff: &Self::StateDiff,
    ) -> Result<Self::State, Self::Error> {
        BlankApplyFns::<Id>::apply_dry(fn_ctx, params, data, state_current, state_target, diff)
            .await
    }

    async fn apply(
        fn_ctx: FnCtx<'_>,
        params: &Self::Params<'_>,
        data: Self::Data<'_>,
        state_current: &Self::State,
        state_target: &Self::State,
        diff: &Self::StateDiff,
    ) -> Result<Self::State, Self::Error> {
        BlankApplyFns::<Id>::apply(fn_ctx, params, data, state_current, state_target, diff).await
    }

    #[cfg(feature = "item_interactions")]
    fn interactions(
        _params: &Self::Params<'_>,
        _data: Self::Data<'_>,
    ) -> Vec<peace::item_interaction_model::ItemInteraction> {
        use peace::item_interaction_model::{ItemInteractionWithin, ItemLocation};

        // The blank item only interacts with the local host.
        let item_interaction =
            ItemInteractionWithin::new(vec![ItemLocation::localhost()].into()).into();

        vec![item_interaction]
    }
}
| rust | Apache-2.0 | 5e2c43f2c0b18672749d0902d2285c703e24de97 | 2026-01-04T20:22:52.922300Z | false |
azriel91/peace | https://github.com/azriel91/peace/blob/5e2c43f2c0b18672749d0902d2285c703e24de97/items/blank/src/lib.rs | items/blank/src/lib.rs | //! Copies a number from one resource to another.
pub use crate::{
blank_apply_fns::BlankApplyFns,
blank_data::BlankData,
blank_dest::BlankDest,
blank_error::BlankError,
blank_item::BlankItem,
blank_params::{BlankParams, BlankParamsFieldWise, BlankParamsPartial},
blank_src::BlankSrc,
blank_state::BlankState,
blank_state_diff::BlankStateDiff,
};
mod blank_apply_fns;
mod blank_data;
mod blank_dest;
mod blank_error;
mod blank_item;
mod blank_params;
mod blank_src;
mod blank_state;
mod blank_state_diff;
| rust | Apache-2.0 | 5e2c43f2c0b18672749d0902d2285c703e24de97 | 2026-01-04T20:22:52.922300Z | false |
azriel91/peace | https://github.com/azriel91/peace/blob/5e2c43f2c0b18672749d0902d2285c703e24de97/items/blank/src/blank_state_diff.rs | items/blank/src/blank_state_diff.rs | use std::fmt;
use serde::{Deserialize, Serialize};
/// Diff between current (dest) and goal (src) state.
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq)]
pub enum BlankStateDiff {
/// Value was added.
Added {
/// The new value.
value: u32,
},
/// Value differs between the current and goal states.
OutOfSync {
/// Difference between the current and goal values.
diff: i64,
},
/// Value is the same in the current and goal states.
InSync {
/// The current value.
value: u32,
},
}
impl fmt::Display for BlankStateDiff {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
BlankStateDiff::Added { value } => write!(f, "`{value}` newly added."),
BlankStateDiff::OutOfSync { diff } => {
write!(f, "Current value differs to goal value by: `{diff}`.")
}
BlankStateDiff::InSync { value } => write!(f, "Value already in sync: `{value}`."),
}
}
}
| rust | Apache-2.0 | 5e2c43f2c0b18672749d0902d2285c703e24de97 | 2026-01-04T20:22:52.922300Z | false |
azriel91/peace | https://github.com/azriel91/peace/blob/5e2c43f2c0b18672749d0902d2285c703e24de97/items/blank/src/blank_dest.rs | items/blank/src/blank_dest.rs | use std::fmt;
use peace::params::Params;
use serde::{Deserialize, Serialize};
/// Destination for blank state.
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Eq, Params)]
pub struct BlankDest(pub Option<u32>);
impl fmt::Display for BlankDest {
/// Renders the inner value, or `<none>` when unset.
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self.0 {
Some(n) => n.fmt(f),
None => "<none>".fmt(f),
}
}
}
// `Deref`/`DerefMut` let callers treat `BlankDest` as its inner `Option<u32>`.
impl std::ops::Deref for BlankDest {
type Target = Option<u32>;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl std::ops::DerefMut for BlankDest {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
| rust | Apache-2.0 | 5e2c43f2c0b18672749d0902d2285c703e24de97 | 2026-01-04T20:22:52.922300Z | false |
azriel91/peace | https://github.com/azriel91/peace/blob/5e2c43f2c0b18672749d0902d2285c703e24de97/items/blank/src/blank_state.rs | items/blank/src/blank_state.rs | use std::fmt;
use serde::{Deserialize, Serialize};
#[cfg(feature = "output_progress")]
use peace::item_interaction_model::ItemLocationState;
/// Logical blank state.
#[derive(Clone, Copy, Debug, Deserialize, Serialize, PartialEq, Eq)]
pub struct BlankState(pub Option<u32>);
impl fmt::Display for BlankState {
/// Renders the inner value, or `<none>` when unset.
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self.0 {
Some(n) => n.fmt(f),
None => "<none>".fmt(f),
}
}
}
// `Deref`/`DerefMut` let callers treat `BlankState` as its inner `Option<u32>`.
impl std::ops::Deref for BlankState {
type Target = Option<u32>;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl std::ops::DerefMut for BlankState {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
#[cfg(feature = "output_progress")]
impl<'state> From<&'state BlankState> for ItemLocationState {
fn from(blank_state: &'state BlankState) -> ItemLocationState {
// `Some` maps to `Exists`, `None` to `NotExists`.
match blank_state.is_some() {
true => ItemLocationState::Exists,
false => ItemLocationState::NotExists,
}
}
}
| rust | Apache-2.0 | 5e2c43f2c0b18672749d0902d2285c703e24de97 | 2026-01-04T20:22:52.922300Z | false |
azriel91/peace | https://github.com/azriel91/peace/blob/5e2c43f2c0b18672749d0902d2285c703e24de97/items/blank/src/blank_data.rs | items/blank/src/blank_data.rs | use peace::data::{accessors::W, Data};
use crate::BlankParams;
/// Data used to manage blank state.
///
/// # Type Parameters
///
/// * `Id`: A zero-sized type used to distinguish different blank parameters
/// from each other.
#[derive(Data, Debug)]
pub struct BlankData<'exec, Id>
where
Id: Send + Sync + 'static,
{
/// Blank state parameters.
params: W<'exec, BlankParams<Id>>,
}
impl<'exec, Id> BlankData<'exec, Id>
where
Id: Send + Sync + 'static,
{
/// Returns a new `BlankData` wrapping write access to the parameters.
pub fn new(params: W<'exec, BlankParams<Id>>) -> Self {
Self { params }
}
/// Returns a shared reference to the blank parameters.
pub fn params(&self) -> &BlankParams<Id> {
&self.params
}
/// Returns a mutable reference to the blank parameters.
pub fn params_mut(&mut self) -> &mut BlankParams<Id> {
&mut self.params
}
}
| rust | Apache-2.0 | 5e2c43f2c0b18672749d0902d2285c703e24de97 | 2026-01-04T20:22:52.922300Z | false |
azriel91/peace | https://github.com/azriel91/peace/blob/5e2c43f2c0b18672749d0902d2285c703e24de97/items/blank/src/blank_error.rs | items/blank/src/blank_error.rs | #[cfg(feature = "error_reporting")]
use peace::miette;
/// Error while managing blank state.
#[cfg_attr(feature = "error_reporting", derive(peace::miette::Diagnostic))]
#[derive(Debug, thiserror::Error)]
pub enum BlankError {
/// A `peace` runtime error occurred.
#[error("A `peace` runtime error occurred.")]
PeaceRtError(
// `#[from]` generates `From<peace::rt_model::Error>`, so `?` converts
// runtime errors into this variant automatically.
#[cfg_attr(feature = "error_reporting", diagnostic_source)]
#[source]
#[from]
peace::rt_model::Error,
),
}
| rust | Apache-2.0 | 5e2c43f2c0b18672749d0902d2285c703e24de97 | 2026-01-04T20:22:52.922300Z | false |
azriel91/peace | https://github.com/azriel91/peace/blob/5e2c43f2c0b18672749d0902d2285c703e24de97/items/blank/src/blank_params.rs | items/blank/src/blank_params.rs | use std::marker::PhantomData;
use derivative::Derivative;
use peace::params::Params;
use serde::{Deserialize, Serialize};
use crate::{BlankDest, BlankSrc};
/// Blank item parameters.
///
/// The `Id` type parameter is needed for each blank params to be a distinct
/// type.
///
/// # Type Parameters
///
/// * `Id`: A zero-sized type used to distinguish different blank parameters
/// from each other.
#[derive(Derivative, Params, PartialEq, Eq, Deserialize, Serialize)]
#[derivative(Clone, Debug)]
// `Id` only appears in `PhantomData`, so drop serde's implicit
// `Id: Serialize + Deserialize` bounds.
#[serde(bound = "")]
pub struct BlankParams<Id> {
/// Source / goal value for the state.
pub src: BlankSrc,
/// Destination / current value of the state.
pub dest: BlankDest,
/// Marker for unique blank parameters type.
marker: PhantomData<Id>,
}
impl<Id> BlankParams<Id> {
/// Returns new `BlankParams`.
pub fn new(src: BlankSrc, dest: BlankDest) -> Self {
Self {
src,
dest,
marker: PhantomData,
}
}
}
| rust | Apache-2.0 | 5e2c43f2c0b18672749d0902d2285c703e24de97 | 2026-01-04T20:22:52.922300Z | false |
azriel91/peace | https://github.com/azriel91/peace/blob/5e2c43f2c0b18672749d0902d2285c703e24de97/items/blank/src/blank_src.rs | items/blank/src/blank_src.rs | use peace::params::Params;
use serde::{Deserialize, Serialize};
/// Source for blank state.
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Eq, Params)]
pub struct BlankSrc(pub u32);
// `Deref`/`DerefMut` let callers treat `BlankSrc` as its inner `u32`.
impl std::ops::Deref for BlankSrc {
type Target = u32;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl std::ops::DerefMut for BlankSrc {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
| rust | Apache-2.0 | 5e2c43f2c0b18672749d0902d2285c703e24de97 | 2026-01-04T20:22:52.922300Z | false |
azriel91/peace | https://github.com/azriel91/peace/blob/5e2c43f2c0b18672749d0902d2285c703e24de97/doc/src/learning_material/why_rust/constraints/data_race_1.rs | doc/src/learning_material/why_rust/constraints/data_race_1.rs | use std::thread;
#[derive(Debug)]
struct Data {
value: u32,
}
// NOTE(review): teaching example -- this intentionally does NOT compile.
// Both closures capture the same `&mut data`, and `thread::spawn` also
// requires `'static` captures, so the compiler rejects this data race.
// Presumably kept as-is to demonstrate the point; confirm before "fixing".
fn main() {
let mut data = Data { value: 0 };
let data = &mut data;
let work_0 = || (0..50000).for_each(|_| data.value += 1);
let work_1 = || (0..50000).for_each(|_| data.value += 1);
let thread_0 = thread::spawn(work_0);
let thread_1 = thread::spawn(work_1);
thread_0.join().unwrap();
thread_1.join().unwrap();
println!("value: {}", data.value);
}
| rust | Apache-2.0 | 5e2c43f2c0b18672749d0902d2285c703e24de97 | 2026-01-04T20:22:52.922300Z | false |
azriel91/peace | https://github.com/azriel91/peace/blob/5e2c43f2c0b18672749d0902d2285c703e24de97/doc/src/learning_material/why_rust/constraints/data_race_2.rs | doc/src/learning_material/why_rust/constraints/data_race_2.rs | use std::{
sync::{Arc, Mutex},
thread,
};
/// Counter shared between threads, protected by a `Mutex`.
#[derive(Debug)]
struct Data {
    value: u32,
}

/// Demonstrates safe shared mutation: two threads each increment the counter
/// 50000 times through an `Arc<Mutex<Data>>`, then the final value is printed.
fn main() -> thread::Result<()> {
    let shared = Arc::new(Mutex::new(Data { value: 0 }));

    // Spawn two workers, each holding its own handle to the shared counter.
    let workers: Vec<_> = (0..2)
        .map(|_| {
            let shared = Arc::clone(&shared);
            thread::spawn(move || {
                for _ in 0..50000 {
                    if let Ok(mut data) = shared.lock() {
                        data.value += 1;
                    }
                }
            })
        })
        .collect();

    for worker in workers {
        worker.join()?;
    }

    // All threads have finished, so unwrapping the Arc and Mutex succeeds.
    if let Ok(Ok(data)) = Arc::try_unwrap(shared).map(Mutex::into_inner) {
        println!("value: {}", data.value);
    }
    Ok(())
}
| rust | Apache-2.0 | 5e2c43f2c0b18672749d0902d2285c703e24de97 | 2026-01-04T20:22:52.922300Z | false |
lambdaclass/cairo_native | https://github.com/lambdaclass/cairo_native/blob/f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce/src/starknet_stub.rs | src/starknet_stub.rs | //! An usable implementation of the starknet syscall handler trait.
//!
//! Based of cairo-lang-runner syscall handler implementation.
//! - https://github.com/starkware-libs/cairo/blob/v2.14.0/crates/cairo-lang-runner/src/casm_run/mod.rs
use std::{
collections::{HashMap, VecDeque},
fmt,
sync::Arc,
};
use crate::{
error::Error,
execution_result::BuiltinStats,
executor::AotNativeExecutor,
starknet::{
ExecutionInfo, ExecutionInfoV2, Secp256k1Point, Secp256r1Point, StarknetSyscallHandler,
SyscallResult, TxV2Info, U256,
},
Value,
};
use ark_ec::short_weierstrass::{Affine, Projective, SWCurveConfig};
use ark_ff::{BigInt, PrimeField};
use cairo_lang_runner::RunResultValue;
use cairo_lang_sierra::ids::FunctionId;
use cairo_lang_starknet::contract::ContractInfo;
use cairo_lang_starknet_classes::casm_contract_class::ENTRY_POINT_COST;
use cairo_lang_utils::ordered_hash_map::OrderedHashMap;
use itertools::Itertools;
use num_bigint::BigUint;
use num_traits::Zero;
use sha2::digest::generic_array::GenericArray;
use starknet_types_core::{
felt::{Felt, NonZeroFelt},
hash::{Pedersen, StarkHash},
};
use tracing::instrument;
/// An usable implementation of the starknet syscall handler trait.
#[derive(Clone, Default, Debug)]
pub struct StubSyscallHandler {
/// The Cairo Native executor
pub executor: Option<Arc<AotNativeExecutor>>,
/// The values of addresses in the simulated storage per contract.
pub storage: HashMap<Felt, HashMap<Felt, Felt>>,
/// A mapping from contract address to class hash.
pub deployed_contracts: HashMap<Felt, Felt>,
/// A mapping from contract address to logs.
pub logs: HashMap<Felt, ContractLogs>,
/// The simulated execution info.
pub execution_info: ExecutionInfo,
/// A mock history, mapping block number to the class hash.
pub block_hash: HashMap<u64, Felt>,
/// Mapping from class_hash to contract info.
pub contracts_info: OrderedHashMap<Felt, ContractInfo>,
/// Keep track of inner call builtin usage.
pub builtin_counters: BuiltinStats,
}
/// Event emitted by the emit_event syscall.
#[derive(Debug, Clone)]
pub struct StubEvent {
pub keys: Vec<Felt>,
pub data: Vec<Felt>,
}
#[derive(Debug, Default, Clone)]
pub struct ContractLogs {
pub events: VecDeque<StubEvent>,
pub l2_to_l1_messages: VecDeque<L2ToL1Message>,
}
type L2ToL1Message = (Felt, Vec<Felt>);
#[derive(PartialEq, Clone, Copy)]
struct Secp256Point<Curve: SWCurveConfig>(Affine<Curve>);
impl<Curve: SWCurveConfig> fmt::Debug for Secp256Point<Curve> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_tuple("Secp256Point").field(&self.0).finish()
}
}
impl From<Secp256Point<ark_secp256k1::Config>> for Secp256k1Point {
fn from(Secp256Point(Affine { x, y, infinity }): Secp256Point<ark_secp256k1::Config>) -> Self {
Secp256k1Point {
x: big4int_to_u256(x.into()),
y: big4int_to_u256(y.into()),
is_infinity: infinity,
}
}
}
impl From<Secp256Point<ark_secp256r1::Config>> for Secp256r1Point {
fn from(Secp256Point(Affine { x, y, infinity }): Secp256Point<ark_secp256r1::Config>) -> Self {
Secp256r1Point {
x: big4int_to_u256(x.into()),
y: big4int_to_u256(y.into()),
is_infinity: infinity,
}
}
}
impl From<Secp256k1Point> for Secp256Point<ark_secp256k1::Config> {
fn from(p: Secp256k1Point) -> Self {
Secp256Point(Affine {
x: u256_to_biguint(p.x).into(),
y: u256_to_biguint(p.y).into(),
infinity: p.is_infinity,
})
}
}
impl From<Secp256r1Point> for Secp256Point<ark_secp256r1::Config> {
fn from(p: Secp256r1Point) -> Self {
Secp256Point(Affine {
x: u256_to_biguint(p.x).into(),
y: u256_to_biguint(p.y).into(),
infinity: p.is_infinity,
})
}
}
pub fn u256_to_biguint(u256: U256) -> BigUint {
let lo = BigUint::from(u256.lo);
let hi = BigUint::from(u256.hi);
(hi << 128) + lo
}
pub fn big4int_to_u256(b_int: BigInt<4>) -> U256 {
let [a, b, c, d] = b_int.0;
let lo = u128::from(a) | (u128::from(b) << 64);
let hi = u128::from(c) | (u128::from(d) << 64);
U256 { lo, hi }
}
pub fn encode_str_as_felts(msg: &str) -> Vec<Felt> {
const CHUNK_SIZE: usize = 32;
let data = msg.as_bytes().chunks(CHUNK_SIZE - 1);
let mut encoding = vec![Felt::default(); data.len()];
for (i, data_chunk) in data.enumerate() {
let mut chunk = [0_u8; CHUNK_SIZE];
chunk[1..data_chunk.len() + 1].copy_from_slice(data_chunk);
encoding[i] = Felt::from_bytes_be(&chunk);
}
encoding
}
pub fn decode_felts_as_str(encoding: &[Felt]) -> String {
let bytes_err: Vec<_> = encoding
.iter()
.flat_map(|felt| felt.to_bytes_be()[1..32].to_vec())
.collect();
match String::from_utf8(bytes_err) {
Ok(s) => s.trim_matches('\0').to_owned(),
Err(_) => {
let err_msgs = encoding
.iter()
.map(
|felt| match String::from_utf8(felt.to_bytes_be()[1..32].to_vec()) {
Ok(s) => format!("{} ({})", s.trim_matches('\0'), felt),
Err(_) => felt.to_string(),
},
)
.join(", ");
format!("[{}]", err_msgs)
}
}
}
impl<Curve: SWCurveConfig> Secp256Point<Curve>
where
Curve::BaseField: PrimeField, // constraint for get_point_by_id
{
// Given a (x,y) pair it will
// - return the point at infinity for (0,0)
// - Err if either x or y is outside of the modulus
// - Ok(None) if (x,y) are within the modules but not on the curve
// - Ok(Some(Point)) if (x,y) are on the curve
fn new(x: U256, y: U256) -> Result<Option<Self>, Vec<Felt>> {
let x = u256_to_biguint(x);
let y = u256_to_biguint(y);
let modulos = Curve::BaseField::MODULUS.into();
if x >= modulos || y >= modulos {
let error = Felt::from_hex(
"0x00000000000000000000000000000000496e76616c696420617267756d656e74",
) // INVALID_ARGUMENT
.map_err(|err| encode_str_as_felts(&err.to_string()))?;
return Err(vec![error]);
}
Ok(maybe_affine(x.into(), y.into()))
}
fn add(p0: Self, p1: Self) -> Self {
let result: Projective<Curve> = p0.0 + p1.0;
Secp256Point(result.into())
}
fn mul(p: Self, m: U256) -> Self {
let result = p.0 * Curve::ScalarField::from(u256_to_biguint(m));
Secp256Point(result.into())
}
fn get_point_from_x(x: U256, y_parity: bool) -> Result<Option<Self>, Vec<Felt>> {
let modulos = Curve::BaseField::MODULUS.into();
let x = u256_to_biguint(x);
if x >= modulos {
let error = Felt::from_hex(
"0x00000000000000000000000000000000496e76616c696420617267756d656e74",
) // INVALID_ARGUMENT
.map_err(|err| encode_str_as_felts(&err.to_string()))?;
return Err(vec![error]);
}
let x = x.into();
let maybe_ec_point = Affine::<Curve>::get_ys_from_x_unchecked(x)
.map(|(smaller, greater)| {
// Return the correct y coordinate based on the parity.
if ark_ff::BigInteger::is_odd(&smaller.into_bigint()) == y_parity {
smaller
} else {
greater
}
})
.map(|y| Affine::<Curve>::new_unchecked(x, y))
.filter(|p| p.is_in_correct_subgroup_assuming_on_curve());
Ok(maybe_ec_point.map(Secp256Point))
}
}
/// Variation on [`Affine<Curve>::new`] that doesn't panic and maps (x,y) = (0,0) -> infinity
fn maybe_affine<Curve: SWCurveConfig>(
x: Curve::BaseField,
y: Curve::BaseField,
) -> Option<Secp256Point<Curve>> {
let ec_point = if x.is_zero() && y.is_zero() {
Affine::<Curve>::identity()
} else {
Affine::<Curve>::new_unchecked(x, y)
};
if ec_point.is_on_curve() && ec_point.is_in_correct_subgroup_assuming_on_curve() {
Some(Secp256Point(ec_point))
} else {
None
}
}
impl StubSyscallHandler {
#[instrument(skip(self))]
fn call_entry_point(
&mut self,
gas_counter: &mut u64,
entry_point: &FunctionId,
calldata: &[Felt],
) -> Result<Vec<Felt>, Vec<Felt>> {
// The cost of the called syscall include `ENTRY_POINT_COST` so we need
// to refund it here to avoid double charging.
let inner_gas_counter = Some(*gas_counter + ENTRY_POINT_COST as u64);
let inner_args = &[Value::Struct {
fields: vec![Value::Array(
calldata.iter().map(|x| Value::from(*x)).collect_vec(),
)],
debug_name: None,
}];
let concrete_result = self
.executor
.clone()
.expect("calling contracts requires executor")
.invoke_dynamic_with_syscall_handler(
entry_point,
inner_args,
inner_gas_counter,
&mut *self,
)
.expect("failed to execute inner contract");
self.builtin_counters += concrete_result.builtin_stats;
if let Some(remaining_gas) = concrete_result.remaining_gas {
*gas_counter = remaining_gas;
}
let starknet_result = read_contract_result(&concrete_result.return_value)
.expect("return value was not a starknet panic result");
match starknet_result {
RunResultValue::Success(felts) => Ok(felts),
RunResultValue::Panic(felts) => Err(felts),
}
}
/// Replaces the addresses in the context.
///
/// Called before `call_entry_point`.
pub fn open_caller_context(
&mut self,
(new_contract_address, new_caller_address): (Felt, Felt),
) -> (Felt, Felt) {
let old_contract_address = std::mem::replace(
&mut self.execution_info.contract_address,
new_contract_address,
);
let old_caller_address =
std::mem::replace(&mut self.execution_info.caller_address, new_caller_address);
(old_contract_address, old_caller_address)
}
/// Restores the addresses in the context.
///
/// Called after `call_entry_point`.
pub fn close_caller_context(
&mut self,
(old_contract_address, old_caller_address): (Felt, Felt),
) {
self.execution_info.contract_address = old_contract_address;
self.execution_info.caller_address = old_caller_address;
}
}
/// Creates a `RunResultValue` from a contract entrypoint result.
///
/// The value should be of type `PanicResult<(Span<Felt>,)>`.
fn read_contract_result(value: &Value) -> Result<RunResultValue, Error> {
let unexpected_value_error = Err(Error::UnexpectedValue(String::from(
"PanicResult<(Span<Felt>,)>",
)));
// The value should be of type: Enum<Struct<Struct<Span<Felt>>>, Struct<Panic,Array<Felt>>>
let Value::Enum { tag, value, .. } = value else {
return unexpected_value_error;
};
match tag {
0 => {
// The value should be of type: Struct<Struct<Span<Felt>>>
let Value::Struct { fields: values, .. } = value.as_ref() else {
return unexpected_value_error;
};
let value = if values.len() != 1 {
return unexpected_value_error;
} else {
&values[0]
};
// The value should be of type: Struct<Span<Felt>>
let Value::Struct { fields: values, .. } = value else {
return unexpected_value_error;
};
let value = if values.len() != 1 {
return unexpected_value_error;
} else {
&values[0]
};
// The value should be of type: Span<Felt>
let Value::Array(values) = value else {
return unexpected_value_error;
};
// The values should be of type: Felt
let Some(values) = values
.iter()
.map(|value| {
if let Value::Felt252(value) = value {
Some(*value)
} else {
None
}
})
.collect::<Option<Vec<Felt>>>()
else {
return unexpected_value_error;
};
Ok(RunResultValue::Success(values))
}
1 => {
// The value should be of type: Struct<Panic,Array<Felt>>
let Value::Struct { fields: values, .. } = value.as_ref() else {
return unexpected_value_error;
};
let value = if values.len() != 2 {
return unexpected_value_error;
} else {
&values[1]
};
// The value should be of type: Array<Felt>
let Value::Array(values) = value else {
return unexpected_value_error;
};
// The values should be of type: Felt
let Some(values) = values
.iter()
.map(|value| {
if let Value::Felt252(value) = value {
Some(*value)
} else {
None
}
})
.collect::<Option<Vec<Felt>>>()
else {
return unexpected_value_error;
};
Ok(RunResultValue::Panic(values))
}
_ => unexpected_value_error,
}
}
impl StarknetSyscallHandler for &mut StubSyscallHandler {
#[instrument(skip(self))]
fn get_block_hash(
&mut self,
block_number: u64,
remaining_gas: &mut u64,
) -> crate::starknet::SyscallResult<Felt> {
tracing::debug!("called");
deduct_gas(remaining_gas, gas_costs::GET_BLOCK_HASH)?;
if let Some(block_hash) = self.block_hash.get(&block_number) {
Ok(*block_hash)
} else {
Err(vec![Felt::from_bytes_be_slice(b"GET_BLOCK_HASH_NOT_SET")])
}
}
#[instrument(skip(self))]
fn get_execution_info(
&mut self,
remaining_gas: &mut u64,
) -> crate::starknet::SyscallResult<crate::starknet::ExecutionInfo> {
tracing::debug!("called");
deduct_gas(remaining_gas, gas_costs::GET_EXECUTION_INFO)?;
Ok(self.execution_info.clone())
}
#[instrument(skip(self))]
fn get_execution_info_v2(
&mut self,
remaining_gas: &mut u64,
) -> crate::starknet::SyscallResult<crate::starknet::ExecutionInfoV2> {
tracing::debug!("called");
deduct_gas(remaining_gas, gas_costs::GET_EXECUTION_INFO)?;
Ok(ExecutionInfoV2 {
block_info: self.execution_info.block_info,
tx_info: TxV2Info {
version: self.execution_info.tx_info.version,
account_contract_address: self.execution_info.tx_info.account_contract_address,
max_fee: self.execution_info.tx_info.max_fee,
signature: self.execution_info.tx_info.signature.clone(),
transaction_hash: self.execution_info.tx_info.transaction_hash,
chain_id: self.execution_info.tx_info.chain_id,
nonce: self.execution_info.tx_info.nonce,
..TxV2Info::default()
},
caller_address: self.execution_info.caller_address,
contract_address: self.execution_info.contract_address,
entry_point_selector: self.execution_info.entry_point_selector,
})
}
#[instrument(skip(self))]
fn get_execution_info_v3(
&mut self,
remaining_gas: &mut u64,
) -> crate::starknet::SyscallResult<crate::starknet::ExecutionInfoV3> {
todo!();
}
#[instrument(skip(self))]
fn deploy(
&mut self,
class_hash: Felt,
contract_address_salt: Felt,
calldata: &[Felt],
deploy_from_zero: bool,
remaining_gas: &mut u64,
) -> crate::starknet::SyscallResult<(Felt, Vec<Felt>)> {
tracing::debug!("called");
deduct_gas(remaining_gas, gas_costs::DEPLOY)?;
/// Max value for a contract address: 2**251 - 256.
const CONTRACT_ADDRESS_BOUND: NonZeroFelt =
NonZeroFelt::from_felt_unchecked(Felt::from_hex_unchecked(
"0x7ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00",
));
/// Cairo string for "STARKNET_CONTRACT_ADDRESS"
const CONTRACT_ADDRESS_PREFIX: Felt =
Felt::from_hex_unchecked("0x535441524b4e45545f434f4e54524143545f41444452455353");
let deployer_address = if deploy_from_zero {
Felt::zero()
} else {
self.execution_info.contract_address
};
let deployed_contract_address = {
let constructor_calldata_hash = Pedersen::hash_array(calldata);
Pedersen::hash_array(&[
CONTRACT_ADDRESS_PREFIX,
deployer_address,
contract_address_salt,
class_hash,
constructor_calldata_hash,
])
.mod_floor(&CONTRACT_ADDRESS_BOUND)
};
let Some(contract_info) = self.contracts_info.get(&class_hash) else {
return Err(vec![Felt::from_bytes_be_slice(b"CLASS_HASH_NOT_FOUND")]);
};
if self
.deployed_contracts
.insert(deployed_contract_address, class_hash)
.is_some()
{
return Err(vec![Felt::from_bytes_be_slice(
b"CONTRACT_ALREADY_DEPLOYED",
)]);
}
if let Some(constructor) = contract_info.constructor.clone() {
let old_addrs = self.open_caller_context((deployed_contract_address, deployer_address));
let res = self.call_entry_point(remaining_gas, &constructor, calldata);
self.close_caller_context(old_addrs);
match res {
Ok(res) => Ok((deployed_contract_address, res)),
Err(mut res) => {
res.push(Felt::from_bytes_be_slice(b"CONSTRUCTOR_FAILED"));
Err(res)
}
}
} else if calldata.is_empty() {
Ok((deployed_contract_address, vec![]))
} else {
// Remove the contract from the deployed contracts,
// since it failed to deploy.
self.deployed_contracts.remove(&deployed_contract_address);
Err(vec![Felt::from_bytes_be_slice(b"INVALID_CALLDATA_LEN")])
}
}
#[instrument(skip(self))]
fn replace_class(
&mut self,
class_hash: Felt,
remaining_gas: &mut u64,
) -> crate::starknet::SyscallResult<()> {
tracing::debug!("called");
deduct_gas(remaining_gas, gas_costs::REPLACE_CLASS)?;
if !self.contracts_info.contains_key(&class_hash) {
return Err(vec![Felt::from_bytes_be_slice(b"CLASS_HASH_NOT_FOUND")]);
};
self.deployed_contracts
.insert(self.execution_info.contract_address, class_hash);
Ok(())
}
#[instrument(skip(self))]
fn library_call(
&mut self,
class_hash: Felt,
function_selector: Felt,
calldata: &[Felt],
remaining_gas: &mut u64,
) -> crate::starknet::SyscallResult<Vec<Felt>> {
tracing::debug!("called");
deduct_gas(remaining_gas, gas_costs::LIBRARY_CALL)?;
let Some(contract_info) = self.contracts_info.get(&class_hash).cloned() else {
return Err(vec![Felt::from_bytes_be_slice(b"CLASS_HASH_NOT_DECLARED")]);
};
let Some(entry_point) = contract_info.externals.get(&function_selector) else {
return Err(vec![
Felt::from_bytes_be_slice(b"ENTRYPOINT_NOT_FOUND"),
Felt::from_bytes_be_slice(b"ENTRYPOINT_FAILED"),
]);
};
match self.call_entry_point(remaining_gas, entry_point, calldata) {
Ok(res) => Ok(res),
Err(mut err) => {
err.push(Felt::from_bytes_be_slice(b"ENTRYPOINT_FAILED"));
Err(err)
}
}
}
#[instrument(skip(self))]
fn call_contract(
&mut self,
address: Felt,
entry_point_selector: Felt,
calldata: &[Felt],
remaining_gas: &mut u64,
) -> crate::starknet::SyscallResult<Vec<Felt>> {
tracing::debug!("called");
deduct_gas(remaining_gas, gas_costs::CALL_CONTRACT)?;
let Some(class_hash) = self.deployed_contracts.get(&address) else {
return Err(vec![
Felt::from_bytes_be_slice(b"CONTRACT_NOT_DEPLOYED"),
Felt::from_bytes_be_slice(b"ENTRYPOINT_FAILED"),
]);
};
let contract_info = self
.contracts_info
.get(class_hash)
.expect("Deployed contract not found in registry.")
.clone();
let Some(entry_point) = contract_info.externals.get(&entry_point_selector) else {
return Err(vec![
Felt::from_bytes_be_slice(b"ENTRYPOINT_NOT_FOUND"),
Felt::from_bytes_be_slice(b"ENTRYPOINT_FAILED"),
]);
};
let old_addrs = self.open_caller_context((address, self.execution_info.contract_address));
let res = self.call_entry_point(remaining_gas, entry_point, calldata);
self.close_caller_context(old_addrs);
match res {
Ok(res) => Ok(res),
Err(mut res) => {
res.push(Felt::from_bytes_be_slice(b"ENTRYPOINT_FAILED"));
Err(res)
}
}
}
fn storage_read(
&mut self,
address_domain: u32,
address: Felt,
remaining_gas: &mut u64,
) -> crate::starknet::SyscallResult<Felt> {
tracing::debug!("called");
deduct_gas(remaining_gas, gas_costs::STORAGE_READ)?;
if !address_domain.is_zero() {
// Only address_domain 0 is currently supported.
return Err(vec![Felt::from_bytes_be_slice(
b"Unsupported address domain",
)]);
}
let value = self
.storage
.get(&self.execution_info.contract_address)
.and_then(|contract_storage| contract_storage.get(&address))
.cloned()
.unwrap_or_else(|| Felt::from(0));
Ok(value)
}
#[instrument(skip(self))]
fn storage_write(
&mut self,
address_domain: u32,
address: Felt,
value: Felt,
remaining_gas: &mut u64,
) -> crate::starknet::SyscallResult<()> {
tracing::debug!("called");
deduct_gas(remaining_gas, gas_costs::STORAGE_WRITE)?;
if !address_domain.is_zero() {
// Only address_domain 0 is currently supported.
return Err(vec![Felt::from_bytes_be_slice(
b"Unsupported address domain",
)]);
}
self.storage
.entry(self.execution_info.contract_address)
.or_default()
.insert(address, value);
Ok(())
}
#[instrument(skip(self))]
fn emit_event(
&mut self,
keys: &[Felt],
data: &[Felt],
remaining_gas: &mut u64,
) -> crate::starknet::SyscallResult<()> {
tracing::debug!("called");
deduct_gas(remaining_gas, gas_costs::EMIT_EVENT)?;
let contract = self.execution_info.contract_address;
self.logs
.entry(contract)
.or_default()
.events
.push_back(StubEvent {
keys: keys.to_vec(),
data: data.to_vec(),
});
Ok(())
}
#[instrument(skip(self))]
fn send_message_to_l1(
&mut self,
to_address: Felt,
payload: &[Felt],
remaining_gas: &mut u64,
) -> crate::starknet::SyscallResult<()> {
tracing::debug!("called");
deduct_gas(remaining_gas, gas_costs::SEND_MESSAGE_TO_L1)?;
let contract = self.execution_info.contract_address;
self.logs
.entry(contract)
.or_default()
.l2_to_l1_messages
.push_back((to_address, payload.to_vec()));
Ok(())
}
#[instrument(skip(self))]
fn keccak(&mut self, input: &[u64], gas: &mut u64) -> SyscallResult<U256> {
tracing::debug!("called");
deduct_gas(gas, gas_costs::KECCAK)?;
const KECCAK_FULL_RATE_IN_WORDS: usize = 17;
let length = input.len();
let (_n_rounds, remainder) = num_integer::div_rem(length, KECCAK_FULL_RATE_IN_WORDS);
if remainder != 0 {
// In VM this error is wrapped into `SyscallExecutionError::SyscallError`
return Err(vec![Felt::from_hex(
"0x000000000000000000000000496e76616c696420696e707574206c656e677468",
)
.unwrap()]);
}
let mut state = [0u64; 25];
for chunk in input.chunks(KECCAK_FULL_RATE_IN_WORDS) {
deduct_gas(gas, gas_costs::KECCAK_ROUND_COST)?;
for (i, val) in chunk.iter().enumerate() {
state[i] ^= val;
}
keccak::f1600(&mut state)
}
Ok(U256 {
hi: u128::from(state[2]) | (u128::from(state[3]) << 64),
lo: u128::from(state[0]) | (u128::from(state[1]) << 64),
})
}
#[instrument(skip(self))]
fn secp256k1_new(
&mut self,
x: U256,
y: U256,
remaining_gas: &mut u64,
) -> SyscallResult<Option<Secp256k1Point>> {
tracing::debug!("called");
deduct_gas(remaining_gas, gas_costs::SECP256K1_NEW)?;
Secp256Point::new(x, y).map(|op| op.map(|p| p.into()))
}
#[instrument(skip(self))]
fn secp256k1_add(
&mut self,
p0: Secp256k1Point,
p1: Secp256k1Point,
remaining_gas: &mut u64,
) -> SyscallResult<Secp256k1Point> {
tracing::debug!("called");
deduct_gas(remaining_gas, gas_costs::SECP256K1_ADD)?;
Ok(Secp256Point::add(p0.into(), p1.into()).into())
}
#[instrument(skip(self))]
fn secp256k1_mul(
&mut self,
p: Secp256k1Point,
m: U256,
remaining_gas: &mut u64,
) -> SyscallResult<Secp256k1Point> {
tracing::debug!("called");
deduct_gas(remaining_gas, gas_costs::SECP256K1_MUL)?;
Ok(Secp256Point::mul(p.into(), m).into())
}
#[instrument(skip(self))]
fn secp256k1_get_point_from_x(
&mut self,
x: U256,
y_parity: bool,
remaining_gas: &mut u64,
) -> SyscallResult<Option<Secp256k1Point>> {
tracing::debug!("called");
deduct_gas(remaining_gas, gas_costs::SECP256K1_GET_POINT_FROM_X)?;
Secp256Point::get_point_from_x(x, y_parity).map(|op| op.map(|p| p.into()))
}
#[instrument(skip(self))]
fn secp256k1_get_xy(
&mut self,
p: Secp256k1Point,
remaining_gas: &mut u64,
) -> SyscallResult<(U256, U256)> {
tracing::debug!("called");
deduct_gas(remaining_gas, gas_costs::SECP256K1_GET_XY)?;
Ok((p.x, p.y))
}
#[instrument(skip(self))]
fn secp256r1_new(
&mut self,
x: U256,
y: U256,
remaining_gas: &mut u64,
) -> SyscallResult<Option<Secp256r1Point>> {
tracing::debug!("called");
deduct_gas(remaining_gas, gas_costs::SECP256R1_NEW)?;
Secp256Point::new(x, y).map(|op| op.map(|p| p.into()))
}
#[instrument(skip(self))]
fn secp256r1_add(
&mut self,
p0: Secp256r1Point,
p1: Secp256r1Point,
remaining_gas: &mut u64,
) -> SyscallResult<Secp256r1Point> {
tracing::debug!("called");
deduct_gas(remaining_gas, gas_costs::SECP256R1_ADD)?;
Ok(Secp256Point::add(p0.into(), p1.into()).into())
}
#[instrument(skip(self))]
fn secp256r1_mul(
&mut self,
p: Secp256r1Point,
m: U256,
remaining_gas: &mut u64,
) -> SyscallResult<Secp256r1Point> {
tracing::debug!("called");
deduct_gas(remaining_gas, gas_costs::SECP256R1_MUL)?;
Ok(Secp256Point::mul(p.into(), m).into())
}
#[instrument(skip(self))]
fn secp256r1_get_point_from_x(
&mut self,
x: U256,
y_parity: bool,
remaining_gas: &mut u64,
) -> SyscallResult<Option<Secp256r1Point>> {
tracing::debug!("called");
deduct_gas(remaining_gas, gas_costs::SECP256R1_GET_POINT_FROM_X)?;
Secp256Point::get_point_from_x(x, y_parity).map(|op| op.map(|p| p.into()))
}
#[instrument(skip(self))]
fn secp256r1_get_xy(
&mut self,
p: Secp256r1Point,
remaining_gas: &mut u64,
) -> SyscallResult<(U256, U256)> {
tracing::debug!("called");
deduct_gas(remaining_gas, gas_costs::SECP256R1_GET_XY)?;
Ok((p.x, p.y))
}
#[instrument(skip(self))]
fn meta_tx_v0(
&mut self,
address: Felt,
entry_point_selector: Felt,
calldata: &[Felt],
signature: &[Felt],
remaining_gas: &mut u64,
) -> SyscallResult<Vec<Felt>> {
todo!("Implement meta_tx_v0 syscall");
}
#[cfg(feature = "with-cheatcode")]
#[instrument(skip(self))]
fn cheatcode(&mut self, selector: Felt, input: &[Felt]) -> Vec<Felt> {
tracing::debug!("called");
let selector_bytes = selector.to_bytes_be();
let selector = match std::str::from_utf8(&selector_bytes) {
Ok(selector) => selector.trim_start_matches('\0'),
Err(_) => return Vec::new(),
};
match selector {
"set_sequencer_address" => {
self.execution_info.block_info.sequencer_address = input[0];
vec![]
}
"set_caller_address" => {
self.execution_info.caller_address = input[0];
vec![]
}
"set_contract_address" => {
self.execution_info.contract_address = input[0];
vec![]
}
"set_account_contract_address" => {
self.execution_info.tx_info.account_contract_address = input[0];
vec![]
}
"set_transaction_hash" => {
self.execution_info.tx_info.transaction_hash = input[0];
vec![]
}
"set_nonce" => {
self.execution_info.tx_info.nonce = input[0];
vec![]
}
"set_version" => {
self.execution_info.tx_info.version = input[0];
vec![]
}
"set_chain_id" => {
self.execution_info.tx_info.chain_id = input[0];
vec![]
}
"set_max_fee" => {
let max_fee = input[0].to_biguint().try_into().unwrap();
self.execution_info.tx_info.max_fee = max_fee;
vec![]
}
"set_block_number" => {
let block_number = input[0].to_biguint().try_into().unwrap();
self.execution_info.block_info.block_number = block_number;
vec![]
}
"set_block_timestamp" => {
let block_timestamp = input[0].to_biguint().try_into().unwrap();
self.execution_info.block_info.block_timestamp = block_timestamp;
vec![]
}
"set_block_hash" => {
let block_number = input[0].to_biguint().try_into().unwrap();
let block_hash = input[1];
self.block_hash.insert(block_number, block_hash);
vec![]
}
"set_signature" => {
self.execution_info.tx_info.signature = input.to_vec();
vec![]
}
"pop_log" => self
.logs
.get_mut(&input[0])
.and_then(|logs| logs.events.pop_front())
.map(|mut log| {
let mut serialized_log = Vec::new();
serialized_log.push(log.keys.len().into());
serialized_log.append(&mut log.keys);
| rust | Apache-2.0 | f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce | 2026-01-04T20:20:54.031924Z | true |
lambdaclass/cairo_native | https://github.com/lambdaclass/cairo_native/blob/f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce/src/lib.rs | src/lib.rs | #![allow(clippy::missing_safety_doc)]
#![allow(rustdoc::bare_urls)]
// The following line contains a markdown reference link.
// This is necessary to override the link destination in the README.md file, so
// that when the README.md is rendered standalone (e.g. on Github) it points to
// the online version, and when rendered by rustdoc to the docs module rendered
// page.
//! [developer documentation]: docs
#![doc = include_str!("../README.md")]
pub use self::{
compiler::compile,
ffi::{module_to_object, object_to_shared_lib, OptLevel},
runtime::FormattedItem,
values::Value,
};
mod arch;
pub mod cache;
mod compiler;
pub mod context;
pub mod debug;
pub mod docs;
pub mod error;
pub mod execution_result;
pub mod executor;
mod ffi;
mod libfuncs;
pub mod metadata;
pub mod module;
mod runtime;
pub mod starknet;
pub mod starknet_stub;
pub mod statistics;
mod types;
pub mod utils;
mod values;
| rust | Apache-2.0 | f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce | 2026-01-04T20:20:54.031924Z | false |
lambdaclass/cairo_native | https://github.com/lambdaclass/cairo_native/blob/f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce/src/module.rs | src/module.rs | use crate::metadata::MetadataStorage;
use cairo_lang_sierra::{
extensions::core::{CoreLibfunc, CoreType},
program_registry::ProgramRegistry,
};
use melior::ir::Module;
use std::{any::Any, fmt::Debug};
/// A MLIR module in the context of Cairo Native.
/// It is conformed by the MLIR module, the Sierra program registry
/// and the program metadata.
pub struct NativeModule<'m> {
pub(crate) module: Module<'m>,
pub(crate) registry: ProgramRegistry<CoreType, CoreLibfunc>,
pub(crate) metadata: MetadataStorage,
}
impl<'m> NativeModule<'m> {
pub const fn new(
module: Module<'m>,
registry: ProgramRegistry<CoreType, CoreLibfunc>,
metadata: MetadataStorage,
) -> Self {
Self {
module,
registry,
metadata,
}
}
/// Insert some metadata for the program execution and return a mutable reference to it.
///
/// The insertion will fail, if there is already some metadata with the same type, in which case
/// it'll return `None`.
pub fn insert_metadata<T>(&mut self, meta: T) -> Option<&mut T>
where
T: Any,
{
self.metadata.insert(meta)
}
/// Removes metadata
pub fn remove_metadata<T>(&mut self) -> Option<T>
where
T: Any,
{
self.metadata.remove()
}
/// Retrieve a reference to some stored metadata.
///
/// The retrieval will fail if there is no metadata with the requested type, in which case it'll
/// return `None`.
pub fn get_metadata<T>(&self) -> Option<&T>
where
T: Any,
{
self.metadata.get::<T>()
}
pub const fn metadata(&self) -> &MetadataStorage {
&self.metadata
}
pub const fn module(&'_ self) -> &'_ Module<'_> {
&self.module
}
pub const fn program_registry(&self) -> &ProgramRegistry<CoreType, CoreLibfunc> {
&self.registry
}
}
impl Debug for NativeModule<'_> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_str(&self.module.as_operation().to_string())
}
}
#[cfg(test)]
mod test {
use super::*;
use crate::context::NativeContext;
use cairo_lang_sierra::ProgramParser;
use melior::ir::Location;
use starknet_types_core::felt::Felt;
#[test]
fn test_insert_metadata() {
// Create a new context for MLIR operations
let native_context = NativeContext::new();
let context = native_context.context();
// Create an unknown location in the context
let location = Location::unknown(context);
// Create a new MLIR module with the unknown location
let module = Module::new(location);
// Parse a simple program to create a Program instance
let program = ProgramParser::new()
.parse("type felt252 = felt252;")
.unwrap();
// Create a ProgramRegistry based on the parsed program
let registry = ProgramRegistry::<CoreType, CoreLibfunc>::new(&program).unwrap();
// Create a new NativeModule instance with the module, registry, and MetadataStorage
let mut module = NativeModule::new(module, registry, MetadataStorage::new());
// Insert metadata of type u32 into the module
module.insert_metadata(42u32);
// Assert that the inserted metadata of type u32 is retrieved correctly
assert_eq!(module.get_metadata::<u32>(), Some(&42u32));
// Insert metadata of type Felt into the module
module.insert_metadata(Felt::from(43));
// Assert that the inserted metadata of type Felt is retrieved correctly
assert_eq!(module.get_metadata::<Felt>(), Some(&Felt::from(43)));
// Insert metadata of type u64 into the module
module.insert_metadata(44u64);
// Assert that the inserted metadata of type u64 is retrieved correctly
assert_eq!(module.metadata().get::<u64>(), Some(&44u64));
}
#[test]
fn test_remove_metadata() {
// Create a new context for MLIR operations
let native_context = NativeContext::new();
let context = native_context.context();
// Create an unknown location in the context
let location = Location::unknown(context);
// Create a new MLIR module with the unknown location
let module = Module::new(location);
// Parse a simple program to create a Program instance
let program = ProgramParser::new()
.parse("type felt252 = felt252;")
.unwrap();
// Create a ProgramRegistry based on the parsed program
let registry = ProgramRegistry::<CoreType, CoreLibfunc>::new(&program).unwrap();
// Create a new NativeModule instance with the module, registry, and MetadataStorage
let mut module = NativeModule::new(module, registry, MetadataStorage::new());
// Insert metadata of type u32 into the module
module.insert_metadata(42u32);
// Assert that the inserted metadata of type u32 is retrieved correctly
assert_eq!(module.get_metadata::<u32>(), Some(&42u32));
// Insert metadata of type Felt into the module
module.insert_metadata(Felt::from(43));
// Assert that the inserted metadata of type Felt is retrieved correctly
assert_eq!(module.get_metadata::<Felt>(), Some(&Felt::from(43)));
// Remove metadata of type u32 from the module
module.remove_metadata::<u32>();
// Assert that the metadata of type u32 is removed from the module
assert!(module.get_metadata::<u32>().is_none());
// Assert that the metadata of type Felt is still present in the module
assert_eq!(module.get_metadata::<Felt>(), Some(&Felt::from(43)));
// Insert metadata of type u32 into the module again
module.insert_metadata(44u32);
// Assert that the re-inserted metadata of type u32 is retrieved correctly
assert_eq!(module.get_metadata::<u32>(), Some(&44u32));
}
}
| rust | Apache-2.0 | f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce | 2026-01-04T20:20:54.031924Z | false |
lambdaclass/cairo_native | https://github.com/lambdaclass/cairo_native/blob/f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce/src/values.rs | src/values.rs | //! # Params and return values de/serialization
//!
//! A Rusty interface to provide parameters to cairo-native entry point calls.
use crate::{
error::{panic::ToNativeAssertError, CompilerError, Error},
native_assert, native_panic,
runtime::FeltDict,
starknet::{Secp256k1Point, Secp256r1Point},
types::TypeBuilder,
utils::{
felt252_bigint, get_integer_layout, layout_repeat, libc_free, libc_malloc, RangeExt, PRIME,
},
};
use bumpalo::Bump;
use cairo_lang_sierra::{
extensions::{
circuit::CircuitTypeConcrete,
core::{CoreLibfunc, CoreType, CoreTypeConcrete},
starknet::{secp256::Secp256PointTypeConcrete, StarknetTypeConcrete},
utils::Range,
},
ids::ConcreteTypeId,
program_registry::ProgramRegistry,
};
use educe::Educe;
use num_bigint::{BigInt, BigUint, Sign};
use num_traits::{Euclid, One};
use starknet_types_core::felt::Felt;
use std::{
alloc::{alloc, Layout},
collections::HashMap,
ffi::c_void,
mem::forget,
ptr::{null_mut, NonNull},
rc::Rc,
slice,
};
/// A Value is a value that can be passed to either the JIT engine or a compiled program as an argument or received as a result.
///
/// They map to the cairo/sierra types.
///
/// The debug_name field on some variants is `Some` when receiving a [`Value`] as a result.
///
/// A Boxed value or a non-null Nullable value is returned with it's inner value.
#[derive(Clone, Educe, serde::Serialize, serde::Deserialize)]
#[educe(Debug, Eq, PartialEq)]
pub enum Value {
Felt252(#[educe(Debug(method(std::fmt::Display::fmt)))] Felt),
Bytes31([u8; 31]),
/// all elements need to be same type
Array(Vec<Self>),
Struct {
fields: Vec<Self>,
#[educe(PartialEq(ignore))]
debug_name: Option<String>,
}, // element types can differ
Enum {
tag: usize,
value: Box<Self>,
#[educe(PartialEq(ignore))]
debug_name: Option<String>,
},
Felt252Dict {
value: HashMap<Felt, Self>,
#[educe(PartialEq(ignore))]
debug_name: Option<String>,
},
Uint8(u8),
Uint16(u16),
Uint32(u32),
Uint64(u64),
Uint128(u128),
Sint8(i8),
Sint16(i16),
Sint32(i32),
Sint64(i64),
Sint128(i128),
EcPoint(Felt, Felt),
EcState(Felt, Felt, Felt, Felt),
Secp256K1Point(Secp256k1Point),
Secp256R1Point(Secp256r1Point),
BoundedInt {
value: Felt,
#[serde(with = "range_serde")]
range: Range,
},
IntRange {
x: Box<Value>,
y: Box<Value>,
},
/// Used as return value for Nullables that are null.
Null,
}
// Conversions
macro_rules! impl_conversions {
( $( $t:ty as $i:ident ; )+ ) => { $(
impl From<$t> for Value {
fn from(value: $t) -> Self {
Self::$i(value)
}
}
impl TryFrom<Value> for $t {
type Error = Value;
fn try_from(value: Value) -> Result<Self, Self::Error> {
match value {
Value::$i(value) => Ok(value),
_ => Err(value),
}
}
}
)+ };
}
impl_conversions! {
Felt as Felt252;
u8 as Uint8;
u16 as Uint16;
u32 as Uint32;
u64 as Uint64;
u128 as Uint128;
i8 as Sint8;
i16 as Sint16;
i32 as Sint32;
i64 as Sint64;
i128 as Sint128;
}
impl<T: Into<Value> + Clone> From<&[T]> for Value {
fn from(value: &[T]) -> Self {
Self::Array(value.iter().map(|x| x.clone().into()).collect())
}
}
impl<T: Into<Value>> From<Vec<T>> for Value {
fn from(value: Vec<T>) -> Self {
Self::Array(value.into_iter().map(Into::into).collect())
}
}
impl<T: Into<Value>, const N: usize> From<[T; N]> for Value {
fn from(value: [T; N]) -> Self {
Self::Array(value.into_iter().map(Into::into).collect())
}
}
impl Value {
pub(crate) fn resolve_type<'a>(
ty: &'a CoreTypeConcrete,
registry: &'a ProgramRegistry<CoreType, CoreLibfunc>,
) -> Result<&'a CoreTypeConcrete, Error> {
Ok(match ty {
CoreTypeConcrete::Snapshot(info) => registry.get_type(&info.ty)?,
x => x,
})
}
/// Allocates the value in the given arena so it can be passed to the JIT engine or a compiled program.
pub(crate) fn to_ptr(
&self,
arena: &Bump,
registry: &ProgramRegistry<CoreType, CoreLibfunc>,
type_id: &ConcreteTypeId,
find_dict_drop_override: impl Copy + Fn(&ConcreteTypeId) -> Option<extern "C" fn(*mut c_void)>,
) -> Result<NonNull<()>, Error> {
let ty = registry.get_type(type_id)?;
Ok(unsafe {
match self {
Self::Felt252(value) => {
let ptr = arena.alloc_layout(get_integer_layout(252)).cast();
let data = felt252_bigint(value.to_bigint()).to_bytes_le();
ptr.cast::<[u8; 32]>().as_mut().copy_from_slice(&data);
ptr
}
Self::BoundedInt {
value,
range: Range { lower, upper },
} => {
let value = value.to_bigint();
if lower >= upper {
// If lower bound is greater than or equal to upper bound
// Should not happen with correct range definition
return Err(CompilerError::BoundedIntOutOfRange {
value: Box::new(value),
range: Box::new((lower.clone(), upper.clone())),
}
.into());
}
let prime = BigInt::from_biguint(Sign::Plus, PRIME.clone());
let lower = lower.rem_euclid(&prime);
let upper = upper.rem_euclid(&prime);
// Check if value is within the valid range
if !(lower <= value && value < upper) {
return Err(CompilerError::BoundedIntOutOfRange {
value: Box::new(value),
range: Box::new((lower, upper)),
}
.into());
}
let ptr = arena.alloc_layout(get_integer_layout(252)).cast();
let data = felt252_bigint(value).to_bytes_le();
ptr.cast::<[u8; 32]>().as_mut().copy_from_slice(&data);
ptr
}
Self::Bytes31(_) => native_panic!("todo: allocate type Bytes31"),
Self::Array(data) => {
if let CoreTypeConcrete::Array(info) = Self::resolve_type(ty, registry)? {
let elem_ty = registry.get_type(&info.ty)?;
let elem_layout = elem_ty.layout(registry)?.pad_to_align();
let refcount_offset =
crate::types::array::calc_data_prefix_offset(elem_layout);
let len: u32 = data
.len()
.try_into()
.map_err(|_| Error::IntegerConversion)?;
let ptr: *mut () = match len {
0 => std::ptr::null_mut(),
_ => {
let ptr: *mut () =
libc_malloc(elem_layout.size() * data.len() + refcount_offset)
.cast();
// Write reference count.
ptr.cast::<(u32, u32)>().write((1, len));
ptr.byte_add(refcount_offset).cast()
}
};
// Write the data.
for (idx, elem) in data.iter().enumerate() {
let elem =
elem.to_ptr(arena, registry, &info.ty, find_dict_drop_override)?;
std::ptr::copy_nonoverlapping(
elem.cast::<u8>().as_ptr(),
ptr.byte_add(idx * elem_layout.size()).cast::<u8>(),
elem_layout.size(),
);
}
// Make double pointer.
let ptr_ptr = if ptr.is_null() {
null_mut()
} else {
let ptr_ptr: *mut *mut () = libc_malloc(8).cast();
ptr_ptr.write(ptr);
ptr_ptr
};
let target = arena
.alloc_layout(
Layout::new::<*mut ()>() // ptr
.extend(Layout::new::<u32>())? // start
.0
.extend(Layout::new::<u32>())? // end
.0
.extend(Layout::new::<u32>())? // capacity
.0
.pad_to_align(),
)
.as_ptr();
*target.cast::<*mut ()>() = ptr_ptr.cast();
let (layout, offset) =
Layout::new::<*mut NonNull<()>>().extend(Layout::new::<u32>())?;
*target.byte_add(offset).cast::<u32>() = 0;
let (layout, offset) = layout.extend(Layout::new::<u32>())?;
*target.byte_add(offset).cast::<u32>() = len;
let (_, offset) = layout.extend(Layout::new::<u32>())?;
*target.byte_add(offset).cast::<u32>() = len;
NonNull::new_unchecked(target).cast()
} else {
Err(Error::UnexpectedValue(format!(
"expected value of type {:?} but got an array",
type_id.debug_name
)))?
}
}
Self::Struct {
fields: members, ..
} => {
if let CoreTypeConcrete::Struct(info) = Self::resolve_type(ty, registry)? {
let mut layout: Option<Layout> = None;
let mut data = Vec::with_capacity(info.members.len());
let mut is_memory_allocated = false;
for (member_type_id, member) in info.members.iter().zip(members) {
let member_ty = registry.get_type(member_type_id)?;
let member_layout = member_ty.layout(registry)?;
let (new_layout, offset) = match layout {
Some(layout) => layout.extend(member_layout)?,
None => (member_layout, 0),
};
layout = Some(new_layout);
let member_ptr = member.to_ptr(
arena,
registry,
member_type_id,
find_dict_drop_override,
)?;
data.push((
member_layout,
offset,
if member_ty.is_memory_allocated(registry)? {
is_memory_allocated = true;
// Undo the wrapper pointer added because the member's memory
// allocated flag.
*member_ptr.cast::<NonNull<()>>().as_ref()
} else {
member_ptr
},
));
}
let ptr = arena
.alloc_layout(layout.unwrap_or(Layout::new::<()>()).pad_to_align())
.as_ptr();
for (layout, offset, member_ptr) in data {
std::ptr::copy_nonoverlapping(
member_ptr.cast::<u8>().as_ptr(),
ptr.byte_add(offset),
layout.size(),
);
}
if is_memory_allocated {
// alloc returns a ref, so its never null
NonNull::new_unchecked(arena.alloc(ptr) as *mut _).cast()
} else {
NonNull::new_unchecked(ptr).cast()
}
} else {
Err(Error::UnexpectedValue(format!(
"expected value of type {:?} but got a struct",
type_id.debug_name
)))?
}
}
Self::Enum { tag, value, .. } => {
if let CoreTypeConcrete::Enum(info) = Self::resolve_type(ty, registry)? {
native_assert!(*tag < info.variants.len(), "Variant index out of range.");
let payload_type_id = &info.variants[*tag];
let payload = value.to_ptr(
arena,
registry,
payload_type_id,
find_dict_drop_override,
)?;
let (layout, tag_layout, variant_layouts) =
crate::types::r#enum::get_layout_for_variants(
registry,
&info.variants,
)?;
let ptr = arena.alloc_layout(layout).cast::<()>().as_ptr();
match tag_layout.size() {
0 => native_panic!("An enum without variants cannot be instantiated."),
1 => *ptr.cast::<u8>() = *tag as u8,
2 => *ptr.cast::<u16>() = *tag as u16,
4 => *ptr.cast::<u32>() = *tag as u32,
8 => *ptr.cast::<u64>() = *tag as u64,
_ => native_panic!("reached the maximum size for an enum"),
}
std::ptr::copy_nonoverlapping(
payload.cast::<u8>().as_ptr(),
ptr.byte_add(tag_layout.extend(variant_layouts[*tag])?.1)
.cast(),
variant_layouts[*tag].size(),
);
// alloc returns a reference so its never null
NonNull::new_unchecked(arena.alloc(ptr) as *mut _).cast()
} else {
Err(Error::UnexpectedValue(format!(
"expected value of type {:?} but got an enum value",
type_id.debug_name
)))?
}
}
Self::Felt252Dict { value: map, .. } => {
if let CoreTypeConcrete::Felt252Dict(info) = Self::resolve_type(ty, registry)? {
let elem_ty = registry.get_type(&info.ty)?;
let elem_layout = elem_ty.layout(registry)?.pad_to_align();
// We need `find_dict_drop_override` to obtain the function pointers of drop
// implementations (if any) for the value type. This is required to be able to drop
// the dictionary automatically when their reference count drops to zero.
let drop_fn = find_dict_drop_override(&info.ty);
let mut value_map = FeltDict {
mappings: HashMap::with_capacity(map.len()),
layout: elem_layout,
elements: if map.is_empty() {
null_mut()
} else {
alloc(Layout::from_size_align_unchecked(
elem_layout.pad_to_align().size() * map.len(),
elem_layout.align(),
))
.cast()
},
drop_fn,
count: 0,
};
// next key must be called before next_value
for (key, value) in map.iter() {
let key = key.to_bytes_le();
let value =
value.to_ptr(arena, registry, &info.ty, find_dict_drop_override)?;
let index = value_map.mappings.len();
value_map.mappings.insert(key, index);
std::ptr::copy_nonoverlapping(
value.cast::<u8>().as_ptr(),
value_map
.elements
.byte_add(elem_layout.pad_to_align().size() * index)
.cast(),
elem_layout.size(),
);
}
NonNull::new_unchecked(Rc::into_raw(Rc::new(value_map)) as *mut ()).cast()
} else {
Err(Error::UnexpectedValue(format!(
"expected value of type {:?} but got a felt dict",
type_id.debug_name
)))?
}
}
Self::Uint8(value) => {
let ptr = arena.alloc_layout(Layout::new::<u8>()).cast();
*ptr.cast::<u8>().as_mut() = *value;
ptr
}
Self::Uint16(value) => {
let ptr = arena.alloc_layout(Layout::new::<u16>()).cast();
*ptr.cast::<u16>().as_mut() = *value;
ptr
}
Self::Uint32(value) => {
let ptr = arena.alloc_layout(Layout::new::<u32>()).cast();
*ptr.cast::<u32>().as_mut() = *value;
ptr
}
Self::Uint64(value) => {
let ptr = arena.alloc_layout(Layout::new::<u64>()).cast();
*ptr.cast::<u64>().as_mut() = *value;
ptr
}
Self::Uint128(value) => {
let ptr = arena.alloc_layout(Layout::new::<u128>()).cast();
*ptr.cast::<u128>().as_mut() = *value;
ptr
}
Self::Sint8(value) => {
let ptr = arena.alloc_layout(Layout::new::<i8>()).cast();
*ptr.cast::<i8>().as_mut() = *value;
ptr
}
Self::Sint16(value) => {
let ptr = arena.alloc_layout(Layout::new::<i16>()).cast();
*ptr.cast::<i16>().as_mut() = *value;
ptr
}
Self::Sint32(value) => {
let ptr = arena.alloc_layout(Layout::new::<i32>()).cast();
*ptr.cast::<i32>().as_mut() = *value;
ptr
}
Self::Sint64(value) => {
let ptr = arena.alloc_layout(Layout::new::<i64>()).cast();
*ptr.cast::<i64>().as_mut() = *value;
ptr
}
Self::Sint128(value) => {
let ptr = arena.alloc_layout(Layout::new::<i128>()).cast();
*ptr.cast::<i128>().as_mut() = *value;
ptr
}
Self::EcPoint(a, b) => {
let ptr = arena
.alloc_layout(layout_repeat(&get_integer_layout(252), 2)?.0.pad_to_align())
.cast();
let a = felt252_bigint(a.to_bigint()).to_bytes_le();
let b = felt252_bigint(b.to_bigint()).to_bytes_le();
let data = [a, b];
ptr.cast::<[[u8; 32]; 2]>().as_mut().copy_from_slice(&data);
ptr
}
Self::EcState(a, b, c, d) => {
let ptr = arena
.alloc_layout(layout_repeat(&get_integer_layout(252), 4)?.0.pad_to_align())
.cast();
let a = felt252_bigint(a.to_bigint()).to_bytes_le();
let b = felt252_bigint(b.to_bigint()).to_bytes_le();
let c = felt252_bigint(c.to_bigint()).to_bytes_le();
let d = felt252_bigint(d.to_bigint()).to_bytes_le();
let data = [a, b, c, d];
ptr.cast::<[[u8; 32]; 4]>().as_mut().copy_from_slice(&data);
ptr
}
Self::Secp256K1Point { .. } => native_panic!("todo: allocate type Secp256K1Point"),
Self::Secp256R1Point { .. } => native_panic!("todo: allocate type Secp256R1Point"),
Self::Null => {
native_panic!(
"unimplemented: null is meant as return value for nullable for now"
)
}
Self::IntRange { x, y } => {
if let CoreTypeConcrete::IntRange(info) = Self::resolve_type(ty, registry)? {
let inner = registry.get_type(&info.ty)?;
let inner_layout = inner.layout(registry)?;
let x_ptr = x.to_ptr(arena, registry, &info.ty, find_dict_drop_override)?;
let (struct_layout, y_offset) = inner_layout.extend(inner_layout)?;
let y_ptr = y.to_ptr(arena, registry, &info.ty, find_dict_drop_override)?;
let ptr = arena.alloc_layout(struct_layout.pad_to_align()).as_ptr();
std::ptr::copy_nonoverlapping(
x_ptr.cast::<u8>().as_ptr(),
ptr,
inner_layout.size(),
);
std::ptr::copy_nonoverlapping(
y_ptr.cast::<u8>().as_ptr(),
ptr.byte_add(y_offset),
inner_layout.size(),
);
NonNull::new_unchecked(ptr).cast()
} else {
native_panic!(
"an IntRange value should always have an IntRange CoreTypeConcrete"
)
}
}
}
})
}
/// From the given pointer acquired from the either the JIT / compiled program outputs, convert it to a [`Self`]
pub(crate) fn from_ptr(
ptr: NonNull<()>,
type_id: &ConcreteTypeId,
registry: &ProgramRegistry<CoreType, CoreLibfunc>,
should_drop: bool,
) -> Result<Self, Error> {
let ty = registry.get_type(type_id)?;
Ok(unsafe {
match ty {
CoreTypeConcrete::Array(info) => {
let elem_ty = registry.get_type(&info.ty)?;
let elem_layout = elem_ty.layout(registry)?;
let elem_stride = elem_layout.pad_to_align().size();
let ptr_layout = Layout::new::<*mut ()>();
let len_layout = crate::utils::get_integer_layout(32);
let (ptr_layout, offset) = ptr_layout.extend(len_layout)?;
let start_offset_value = *NonNull::new(ptr.as_ptr().byte_add(offset))
.to_native_assert_error("tried to make a non-null ptr out of a null one")?
.cast::<u32>()
.as_ref();
let (_, offset) = ptr_layout.extend(len_layout)?;
let end_offset_value = *NonNull::new(ptr.as_ptr().byte_add(offset))
.to_native_assert_error("tried to make a non-null ptr out of a null one")?
.cast::<u32>()
.as_ref();
// This pointer can be null if the array is empty.
let array_ptr_ptr = *ptr.cast::<*mut *mut ()>().as_ref();
let refcount_offset = crate::types::array::calc_data_prefix_offset(elem_layout);
let array_value = if array_ptr_ptr.is_null() {
Vec::new()
} else {
let array_ptr = array_ptr_ptr.read();
let ref_count = array_ptr
.byte_sub(refcount_offset)
.cast::<u32>()
.as_mut()
.to_native_assert_error("array data pointer should not be null")?;
if should_drop {
*ref_count -= 1;
}
native_assert!(
end_offset_value >= start_offset_value,
"can't have an array with negative length"
);
let num_elems = (end_offset_value - start_offset_value) as usize;
if *ref_count == 0 {
// Drop prefix elements.
for i in 0..start_offset_value {
let cur_elem_ptr =
NonNull::new(array_ptr.byte_add(elem_stride * i as usize))
.to_native_assert_error(
"tried to make a non-null ptr out of a null one",
)?;
drop(Self::from_ptr(
cur_elem_ptr,
&info.ty,
registry,
should_drop,
)?);
}
}
let mut array_value = Vec::with_capacity(num_elems);
for i in start_offset_value..end_offset_value {
let cur_elem_ptr =
NonNull::new(array_ptr.byte_add(elem_stride * i as usize))
.to_native_assert_error(
"tried to make a non-null ptr out of a null one",
)?;
array_value.push(Self::from_ptr(
cur_elem_ptr,
&info.ty,
registry,
*ref_count == 0,
)?);
}
if *ref_count == 0 {
// Drop suffix elements.
let array_max_len = array_ptr
.byte_sub(refcount_offset - size_of::<u32>())
.cast::<u32>()
.read();
for i in end_offset_value..array_max_len {
let cur_elem_ptr =
NonNull::new(array_ptr.byte_add(elem_stride * i as usize))
.to_native_assert_error(
"tried to make a non-null ptr out of a null one",
)?;
drop(Self::from_ptr(
cur_elem_ptr,
&info.ty,
registry,
should_drop,
)?);
}
// Free array storage.
libc_free(array_ptr.byte_sub(refcount_offset).cast());
libc_free(array_ptr_ptr.cast());
}
array_value
};
Self::Array(array_value)
}
CoreTypeConcrete::Box(info) => {
let inner = *ptr.cast::<NonNull<()>>().as_ptr();
let value = Self::from_ptr(inner, &info.ty, registry, should_drop)?;
if should_drop {
libc_free(inner.as_ptr().cast());
}
value
}
CoreTypeConcrete::EcPoint(_) => {
let data = ptr.cast::<[[u8; 32]; 2]>().as_mut();
data[0][31] &= 0x0F; // Filter out first 4 bits (they're outside an i252).
data[1][31] &= 0x0F; // Filter out first 4 bits (they're outside an i252).
Self::EcPoint(Felt::from_bytes_le(&data[0]), Felt::from_bytes_le(&data[1]))
}
CoreTypeConcrete::EcState(_) => {
let data = ptr.cast::<[[u8; 32]; 4]>().as_mut();
data[0][31] &= 0x0F; // Filter out first 4 bits (they're outside an i252).
data[1][31] &= 0x0F; // Filter out first 4 bits (they're outside an i252).
data[2][31] &= 0x0F; // Filter out first 4 bits (they're outside an i252).
data[3][31] &= 0x0F; // Filter out first 4 bits (they're outside an i252).
Self::EcState(
Felt::from_bytes_le(&data[0]),
Felt::from_bytes_le(&data[1]),
Felt::from_bytes_le(&data[2]),
Felt::from_bytes_le(&data[3]),
)
}
CoreTypeConcrete::Felt252(_) => {
let data = ptr.cast::<[u8; 32]>().as_mut();
data[31] &= 0x0F; // Filter out first 4 bits (they're outside an i252).
let data = Felt::from_bytes_le_slice(data);
Self::Felt252(data)
}
CoreTypeConcrete::Uint8(_) => Self::Uint8(*ptr.cast::<u8>().as_ref()),
CoreTypeConcrete::Uint16(_) => Self::Uint16(*ptr.cast::<u16>().as_ref()),
CoreTypeConcrete::Uint32(_) => Self::Uint32(*ptr.cast::<u32>().as_ref()),
CoreTypeConcrete::Uint64(_) => Self::Uint64(*ptr.cast::<u64>().as_ref()),
CoreTypeConcrete::Uint128(_) => Self::Uint128(*ptr.cast::<u128>().as_ref()),
CoreTypeConcrete::Uint128MulGuarantee(_) => {
native_panic!("todo: implement uint128mulguarantee from_ptr")
}
CoreTypeConcrete::Sint8(_) => Self::Sint8(*ptr.cast::<i8>().as_ref()),
CoreTypeConcrete::Sint16(_) => Self::Sint16(*ptr.cast::<i16>().as_ref()),
CoreTypeConcrete::Sint32(_) => Self::Sint32(*ptr.cast::<i32>().as_ref()),
CoreTypeConcrete::Sint64(_) => Self::Sint64(*ptr.cast::<i64>().as_ref()),
CoreTypeConcrete::Sint128(_) => Self::Sint128(*ptr.cast::<i128>().as_ref()),
CoreTypeConcrete::NonZero(info) => {
Self::from_ptr(ptr, &info.ty, registry, should_drop)?
}
CoreTypeConcrete::Nullable(info) => {
let inner_ptr = *ptr.cast::<*mut ()>().as_ptr();
if inner_ptr.is_null() {
Self::Null
} else {
let value = Self::from_ptr(
NonNull::new_unchecked(inner_ptr).cast(),
| rust | Apache-2.0 | f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce | 2026-01-04T20:20:54.031924Z | true |
lambdaclass/cairo_native | https://github.com/lambdaclass/cairo_native/blob/f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce/src/execution_result.rs | src/execution_result.rs | use std::ops::{Add, AddAssign};
/// # Execution Result
///
/// This module contains the structures used to interpret the program execution results, either
/// normal programs or starknet contracts.
use crate::{error::Error, native_panic, utils::decode_error_message, values::Value};
use starknet_types_core::felt::Felt;
#[derive(
Debug,
Default,
Clone,
Copy,
PartialEq,
Eq,
PartialOrd,
Ord,
Hash,
serde::Serialize,
serde::Deserialize,
)]
pub struct BuiltinStats {
pub range_check: usize,
pub pedersen: usize,
pub bitwise: usize,
pub ec_op: usize,
pub poseidon: usize,
pub segment_arena: usize,
pub range_check96: usize,
pub add_mod: usize,
pub mul_mod: usize,
}
pub const RANGE_CHECK_BUILTIN_SIZE: usize = 1;
pub const PEDERSEN_BUILTIN_SIZE: usize = 3;
pub const BITWISE_BUILTIN_SIZE: usize = 5;
pub const EC_OP_BUILTIN_SIZE: usize = 7;
pub const POSEIDON_BUILTIN_SIZE: usize = 6;
pub const SEGMENT_ARENA_BUILTIN_SIZE: usize = 3;
pub const RANGE_CHECK96_BUILTIN_SIZE: usize = 1;
pub const ADD_MOD_BUILTIN_SIZE: usize = 7;
pub const MUL_MOD_BUILTIN_SIZE: usize = 7;
/// The result of the JIT execution.
#[derive(Debug, Clone, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
pub struct ExecutionResult {
pub remaining_gas: Option<u64>,
pub return_value: Value,
pub builtin_stats: BuiltinStats,
}
/// Starknet contract execution result.
#[derive(
Debug,
Default,
Clone,
PartialEq,
Eq,
PartialOrd,
Ord,
Hash,
serde::Serialize,
serde::Deserialize,
)]
pub struct ContractExecutionResult {
pub remaining_gas: u64,
pub failure_flag: bool,
pub return_values: Vec<Felt>,
pub error_msg: Option<String>,
pub builtin_stats: BuiltinStats,
}
impl ContractExecutionResult {
/// Convert an [`ExecutionResult`] into a [`ContractExecutionResult`]
pub fn from_execution_result(result: ExecutionResult) -> Result<Self, Error> {
let mut error_msg = None;
let failure_flag;
let return_values = match &result.return_value {
Value::Enum { tag, value, .. } => {
failure_flag = *tag != 0;
if !failure_flag {
if let Value::Struct { fields, .. } = &**value {
if let Value::Struct { fields, .. } = &fields[0] {
if let Value::Array(data) = &fields[0] {
let felt_vec: Vec<_> = data
.iter()
.map(|x| {
if let Value::Felt252(f) = x {
Ok(*f)
} else {
native_panic!("should always be a felt")
}
})
.collect::<Result<_, _>>()?;
felt_vec
} else {
Err(Error::UnexpectedValue(format!(
"wrong type, expected: Struct {{ Struct {{ Array<felt252> }} }}, value: {:?}",
value
)))?
}
} else {
Err(Error::UnexpectedValue(format!(
"wrong type, expected: Struct {{ Struct {{ Array<felt252> }} }}, value: {:?}",
value
)))?
}
} else {
Err(Error::UnexpectedValue(format!(
"wrong type, expected: Struct {{ Struct {{ Array<felt252> }} }}, value: {:?}",
value
)))?
}
} else if let Value::Struct { fields, .. } = &**value {
if fields.len() < 2 {
Err(Error::UnexpectedValue(format!(
"wrong type, expect: struct.fields.len() >= 2, value: {:?}",
fields
)))?
}
if let Value::Array(data) = &fields[1] {
let felt_vec: Vec<_> = data
.iter()
.map(|x| {
if let Value::Felt252(f) = x {
Ok(*f)
} else {
native_panic!("should always be a felt")
}
})
.collect::<Result<_, _>>()?;
let bytes_err: Vec<_> = felt_vec
.iter()
.flat_map(|felt| felt.to_bytes_be().to_vec())
// remove null chars
.filter(|b| *b != 0)
.collect();
let str_error = decode_error_message(&bytes_err);
error_msg = Some(str_error);
felt_vec
} else {
Err(Error::UnexpectedValue(format!(
"wrong type, expected: Struct {{ [X, Array<felt252>] }}, value: {:?}",
value
)))?
}
} else {
Err(Error::UnexpectedValue(format!(
"wrong type, expected: Struct {{ [X, Array<felt252>] }}, value: {:?}",
value
)))?
}
}
_ => {
failure_flag = true;
Err(Error::UnexpectedValue(
"wrong return value type expected a enum".to_string(),
))?
}
};
Ok(Self {
remaining_gas: result.remaining_gas.unwrap_or(0),
return_values,
failure_flag,
error_msg,
builtin_stats: result.builtin_stats,
})
}
}
impl Add for BuiltinStats {
type Output = Self;
fn add(self, rhs: Self) -> Self::Output {
Self::Output {
range_check: self.range_check + rhs.range_check,
pedersen: self.pedersen + rhs.pedersen,
bitwise: self.bitwise + rhs.bitwise,
ec_op: self.ec_op + rhs.ec_op,
poseidon: self.poseidon + rhs.poseidon,
segment_arena: self.segment_arena + rhs.segment_arena,
range_check96: self.range_check96 + rhs.range_check96,
add_mod: self.add_mod + rhs.add_mod,
mul_mod: self.mul_mod + rhs.mul_mod,
}
}
}
impl AddAssign for BuiltinStats {
fn add_assign(&mut self, rhs: Self) {
*self = *self + rhs
}
}
| rust | Apache-2.0 | f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce | 2026-01-04T20:20:54.031924Z | false |
lambdaclass/cairo_native | https://github.com/lambdaclass/cairo_native/blob/f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce/src/error.rs | src/error.rs | //! Various error types used thorough the crate.
use crate::metadata::gas::GasMetadataError;
use cairo_lang_sierra::extensions::modules::utils::Range;
use cairo_lang_sierra::{
edit_state::EditStateError, ids::ConcreteTypeId, program_registry::ProgramRegistryError,
};
use num_bigint::BigInt;
use panic::NativeAssertError;
use std::{alloc::LayoutError, num::TryFromIntError};
use thiserror::Error;
pub type Result<T> = std::result::Result<T, Error>;
#[derive(Error, Debug)]
pub enum Error {
#[error(transparent)]
LayoutError(#[from] LayoutError),
#[error(transparent)]
MlirError(#[from] melior::Error),
#[error("missing parameter of type '{0}'")]
MissingParameter(String),
#[error("unexpected value, expected value of type '{0}'")]
UnexpectedValue(String),
#[error("a syscall handler was expected but was not provided")]
MissingSyscallHandler,
#[error(transparent)]
LayoutErrorPolyfill(#[from] crate::utils::LayoutError),
#[error(transparent)]
ProgramRegistryErrorBoxed(#[from] Box<ProgramRegistryError>),
#[error(transparent)]
TryFromIntError(#[from] TryFromIntError),
#[error("error parsing attribute")]
ParseAttributeError,
#[error("missing metadata")]
MissingMetadata,
#[error(transparent)]
SierraAssert(#[from] SierraAssertError),
#[error(transparent)]
NativeAssert(#[from] NativeAssertError),
#[error(transparent)]
Compiler(#[from] CompilerError),
#[error(transparent)]
EditStateError(#[from] EditStateError),
#[error(transparent)]
GasMetadataError(#[from] GasMetadataError),
#[error("llvm compile error: {0}")]
LLVMCompileError(String),
#[error("ld link error: {0}")]
LinkError(String),
#[error("cairo const data mismatch")]
ConstDataMismatch,
#[error("expected an integer-like type")]
IntegerLikeTypeExpected,
#[error("integer conversion failed")]
IntegerConversion,
#[error("missing BuiltinCosts global symbol, should never happen, this is a bug")]
MissingBuiltinCostsSymbol,
#[error("selector not found in the AotContractExecutor mappings")]
SelectorNotFound,
#[error(transparent)]
IoError(#[from] std::io::Error),
#[error(transparent)]
LibraryLoadError(#[from] libloading::Error),
#[error(transparent)]
SerdeJsonError(#[from] serde_json::Error),
#[error("Failed to parse a Cairo/Sierra program: {0}")]
ProgramParser(String),
#[cfg(feature = "with-segfault-catcher")]
#[error(transparent)]
SafeRunner(crate::utils::safe_runner::SafeRunnerError),
}
impl Error {
pub fn make_missing_parameter(ty: &ConcreteTypeId) -> Self {
Self::MissingParameter(
ty.debug_name
.as_ref()
.map(|x| x.to_string())
.unwrap_or_default(),
)
}
}
#[derive(Error, Debug)]
pub enum SierraAssertError {
#[error("casts always happen between numerical types")]
Cast,
#[error("range should always intersect, from {:?} to {:?}", ranges.0, ranges.1)]
Range { ranges: Box<(Range, Range)> },
#[error("type {:?} should never be initialized", .0)]
BadTypeInit(ConcreteTypeId),
#[error("expected type information was missing")]
BadTypeInfo,
#[error("circuit cannot be evaluated")]
ImpossibleCircuit,
}
#[derive(Error, Debug)]
pub enum CompilerError {
#[error("BoundedInt value is out of range: {:?} not within [{:?}, {:?})", value, range.0, range.1)]
BoundedIntOutOfRange {
value: Box<BigInt>,
range: Box<(BigInt, BigInt)>,
},
}
/// In Cairo Native we want to avoid the use of panic, even in situation where
/// it *should* never happen. The downside of this is that we lose:
/// - Possible compiler opitimizations
/// - Stack backtrace on error
///
/// This modules aims to avoid panics while still obtaining a stack backtrace on eventual errors.
pub mod panic {
use super::{Error, Result};
use std::{
backtrace::{Backtrace, BacktraceStatus},
panic::Location,
};
/// `NativeAssertError` acts as a non-panicking alternative to Rust's panic.
/// When the error is created the backtrace or location is captured, which
/// is useful for debugging.
#[derive(Debug)]
pub struct NativeAssertError {
msg: String,
info: BacktraceOrLocation,
}
impl std::error::Error for NativeAssertError {}
impl NativeAssertError {
#[track_caller]
pub fn new(msg: String) -> Self {
let backtrace = Backtrace::capture();
let info = if BacktraceStatus::Captured == backtrace.status() {
BacktraceOrLocation::Backtrace(backtrace)
} else {
BacktraceOrLocation::Location(std::panic::Location::caller())
};
Self { msg, info }
}
}
/// Extension trait used to easly convert `Result`s and `Option`s to `NativeAssertError`
pub trait ToNativeAssertError<T> {
fn to_native_assert_error(self, msg: &str) -> Result<T>;
}
impl<T> ToNativeAssertError<T> for Option<T> {
fn to_native_assert_error(self, msg: &str) -> Result<T> {
self.ok_or_else(|| Error::NativeAssert(NativeAssertError::new(msg.to_string())))
}
}
impl<T, E> ToNativeAssertError<T> for std::result::Result<T, E> {
fn to_native_assert_error(self, msg: &str) -> Result<T> {
self.map_err(|_| Error::NativeAssert(NativeAssertError::new(msg.to_string())))
}
}
/// Macro that mimicks the behaviour of `panic!`.
/// It should only be used inside of a function that returns Result<T, cairo_native::error::Error>
#[macro_export]
macro_rules! native_panic {
($($arg:tt)*) => {
return Err($crate::error::Error::NativeAssert(
$crate::error::panic::NativeAssertError::new(format!($($arg)*)),
))
};
}
/// Macro that mimicks the behaviour of `assert!`.
/// It should only be used inside of a function that returns Result<T, cairo_native::error::Error>
#[macro_export]
macro_rules! native_assert {
($cond:expr, $($arg:tt)*) => {
if !($cond) {
$crate::native_panic!($($arg)*);
}
};
}
/// If `RUST_BACKTRACE` env var is not set, then the backtrace won't be captured.
/// In that case, only the location is saved, which is better than nothing.
#[derive(Debug)]
enum BacktraceOrLocation {
Backtrace(Backtrace),
Location(&'static Location<'static>),
}
impl std::fmt::Display for NativeAssertError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
writeln!(f, "{}", &self.msg)?;
match &self.info {
BacktraceOrLocation::Backtrace(backtrace) => {
writeln!(f, "Stack backtrace:\n{}", backtrace)
}
BacktraceOrLocation::Location(location) => {
writeln!(f, "Location: {}", location)
}
}
}
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_make_missing_parameter() {
// Test with a type ID that has a debug name
let ty_with_debug_name = ConcreteTypeId {
debug_name: Some("u32".into()),
id: 10,
};
assert_eq!(
Error::make_missing_parameter(&ty_with_debug_name).to_string(),
"missing parameter of type 'u32'"
);
// Test with a type ID that does not have a debug name
let ty_without_debug_name = ConcreteTypeId {
debug_name: None,
id: 10,
};
assert_eq!(
Error::make_missing_parameter(&ty_without_debug_name).to_string(),
"missing parameter of type ''"
);
}
}
| rust | Apache-2.0 | f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce | 2026-01-04T20:20:54.031924Z | false |
lambdaclass/cairo_native | https://github.com/lambdaclass/cairo_native/blob/f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce/src/debug.rs | src/debug.rs | use cairo_lang_sierra::{
extensions::{
array::ArrayConcreteLibfunc,
boolean::BoolConcreteLibfunc,
bounded_int::BoundedIntConcreteLibfunc,
boxing::BoxConcreteLibfunc,
bytes31::Bytes31ConcreteLibfunc,
casts::CastConcreteLibfunc,
circuit::{CircuitConcreteLibfunc, CircuitTypeConcrete},
const_type::ConstConcreteLibfunc,
core::{CoreConcreteLibfunc, CoreLibfunc, CoreType, CoreTypeConcrete},
coupon::CouponConcreteLibfunc,
debug::DebugConcreteLibfunc,
ec::EcConcreteLibfunc,
enm::EnumConcreteLibfunc,
felt252::{Felt252BinaryOperationConcrete, Felt252BinaryOperator, Felt252Concrete},
felt252_dict::{Felt252DictConcreteLibfunc, Felt252DictEntryConcreteLibfunc},
gas::GasConcreteLibfunc,
gas_reserve::GasReserveConcreteLibfunc,
int::{
signed::SintConcrete, signed128::Sint128Concrete, unsigned::UintConcrete,
unsigned128::Uint128Concrete, unsigned256::Uint256Concrete,
unsigned512::Uint512Concrete, IntOperator,
},
mem::MemConcreteLibfunc,
nullable::NullableConcreteLibfunc,
pedersen::PedersenConcreteLibfunc,
poseidon::PoseidonConcreteLibfunc,
range::IntRangeConcreteLibfunc,
starknet::{
secp256::{Secp256ConcreteLibfunc, Secp256OpConcreteLibfunc},
testing::TestingConcreteLibfunc,
StarknetConcreteLibfunc, StarknetTypeConcrete,
},
structure::StructConcreteLibfunc,
},
ids::ConcreteTypeId,
program_registry::ProgramRegistry,
};
use itertools::Itertools;
/// Maps every concrete Sierra libfunc variant to a short, `'static`
/// snake_case identifier (e.g. `"array_append"`, `"u32_overflowing_add"`).
///
/// The match is fully exhaustive on purpose: adding a libfunc variant
/// upstream becomes a compile error here instead of a silent gap.
pub const fn libfunc_to_name(value: &CoreConcreteLibfunc) -> &'static str {
    match value {
        CoreConcreteLibfunc::ApTracking(value) => match value {
            cairo_lang_sierra::extensions::ap_tracking::ApTrackingConcreteLibfunc::Revoke(_) => {
                "revoke_ap_tracking"
            }
            cairo_lang_sierra::extensions::ap_tracking::ApTrackingConcreteLibfunc::Enable(_) => {
                "enable_ap_tracking"
            }
            cairo_lang_sierra::extensions::ap_tracking::ApTrackingConcreteLibfunc::Disable(_) => {
                "disable_ap_tracking"
            }
        },
        CoreConcreteLibfunc::Array(value) => match value {
            ArrayConcreteLibfunc::New(_) => "array_new",
            ArrayConcreteLibfunc::SpanFromTuple(_) => "span_from_tuple",
            ArrayConcreteLibfunc::Append(_) => "array_append",
            ArrayConcreteLibfunc::PopFront(_) => "array_pop_front",
            ArrayConcreteLibfunc::PopFrontConsume(_) => "array_pop_front_consume",
            ArrayConcreteLibfunc::Get(_) => "array_get",
            ArrayConcreteLibfunc::Slice(_) => "array_slice",
            ArrayConcreteLibfunc::Len(_) => "array_len",
            ArrayConcreteLibfunc::SnapshotPopFront(_) => "array_snapshot_pop_front",
            ArrayConcreteLibfunc::SnapshotPopBack(_) => "array_snapshot_pop_back",
            ArrayConcreteLibfunc::TupleFromSpan(_) => "array_tuple_from_span",
            ArrayConcreteLibfunc::SnapshotMultiPopFront(_) => "array_snapshot_multi_pop_front",
            ArrayConcreteLibfunc::SnapshotMultiPopBack(_) => "array_snapshot_multi_pop_back",
        },
        CoreConcreteLibfunc::BranchAlign(_) => "branch_align",
        CoreConcreteLibfunc::Bool(value) => match value {
            BoolConcreteLibfunc::And(_) => "bool_and",
            BoolConcreteLibfunc::Not(_) => "bool_not",
            BoolConcreteLibfunc::Xor(_) => "bool_xor",
            BoolConcreteLibfunc::Or(_) => "bool_or",
            BoolConcreteLibfunc::ToFelt252(_) => "bool_to_felt252",
        },
        CoreConcreteLibfunc::Box(value) => match value {
            BoxConcreteLibfunc::Into(_) => "box_into",
            BoxConcreteLibfunc::Unbox(_) => "box_unbox",
            BoxConcreteLibfunc::ForwardSnapshot(_) => "box_forward_snapshot",
            BoxConcreteLibfunc::LocalInto(_) => "box_local_into",
        },
        CoreConcreteLibfunc::Cast(value) => match value {
            CastConcreteLibfunc::Downcast(_) => "downcast",
            CastConcreteLibfunc::Upcast(_) => "upcast",
        },
        CoreConcreteLibfunc::Coupon(value) => match value {
            CouponConcreteLibfunc::Buy(_) => "coupon_buy",
            CouponConcreteLibfunc::Refund(_) => "coupon_refund",
        },
        CoreConcreteLibfunc::CouponCall(_) => "coupon_call",
        CoreConcreteLibfunc::Drop(_) => "drop",
        CoreConcreteLibfunc::Dup(_) => "dup",
        CoreConcreteLibfunc::Ec(value) => match value {
            EcConcreteLibfunc::IsZero(_) => "ec_is_zero",
            EcConcreteLibfunc::Neg(_) => "ec_neg",
            EcConcreteLibfunc::StateAdd(_) => "ec_state_add",
            EcConcreteLibfunc::TryNew(_) => "ec_try_new",
            EcConcreteLibfunc::StateFinalize(_) => "ec_state_finalize",
            EcConcreteLibfunc::StateInit(_) => "ec_state_init",
            EcConcreteLibfunc::StateAddMul(_) => "ec_state_add_mul",
            EcConcreteLibfunc::PointFromX(_) => "ec_point_from_x",
            EcConcreteLibfunc::UnwrapPoint(_) => "ec_unwrap_point",
            EcConcreteLibfunc::Zero(_) => "ec_zero",
            EcConcreteLibfunc::NegNz(_) => "ec_neg_nz",
        },
        CoreConcreteLibfunc::Felt252(value) => match value {
            Felt252Concrete::BinaryOperation(op) => match op {
                Felt252BinaryOperationConcrete::WithVar(op) => match &op.operator {
                    Felt252BinaryOperator::Add => "felt252_add",
                    Felt252BinaryOperator::Sub => "felt252_sub",
                    Felt252BinaryOperator::Mul => "felt252_mul",
                    Felt252BinaryOperator::Div => "felt252_div",
                },
                Felt252BinaryOperationConcrete::WithConst(op) => match &op.operator {
                    Felt252BinaryOperator::Add => "felt252_const_add",
                    Felt252BinaryOperator::Sub => "felt252_const_sub",
                    Felt252BinaryOperator::Mul => "felt252_const_mul",
                    Felt252BinaryOperator::Div => "felt252_const_div",
                },
            },
            Felt252Concrete::Const(_) => "felt252_const",
            Felt252Concrete::IsZero(_) => "felt252_is_zero",
        },
        CoreConcreteLibfunc::Const(value) => match value {
            ConstConcreteLibfunc::AsBox(_) => "const_as_box",
            ConstConcreteLibfunc::AsImmediate(_) => "const_as_immediate",
        },
        CoreConcreteLibfunc::FunctionCall(_) => "function_call",
        CoreConcreteLibfunc::Gas(value) => match value {
            GasConcreteLibfunc::WithdrawGas(_) => "withdraw_gas",
            GasConcreteLibfunc::RedepositGas(_) => "redeposit_gas",
            GasConcreteLibfunc::GetAvailableGas(_) => "get_available_gas",
            GasConcreteLibfunc::BuiltinWithdrawGas(_) => "builtin_withdraw_gas",
            GasConcreteLibfunc::GetBuiltinCosts(_) => "get_builtin_costs",
            GasConcreteLibfunc::GetUnspentGas(_) => "get_unspent_gas",
        },
        CoreConcreteLibfunc::Uint8(value) => match value {
            UintConcrete::Const(_) => "u8_const",
            UintConcrete::Operation(op) => match &op.operator {
                IntOperator::OverflowingAdd => "u8_overflowing_add",
                IntOperator::OverflowingSub => "u8_overflowing_sub",
            },
            UintConcrete::SquareRoot(_) => "u8_sqrt",
            UintConcrete::Equal(_) => "u8_eq",
            UintConcrete::ToFelt252(_) => "u8_to_felt252",
            UintConcrete::FromFelt252(_) => "u8_from_felt252",
            UintConcrete::IsZero(_) => "u8_is_zero",
            UintConcrete::Divmod(_) => "u8_divmod",
            UintConcrete::WideMul(_) => "u8_wide_mul",
            UintConcrete::Bitwise(_) => "u8_bitwise",
        },
        CoreConcreteLibfunc::Uint16(value) => match value {
            UintConcrete::Const(_) => "u16_const",
            UintConcrete::Operation(op) => match &op.operator {
                IntOperator::OverflowingAdd => "u16_overflowing_add",
                IntOperator::OverflowingSub => "u16_overflowing_sub",
            },
            UintConcrete::SquareRoot(_) => "u16_sqrt",
            UintConcrete::Equal(_) => "u16_eq",
            UintConcrete::ToFelt252(_) => "u16_to_felt252",
            UintConcrete::FromFelt252(_) => "u16_from_felt252",
            UintConcrete::IsZero(_) => "u16_is_zero",
            UintConcrete::Divmod(_) => "u16_divmod",
            UintConcrete::WideMul(_) => "u16_wide_mul",
            UintConcrete::Bitwise(_) => "u16_bitwise",
        },
        CoreConcreteLibfunc::Uint32(value) => match value {
            UintConcrete::Const(_) => "u32_const",
            UintConcrete::Operation(op) => match &op.operator {
                IntOperator::OverflowingAdd => "u32_overflowing_add",
                IntOperator::OverflowingSub => "u32_overflowing_sub",
            },
            UintConcrete::SquareRoot(_) => "u32_sqrt",
            UintConcrete::Equal(_) => "u32_eq",
            UintConcrete::ToFelt252(_) => "u32_to_felt252",
            UintConcrete::FromFelt252(_) => "u32_from_felt252",
            UintConcrete::IsZero(_) => "u32_is_zero",
            UintConcrete::Divmod(_) => "u32_divmod",
            UintConcrete::WideMul(_) => "u32_wide_mul",
            UintConcrete::Bitwise(_) => "u32_bitwise",
        },
        CoreConcreteLibfunc::Uint64(value) => match value {
            UintConcrete::Const(_) => "u64_const",
            UintConcrete::Operation(op) => match &op.operator {
                IntOperator::OverflowingAdd => "u64_overflowing_add",
                IntOperator::OverflowingSub => "u64_overflowing_sub",
            },
            UintConcrete::SquareRoot(_) => "u64_sqrt",
            UintConcrete::Equal(_) => "u64_eq",
            UintConcrete::ToFelt252(_) => "u64_to_felt252",
            UintConcrete::FromFelt252(_) => "u64_from_felt252",
            UintConcrete::IsZero(_) => "u64_is_zero",
            UintConcrete::Divmod(_) => "u64_divmod",
            UintConcrete::WideMul(_) => "u64_wide_mul",
            UintConcrete::Bitwise(_) => "u64_bitwise",
        },
        CoreConcreteLibfunc::Uint128(value) => match value {
            Uint128Concrete::Operation(op) => match &op.operator {
                IntOperator::OverflowingAdd => "u128_overflowing_add",
                IntOperator::OverflowingSub => "u128_overflowing_sub",
            },
            Uint128Concrete::Divmod(_) => "u128_divmod",
            Uint128Concrete::GuaranteeMul(_) => "u128_guarantee_mul",
            Uint128Concrete::MulGuaranteeVerify(_) => "u128_mul_guarantee_verify",
            // NOTE(review): "u128_equal" and "u128_from_felt" break the
            // "*_eq" / "*_from_felt252" pattern used by every other width.
            // These strings may be relied upon by downstream consumers, so
            // they are left as-is — confirm before renaming.
            Uint128Concrete::Equal(_) => "u128_equal",
            Uint128Concrete::SquareRoot(_) => "u128_sqrt",
            Uint128Concrete::Const(_) => "u128_const",
            Uint128Concrete::FromFelt252(_) => "u128_from_felt",
            Uint128Concrete::ToFelt252(_) => "u128_to_felt252",
            Uint128Concrete::IsZero(_) => "u128_is_zero",
            Uint128Concrete::Bitwise(_) => "u128_bitwise",
            Uint128Concrete::ByteReverse(_) => "u128_byte_reverse",
        },
        CoreConcreteLibfunc::Uint256(value) => match value {
            Uint256Concrete::IsZero(_) => "u256_is_zero",
            Uint256Concrete::Divmod(_) => "u256_divmod",
            Uint256Concrete::SquareRoot(_) => "u256_sqrt",
            Uint256Concrete::InvModN(_) => "u256_inv_mod_n",
        },
        CoreConcreteLibfunc::Uint512(value) => match value {
            Uint512Concrete::DivModU256(_) => "u512_divmod_u256",
        },
        CoreConcreteLibfunc::Sint8(value) => match value {
            SintConcrete::Const(_) => "i8_const",
            SintConcrete::Equal(_) => "i8_eq",
            SintConcrete::ToFelt252(_) => "i8_to_felt252",
            SintConcrete::FromFelt252(_) => "i8_from_felt252",
            SintConcrete::Operation(op) => match &op.operator {
                IntOperator::OverflowingAdd => "i8_overflowing_add",
                IntOperator::OverflowingSub => "i8_overflowing_sub",
            },
            SintConcrete::Diff(_) => "i8_diff",
            SintConcrete::WideMul(_) => "i8_wide_mul",
        },
        CoreConcreteLibfunc::Sint16(value) => match value {
            SintConcrete::Const(_) => "i16_const",
            SintConcrete::Equal(_) => "i16_eq",
            SintConcrete::ToFelt252(_) => "i16_to_felt252",
            SintConcrete::FromFelt252(_) => "i16_from_felt252",
            SintConcrete::Operation(op) => match &op.operator {
                IntOperator::OverflowingAdd => "i16_overflowing_add",
                IntOperator::OverflowingSub => "i16_overflowing_sub",
            },
            SintConcrete::Diff(_) => "i16_diff",
            SintConcrete::WideMul(_) => "i16_wide_mul",
        },
        CoreConcreteLibfunc::Sint32(value) => match value {
            SintConcrete::Const(_) => "i32_const",
            SintConcrete::Equal(_) => "i32_eq",
            SintConcrete::ToFelt252(_) => "i32_to_felt252",
            SintConcrete::FromFelt252(_) => "i32_from_felt252",
            SintConcrete::Operation(op) => match &op.operator {
                IntOperator::OverflowingAdd => "i32_overflowing_add",
                IntOperator::OverflowingSub => "i32_overflowing_sub",
            },
            SintConcrete::Diff(_) => "i32_diff",
            SintConcrete::WideMul(_) => "i32_wide_mul",
        },
        CoreConcreteLibfunc::Sint64(value) => match value {
            SintConcrete::Const(_) => "i64_const",
            SintConcrete::Equal(_) => "i64_eq",
            SintConcrete::ToFelt252(_) => "i64_to_felt252",
            SintConcrete::FromFelt252(_) => "i64_from_felt252",
            SintConcrete::Operation(op) => match &op.operator {
                IntOperator::OverflowingAdd => "i64_overflowing_add",
                IntOperator::OverflowingSub => "i64_overflowing_sub",
            },
            SintConcrete::Diff(_) => "i64_diff",
            SintConcrete::WideMul(_) => "i64_wide_mul",
        },
        CoreConcreteLibfunc::Sint128(value) => match value {
            Sint128Concrete::Const(_) => "i128_const",
            Sint128Concrete::Equal(_) => "i128_eq",
            Sint128Concrete::ToFelt252(_) => "i128_to_felt252",
            Sint128Concrete::FromFelt252(_) => "i128_from_felt252",
            Sint128Concrete::Operation(op) => match &op.operator {
                IntOperator::OverflowingAdd => "i128_overflowing_add",
                IntOperator::OverflowingSub => "i128_overflowing_sub",
            },
            Sint128Concrete::Diff(_) => "i128_diff",
        },
        CoreConcreteLibfunc::Mem(value) => match value {
            MemConcreteLibfunc::StoreTemp(_) => "store_temp",
            MemConcreteLibfunc::StoreLocal(_) => "store_local",
            MemConcreteLibfunc::FinalizeLocals(_) => "finalize_locals",
            MemConcreteLibfunc::AllocLocal(_) => "alloc_local",
            MemConcreteLibfunc::Rename(_) => "rename",
        },
        CoreConcreteLibfunc::Nullable(value) => match value {
            NullableConcreteLibfunc::Null(_) => "nullable_null",
            NullableConcreteLibfunc::NullableFromBox(_) => "nullable_from_box",
            NullableConcreteLibfunc::MatchNullable(_) => "match_nullable",
            NullableConcreteLibfunc::ForwardSnapshot(_) => "nullable_forward_snapshot",
        },
        CoreConcreteLibfunc::UnwrapNonZero(_) => "unwrap_non_zero",
        CoreConcreteLibfunc::UnconditionalJump(_) => "jump",
        CoreConcreteLibfunc::Enum(value) => match value {
            EnumConcreteLibfunc::Init(_) => "enum_init",
            EnumConcreteLibfunc::FromBoundedInt(_) => "enum_from_bounded_int",
            EnumConcreteLibfunc::Match(_) => "enum_match",
            EnumConcreteLibfunc::SnapshotMatch(_) => "enum_snapshot_match",
        },
        CoreConcreteLibfunc::Struct(value) => match value {
            StructConcreteLibfunc::Construct(_) => "struct_construct",
            StructConcreteLibfunc::Deconstruct(_) => "struct_deconstruct",
            StructConcreteLibfunc::SnapshotDeconstruct(_) => "struct_snapshot_deconstruct",
            StructConcreteLibfunc::BoxedDeconstruct(_) => "struct_boxed_deconstruct",
        },
        CoreConcreteLibfunc::Felt252Dict(value) => match value {
            Felt252DictConcreteLibfunc::New(_) => "felt252dict_new",
            Felt252DictConcreteLibfunc::Squash(_) => "felt252dict_squash",
        },
        CoreConcreteLibfunc::Felt252DictEntry(value) => match value {
            Felt252DictEntryConcreteLibfunc::Get(_) => "felt252dict_get",
            Felt252DictEntryConcreteLibfunc::Finalize(_) => "felt252dict_finalize",
        },
        CoreConcreteLibfunc::Felt252SquashedDict(_) => "felt252_squashed_dict",
        CoreConcreteLibfunc::Pedersen(value) => match value {
            PedersenConcreteLibfunc::PedersenHash(_) => "pedersen_hash",
        },
        CoreConcreteLibfunc::Poseidon(value) => match value {
            PoseidonConcreteLibfunc::HadesPermutation(_) => "hades_permutation",
        },
        CoreConcreteLibfunc::Starknet(value) => match value {
            StarknetConcreteLibfunc::CallContract(_) => "call_contract",
            StarknetConcreteLibfunc::ClassHashConst(_) => "class_hash_const",
            StarknetConcreteLibfunc::ClassHashTryFromFelt252(_) => "class_hash_try_from_felt252",
            StarknetConcreteLibfunc::ClassHashToFelt252(_) => "class_hash_to_felt252",
            StarknetConcreteLibfunc::ContractAddressConst(_) => "contract_address_const",
            StarknetConcreteLibfunc::ContractAddressTryFromFelt252(_) => {
                "contract_address_try_from_felt252"
            }
            StarknetConcreteLibfunc::ContractAddressToFelt252(_) => "contract_address_to_felt252",
            StarknetConcreteLibfunc::StorageRead(_) => "storage_read",
            StarknetConcreteLibfunc::StorageWrite(_) => "storage_write",
            StarknetConcreteLibfunc::StorageBaseAddressConst(_) => "storage_base_address_const",
            StarknetConcreteLibfunc::StorageBaseAddressFromFelt252(_) => {
                "storage_base_address_from_felt252"
            }
            StarknetConcreteLibfunc::StorageAddressFromBase(_) => "storage_address_from_base",
            StarknetConcreteLibfunc::StorageAddressFromBaseAndOffset(_) => {
                "storage_address_from_base_and_offset"
            }
            StarknetConcreteLibfunc::StorageAddressToFelt252(_) => "storage_address_to_felt252",
            StarknetConcreteLibfunc::StorageAddressTryFromFelt252(_) => {
                "storage_address_try_from_felt252"
            }
            StarknetConcreteLibfunc::EmitEvent(_) => "emit_event",
            StarknetConcreteLibfunc::GetBlockHash(_) => "get_block_hash",
            StarknetConcreteLibfunc::GetExecutionInfo(_) => "get_exec_info_v1",
            StarknetConcreteLibfunc::GetExecutionInfoV2(_) => "get_exec_info_v2",
            StarknetConcreteLibfunc::GetExecutionInfoV3(_) => "get_exec_info_v3",
            StarknetConcreteLibfunc::Deploy(_) => "deploy",
            StarknetConcreteLibfunc::Keccak(_) => "keccak",
            StarknetConcreteLibfunc::LibraryCall(_) => "library_call",
            StarknetConcreteLibfunc::ReplaceClass(_) => "replace_class",
            StarknetConcreteLibfunc::SendMessageToL1(_) => "send_message_to_l1",
            StarknetConcreteLibfunc::Testing(value) => match value {
                TestingConcreteLibfunc::Cheatcode(_) => "cheatcode",
            },
            StarknetConcreteLibfunc::Secp256(value) => match value {
                Secp256ConcreteLibfunc::K1(value) => match value {
                    Secp256OpConcreteLibfunc::New(_) => "secp256k1_new",
                    Secp256OpConcreteLibfunc::Add(_) => "secp256k1_add",
                    Secp256OpConcreteLibfunc::Mul(_) => "secp256k1_mul",
                    Secp256OpConcreteLibfunc::GetPointFromX(_) => "secp256k1_get_point_from_x",
                    Secp256OpConcreteLibfunc::GetXy(_) => "secp256k1_get_xy",
                },
                Secp256ConcreteLibfunc::R1(value) => match value {
                    Secp256OpConcreteLibfunc::New(_) => "secp256r1_new",
                    Secp256OpConcreteLibfunc::Add(_) => "secp256r1_add",
                    Secp256OpConcreteLibfunc::Mul(_) => "secp256r1_mul",
                    Secp256OpConcreteLibfunc::GetPointFromX(_) => "secp256r1_get_point_from_x",
                    Secp256OpConcreteLibfunc::GetXy(_) => "secp256r1_get_xy",
                },
            },
            StarknetConcreteLibfunc::Sha256ProcessBlock(_) => "sha256_process_block",
            StarknetConcreteLibfunc::Sha256StateHandleInit(_) => "sha256_state_handle_init",
            StarknetConcreteLibfunc::Sha256StateHandleDigest(_) => "sha256_state_handle_digest",
            StarknetConcreteLibfunc::GetClassHashAt(_) => "get_class_hash_at_syscall",
            StarknetConcreteLibfunc::MetaTxV0(_) => "meta_tx_v0",
        },
        CoreConcreteLibfunc::Debug(value) => match value {
            DebugConcreteLibfunc::Print(_) => "debug_print",
        },
        CoreConcreteLibfunc::Trace(_) => "trace",
        CoreConcreteLibfunc::SnapshotTake(_) => "snapshot_take",
        CoreConcreteLibfunc::Bytes31(value) => match value {
            Bytes31ConcreteLibfunc::Const(_) => "bytes31_const",
            Bytes31ConcreteLibfunc::ToFelt252(_) => "bytes31_to_felt252",
            Bytes31ConcreteLibfunc::TryFromFelt252(_) => "bytes31_try_from_felt252",
        },
        CoreConcreteLibfunc::Circuit(selector) => match selector {
            CircuitConcreteLibfunc::AddInput(_) => "circuit_add_input",
            CircuitConcreteLibfunc::Eval(_) => "circuit_eval",
            CircuitConcreteLibfunc::GetDescriptor(_) => "circuit_get_descriptor",
            CircuitConcreteLibfunc::InitCircuitData(_) => "circuit_init_circuit_data",
            CircuitConcreteLibfunc::GetOutput(_) => "circuit_get_output",
            CircuitConcreteLibfunc::TryIntoCircuitModulus(_) => "circuit_try_into_circuit_modulus",
            CircuitConcreteLibfunc::FailureGuaranteeVerify(_) => "circuit_failure_guarantee_verify",
            CircuitConcreteLibfunc::IntoU96Guarantee(_) => "circuit_into_u96_guarantee",
            CircuitConcreteLibfunc::U96GuaranteeVerify(_) => "circuit_u96_guarantee_verify",
            CircuitConcreteLibfunc::U96LimbsLessThanGuaranteeVerify(_) => {
                "circuit_u96_limbs_less_than_guarantee_verify"
            }
            CircuitConcreteLibfunc::U96SingleLimbLessThanGuaranteeVerify(_) => {
                "circuit_u96_single_limb_less_than_guarantee_verify"
            }
        },
        CoreConcreteLibfunc::BoundedInt(selector) => match selector {
            BoundedIntConcreteLibfunc::Add(_) => "bounded_int_add",
            BoundedIntConcreteLibfunc::Sub(_) => "bounded_int_sub",
            BoundedIntConcreteLibfunc::Mul(_) => "bounded_int_mul",
            BoundedIntConcreteLibfunc::DivRem(_) => "bounded_int_div_rem",
            BoundedIntConcreteLibfunc::Constrain(_) => "bounded_int_constrain",
            BoundedIntConcreteLibfunc::IsZero(_) => "bounded_int_is_zero",
            BoundedIntConcreteLibfunc::WrapNonZero(_) => "bounded_int_wrap_non_zero",
            BoundedIntConcreteLibfunc::TrimMin(_) => "bounded_int_trim_min",
            BoundedIntConcreteLibfunc::TrimMax(_) => "bounded_int_trim_max",
        },
        CoreConcreteLibfunc::IntRange(selector) => match selector {
            IntRangeConcreteLibfunc::TryNew(_) => "int_range_try_new",
            IntRangeConcreteLibfunc::PopFront(_) => "int_range_pop_front",
        },
        CoreConcreteLibfunc::Blake(_) => "blake",
        CoreConcreteLibfunc::QM31(_) => "qm31",
        CoreConcreteLibfunc::UnsafePanic(_) => "unsafe_panic",
        CoreConcreteLibfunc::DummyFunctionCall(_) => "dummy_function_call",
        CoreConcreteLibfunc::GasReserve(selector) => match selector {
            GasReserveConcreteLibfunc::Create(_) => "gas_reserve_create",
            GasReserveConcreteLibfunc::Utilize(_) => "gas_reserve_utilize",
        },
    }
}
/// Renders a parameterized type as `name<arg1,arg2,...>`.
///
/// Each generic argument is resolved through the registry and named
/// recursively via [`type_to_name`].
///
/// # Panics
///
/// Panics if one of the argument type ids is not present in the registry.
pub fn generic_type_to_name(
    registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    name: &str,
    args: &[ConcreteTypeId],
) -> String {
    let rendered_args = args
        .iter()
        .map(|arg_ty| {
            let concrete_ty = registry
                .get_type(arg_ty)
                .expect("failed to find type in registry");
            type_to_name(registry, concrete_ty)
        })
        .join(",");
    format!("{}<{}>", name, rendered_args)
}
/// Renders a human-readable name for a concrete Sierra type.
///
/// Container types (structs, enums, arrays, snapshots, dicts, …) recurse
/// into their generic arguments through [`generic_type_to_name`]; leaf
/// types map to fixed strings.
///
/// # Panics
///
/// Panics (inside [`generic_type_to_name`]) if a nested type id is missing
/// from the registry.
pub fn type_to_name(
    registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    ty: &CoreTypeConcrete,
) -> String {
    match ty {
        CoreTypeConcrete::Struct(info) => generic_type_to_name(registry, "struct", &info.members),
        CoreTypeConcrete::Enum(info) => generic_type_to_name(registry, "enum", &info.variants),
        // BoundedInt is rendered from its value range rather than from
        // nested type arguments.
        CoreTypeConcrete::BoundedInt(info) => {
            format!("bounded_int<{},{}>", info.range.lower, info.range.upper)
        }
        CoreTypeConcrete::Array(info) => {
            generic_type_to_name(registry, "array", std::slice::from_ref(&info.ty))
        }
        CoreTypeConcrete::Snapshot(info) => {
            generic_type_to_name(registry, "snapshot", std::slice::from_ref(&info.ty))
        }
        CoreTypeConcrete::Span(info) => {
            generic_type_to_name(registry, "span", std::slice::from_ref(&info.ty))
        }
        CoreTypeConcrete::Felt252Dict(info) => {
            generic_type_to_name(registry, "felt252_dict", std::slice::from_ref(&info.ty))
        }
        CoreTypeConcrete::Felt252DictEntry(info) => generic_type_to_name(
            registry,
            "felt252_dict_entry",
            std::slice::from_ref(&info.ty),
        ),
        CoreTypeConcrete::SquashedFelt252Dict(info) => generic_type_to_name(
            registry,
            "squashed_felt252_dict",
            std::slice::from_ref(&info.ty),
        ),
        CoreTypeConcrete::NonZero(info) => {
            generic_type_to_name(registry, "non_zero", std::slice::from_ref(&info.ty))
        }
        CoreTypeConcrete::Box(info) => {
            generic_type_to_name(registry, "box", std::slice::from_ref(&info.ty))
        }
        CoreTypeConcrete::Uninitialized(info) => {
            generic_type_to_name(registry, "uninitialized", std::slice::from_ref(&info.ty))
        }
        CoreTypeConcrete::Nullable(info) => {
            generic_type_to_name(registry, "nullable", std::slice::from_ref(&info.ty))
        }
        CoreTypeConcrete::Const(info) => {
            generic_type_to_name(registry, "const", std::slice::from_ref(&info.inner_ty))
        }
        CoreTypeConcrete::IntRange(info) => {
            generic_type_to_name(registry, "int_range", std::slice::from_ref(&info.ty))
        }
        CoreTypeConcrete::Starknet(selector) => match selector {
            StarknetTypeConcrete::ClassHash(_) => String::from("class_hash"),
            StarknetTypeConcrete::ContractAddress(_) => String::from("contract_address"),
            StarknetTypeConcrete::StorageBaseAddress(_) => String::from("storage_base_address"),
            StarknetTypeConcrete::StorageAddress(_) => String::from("storage_address"),
            StarknetTypeConcrete::System(_) => String::from("system"),
            StarknetTypeConcrete::Secp256Point(_) => String::from("secp256_point"),
            StarknetTypeConcrete::Sha256StateHandle(_) => String::from("sha256_state_handle"),
        },
        CoreTypeConcrete::Circuit(selector) => match selector {
            CircuitTypeConcrete::AddMod(_) => String::from("add_mod"),
            CircuitTypeConcrete::MulMod(_) => String::from("mul_mod"),
            CircuitTypeConcrete::AddModGate(_) => String::from("add_mod_gate"),
            CircuitTypeConcrete::Circuit(_) => String::from("circuit"),
            CircuitTypeConcrete::CircuitData(_) => String::from("circuit_data"),
            CircuitTypeConcrete::CircuitOutputs(_) => String::from("circuit_outputs"),
            CircuitTypeConcrete::CircuitPartialOutputs(_) => {
                String::from("circuit_partial_outputs")
            }
            CircuitTypeConcrete::CircuitDescriptor(_) => String::from("circuit_descriptor"),
            CircuitTypeConcrete::CircuitFailureGuarantee(_) => {
                String::from("circuit_failure_guarantee")
            }
            CircuitTypeConcrete::CircuitInput(_) => String::from("circuit_input"),
            CircuitTypeConcrete::CircuitInputAccumulator(_) => {
                String::from("circuit_input_accumulator")
            }
            CircuitTypeConcrete::CircuitModulus(_) => String::from("circuit_modulus"),
            CircuitTypeConcrete::InverseGate(_) => String::from("inverse_gate"),
            CircuitTypeConcrete::MulModGate(_) => String::from("mul_mod_gate"),
            CircuitTypeConcrete::SubModGate(_) => String::from("sub_mod_gate"),
            CircuitTypeConcrete::U96Guarantee(_) => String::from("u96_guarantee"),
            CircuitTypeConcrete::U96LimbsLessThanGuarantee(_) => {
                String::from("u96_limbs_less_than_guarantee")
            }
        },
        CoreTypeConcrete::Felt252(_) => String::from("felt252"),
        CoreTypeConcrete::QM31(_) => String::from("qm31"),
        CoreTypeConcrete::Uint8(_) => String::from("uint8"),
        CoreTypeConcrete::Uint16(_) => String::from("uint16"),
        CoreTypeConcrete::Uint32(_) => String::from("uint32"),
        CoreTypeConcrete::Uint64(_) => String::from("uint64"),
        CoreTypeConcrete::Uint128(_) => String::from("uint128"),
        CoreTypeConcrete::Sint8(_) => String::from("sint8"),
        CoreTypeConcrete::Sint16(_) => String::from("sint16"),
        CoreTypeConcrete::Sint32(_) => String::from("sint32"),
        CoreTypeConcrete::Sint64(_) => String::from("sint64"),
        CoreTypeConcrete::Sint128(_) => String::from("sint128"),
        CoreTypeConcrete::Uint128MulGuarantee(_) => String::from("uint128_mul_guarantee"),
        CoreTypeConcrete::Bytes31(_) => String::from("bytes31"),
        CoreTypeConcrete::GasBuiltin(_) => String::from("gas_builtin"),
        CoreTypeConcrete::RangeCheck(_) => String::from("range_check"),
        CoreTypeConcrete::Bitwise(_) => String::from("bitwise"),
        CoreTypeConcrete::Pedersen(_) => String::from("pedersen"),
        CoreTypeConcrete::Poseidon(_) => String::from("poseidon"),
        CoreTypeConcrete::SegmentArena(_) => String::from("segment_arena"),
        CoreTypeConcrete::RangeCheck96(_) => String::from("range_check96"),
        CoreTypeConcrete::BuiltinCosts(_) => String::from("builtin_costs"),
        CoreTypeConcrete::Coupon(_) => String::from("coupon"),
        CoreTypeConcrete::Blake(_) => String::from("blake"),
        CoreTypeConcrete::EcOp(_) => String::from("ec_op"),
        CoreTypeConcrete::EcPoint(_) => String::from("ec_point"),
        CoreTypeConcrete::EcState(_) => String::from("ec_state"),
        CoreTypeConcrete::GasReserve(_) => String::from("gas_reserve"),
    }
}
| rust | Apache-2.0 | f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce | 2026-01-04T20:20:54.031924Z | false |
lambdaclass/cairo_native | https://github.com/lambdaclass/cairo_native/blob/f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce/src/types.rs | src/types.rs | //! # Compiler type infrastructure
//!
//! Contains type generation stuff (aka. conversion from Sierra to MLIR types).
use crate::{
error::Error as CoreTypeBuilderError,
libfuncs::LibfuncHelper,
metadata::MetadataStorage,
native_panic,
utils::{get_integer_layout, layout_repeat, RangeExt, PRIME},
};
use cairo_lang_sierra::{
extensions::{
circuit::CircuitTypeConcrete,
core::{CoreLibfunc, CoreType, CoreTypeConcrete},
starknet::StarknetTypeConcrete,
utils::Range,
},
ids::{ConcreteTypeId, UserTypeId},
program::GenericArg,
program_registry::ProgramRegistry,
};
use melior::{
dialect::llvm,
helpers::{ArithBlockExt, BuiltinBlockExt, LlvmBlockExt},
ir::{r#type::IntegerType, Block, Location, Module, Type, Value},
Context,
};
use num_bigint::{BigInt, Sign};
use num_traits::{Bounded, One};
use std::{alloc::Layout, error::Error, ops::Deref, sync::OnceLock};
pub mod array;
mod bitwise;
mod bounded_int;
mod r#box;
mod builtin_costs;
mod bytes31;
pub mod circuit;
mod coupon;
mod ec_op;
mod ec_point;
mod ec_state;
pub mod r#enum;
mod felt252;
mod felt252_dict;
mod felt252_dict_entry;
mod gas_builtin;
mod gas_reserve;
mod int_range;
mod non_zero;
mod nullable;
mod pedersen;
mod poseidon;
mod range_check;
mod segment_arena;
mod snapshot;
mod squashed_felt252_dict;
mod starknet;
mod r#struct;
mod uint128;
mod uint128_mul_guarantee;
mod uint16;
mod uint32;
mod uint64;
mod uint8;
mod uninitialized;
/// Generation of MLIR types from their Sierra counterparts.
///
/// All possible Sierra types must implement it. It is already implemented for all the core Sierra
/// types, contained in [CoreTypeConcrete].
pub trait TypeBuilder {
    /// Error type returned by this trait's methods.
    type Error: Error;
    /// Build the MLIR type corresponding to this Sierra type.
    fn build<'ctx>(
        &self,
        context: &'ctx Context,
        module: &Module<'ctx>,
        registry: &ProgramRegistry<CoreType, CoreLibfunc>,
        metadata: &mut MetadataStorage,
        self_ty: &ConcreteTypeId,
    ) -> Result<Type<'ctx>, Self::Error>;
    /// Return whether the type is a builtin.
    fn is_builtin(&self) -> bool;
    /// Return whether the type requires a return pointer when returning,
    /// instead of using the CPU registers.
    ///
    /// This attribute does not modify the compilation, and it only reflects
    /// what the ABI of the target architecture already specifies.
    /// - For x86-64: https://gitlab.com/x86-psABIs/x86-64-ABI.
    /// - For AArch64: https://github.com/ARM-software/abi-aa.
    ///
    /// We can validate this empirically, by building a Cairo program that
    /// returns a particular type, and seeing how it is lowered to machine code.
    ///
    /// ```bash
    /// llc a.llvmir -o - --mtriple "aarch64"
    /// llc a.llvmir -o - --mtriple "x86_64"
    /// ```
    fn is_complex(
        &self,
        registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    ) -> Result<bool, Self::Error>;
    /// Return whether the Sierra type resolves to a zero-sized type.
    fn is_zst(
        &self,
        registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    ) -> Result<bool, Self::Error>;
    /// Generate the layout of the MLIR type.
    ///
    /// Used in both the compiler and the interface when calling the compiled code.
    fn layout(
        &self,
        registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    ) -> Result<Layout, Self::Error>;
    /// Whether the layout should be allocated in memory (either the stack or
    /// the heap) when used as a function invocation argument or return value.
    ///
    /// Unlike `is_complex`, this attribute alters the compilation:
    ///
    /// - When passing a memory allocated value to a function, we allocate that
    ///   value on the stack, and pass a pointer to it.
    ///
    /// - If a function returns a memory allocated value, we receive a return
    ///   pointer as its first argument, and write the return value there
    ///   instead.
    ///
    /// The rationale behind allocating a value in memory, rather than
    /// registers, is to avoid putting too much pressure on the register
    /// allocation pass for really complex types, like enums.
    fn is_memory_allocated(
        &self,
        registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    ) -> Result<bool, Self::Error>;
    /// If the type is an integer, return its value range.
    fn integer_range(
        &self,
        registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    ) -> Result<Range, Self::Error>;
    /// Return whether the type is a `BoundedInt<>`, either directly or indirectly (ex. through
    /// `NonZero<BoundedInt<>>`).
    fn is_bounded_int(
        &self,
        registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    ) -> Result<bool, Self::Error>;
    /// Return whether the type is a `felt252`, either directly or indirectly (ex. through
    /// `NonZero<BoundedInt<>>`).
    fn is_felt252(
        &self,
        registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    ) -> Result<bool, Self::Error>;
    /// If the type is an enum type, return all possible variants.
    ///
    /// TODO(review): document where this is consumed; usage is not visible
    /// from this file.
    fn variants(&self) -> Option<&[ConcreteTypeId]>;
    /// Build a default-initialized value of this type into `entry`.
    ///
    /// NOTE(review): semantics inferred from the name and signature —
    /// confirm against the implementors.
    #[allow(clippy::too_many_arguments)]
    fn build_default<'ctx, 'this>(
        &self,
        context: &'ctx Context,
        registry: &ProgramRegistry<CoreType, CoreLibfunc>,
        entry: &'this Block<'ctx>,
        location: Location<'ctx>,
        helper: &LibfuncHelper<'ctx, 'this>,
        metadata: &mut MetadataStorage,
        self_ty: &ConcreteTypeId,
    ) -> Result<Value<'ctx, 'this>, Self::Error>;
}
impl TypeBuilder for CoreTypeConcrete {
type Error = CoreTypeBuilderError;
    /// Build the MLIR type for this Sierra type by dispatching to the
    /// matching per-type builder module (`self::array`, `self::felt252`, …),
    /// wrapping the type info together with its own id via [`WithSelf`].
    ///
    /// # Panics
    ///
    /// Panics via `native_panic!` for the not-yet-implemented `Const`,
    /// `Span`, `Blake` and `QM31` types.
    fn build<'ctx>(
        &self,
        context: &'ctx Context,
        module: &Module<'ctx>,
        registry: &ProgramRegistry<CoreType, CoreLibfunc>,
        metadata: &mut MetadataStorage,
        self_ty: &ConcreteTypeId,
    ) -> Result<Type<'ctx>, Self::Error> {
        match self {
            Self::Array(info) => self::array::build(
                context,
                module,
                registry,
                metadata,
                WithSelf::new(self_ty, info),
            ),
            Self::Bitwise(info) => self::bitwise::build(
                context,
                module,
                registry,
                metadata,
                WithSelf::new(self_ty, info),
            ),
            Self::BoundedInt(info) => self::bounded_int::build(
                context,
                module,
                registry,
                metadata,
                WithSelf::new(self_ty, info),
            ),
            Self::Box(info) => self::r#box::build(
                context,
                module,
                registry,
                metadata,
                WithSelf::new(self_ty, info),
            ),
            Self::Bytes31(info) => self::bytes31::build(context, module, registry, metadata, info),
            Self::BuiltinCosts(info) => self::builtin_costs::build(
                context,
                module,
                registry,
                metadata,
                WithSelf::new(self_ty, info),
            ),
            Self::Const(_) => native_panic!("todo: Const type to MLIR type"),
            Self::EcOp(info) => self::ec_op::build(
                context,
                module,
                registry,
                metadata,
                WithSelf::new(self_ty, info),
            ),
            Self::EcPoint(info) => self::ec_point::build(
                context,
                module,
                registry,
                metadata,
                WithSelf::new(self_ty, info),
            ),
            Self::EcState(info) => self::ec_state::build(
                context,
                module,
                registry,
                metadata,
                WithSelf::new(self_ty, info),
            ),
            Self::Enum(info) => self::r#enum::build(
                context,
                module,
                registry,
                metadata,
                WithSelf::new(self_ty, info),
            ),
            Self::Felt252(info) => self::felt252::build(
                context,
                module,
                registry,
                metadata,
                WithSelf::new(self_ty, info),
            ),
            Self::Felt252Dict(info) => self::felt252_dict::build(
                context,
                module,
                registry,
                metadata,
                WithSelf::new(self_ty, info),
            ),
            Self::Felt252DictEntry(info) => self::felt252_dict_entry::build(
                context,
                module,
                registry,
                metadata,
                WithSelf::new(self_ty, info),
            ),
            Self::GasBuiltin(info) => self::gas_builtin::build(
                context,
                module,
                registry,
                metadata,
                WithSelf::new(self_ty, info),
            ),
            Self::NonZero(info) => self::non_zero::build(
                context,
                module,
                registry,
                metadata,
                WithSelf::new(self_ty, info),
            ),
            Self::Nullable(info) => self::nullable::build(
                context,
                module,
                registry,
                metadata,
                WithSelf::new(self_ty, info),
            ),
            Self::Pedersen(info) => self::pedersen::build(
                context,
                module,
                registry,
                metadata,
                WithSelf::new(self_ty, info),
            ),
            Self::Poseidon(info) => self::poseidon::build(
                context,
                module,
                registry,
                metadata,
                WithSelf::new(self_ty, info),
            ),
            Self::RangeCheck(info) => self::range_check::build(
                context,
                module,
                registry,
                metadata,
                WithSelf::new(self_ty, info),
            ),
            // RangeCheck96 shares the RangeCheck builder.
            Self::RangeCheck96(info) => self::range_check::build(
                context,
                module,
                registry,
                metadata,
                WithSelf::new(self_ty, info),
            ),
            Self::SegmentArena(info) => self::segment_arena::build(
                context,
                module,
                registry,
                metadata,
                WithSelf::new(self_ty, info),
            ),
            // Signed integer types reuse the unsigned builders of the same
            // width (the MLIR integer type is sign-agnostic).
            Self::Sint8(info) => self::uint8::build(
                context,
                module,
                registry,
                metadata,
                WithSelf::new(self_ty, info),
            ),
            Self::Sint16(info) => self::uint16::build(
                context,
                module,
                registry,
                metadata,
                WithSelf::new(self_ty, info),
            ),
            Self::Sint32(info) => self::uint32::build(
                context,
                module,
                registry,
                metadata,
                WithSelf::new(self_ty, info),
            ),
            Self::Sint64(info) => self::uint64::build(
                context,
                module,
                registry,
                metadata,
                WithSelf::new(self_ty, info),
            ),
            Self::Sint128(info) => self::uint128::build(
                context,
                module,
                registry,
                metadata,
                WithSelf::new(self_ty, info),
            ),
            Self::Snapshot(info) => self::snapshot::build(
                context,
                module,
                registry,
                metadata,
                WithSelf::new(self_ty, info),
            ),
            Self::Span(_) => native_panic!("todo: Span type to MLIR type"),
            Self::SquashedFelt252Dict(info) => self::squashed_felt252_dict::build(
                context,
                module,
                registry,
                metadata,
                WithSelf::new(self_ty, info),
            ),
            Self::Starknet(selector) => self::starknet::build(
                context,
                module,
                registry,
                metadata,
                WithSelf::new(self_ty, selector),
            ),
            Self::Struct(info) => self::r#struct::build(
                context,
                module,
                registry,
                metadata,
                WithSelf::new(self_ty, info),
            ),
            Self::Uint128(info) => self::uint128::build(
                context,
                module,
                registry,
                metadata,
                WithSelf::new(self_ty, info),
            ),
            Self::Uint128MulGuarantee(info) => self::uint128_mul_guarantee::build(
                context,
                module,
                registry,
                metadata,
                WithSelf::new(self_ty, info),
            ),
            Self::Uint16(info) => self::uint16::build(
                context,
                module,
                registry,
                metadata,
                WithSelf::new(self_ty, info),
            ),
            Self::Uint32(info) => self::uint32::build(
                context,
                module,
                registry,
                metadata,
                WithSelf::new(self_ty, info),
            ),
            Self::Uint64(info) => self::uint64::build(
                context,
                module,
                registry,
                metadata,
                WithSelf::new(self_ty, info),
            ),
            Self::Uint8(info) => self::uint8::build(
                context,
                module,
                registry,
                metadata,
                WithSelf::new(self_ty, info),
            ),
            Self::Uninitialized(info) => self::uninitialized::build(
                context,
                module,
                registry,
                metadata,
                WithSelf::new(self_ty, info),
            ),
            CoreTypeConcrete::Coupon(info) => self::coupon::build(
                context,
                module,
                registry,
                metadata,
                WithSelf::new(self_ty, info),
            ),
            CoreTypeConcrete::Circuit(info) => self::circuit::build(
                context,
                module,
                registry,
                metadata,
                WithSelf::new(self_ty, info),
            ),
            Self::IntRange(info) => self::int_range::build(
                context,
                module,
                registry,
                metadata,
                WithSelf::new(self_ty, info),
            ),
            CoreTypeConcrete::GasReserve(info) => self::gas_reserve::build(
                context,
                module,
                registry,
                metadata,
                WithSelf::new(self_ty, info),
            ),
            Self::Blake(_) => native_panic!("Build Blake type"),
            CoreTypeConcrete::QM31(_) => native_panic!("Build QM31 type"),
        }
    }
/// Returns `true` when this type is one of the builtin types listed below
/// (range checks, hashes, gas/segment bookkeeping, the system object, and
/// the circuit mod builtins).
fn is_builtin(&self) -> bool {
    match self {
        CoreTypeConcrete::Bitwise(_)
        | CoreTypeConcrete::EcOp(_)
        | CoreTypeConcrete::GasBuiltin(_)
        | CoreTypeConcrete::BuiltinCosts(_)
        | CoreTypeConcrete::RangeCheck(_)
        | CoreTypeConcrete::RangeCheck96(_)
        | CoreTypeConcrete::Pedersen(_)
        | CoreTypeConcrete::Poseidon(_)
        | CoreTypeConcrete::Coupon(_)
        | CoreTypeConcrete::Starknet(StarknetTypeConcrete::System(_))
        | CoreTypeConcrete::SegmentArena(_)
        | CoreTypeConcrete::Circuit(CircuitTypeConcrete::AddMod(_))
        | CoreTypeConcrete::Circuit(CircuitTypeConcrete::MulMod(_)) => true,
        _ => false,
    }
}
fn is_complex(
&self,
registry: &ProgramRegistry<CoreType, CoreLibfunc>,
) -> Result<bool, Self::Error> {
Ok(match self {
// Builtins.
CoreTypeConcrete::Bitwise(_)
| CoreTypeConcrete::EcOp(_)
| CoreTypeConcrete::GasBuiltin(_)
| CoreTypeConcrete::BuiltinCosts(_)
| CoreTypeConcrete::RangeCheck(_)
| CoreTypeConcrete::Pedersen(_)
| CoreTypeConcrete::Poseidon(_)
| CoreTypeConcrete::RangeCheck96(_)
| CoreTypeConcrete::Starknet(StarknetTypeConcrete::System(_)) // u64 is not complex
| CoreTypeConcrete::SegmentArena(_) => false,
CoreTypeConcrete::Box(_)
| CoreTypeConcrete::Uint8(_)
| CoreTypeConcrete::Uint16(_)
| CoreTypeConcrete::Uint32(_)
| CoreTypeConcrete::Uint64(_)
| CoreTypeConcrete::Uint128(_)
| CoreTypeConcrete::Uint128MulGuarantee(_)
| CoreTypeConcrete::Sint8(_)
| CoreTypeConcrete::Sint16(_)
| CoreTypeConcrete::Sint32(_)
| CoreTypeConcrete::Sint64(_)
| CoreTypeConcrete::Sint128(_)
| CoreTypeConcrete::Nullable(_)
| CoreTypeConcrete::Felt252Dict(_)
| CoreTypeConcrete::SquashedFelt252Dict(_) => false,
CoreTypeConcrete::Array(_) => true,
CoreTypeConcrete::EcPoint(_) => true,
CoreTypeConcrete::EcState(_) => true,
CoreTypeConcrete::Felt252DictEntry(_) => true,
CoreTypeConcrete::Felt252(_)
| CoreTypeConcrete::Bytes31(_)
| CoreTypeConcrete::Starknet(
StarknetTypeConcrete::ClassHash(_)
| StarknetTypeConcrete::ContractAddress(_)
| StarknetTypeConcrete::StorageAddress(_)
| StarknetTypeConcrete::StorageBaseAddress(_)
) => {
#[cfg(target_arch = "x86_64")]
let value = true;
#[cfg(target_arch = "aarch64")]
let value = false;
value
},
CoreTypeConcrete::NonZero(info)
| CoreTypeConcrete::Uninitialized(info)
| CoreTypeConcrete::Snapshot(info) => registry.get_type(&info.ty)?.is_complex(registry)?,
CoreTypeConcrete::Enum(info) => match info.variants.len() {
0 => false,
1 => registry.get_type(&info.variants[0])?.is_complex(registry)?,
_ => !self.is_zst(registry)?,
},
CoreTypeConcrete::Struct(_) => true,
CoreTypeConcrete::BoundedInt(_info) => {
#[cfg(target_arch = "x86_64")]
let value = _info.range.offset_bit_width() > 128;
#[cfg(target_arch = "aarch64")]
let value = false;
value
},
CoreTypeConcrete::Const(_) => native_panic!("todo: check Const is complex"),
CoreTypeConcrete::Span(_) => native_panic!("todo: check Span is complex"),
CoreTypeConcrete::Starknet(StarknetTypeConcrete::Secp256Point(_))
| CoreTypeConcrete::Starknet(StarknetTypeConcrete::Sha256StateHandle(_)) => native_panic!("todo: check Sha256StateHandle is complex"),
CoreTypeConcrete::Coupon(_) => false,
CoreTypeConcrete::Circuit(info) => circuit::is_complex(info),
CoreTypeConcrete::IntRange(_info) => false,
CoreTypeConcrete::GasReserve(_info) => false,
CoreTypeConcrete::Blake(_info) => native_panic!("Implement is_complex for Blake type"),
CoreTypeConcrete::QM31(_info) => native_panic!("Implement is_complex for QM31 type"),
})
}
/// Checks whether values of this type occupy zero bytes.
///
/// Wrapper types delegate to the wrapped type; an enum is zero-sized only
/// when it has no variants or a single zero-sized variant; a struct is
/// zero-sized when every member is.
///
/// # Errors
/// Propagates registry lookup failures from recursive type queries.
fn is_zst(
    &self,
    registry: &ProgramRegistry<CoreType, CoreLibfunc>,
) -> Result<bool, Self::Error> {
    Ok(match self {
        // Builtin counters:
        CoreTypeConcrete::Bitwise(_)
        | CoreTypeConcrete::EcOp(_)
        | CoreTypeConcrete::RangeCheck(_)
        | CoreTypeConcrete::Pedersen(_)
        | CoreTypeConcrete::Poseidon(_)
        | CoreTypeConcrete::RangeCheck96(_)
        | CoreTypeConcrete::SegmentArena(_) => false,
        // A ptr to a list of costs.
        CoreTypeConcrete::BuiltinCosts(_) => false,
        // Other builtins:
        CoreTypeConcrete::Uint128MulGuarantee(_) | CoreTypeConcrete::Coupon(_) => true,
        // Normal types:
        CoreTypeConcrete::Array(_)
        | CoreTypeConcrete::Box(_)
        | CoreTypeConcrete::Bytes31(_)
        | CoreTypeConcrete::EcPoint(_)
        | CoreTypeConcrete::EcState(_)
        | CoreTypeConcrete::Felt252(_)
        | CoreTypeConcrete::GasBuiltin(_)
        | CoreTypeConcrete::Uint8(_)
        | CoreTypeConcrete::Uint16(_)
        | CoreTypeConcrete::Uint32(_)
        | CoreTypeConcrete::Uint64(_)
        | CoreTypeConcrete::Uint128(_)
        | CoreTypeConcrete::Sint8(_)
        | CoreTypeConcrete::Sint16(_)
        | CoreTypeConcrete::Sint32(_)
        | CoreTypeConcrete::Sint64(_)
        | CoreTypeConcrete::Sint128(_)
        | CoreTypeConcrete::Felt252Dict(_)
        | CoreTypeConcrete::Felt252DictEntry(_)
        | CoreTypeConcrete::SquashedFelt252Dict(_)
        | CoreTypeConcrete::Starknet(_)
        | CoreTypeConcrete::Nullable(_) => false,
        // Containers: zero-sized iff the wrapped type is.
        CoreTypeConcrete::NonZero(info)
        | CoreTypeConcrete::Uninitialized(info)
        | CoreTypeConcrete::Snapshot(info) => {
            let type_info = registry.get_type(&info.ty)?;
            type_info.is_zst(registry)?
        }
        // Enums and structs:
        CoreTypeConcrete::Enum(info) => {
            // No variants (uninstantiable) or exactly one zero-sized variant
            // (no tag needed).
            info.variants.is_empty()
                || (info.variants.len() == 1
                    && registry.get_type(&info.variants[0])?.is_zst(registry)?)
        }
        CoreTypeConcrete::Struct(info) => {
            // Zero-sized only if every member is; bail out early otherwise.
            let mut is_zst = true;
            for member in &info.members {
                if !registry.get_type(member)?.is_zst(registry)? {
                    is_zst = false;
                    break;
                }
            }
            is_zst
        }
        CoreTypeConcrete::BoundedInt(_) => false,
        CoreTypeConcrete::GasReserve(_info) => false,
        // Const wraps another type; delegate to it.
        CoreTypeConcrete::Const(info) => {
            let type_info = registry.get_type(&info.inner_ty)?;
            type_info.is_zst(registry)?
        }
        CoreTypeConcrete::Span(_) => native_panic!("todo: check Span is zero sized"),
        CoreTypeConcrete::Circuit(info) => circuit::is_zst(info),
        CoreTypeConcrete::IntRange(info) => {
            let type_info = registry.get_type(&info.ty)?;
            type_info.is_zst(registry)?
        }
        CoreTypeConcrete::Blake(_info) => native_panic!("Implement is_zst for Blake type"),
        CoreTypeConcrete::QM31(_info) => native_panic!("Implement is_zst for QM31 type"),
    })
}
/// Computes the in-memory `Layout` (size and alignment) of values of this
/// type, recursing through the registry for composite types. The final
/// layout is padded to its alignment.
///
/// # Errors
/// Propagates registry lookup failures and `Layout` construction errors.
fn layout(
    &self,
    registry: &ProgramRegistry<CoreType, CoreLibfunc>,
) -> Result<Layout, Self::Error> {
    Ok(match self {
        // Data pointer followed by three u32 fields — presumably
        // start/end/capacity, matching the runtime array ABI; confirm.
        CoreTypeConcrete::Array(_) => {
            Layout::new::<*mut ()>()
                .extend(get_integer_layout(32))?
                .0
                .extend(get_integer_layout(32))?
                .0
                .extend(get_integer_layout(32))?
                .0
        }
        // Builtin counters are u64.
        CoreTypeConcrete::Bitwise(_) => Layout::new::<u64>(),
        CoreTypeConcrete::Box(_) => Layout::new::<*mut ()>(),
        CoreTypeConcrete::EcOp(_) => Layout::new::<u64>(),
        // EC point: two 252-bit coordinates; EC state: four.
        CoreTypeConcrete::EcPoint(_) => layout_repeat(&get_integer_layout(252), 2)?.0,
        CoreTypeConcrete::EcState(_) => layout_repeat(&get_integer_layout(252), 4)?.0,
        CoreTypeConcrete::Felt252(_) => get_integer_layout(252),
        CoreTypeConcrete::GasBuiltin(_) => get_integer_layout(64),
        CoreTypeConcrete::BuiltinCosts(_) => Layout::new::<*const ()>(),
        CoreTypeConcrete::Uint8(_) => get_integer_layout(8),
        CoreTypeConcrete::Uint16(_) => get_integer_layout(16),
        CoreTypeConcrete::Uint32(_) => get_integer_layout(32),
        CoreTypeConcrete::Uint64(_) => get_integer_layout(64),
        CoreTypeConcrete::Uint128(_) => get_integer_layout(128),
        // Zero-sized marker type.
        CoreTypeConcrete::Uint128MulGuarantee(_) => Layout::new::<()>(),
        CoreTypeConcrete::NonZero(info) => registry.get_type(&info.ty)?.layout(registry)?,
        CoreTypeConcrete::Nullable(_) => Layout::new::<*mut ()>(),
        CoreTypeConcrete::RangeCheck(_) => Layout::new::<u64>(),
        CoreTypeConcrete::Uninitialized(info) => {
            registry.get_type(&info.ty)?.layout(registry)?
        }
        CoreTypeConcrete::Enum(info) => {
            // Tag uses the minimum number of bits for the variant count,
            // rounded up to a power of two.
            let tag_layout =
                get_integer_layout(info.variants.len().next_power_of_two().trailing_zeros());
            // The enum layout is the max over (tag + variant payload) of
            // every variant, in both size and alignment.
            info.variants.iter().try_fold(tag_layout, |acc, id| {
                let layout = tag_layout
                    .extend(registry.get_type(id)?.layout(registry)?)?
                    .0;
                Result::<_, Self::Error>::Ok(Layout::from_size_align(
                    acc.size().max(layout.size()),
                    acc.align().max(layout.align()),
                )?)
            })?
        }
        // Struct layout chains member layouts; an empty struct is (0, 1).
        CoreTypeConcrete::Struct(info) => info
            .members
            .iter()
            .try_fold(Option::<Layout>::None, |acc, id| {
                Result::<_, Self::Error>::Ok(Some(match acc {
                    Some(layout) => layout.extend(registry.get_type(id)?.layout(registry)?)?.0,
                    None => registry.get_type(id)?.layout(registry)?,
                }))
            })?
            .unwrap_or(Layout::from_size_align(0, 1)?),
        CoreTypeConcrete::Felt252Dict(_) => Layout::new::<*mut std::ffi::c_void>(), // ptr
        // Dict entry: key felt followed by two pointers.
        CoreTypeConcrete::Felt252DictEntry(_) => {
            get_integer_layout(252)
                .extend(Layout::new::<*mut std::ffi::c_void>())?
                .0
                .extend(Layout::new::<*mut std::ffi::c_void>())?
                .0
        }
        CoreTypeConcrete::SquashedFelt252Dict(_) => Layout::new::<*mut std::ffi::c_void>(), // ptr
        CoreTypeConcrete::Pedersen(_) => Layout::new::<u64>(),
        CoreTypeConcrete::Poseidon(_) => Layout::new::<u64>(),
        CoreTypeConcrete::Span(_) => native_panic!("todo: create layout for Span"),
        CoreTypeConcrete::Starknet(info) => match info {
            // Address-like types are 252-bit integers.
            StarknetTypeConcrete::ClassHash(_) => get_integer_layout(252),
            StarknetTypeConcrete::ContractAddress(_) => get_integer_layout(252),
            StarknetTypeConcrete::StorageBaseAddress(_) => get_integer_layout(252),
            StarknetTypeConcrete::StorageAddress(_) => get_integer_layout(252),
            StarknetTypeConcrete::System(_) => Layout::new::<*mut ()>(),
            // Two 256-bit coordinates plus a 1-bit infinity flag.
            StarknetTypeConcrete::Secp256Point(_) => {
                get_integer_layout(256)
                    .extend(get_integer_layout(256))?
                    .0
                    .extend(get_integer_layout(1))?
                    .0
            }
            StarknetTypeConcrete::Sha256StateHandle(_) => Layout::new::<*mut ()>(),
        },
        CoreTypeConcrete::SegmentArena(_) => Layout::new::<u64>(),
        CoreTypeConcrete::Snapshot(info) => registry.get_type(&info.ty)?.layout(registry)?,
        CoreTypeConcrete::Sint8(_) => get_integer_layout(8),
        CoreTypeConcrete::Sint16(_) => get_integer_layout(16),
        CoreTypeConcrete::Sint32(_) => get_integer_layout(32),
        CoreTypeConcrete::Sint64(_) => get_integer_layout(64),
        CoreTypeConcrete::Sint128(_) => get_integer_layout(128),
        CoreTypeConcrete::Bytes31(_) => get_integer_layout(248),
        // Width depends on the declared value range.
        CoreTypeConcrete::BoundedInt(info) => get_integer_layout(info.range.offset_bit_width()),
        CoreTypeConcrete::GasReserve(_info) => get_integer_layout(128),
        CoreTypeConcrete::Const(const_type) => {
            registry.get_type(&const_type.inner_ty)?.layout(registry)?
        }
        CoreTypeConcrete::Coupon(_) => Layout::new::<()>(),
        CoreTypeConcrete::RangeCheck96(_) => get_integer_layout(64),
        CoreTypeConcrete::Circuit(info) => circuit::layout(registry, info)?,
        // A range is two copies of the element layout (start, end).
        CoreTypeConcrete::IntRange(info) => {
            let inner = registry.get_type(&info.ty)?.layout(registry)?;
            inner.extend(inner)?.0
        }
        CoreTypeConcrete::Blake(_info) => native_panic!("Implement layout for Blake type"),
        CoreTypeConcrete::QM31(_info) => native_panic!("Implement layout for QM31 type"),
    }
    .pad_to_align())
}
fn is_memory_allocated(
&self,
registry: &ProgramRegistry<CoreType, CoreLibfunc>,
) -> Result<bool, Self::Error> {
// Right now, only enums and other structures which may end up passing a flattened enum as
// arguments.
Ok(match self {
CoreTypeConcrete::IntRange(_) => false,
CoreTypeConcrete::Blake(_info) => {
native_panic!("Implement is_memory_allocated for Blake type")
}
CoreTypeConcrete::Array(_) => false,
CoreTypeConcrete::Bitwise(_) => false,
CoreTypeConcrete::Box(_) => false,
CoreTypeConcrete::EcOp(_) => false,
CoreTypeConcrete::EcPoint(_) => false,
CoreTypeConcrete::EcState(_) => false,
CoreTypeConcrete::Felt252(_) => false,
CoreTypeConcrete::GasBuiltin(_) => false,
CoreTypeConcrete::BuiltinCosts(_) => false,
CoreTypeConcrete::Uint8(_) => false,
CoreTypeConcrete::Uint16(_) => false,
CoreTypeConcrete::Uint32(_) => false,
CoreTypeConcrete::Uint64(_) => false,
CoreTypeConcrete::Uint128(_) => false,
CoreTypeConcrete::Uint128MulGuarantee(_) => false,
CoreTypeConcrete::Sint8(_) => false,
CoreTypeConcrete::Sint16(_) => false,
CoreTypeConcrete::Sint32(_) => false,
CoreTypeConcrete::Sint64(_) => false,
CoreTypeConcrete::Sint128(_) => false,
CoreTypeConcrete::NonZero(_) => false,
CoreTypeConcrete::Nullable(_) => false,
CoreTypeConcrete::RangeCheck(_) => false,
CoreTypeConcrete::RangeCheck96(_) => false,
CoreTypeConcrete::Uninitialized(_) => false,
CoreTypeConcrete::Enum(info) => match info.variants.len() {
0 => false,
1 => registry
.get_type(&info.variants[0])?
.is_memory_allocated(registry)?,
_ => true,
},
CoreTypeConcrete::Struct(info) => {
let mut is_memory_allocated = false;
for member in &info.members {
if registry.get_type(member)?.is_memory_allocated(registry)? {
is_memory_allocated = true;
break;
}
}
is_memory_allocated
}
CoreTypeConcrete::Felt252Dict(_) => false,
CoreTypeConcrete::Felt252DictEntry(_) => false,
CoreTypeConcrete::SquashedFelt252Dict(_) => false,
CoreTypeConcrete::Pedersen(_) => false,
CoreTypeConcrete::Poseidon(_) => false,
CoreTypeConcrete::Span(_) => false,
CoreTypeConcrete::Starknet(_) => false,
CoreTypeConcrete::SegmentArena(_) => false,
CoreTypeConcrete::Snapshot(info) => {
registry.get_type(&info.ty)?.is_memory_allocated(registry)?
}
CoreTypeConcrete::Bytes31(_) => false,
CoreTypeConcrete::BoundedInt(_) => false,
CoreTypeConcrete::Const(info) => registry
.get_type(&info.inner_ty)?
| rust | Apache-2.0 | f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce | 2026-01-04T20:20:54.031924Z | true |
//! Starknet related code for `cairo_native`
use serde::{Deserialize, Serialize};
use starknet_types_core::felt::Felt;
/// Result of a syscall: the success payload, or an error represented as a
/// vector of felts.
pub type SyscallResult<T> = std::result::Result<T, Vec<Felt>>;

/// C ABI mirror of an array value as produced by compiled MLIR code.
///
/// The live elements are the `[since, until)` window of the allocation
/// (see the `From<&ArrayAbi<Felt252Abi>>` conversion below).
#[repr(C)]
#[derive(Debug)]
pub struct ArrayAbi<T> {
    /// Pointer to the allocation's data pointer (double indirection).
    pub ptr: *mut *mut T,
    /// Index of the first live element within the allocation.
    pub since: u32,
    /// One-past-the-end index of the live elements.
    pub until: u32,
    /// Allocation capacity — presumably in elements; confirm against the
    /// array runtime.
    pub capacity: u32,
}
impl From<&ArrayAbi<Felt252Abi>> for Vec<Felt> {
    /// Copies the live `[since, until)` window of the C-ABI array into an
    /// owned `Vec<Felt>`, converting each element.
    fn from(value: &ArrayAbi<Felt252Abi>) -> Self {
        unsafe {
            let since_offset = value.since as usize;
            let until_offset = value.until as usize;
            // In release builds an inverted window would make the
            // subtraction below wrap; the caller must uphold
            // `since <= until`.
            debug_assert!(since_offset <= until_offset);
            let len = until_offset - since_offset;
            match len {
                // Avoid touching `ptr` at all for empty arrays — the data
                // pointer may not reference a valid allocation in that case.
                0 => &[],
                // SAFETY: for non-empty arrays the caller guarantees `ptr`
                // points to a valid data pointer with at least `until`
                // initialized elements.
                _ => std::slice::from_raw_parts(value.ptr.read().add(since_offset), len),
            }
        }
        .iter()
        .map(Felt::from)
        .collect()
    }
}
/// Binary representation of a `Felt` (in MLIR).
///
/// A little-endian 32-byte buffer; only the low 252 bits are significant —
/// the `From` conversions below mask off the top nibble of the last byte.
#[derive(Debug, Clone, Copy)]
#[repr(C, align(16))]
pub struct Felt252Abi(pub [u8; 32]);
impl From<Felt252Abi> for Felt {
    /// Interprets the buffer as a little-endian felt.
    fn from(mut value: Felt252Abi) -> Felt {
        // Clear the top 4 bits of the most significant byte so that only
        // the low 252 bits contribute to the value.
        value.0[31] &= 0x0F;
        Felt::from_bytes_le(&value.0)
    }
}
impl From<&Felt252Abi> for Felt {
    /// Converts a borrowed ABI felt by delegating to the by-value
    /// conversion (`Felt252Abi` is `Copy`), keeping the 252-bit masking
    /// logic in a single place instead of duplicating it.
    fn from(value: &Felt252Abi) -> Felt {
        Felt::from(*value)
    }
}
/// Binary representation of a `u256` (in MLIR).
///
/// Stored as two little-endian 128-bit halves.
#[derive(
    Debug,
    Clone,
    Copy,
    PartialEq,
    Eq,
    PartialOrd,
    Ord,
    Hash,
    serde::Serialize,
    serde::Deserialize,
    Default,
)]
#[repr(C, align(16))]
pub struct U256 {
    /// Low 128 bits.
    pub lo: u128,
    /// High 128 bits.
    pub hi: u128,
}

/// Execution context returned by the `get_execution_info` syscall.
#[derive(
    Debug,
    Clone,
    PartialEq,
    Eq,
    PartialOrd,
    Ord,
    Hash,
    Default,
    serde::Serialize,
    serde::Deserialize,
)]
pub struct ExecutionInfo {
    pub block_info: BlockInfo,
    pub tx_info: TxInfo,
    pub caller_address: Felt,
    pub contract_address: Felt,
    pub entry_point_selector: Felt,
}

/// Execution context returned by the `get_execution_info_v2` syscall;
/// carries the extended v2 transaction info.
#[derive(
    Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, serde::Serialize, serde::Deserialize,
)]
pub struct ExecutionInfoV2 {
    pub block_info: BlockInfo,
    pub tx_info: TxV2Info,
    pub caller_address: Felt,
    pub contract_address: Felt,
    pub entry_point_selector: Felt,
}

/// V2 transaction information (adds resource bounds, tip, paymaster and
/// data-availability fields over [`TxInfo`]).
#[derive(
    Debug,
    Clone,
    PartialEq,
    Eq,
    PartialOrd,
    Ord,
    Hash,
    Default,
    serde::Serialize,
    serde::Deserialize,
)]
pub struct TxV2Info {
    pub version: Felt,
    pub account_contract_address: Felt,
    pub max_fee: u128,
    pub signature: Vec<Felt>,
    pub transaction_hash: Felt,
    pub chain_id: Felt,
    pub nonce: Felt,
    pub resource_bounds: Vec<ResourceBounds>,
    pub tip: u128,
    pub paymaster_data: Vec<Felt>,
    pub nonce_data_availability_mode: u32,
    pub fee_data_availability_mode: u32,
    pub account_deployment_data: Vec<Felt>,
}

/// Execution context returned by the `get_execution_info_v3` syscall.
#[derive(
    Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, serde::Serialize, serde::Deserialize,
)]
pub struct ExecutionInfoV3 {
    pub block_info: BlockInfo,
    pub tx_info: TxV3Info,
    pub caller_address: Felt,
    pub contract_address: Felt,
    pub entry_point_selector: Felt,
}

/// V3 transaction information (extends [`TxV2Info`] with `proof_facts`).
#[derive(
    Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, serde::Serialize, serde::Deserialize,
)]
pub struct TxV3Info {
    pub version: Felt,
    pub account_contract_address: Felt,
    pub max_fee: u128,
    pub signature: Vec<Felt>,
    pub transaction_hash: Felt,
    pub chain_id: Felt,
    pub nonce: Felt,
    pub resource_bounds: Vec<ResourceBounds>,
    pub tip: u128,
    pub paymaster_data: Vec<Felt>,
    pub nonce_data_availability_mode: u32,
    pub fee_data_availability_mode: u32,
    pub account_deployment_data: Vec<Felt>,
    pub proof_facts: Vec<Felt>,
}

/// Per-resource fee bound of a v2/v3 transaction.
#[derive(
    Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, serde::Serialize, serde::Deserialize,
)]
pub struct ResourceBounds {
    pub resource: Felt,
    pub max_amount: u64,
    pub max_price_per_unit: u128,
}

/// Block-level execution context.
#[derive(
    Debug,
    Clone,
    Copy,
    PartialEq,
    Eq,
    PartialOrd,
    Ord,
    Hash,
    Default,
    serde::Serialize,
    serde::Deserialize,
)]
pub struct BlockInfo {
    pub block_number: u64,
    pub block_timestamp: u64,
    pub sequencer_address: Felt,
}

/// V1 transaction information.
#[derive(
    Debug,
    Clone,
    PartialEq,
    Eq,
    PartialOrd,
    Ord,
    Hash,
    Default,
    serde::Serialize,
    serde::Deserialize,
)]
pub struct TxInfo {
    pub version: Felt,
    pub account_contract_address: Felt,
    pub max_fee: u128,
    pub signature: Vec<Felt>,
    pub transaction_hash: Felt,
    pub chain_id: Felt,
    pub nonce: Felt,
}
/// A secp256k1 elliptic-curve point as an (x, y) coordinate pair plus a
/// point-at-infinity flag.
///
/// NOTE(review): when `is_infinity` is set the coordinates are presumably
/// ignored — confirm against the syscall handler implementations.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, Deserialize, Serialize, Default)]
#[repr(C, align(16))]
pub struct Secp256k1Point {
    pub x: U256,
    pub y: U256,
    pub is_infinity: bool,
}
impl Secp256k1Point {
    /// Assembles a point from the 128-bit halves of each coordinate and the
    /// point-at-infinity flag.
    pub const fn new(x_lo: u128, x_hi: u128, y_lo: u128, y_hi: u128, is_infinity: bool) -> Self {
        let x = U256 { lo: x_lo, hi: x_hi };
        let y = U256 { lo: y_lo, hi: y_hi };
        Self { x, y, is_infinity }
    }
}
/// A secp256r1 elliptic-curve point as an (x, y) coordinate pair plus a
/// point-at-infinity flag (mirrors [`Secp256k1Point`]).
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, Deserialize, Serialize, Default)]
#[repr(C, align(16))]
pub struct Secp256r1Point {
    pub x: U256,
    pub y: U256,
    pub is_infinity: bool,
}
impl Secp256r1Point {
    /// Assembles a point from the 128-bit halves of each coordinate and the
    /// point-at-infinity flag.
    pub const fn new(x_lo: u128, x_hi: u128, y_lo: u128, y_hi: u128, is_infinity: bool) -> Self {
        let x = U256 { lo: x_lo, hi: x_hi };
        let y = U256 { lo: y_lo, hi: y_hi };
        Self { x, y, is_infinity }
    }
}
/// Interface for handling Starknet syscalls during native execution.
///
/// Each method corresponds to one syscall. `remaining_gas` is passed as
/// `&mut` so implementations can charge gas in place; failures are reported
/// as a `Vec<Felt>` via [`SyscallResult`].
pub trait StarknetSyscallHandler {
    /// `get_block_hash` syscall.
    fn get_block_hash(&mut self, block_number: u64, remaining_gas: &mut u64)
        -> SyscallResult<Felt>;
    /// `get_execution_info` syscall.
    fn get_execution_info(&mut self, remaining_gas: &mut u64) -> SyscallResult<ExecutionInfo>;
    /// `get_execution_info` syscall, v2 variant.
    fn get_execution_info_v2(&mut self, remaining_gas: &mut u64) -> SyscallResult<ExecutionInfoV2>;
    /// `get_execution_info` syscall, v3 variant.
    fn get_execution_info_v3(&mut self, remaining_gas: &mut u64) -> SyscallResult<ExecutionInfoV3>;
    /// `deploy` syscall.
    fn deploy(
        &mut self,
        class_hash: Felt,
        contract_address_salt: Felt,
        calldata: &[Felt],
        deploy_from_zero: bool,
        remaining_gas: &mut u64,
    ) -> SyscallResult<(Felt, Vec<Felt>)>;
    /// `replace_class` syscall.
    fn replace_class(&mut self, class_hash: Felt, remaining_gas: &mut u64) -> SyscallResult<()>;
    /// `library_call` syscall.
    fn library_call(
        &mut self,
        class_hash: Felt,
        function_selector: Felt,
        calldata: &[Felt],
        remaining_gas: &mut u64,
    ) -> SyscallResult<Vec<Felt>>;
    /// `call_contract` syscall.
    fn call_contract(
        &mut self,
        address: Felt,
        entry_point_selector: Felt,
        calldata: &[Felt],
        remaining_gas: &mut u64,
    ) -> SyscallResult<Vec<Felt>>;
    /// `storage_read` syscall.
    fn storage_read(
        &mut self,
        address_domain: u32,
        address: Felt,
        remaining_gas: &mut u64,
    ) -> SyscallResult<Felt>;
    /// `storage_write` syscall.
    fn storage_write(
        &mut self,
        address_domain: u32,
        address: Felt,
        value: Felt,
        remaining_gas: &mut u64,
    ) -> SyscallResult<()>;
    /// `emit_event` syscall.
    fn emit_event(
        &mut self,
        keys: &[Felt],
        data: &[Felt],
        remaining_gas: &mut u64,
    ) -> SyscallResult<()>;
    /// `send_message_to_l1` syscall.
    fn send_message_to_l1(
        &mut self,
        to_address: Felt,
        payload: &[Felt],
        remaining_gas: &mut u64,
    ) -> SyscallResult<()>;
    /// `keccak` syscall.
    fn keccak(&mut self, input: &[u64], remaining_gas: &mut u64) -> SyscallResult<U256>;
    /// `secp256k1_new` syscall.
    fn secp256k1_new(
        &mut self,
        x: U256,
        y: U256,
        remaining_gas: &mut u64,
    ) -> SyscallResult<Option<Secp256k1Point>>;
    /// `secp256k1_add` syscall.
    fn secp256k1_add(
        &mut self,
        p0: Secp256k1Point,
        p1: Secp256k1Point,
        remaining_gas: &mut u64,
    ) -> SyscallResult<Secp256k1Point>;
    /// `secp256k1_mul` syscall.
    fn secp256k1_mul(
        &mut self,
        p: Secp256k1Point,
        m: U256,
        remaining_gas: &mut u64,
    ) -> SyscallResult<Secp256k1Point>;
    /// `secp256k1_get_point_from_x` syscall.
    fn secp256k1_get_point_from_x(
        &mut self,
        x: U256,
        y_parity: bool,
        remaining_gas: &mut u64,
    ) -> SyscallResult<Option<Secp256k1Point>>;
    /// `secp256k1_get_xy` syscall.
    fn secp256k1_get_xy(
        &mut self,
        p: Secp256k1Point,
        remaining_gas: &mut u64,
    ) -> SyscallResult<(U256, U256)>;
    /// `secp256r1_new` syscall.
    fn secp256r1_new(
        &mut self,
        x: U256,
        y: U256,
        remaining_gas: &mut u64,
    ) -> SyscallResult<Option<Secp256r1Point>>;
    /// `secp256r1_add` syscall.
    fn secp256r1_add(
        &mut self,
        p0: Secp256r1Point,
        p1: Secp256r1Point,
        remaining_gas: &mut u64,
    ) -> SyscallResult<Secp256r1Point>;
    /// `secp256r1_mul` syscall.
    fn secp256r1_mul(
        &mut self,
        p: Secp256r1Point,
        m: U256,
        remaining_gas: &mut u64,
    ) -> SyscallResult<Secp256r1Point>;
    /// `secp256r1_get_point_from_x` syscall.
    fn secp256r1_get_point_from_x(
        &mut self,
        x: U256,
        y_parity: bool,
        remaining_gas: &mut u64,
    ) -> SyscallResult<Option<Secp256r1Point>>;
    /// `secp256r1_get_xy` syscall.
    fn secp256r1_get_xy(
        &mut self,
        p: Secp256r1Point,
        remaining_gas: &mut u64,
    ) -> SyscallResult<(U256, U256)>;
    /// `sha256_process_block` syscall; updates `state` in place.
    fn sha256_process_block(
        &mut self,
        state: &mut [u32; 8],
        block: &[u32; 16],
        remaining_gas: &mut u64,
    ) -> SyscallResult<()>;
    /// `get_class_hash_at` syscall.
    fn get_class_hash_at(
        &mut self,
        contract_address: Felt,
        remaining_gas: &mut u64,
    ) -> SyscallResult<Felt>;
    /// `meta_tx_v0` syscall.
    fn meta_tx_v0(
        &mut self,
        address: Felt,
        entry_point_selector: Felt,
        calldata: &[Felt],
        signature: &[Felt],
        remaining_gas: &mut u64,
    ) -> SyscallResult<Vec<Felt>>;
    /// Testing-only cheatcode entry point (not a real Starknet syscall).
    /// The default implementation panics.
    #[cfg(feature = "with-cheatcode")]
    fn cheatcode(&mut self, _selector: Felt, _input: &[Felt]) -> Vec<Felt> {
        unimplemented!();
    }
}
/// A placeholder [`StarknetSyscallHandler`] for executions that are not
/// expected to reach any syscall: every method panics with `unimplemented!`.
pub struct DummySyscallHandler;

// Every method deliberately panics; this type only exists to satisfy the
// trait bound when syscalls are known to be unreachable.
impl StarknetSyscallHandler for DummySyscallHandler {
    fn get_block_hash(
        &mut self,
        _block_number: u64,
        _remaining_gas: &mut u64,
    ) -> SyscallResult<Felt> {
        unimplemented!()
    }
    fn get_execution_info(&mut self, _remaining_gas: &mut u64) -> SyscallResult<ExecutionInfo> {
        unimplemented!()
    }
    fn get_execution_info_v2(
        &mut self,
        _remaining_gas: &mut u64,
    ) -> SyscallResult<ExecutionInfoV2> {
        unimplemented!()
    }
    fn get_execution_info_v3(
        &mut self,
        _remaining_gas: &mut u64,
    ) -> SyscallResult<ExecutionInfoV3> {
        unimplemented!()
    }
    fn deploy(
        &mut self,
        _class_hash: Felt,
        _contract_address_salt: Felt,
        _calldata: &[Felt],
        _deploy_from_zero: bool,
        _remaining_gas: &mut u64,
    ) -> SyscallResult<(Felt, Vec<Felt>)> {
        unimplemented!()
    }
    fn replace_class(&mut self, _class_hash: Felt, _remaining_gas: &mut u64) -> SyscallResult<()> {
        unimplemented!()
    }
    fn library_call(
        &mut self,
        _class_hash: Felt,
        _function_selector: Felt,
        _calldata: &[Felt],
        _remaining_gas: &mut u64,
    ) -> SyscallResult<Vec<Felt>> {
        unimplemented!()
    }
    fn call_contract(
        &mut self,
        _address: Felt,
        _entry_point_selector: Felt,
        _calldata: &[Felt],
        _remaining_gas: &mut u64,
    ) -> SyscallResult<Vec<Felt>> {
        unimplemented!()
    }
    fn storage_read(
        &mut self,
        _address_domain: u32,
        _address: Felt,
        _remaining_gas: &mut u64,
    ) -> SyscallResult<Felt> {
        unimplemented!()
    }
    fn storage_write(
        &mut self,
        _address_domain: u32,
        _address: Felt,
        _value: Felt,
        _remaining_gas: &mut u64,
    ) -> SyscallResult<()> {
        unimplemented!()
    }
    fn emit_event(
        &mut self,
        _keys: &[Felt],
        _data: &[Felt],
        _remaining_gas: &mut u64,
    ) -> SyscallResult<()> {
        unimplemented!()
    }
    fn send_message_to_l1(
        &mut self,
        _to_address: Felt,
        _payload: &[Felt],
        _remaining_gas: &mut u64,
    ) -> SyscallResult<()> {
        unimplemented!()
    }
    fn keccak(&mut self, _input: &[u64], _remaining_gas: &mut u64) -> SyscallResult<U256> {
        unimplemented!()
    }
    fn secp256k1_new(
        &mut self,
        _x: U256,
        _y: U256,
        _remaining_gas: &mut u64,
    ) -> SyscallResult<Option<Secp256k1Point>> {
        unimplemented!()
    }
    fn secp256k1_add(
        &mut self,
        _p0: Secp256k1Point,
        _p1: Secp256k1Point,
        _remaining_gas: &mut u64,
    ) -> SyscallResult<Secp256k1Point> {
        unimplemented!()
    }
    fn secp256k1_mul(
        &mut self,
        _p: Secp256k1Point,
        _m: U256,
        _remaining_gas: &mut u64,
    ) -> SyscallResult<Secp256k1Point> {
        unimplemented!()
    }
    fn secp256k1_get_point_from_x(
        &mut self,
        _x: U256,
        _y_parity: bool,
        _remaining_gas: &mut u64,
    ) -> SyscallResult<Option<Secp256k1Point>> {
        unimplemented!()
    }
    fn secp256k1_get_xy(
        &mut self,
        _p: Secp256k1Point,
        _remaining_gas: &mut u64,
    ) -> SyscallResult<(U256, U256)> {
        unimplemented!()
    }
    fn secp256r1_new(
        &mut self,
        _x: U256,
        _y: U256,
        _remaining_gas: &mut u64,
    ) -> SyscallResult<Option<Secp256r1Point>> {
        unimplemented!()
    }
    fn secp256r1_add(
        &mut self,
        _p0: Secp256r1Point,
        _p1: Secp256r1Point,
        _remaining_gas: &mut u64,
    ) -> SyscallResult<Secp256r1Point> {
        unimplemented!()
    }
    fn secp256r1_mul(
        &mut self,
        _p: Secp256r1Point,
        _m: U256,
        _remaining_gas: &mut u64,
    ) -> SyscallResult<Secp256r1Point> {
        unimplemented!()
    }
    fn secp256r1_get_point_from_x(
        &mut self,
        _x: U256,
        _y_parity: bool,
        _remaining_gas: &mut u64,
    ) -> SyscallResult<Option<Secp256r1Point>> {
        unimplemented!()
    }
    fn secp256r1_get_xy(
        &mut self,
        _p: Secp256r1Point,
        _remaining_gas: &mut u64,
    ) -> SyscallResult<(U256, U256)> {
        unimplemented!()
    }
    fn sha256_process_block(
        &mut self,
        _state: &mut [u32; 8],
        _block: &[u32; 16],
        _remaining_gas: &mut u64,
    ) -> SyscallResult<()> {
        unimplemented!()
    }
    fn get_class_hash_at(
        &mut self,
        _contract_address: Felt,
        _remaining_gas: &mut u64,
    ) -> SyscallResult<Felt> {
        unimplemented!()
    }
    fn meta_tx_v0(
        &mut self,
        _address: Felt,
        _entry_point_selector: Felt,
        _calldata: &[Felt],
        _signature: &[Felt],
        _remaining_gas: &mut u64,
    ) -> SyscallResult<Vec<Felt>> {
        unimplemented!()
    }
}
// TODO: Move to the correct place or remove if unused. See: https://github.com/lambdaclass/cairo_native/issues/1222
pub(crate) mod handler {
use super::*;
use crate::utils::{libc_free, libc_malloc};
use std::{
alloc::Layout,
fmt::Debug,
mem::{size_of, ManuallyDrop, MaybeUninit},
ptr::{null_mut, NonNull},
};
/// Computes the byte offset of `$field` within the type `$ident`.
///
/// NOTE(review): `MaybeUninit::uninit()` here is a temporary whose pointer
/// is taken and then projected in a later statement — the stable
/// `std::mem::offset_of!` macro (Rust 1.77+) computes the same offset
/// without `unsafe`; consider migrating.
macro_rules! field_offset {
    ( $ident:path, $field:ident ) => {
        unsafe {
            let value_ptr = std::mem::MaybeUninit::<$ident>::uninit().as_ptr();
            // addr_of! projects the field without creating a reference to
            // uninitialized data.
            let field_ptr: *const u8 = std::ptr::addr_of!((*value_ptr).$field) as *const u8;
            field_ptr.offset_from(value_ptr as *const u8) as usize
        }
    };
}
/// C ABI layout of a syscall result.
///
/// Both variants start with a `tag` byte, which presumably discriminates
/// `ok` from `err` — confirm against the MLIR lowering that reads it.
#[repr(C)]
pub union SyscallResultAbi<T> {
    pub ok: ManuallyDrop<SyscallResultAbiOk<T>>,
    pub err: ManuallyDrop<SyscallResultAbiErr>,
}

/// Success variant: tag byte followed by the payload.
#[repr(C)]
#[derive(Debug)]
pub struct SyscallResultAbiOk<T> {
    pub tag: u8,
    pub payload: ManuallyDrop<T>,
}

/// Error variant: tag byte followed by the error felts.
#[repr(C)]
#[derive(Debug)]
pub struct SyscallResultAbiErr {
    pub tag: u8,
    pub payload: ArrayAbi<Felt252Abi>,
}
/// C ABI mirror of [`ExecutionInfo`]; block/tx info live behind pointers.
#[repr(C)]
struct ExecutionInfoAbi {
    block_info: NonNull<BlockInfoAbi>,
    tx_info: NonNull<TxInfoAbi>,
    caller_address: Felt252Abi,
    contract_address: Felt252Abi,
    entry_point_selector: Felt252Abi,
}

/// C ABI mirror of [`ExecutionInfoV2`].
#[repr(C)]
struct ExecutionInfoV2Abi {
    block_info: NonNull<BlockInfoAbi>,
    tx_info: NonNull<TxInfoV2Abi>,
    caller_address: Felt252Abi,
    contract_address: Felt252Abi,
    entry_point_selector: Felt252Abi,
}

/// C ABI mirror of [`TxV2Info`].
#[repr(C)]
struct TxInfoV2Abi {
    version: Felt252Abi,
    account_contract_address: Felt252Abi,
    max_fee: u128,
    signature: ArrayAbi<Felt252Abi>,
    transaction_hash: Felt252Abi,
    chain_id: Felt252Abi,
    nonce: Felt252Abi,
    resource_bounds: ArrayAbi<ResourceBoundsAbi>,
    tip: u128,
    paymaster_data: ArrayAbi<Felt252Abi>,
    nonce_data_availability_mode: u32,
    fee_data_availability_mode: u32,
    account_deployment_data: ArrayAbi<Felt252Abi>,
}

/// C ABI mirror of [`ExecutionInfoV3`].
#[repr(C)]
struct ExecutionInfoV3Abi {
    block_info: NonNull<BlockInfoAbi>,
    tx_info: NonNull<TxInfoV3Abi>,
    caller_address: Felt252Abi,
    contract_address: Felt252Abi,
    entry_point_selector: Felt252Abi,
}

/// C ABI mirror of [`TxV3Info`].
#[repr(C)]
struct TxInfoV3Abi {
    version: Felt252Abi,
    account_contract_address: Felt252Abi,
    max_fee: u128,
    signature: ArrayAbi<Felt252Abi>,
    transaction_hash: Felt252Abi,
    chain_id: Felt252Abi,
    nonce: Felt252Abi,
    resource_bounds: ArrayAbi<ResourceBoundsAbi>,
    tip: u128,
    paymaster_data: ArrayAbi<Felt252Abi>,
    nonce_data_availability_mode: u32,
    fee_data_availability_mode: u32,
    account_deployment_data: ArrayAbi<Felt252Abi>,
    proof_facts: ArrayAbi<Felt252Abi>,
}

/// C ABI mirror of [`ResourceBounds`].
#[repr(C)]
#[derive(Debug, Clone)]
struct ResourceBoundsAbi {
    resource: Felt252Abi,
    max_amount: u64,
    max_price_per_unit: u128,
}

/// C ABI mirror of [`BlockInfo`].
#[repr(C)]
struct BlockInfoAbi {
    block_number: u64,
    block_timestamp: u64,
    sequencer_address: Felt252Abi,
}

/// C ABI mirror of [`TxInfo`].
#[repr(C)]
struct TxInfoAbi {
    version: Felt252Abi,
    account_contract_address: Felt252Abi,
    max_fee: u128,
    signature: ArrayAbi<Felt252Abi>,
    transaction_hash: Felt252Abi,
    chain_id: Felt252Abi,
    nonce: Felt252Abi,
}
/// A C ABI Wrapper around the StarknetSyscallHandler
///
/// It contains pointers to functions which can be called through MLIR based on the field offset.
/// The functions convert C ABI structures to the Rust equivalent and calls the wrapped implementation.
///
/// Unlike runtime functions, the callback table is generic to the StarknetSyscallHandler,
/// which allows the user to specify the desired implementation to use during the execution.
#[repr(C)]
#[derive(Debug)]
pub struct StarknetSyscallHandlerCallbacks<'a, T> {
    // The wrapped handler; presumably what the callbacks receive via their
    // `ptr` argument — confirm against the MLIR call sites.
    self_ptr: &'a mut T,
    // Each callback writes its outcome through `result_ptr` and may charge
    // gas through `gas`.
    get_block_hash: extern "C" fn(
        result_ptr: &mut SyscallResultAbi<Felt252Abi>,
        ptr: &mut T,
        gas: &mut u64,
        block_number: u64,
    ),
    get_execution_info: extern "C" fn(
        result_ptr: &mut SyscallResultAbi<NonNull<ExecutionInfoAbi>>,
        ptr: &mut T,
        gas: &mut u64,
    ),
    get_execution_info_v2: extern "C" fn(
        result_ptr: &mut SyscallResultAbi<NonNull<ExecutionInfoV2Abi>>,
        ptr: &mut T,
        gas: &mut u64,
    ),
    get_execution_info_v3: extern "C" fn(
        result_ptr: &mut SyscallResultAbi<NonNull<ExecutionInfoV3Abi>>,
        ptr: &mut T,
        gas: &mut u64,
    ),
    deploy: extern "C" fn(
        result_ptr: &mut SyscallResultAbi<(Felt252Abi, ArrayAbi<Felt252Abi>)>,
        ptr: &mut T,
        gas: &mut u64,
        class_hash: &Felt252Abi,
        contract_address_salt: &Felt252Abi,
        calldata: &ArrayAbi<Felt252Abi>,
        deploy_from_zero: bool,
    ),
    replace_class: extern "C" fn(
        result_ptr: &mut SyscallResultAbi<()>,
        ptr: &mut T,
        _gas: &mut u64,
        class_hash: &Felt252Abi,
    ),
    library_call: extern "C" fn(
        result_ptr: &mut SyscallResultAbi<ArrayAbi<Felt252Abi>>,
        ptr: &mut T,
        gas: &mut u64,
        class_hash: &Felt252Abi,
        function_selector: &Felt252Abi,
        calldata: &ArrayAbi<Felt252Abi>,
    ),
    call_contract: extern "C" fn(
        result_ptr: &mut SyscallResultAbi<ArrayAbi<Felt252Abi>>,
        ptr: &mut T,
        gas: &mut u64,
        address: &Felt252Abi,
        entry_point_selector: &Felt252Abi,
        calldata: &ArrayAbi<Felt252Abi>,
    ),
    storage_read: extern "C" fn(
        result_ptr: &mut SyscallResultAbi<Felt252Abi>,
        ptr: &mut T,
        gas: &mut u64,
        address_domain: u32,
        address: &Felt252Abi,
    ),
    storage_write: extern "C" fn(
        result_ptr: &mut SyscallResultAbi<()>,
        ptr: &mut T,
        gas: &mut u64,
        address_domain: u32,
        address: &Felt252Abi,
        value: &Felt252Abi,
    ),
    emit_event: extern "C" fn(
        result_ptr: &mut SyscallResultAbi<()>,
        ptr: &mut T,
        gas: &mut u64,
        keys: &ArrayAbi<Felt252Abi>,
        data: &ArrayAbi<Felt252Abi>,
    ),
    send_message_to_l1: extern "C" fn(
        result_ptr: &mut SyscallResultAbi<()>,
        ptr: &mut T,
        gas: &mut u64,
        to_address: &Felt252Abi,
        data: &ArrayAbi<Felt252Abi>,
    ),
    keccak: extern "C" fn(
        result_ptr: &mut SyscallResultAbi<U256>,
        ptr: &mut T,
        gas: &mut u64,
        input: &ArrayAbi<u64>,
    ),
    // The secp callbacks returning an optional point encode it as a
    // (tag, maybe-point) pair.
    secp256k1_new: extern "C" fn(
        result_ptr: &mut SyscallResultAbi<(u8, MaybeUninit<Secp256k1Point>)>,
        ptr: &mut T,
        gas: &mut u64,
        x: &U256,
        y: &U256,
    ),
    secp256k1_add: extern "C" fn(
        result_ptr: &mut SyscallResultAbi<Secp256k1Point>,
        ptr: &mut T,
        gas: &mut u64,
        p0: &Secp256k1Point,
        p1: &Secp256k1Point,
    ),
    secp256k1_mul: extern "C" fn(
        result_ptr: &mut SyscallResultAbi<Secp256k1Point>,
        ptr: &mut T,
        gas: &mut u64,
        p: &Secp256k1Point,
        scalar: &U256,
    ),
    secp256k1_get_point_from_x: extern "C" fn(
        result_ptr: &mut SyscallResultAbi<(u8, MaybeUninit<Secp256k1Point>)>,
        ptr: &mut T,
        gas: &mut u64,
        x: &U256,
        y_parity: &bool,
    ),
    secp256k1_get_xy: extern "C" fn(
        result_ptr: &mut SyscallResultAbi<(U256, U256)>,
        ptr: &mut T,
        gas: &mut u64,
        p: &Secp256k1Point,
    ),
    secp256r1_new: extern "C" fn(
        result_ptr: &mut SyscallResultAbi<(u8, MaybeUninit<Secp256r1Point>)>,
        ptr: &mut T,
        gas: &mut u64,
        x: &U256,
        y: &U256,
    ),
    secp256r1_add: extern "C" fn(
        result_ptr: &mut SyscallResultAbi<Secp256r1Point>,
        ptr: &mut T,
        gas: &mut u64,
        p0: &Secp256r1Point,
        p1: &Secp256r1Point,
    ),
    secp256r1_mul: extern "C" fn(
        result_ptr: &mut SyscallResultAbi<Secp256r1Point>,
        ptr: &mut T,
        gas: &mut u64,
        p: &Secp256r1Point,
        scalar: &U256,
    ),
    secp256r1_get_point_from_x: extern "C" fn(
        result_ptr: &mut SyscallResultAbi<(u8, MaybeUninit<Secp256r1Point>)>,
        ptr: &mut T,
        gas: &mut u64,
        x: &U256,
        y_parity: &bool,
    ),
    secp256r1_get_xy: extern "C" fn(
        result_ptr: &mut SyscallResultAbi<(U256, U256)>,
        ptr: &mut T,
        gas: &mut u64,
        p: &Secp256r1Point,
    ),
    sha256_process_block: extern "C" fn(
        result_ptr: &mut SyscallResultAbi<*mut [u32; 8]>,
        ptr: &mut T,
        gas: &mut u64,
        state: *mut [u32; 8],
        block: &[u32; 16],
    ),
    get_class_hash_at: extern "C" fn(
        result_ptr: &mut SyscallResultAbi<Felt252Abi>,
        ptr: &mut T,
        gas: &mut u64,
        contract_address: &Felt252Abi,
    ),
    meta_tx_v0: extern "C" fn(
        result_ptr: &mut SyscallResultAbi<ArrayAbi<Felt252Abi>>,
        ptr: &mut T,
        gas: &mut u64,
        address: &Felt252Abi,
        entry_point_selector: &Felt252Abi,
        calldata: &ArrayAbi<Felt252Abi>,
        signature: &ArrayAbi<Felt252Abi>,
    ),
    // testing syscalls
    #[cfg(feature = "with-cheatcode")]
    pub cheatcode: extern "C" fn(
        result_ptr: &mut ArrayAbi<Felt252Abi>,
        ptr: &mut T,
        selector: &Felt252Abi,
        input: &ArrayAbi<Felt252Abi>,
    ),
}
impl<'a, T> StarknetSyscallHandlerCallbacks<'a, T>
where
T: 'a,
{
// Callback field indices.
pub const CALL_CONTRACT: usize = field_offset!(Self, call_contract) >> 3;
pub const DEPLOY: usize = field_offset!(Self, deploy) >> 3;
pub const EMIT_EVENT: usize = field_offset!(Self, emit_event) >> 3;
pub const GET_BLOCK_HASH: usize = field_offset!(Self, get_block_hash) >> 3;
pub const GET_EXECUTION_INFO: usize = field_offset!(Self, get_execution_info) >> 3;
pub const GET_EXECUTION_INFOV2: usize = field_offset!(Self, get_execution_info_v2) >> 3;
pub const GET_EXECUTION_INFOV3: usize = field_offset!(Self, get_execution_info_v3) >> 3;
pub const KECCAK: usize = field_offset!(Self, keccak) >> 3;
pub const LIBRARY_CALL: usize = field_offset!(Self, library_call) >> 3;
pub const REPLACE_CLASS: usize = field_offset!(Self, replace_class) >> 3;
pub const SEND_MESSAGE_TO_L1: usize = field_offset!(Self, send_message_to_l1) >> 3;
pub const STORAGE_READ: usize = field_offset!(Self, storage_read) >> 3;
pub const STORAGE_WRITE: usize = field_offset!(Self, storage_write) >> 3;
pub const SECP256K1_NEW: usize = field_offset!(Self, secp256k1_new) >> 3;
pub const SECP256K1_ADD: usize = field_offset!(Self, secp256k1_add) >> 3;
pub const SECP256K1_MUL: usize = field_offset!(Self, secp256k1_mul) >> 3;
pub const SECP256K1_GET_POINT_FROM_X: usize =
field_offset!(Self, secp256k1_get_point_from_x) >> 3;
pub const SECP256K1_GET_XY: usize = field_offset!(Self, secp256k1_get_xy) >> 3;
pub const SECP256R1_NEW: usize = field_offset!(Self, secp256r1_new) >> 3;
pub const SECP256R1_ADD: usize = field_offset!(Self, secp256r1_add) >> 3;
pub const SECP256R1_MUL: usize = field_offset!(Self, secp256r1_mul) >> 3;
pub const SECP256R1_GET_POINT_FROM_X: usize =
field_offset!(Self, secp256r1_get_point_from_x) >> 3;
pub const SECP256R1_GET_XY: usize = field_offset!(Self, secp256r1_get_xy) >> 3;
pub const SHA256_PROCESS_BLOCK: usize = field_offset!(Self, sha256_process_block) >> 3;
pub const GET_CLASS_HASH_AT: usize = field_offset!(Self, get_class_hash_at) >> 3;
pub const META_TX_V0: usize = field_offset!(Self, meta_tx_v0) >> 3;
}
#[allow(unused_variables)]
impl<'a, T> StarknetSyscallHandlerCallbacks<'a, T>
where
T: StarknetSyscallHandler + 'a,
{
pub fn new(handler: &'a mut T) -> Self {
Self {
self_ptr: handler,
get_block_hash: Self::wrap_get_block_hash,
get_execution_info: Self::wrap_get_execution_info,
get_execution_info_v2: Self::wrap_get_execution_info_v2,
get_execution_info_v3: Self::wrap_get_execution_info_v3,
deploy: Self::wrap_deploy,
replace_class: Self::wrap_replace_class,
library_call: Self::wrap_library_call,
call_contract: Self::wrap_call_contract,
storage_read: Self::wrap_storage_read,
storage_write: Self::wrap_storage_write,
emit_event: Self::wrap_emit_event,
send_message_to_l1: Self::wrap_send_message_to_l1,
keccak: Self::wrap_keccak,
secp256k1_new: Self::wrap_secp256k1_new,
secp256k1_add: Self::wrap_secp256k1_add,
secp256k1_mul: Self::wrap_secp256k1_mul,
secp256k1_get_point_from_x: Self::wrap_secp256k1_get_point_from_x,
secp256k1_get_xy: Self::wrap_secp256k1_get_xy,
secp256r1_new: Self::wrap_secp256r1_new,
secp256r1_add: Self::wrap_secp256r1_add,
secp256r1_mul: Self::wrap_secp256r1_mul,
secp256r1_get_point_from_x: Self::wrap_secp256r1_get_point_from_x,
secp256r1_get_xy: Self::wrap_secp256r1_get_xy,
sha256_process_block: Self::wrap_sha256_process_block,
get_class_hash_at: Self::wrap_get_class_hash_at,
meta_tx_v0: Self::wrap_meta_tx_v0,
#[cfg(feature = "with-cheatcode")]
cheatcode: Self::wrap_cheatcode,
}
}
unsafe fn alloc_mlir_array<E: Clone>(data: &[E]) -> ArrayAbi<E> {
match data.len() {
0 => ArrayAbi {
ptr: null_mut(),
since: 0,
until: 0,
capacity: 0,
},
_ => {
let refcount_offset =
crate::types::array::calc_data_prefix_offset(Layout::new::<E>());
let ptr = libc_malloc(
Layout::array::<E>(data.len()).unwrap().size() + refcount_offset,
) as *mut E;
let len: u32 = data.len().try_into().unwrap();
ptr.cast::<u32>().write(1);
ptr.byte_add(size_of::<u32>()).cast::<u32>().write(len);
let ptr = ptr.byte_add(refcount_offset);
for (i, val) in data.iter().enumerate() {
ptr.add(i).write(val.clone());
}
let ptr_ptr = libc_malloc(size_of::<*mut ()>()).cast::<*mut E>();
ptr_ptr.write(ptr);
ArrayAbi {
ptr: ptr_ptr,
since: 0,
until: len,
capacity: len,
}
}
}
}
unsafe fn drop_mlir_array<E>(data: &ArrayAbi<E>) {
if data.ptr.is_null() {
return;
}
let refcount_offset = crate::types::array::calc_data_prefix_offset(Layout::new::<E>());
let ptr = data.ptr.read().byte_sub(refcount_offset);
match ptr.cast::<u32>().read() {
1 => {
libc_free(ptr.cast());
libc_free(data.ptr.cast());
}
n => ptr.cast::<u32>().write(n - 1),
}
}
fn wrap_error<E>(e: &[Felt]) -> SyscallResultAbi<E> {
SyscallResultAbi {
err: ManuallyDrop::new(SyscallResultAbiErr {
tag: 1u8,
payload: unsafe {
let data: Vec<_> = e.iter().map(|x| Felt252Abi(x.to_bytes_le())).collect();
Self::alloc_mlir_array(&data)
},
}),
}
}
extern "C" fn wrap_get_block_hash(
result_ptr: &mut SyscallResultAbi<Felt252Abi>,
ptr: &mut T,
gas: &mut u64,
block_number: u64,
) {
let result = ptr.get_block_hash(block_number, gas);
*result_ptr = match result {
Ok(x) => SyscallResultAbi {
ok: ManuallyDrop::new(SyscallResultAbiOk {
tag: 0u8,
payload: ManuallyDrop::new(Felt252Abi(x.to_bytes_le())),
}),
},
Err(e) => Self::wrap_error(&e),
};
}
extern "C" fn wrap_get_execution_info(
| rust | Apache-2.0 | f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce | 2026-01-04T20:20:54.031924Z | true |
lambdaclass/cairo_native | https://github.com/lambdaclass/cairo_native/blob/f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce/src/libfuncs.rs | src/libfuncs.rs | //! # Compiler libfunc infrastructure
//!
//! Contains libfunc generation stuff (aka. the actual instructions).
use crate::{
error::{panic::ToNativeAssertError, Error as CoreLibfuncBuilderError, Result},
metadata::MetadataStorage,
native_panic,
types::TypeBuilder,
utils::ProgramRegistryExt,
};
use bumpalo::Bump;
use cairo_lang_sierra::{
extensions::{
core::{CoreConcreteLibfunc, CoreLibfunc, CoreType, CoreTypeConcrete},
int::{
signed::{Sint16Traits, Sint32Traits, Sint64Traits, Sint8Traits},
unsigned::{Uint16Traits, Uint32Traits, Uint64Traits, Uint8Traits},
},
lib_func::{BranchSignature, ParamSignature},
starknet::StarknetTypeConcrete,
ConcreteLibfunc,
},
ids::FunctionId,
program_registry::ProgramRegistry,
};
use itertools::Itertools;
use melior::{
dialect::{arith, cf, llvm, ods},
helpers::{ArithBlockExt, BuiltinBlockExt, LlvmBlockExt},
ir::{
attribute::{FlatSymbolRefAttribute, StringAttribute, TypeAttribute},
operation::OperationBuilder,
r#type::IntegerType,
Attribute, Block, BlockLike, BlockRef, Location, Module, Region, Value,
},
Context,
};
use num_bigint::BigInt;
use std::{
cell::Cell,
error::Error,
ops::Deref,
sync::atomic::{AtomicBool, Ordering},
};
mod array;
mod r#bool;
mod bounded_int;
mod r#box;
mod bytes31;
mod cast;
mod circuit;
mod r#const;
mod coupon;
mod debug;
mod drop;
mod dup;
mod ec;
mod r#enum;
mod felt252;
mod felt252_dict;
mod felt252_dict_entry;
mod function_call;
mod gas;
mod gas_reserve;
mod int;
mod int_range;
mod mem;
mod nullable;
mod pedersen;
mod poseidon;
mod starknet;
mod r#struct;
mod uint256;
mod uint512;
/// Generation of MLIR operations from their Sierra counterparts.
///
/// All possible Sierra libfuncs must implement it. It is already implemented for all the core
/// libfuncs, contained in [CoreConcreteLibfunc].
pub trait LibfuncBuilder {
/// Error type returned by this trait's methods.
type Error: Error;
/// Generate the MLIR operations.
fn build<'ctx, 'this>(
&self,
context: &'ctx Context,
registry: &ProgramRegistry<CoreType, CoreLibfunc>,
entry: &'this Block<'ctx>,
location: Location<'ctx>,
helper: &LibfuncHelper<'ctx, 'this>,
metadata: &mut MetadataStorage,
) -> Result<()>;
/// Return the target function if the statement is a function call.
///
/// This is used by the compiler to check whether a statement is a function call and apply the
/// tail recursion logic.
fn is_function_call(&self) -> Option<&FunctionId>;
}
impl LibfuncBuilder for CoreConcreteLibfunc {
type Error = CoreLibfuncBuilderError;
fn build<'ctx, 'this>(
&self,
context: &'ctx Context,
registry: &ProgramRegistry<CoreType, CoreLibfunc>,
entry: &'this Block<'ctx>,
location: Location<'ctx>,
helper: &LibfuncHelper<'ctx, 'this>,
metadata: &mut MetadataStorage,
) -> Result<()> {
match self {
Self::ApTracking(_) | Self::BranchAlign(_) | Self::UnconditionalJump(_) => {
build_noop::<0, false>(
context,
registry,
entry,
location,
helper,
metadata,
self.param_signatures(),
)
}
Self::Array(selector) => self::array::build(
context, registry, entry, location, helper, metadata, selector,
),
Self::Bool(selector) => self::r#bool::build(
context, registry, entry, location, helper, metadata, selector,
),
Self::BoundedInt(info) => {
self::bounded_int::build(context, registry, entry, location, helper, metadata, info)
}
Self::Box(selector) => self::r#box::build(
context, registry, entry, location, helper, metadata, selector,
),
Self::Bytes31(selector) => self::bytes31::build(
context, registry, entry, location, helper, metadata, selector,
),
Self::Cast(selector) => self::cast::build(
context, registry, entry, location, helper, metadata, selector,
),
Self::Circuit(info) => {
self::circuit::build(context, registry, entry, location, helper, metadata, info)
}
Self::Const(selector) => self::r#const::build(
context, registry, entry, location, helper, metadata, selector,
),
Self::Coupon(selector) => self::coupon::build(
context, registry, entry, location, helper, metadata, selector,
),
Self::CouponCall(info) => self::function_call::build(
context, registry, entry, location, helper, metadata, info,
),
Self::Debug(selector) => self::debug::build(
context, registry, entry, location, helper, metadata, selector,
),
Self::Trace(_) => native_panic!("Implement trace libfunc"),
Self::Drop(info) => {
self::drop::build(context, registry, entry, location, helper, metadata, info)
}
Self::Dup(info) | Self::SnapshotTake(info) => {
self::dup::build(context, registry, entry, location, helper, metadata, info)
}
Self::Ec(selector) => self::ec::build(
context, registry, entry, location, helper, metadata, selector,
),
Self::Enum(selector) => self::r#enum::build(
context, registry, entry, location, helper, metadata, selector,
),
Self::Felt252(selector) => self::felt252::build(
context, registry, entry, location, helper, metadata, selector,
),
Self::Felt252Dict(selector) => self::felt252_dict::build(
context, registry, entry, location, helper, metadata, selector,
),
Self::Felt252SquashedDict(_) => {
native_panic!("Implement felt252_squashed_dict libfunc")
}
Self::Felt252DictEntry(selector) => self::felt252_dict_entry::build(
context, registry, entry, location, helper, metadata, selector,
),
Self::FunctionCall(info) => self::function_call::build(
context, registry, entry, location, helper, metadata, info,
),
Self::Gas(selector) => self::gas::build(
context, registry, entry, location, helper, metadata, selector,
),
Self::IntRange(selector) => self::int_range::build(
context, registry, entry, location, helper, metadata, selector,
),
Self::Blake(_) => native_panic!("Implement blake libfunc"),
Self::Mem(selector) => self::mem::build(
context, registry, entry, location, helper, metadata, selector,
),
Self::Nullable(selector) => self::nullable::build(
context, registry, entry, location, helper, metadata, selector,
),
Self::Pedersen(selector) => self::pedersen::build(
context, registry, entry, location, helper, metadata, selector,
),
Self::Poseidon(selector) => self::poseidon::build(
context, registry, entry, location, helper, metadata, selector,
),
Self::Sint8(selector) => self::int::build_signed::<Sint8Traits>(
context, registry, entry, location, helper, metadata, selector,
),
Self::Sint16(selector) => self::int::build_signed::<Sint16Traits>(
context, registry, entry, location, helper, metadata, selector,
),
Self::Sint32(selector) => self::int::build_signed::<Sint32Traits>(
context, registry, entry, location, helper, metadata, selector,
),
Self::Sint64(selector) => self::int::build_signed::<Sint64Traits>(
context, registry, entry, location, helper, metadata, selector,
),
Self::Sint128(selector) => self::int::build_i128(
context, registry, entry, location, helper, metadata, selector,
),
Self::Starknet(selector) => self::starknet::build(
context, registry, entry, location, helper, metadata, selector,
),
Self::Struct(selector) => self::r#struct::build(
context, registry, entry, location, helper, metadata, selector,
),
Self::Uint8(selector) => self::int::build_unsigned::<Uint8Traits>(
context, registry, entry, location, helper, metadata, selector,
),
Self::Uint16(selector) => self::int::build_unsigned::<Uint16Traits>(
context, registry, entry, location, helper, metadata, selector,
),
Self::Uint32(selector) => self::int::build_unsigned::<Uint32Traits>(
context, registry, entry, location, helper, metadata, selector,
),
Self::Uint64(selector) => self::int::build_unsigned::<Uint64Traits>(
context, registry, entry, location, helper, metadata, selector,
),
Self::Uint128(selector) => self::int::build_u128(
context, registry, entry, location, helper, metadata, selector,
),
Self::Uint256(selector) => self::uint256::build(
context, registry, entry, location, helper, metadata, selector,
),
Self::Uint512(selector) => self::uint512::build(
context, registry, entry, location, helper, metadata, selector,
),
Self::UnwrapNonZero(info) => build_noop::<1, false>(
context,
registry,
entry,
location,
helper,
metadata,
&info.signature.param_signatures,
),
Self::GasReserve(selector) => self::gas_reserve::build(
context, registry, entry, location, helper, metadata, selector,
),
Self::QM31(_) => native_panic!("Implement QM31 libfunc"),
Self::UnsafePanic(_) => native_panic!("Implement unsafe_panic libfunc"),
Self::DummyFunctionCall(_) => native_panic!("Implement dummy_function_call libfunc"),
}
}
fn is_function_call(&self) -> Option<&FunctionId> {
match self {
CoreConcreteLibfunc::FunctionCall(info) => Some(&info.function.id),
CoreConcreteLibfunc::CouponCall(info) => Some(&info.function.id),
_ => None,
}
}
}
/// Helper struct which contains logic generation for extra MLIR blocks and branch operations to the
/// next statements.
///
/// Each branch index should be present in exactly one call a branching method (either
/// [`br`](#method.br) or [`cond_br`](#method.cond_br)).
///
/// This helper is necessary because the statement following the current one may not have the same
/// arguments as the results returned by the current statement. Because of that, a direct jump
/// cannot be made and some processing is required.
pub struct LibfuncHelper<'ctx, 'this>
where
'this: 'ctx,
{
pub module: &'this Module<'ctx>,
pub init_block: &'this BlockRef<'ctx, 'this>,
pub region: &'this Region<'ctx>,
pub blocks_arena: &'this Bump,
pub last_block: Cell<&'this BlockRef<'ctx, 'this>>,
pub branches: Vec<(&'this Block<'ctx>, Vec<BranchArg<'ctx, 'this>>)>,
pub results: Vec<Vec<Cell<Option<Value<'ctx, 'this>>>>>,
#[cfg(feature = "with-libfunc-profiling")]
// Since function calls don't get profiled, this field is optional
pub profiler: Option<(
crate::metadata::profiler::ProfilerMeta,
cairo_lang_sierra::program::StatementIdx,
(Value<'ctx, 'this>, Value<'ctx, 'this>),
)>,
}
impl<'ctx, 'this> LibfuncHelper<'ctx, 'this>
where
'this: 'ctx,
{
#[doc(hidden)]
pub(crate) fn results(self) -> Result<Vec<Vec<Value<'ctx, 'this>>>> {
self.results
.into_iter()
.enumerate()
.map(|(branch_idx, x)| {
x.into_iter()
.enumerate()
.map(|(arg_idx, x)| {
x.into_inner().to_native_assert_error(&format!(
"Argument #{arg_idx} of branch {branch_idx} doesn't have a value."
))
})
.collect()
})
.collect()
}
/// Return the initialization block.
///
/// The init block is used for `llvm.alloca` instructions. It is guaranteed to not be executed
/// multiple times on tail-recursive functions. This property allows generating tail-recursive
/// functions that do not grow the stack.
pub fn init_block(&self) -> &'this Block<'ctx> {
self.init_block
}
/// Inserts a new block after all the current libfunc's blocks.
pub fn append_block(&self, block: Block<'ctx>) -> &'this Block<'ctx> {
let block = self
.region
.insert_block_after(*self.last_block.get(), block);
let block_ref: &'this mut BlockRef<'ctx, 'this> = self.blocks_arena.alloc(block);
self.last_block.set(block_ref);
block_ref
}
/// Creates an unconditional branching operation out of the libfunc and into the next statement.
///
/// This method will also store the returned values so that they can be moved into the state and
/// used later on when required.
fn br(
&self,
block: &'this Block<'ctx>,
branch: usize,
results: &[Value<'ctx, 'this>],
location: Location<'ctx>,
) -> Result<()> {
let (successor, operands) = &self.branches[branch];
for (dst, src) in self.results[branch].iter().zip(results) {
dst.replace(Some(*src));
}
let destination_operands = operands
.iter()
.copied()
.map(|op| match op {
BranchArg::External(x) => x,
BranchArg::Returned(i) => results[i],
})
.collect::<Vec<_>>();
#[cfg(feature = "with-libfunc-profiling")]
self.push_profiler_frame(
unsafe { self.context().to_ref() },
self.module,
block,
location,
)?;
block.append_operation(cf::br(successor, &destination_operands, location));
Ok(())
}
/// Creates a conditional binary branching operation, potentially jumping out of the libfunc and
/// into the next statement.
///
/// While generating a `cond_br` that doesn't jump out of the libfunc is possible, it should be
/// avoided whenever possible. In those cases just use [melior::dialect::cf::cond_br].
///
/// This method will also store the returned values so that they can be moved into the state and
/// used later on when required.
// TODO: Allow one block to be libfunc-internal.
fn cond_br(
&self,
context: &'ctx Context,
block: &'this Block<'ctx>,
condition: Value<'ctx, 'this>,
branches: [usize; 2],
results: [&[Value<'ctx, 'this>]; 2],
location: Location<'ctx>,
) -> Result<()> {
let (block_true, args_true) = {
let (successor, operands) = &self.branches[branches[0]];
for (dst, src) in self.results[branches[0]].iter().zip(results[0]) {
dst.replace(Some(*src));
}
let destination_operands = operands
.iter()
.copied()
.map(|op| match op {
BranchArg::External(x) => x,
BranchArg::Returned(i) => results[0][i],
})
.collect::<Vec<_>>();
(*successor, destination_operands)
};
let (block_false, args_false) = {
let (successor, operands) = &self.branches[branches[1]];
for (dst, src) in self.results[branches[1]].iter().zip(results[1]) {
dst.replace(Some(*src));
}
let destination_operands = operands
.iter()
.copied()
.map(|op| match op {
BranchArg::External(x) => x,
BranchArg::Returned(i) => results[1][i],
})
.collect::<Vec<_>>();
(*successor, destination_operands)
};
#[cfg(feature = "with-libfunc-profiling")]
self.push_profiler_frame(context, self.module, block, location)?;
block.append_operation(cf::cond_br(
context,
condition,
block_true,
block_false,
&args_true,
&args_false,
location,
));
Ok(())
}
#[cfg(feature = "with-libfunc-profiling")]
fn push_profiler_frame(
&self,
context: &'ctx Context,
module: &'this Module,
block: &'this Block<'ctx>,
location: Location<'ctx>,
) -> Result<()> {
if let Some((profiler_meta, statement_idx, t0)) = self.profiler.as_ref() {
let t0 = *t0;
let t1 = profiler_meta.measure_timestamp(context, block, location)?;
profiler_meta.push_frame(context, module, block, statement_idx.0, t0, t1, location)?;
}
Ok(())
}
}
impl<'ctx> Deref for LibfuncHelper<'ctx, '_> {
type Target = Module<'ctx>;
fn deref(&self) -> &Self::Target {
self.module
}
}
#[derive(Clone, Copy, Debug)]
pub enum BranchArg<'ctx, 'this> {
External(Value<'ctx, 'this>),
Returned(usize),
}
fn increment_builtin_counter<'ctx: 'a, 'a>(
context: &'ctx Context,
block: &'ctx Block<'ctx>,
location: Location<'ctx>,
value: Value<'ctx, '_>,
) -> crate::error::Result<Value<'ctx, 'a>> {
increment_builtin_counter_by(context, block, location, value, 1)
}
fn increment_builtin_counter_by<'ctx: 'a, 'a>(
context: &'ctx Context,
block: &'ctx Block<'ctx>,
location: Location<'ctx>,
value: Value<'ctx, '_>,
amount: impl Into<BigInt>,
) -> crate::error::Result<Value<'ctx, 'a>> {
Ok(block.append_op_result(arith::addi(
value,
block.const_int(context, location, amount.into(), 64)?,
location,
))?)
}
fn increment_builtin_counter_conditionally_by<'ctx: 'a, 'a>(
context: &'ctx Context,
block: &'ctx Block<'ctx>,
location: Location<'ctx>,
value_to_inc: Value<'ctx, '_>,
true_amount: impl Into<BigInt>,
false_amount: impl Into<BigInt>,
condition: Value<'ctx, '_>,
) -> crate::error::Result<Value<'ctx, 'a>> {
let true_amount_value = block.const_int(context, location, true_amount.into(), 64)?;
let false_amount_value = block.const_int(context, location, false_amount.into(), 64)?;
let true_incremented =
block.append_op_result(arith::addi(value_to_inc, true_amount_value, location))?;
let false_incremented =
block.append_op_result(arith::addi(value_to_inc, false_amount_value, location))?;
Ok(block.append_op_result(arith::select(
condition,
true_incremented,
false_incremented,
location,
))?)
}
fn build_noop<'ctx, 'this, const N: usize, const PROCESS_BUILTINS: bool>(
context: &'ctx Context,
registry: &ProgramRegistry<CoreType, CoreLibfunc>,
entry: &'this Block<'ctx>,
location: Location<'ctx>,
helper: &LibfuncHelper<'ctx, 'this>,
_metadata: &mut MetadataStorage,
param_signatures: &[ParamSignature],
) -> Result<()> {
let mut params = Vec::with_capacity(N);
#[allow(clippy::needless_range_loop)]
for i in 0..N {
let param_ty = registry.get_type(¶m_signatures[i].ty)?;
let mut param_val = entry.argument(i)?.into();
if PROCESS_BUILTINS
&& param_ty.is_builtin()
&& !matches!(
param_ty,
CoreTypeConcrete::BuiltinCosts(_)
| CoreTypeConcrete::Coupon(_)
| CoreTypeConcrete::GasBuiltin(_)
| CoreTypeConcrete::Starknet(StarknetTypeConcrete::System(_))
)
{
param_val = increment_builtin_counter(context, entry, location, param_val)?;
}
params.push(param_val);
}
helper.br(entry, 0, ¶ms, location)
}
/// This function builds a fake libfunc implementation, by mocking a call to a
/// runtime function.
///
/// Useful to trick MLIR into thinking that it cannot optimize an unimplemented libfunc.
///
/// This function is for debugging only, and should never be used.
#[allow(dead_code)]
pub fn build_mock_libfunc<'ctx, 'this>(
context: &'ctx Context,
registry: &ProgramRegistry<CoreType, CoreLibfunc>,
entry: &'this Block<'ctx>,
location: Location<'ctx>,
helper: &LibfuncHelper<'ctx, 'this>,
metadata: &mut MetadataStorage,
branch_signatures: &[BranchSignature],
) -> Result<()> {
let mut args = Vec::new();
for arg_idx in 0..entry.argument_count() {
args.push(entry.arg(arg_idx)?);
}
let flag_type = IntegerType::new(context, 8).into();
let ptr_type = llvm::r#type::pointer(context, 0);
let result_type = llvm::r#type::r#struct(context, &[flag_type, ptr_type], false);
// Mock a runtime call, and pass all libfunc arguments.
let result_ptr = build_mock_runtime_call(context, helper, entry, &args, location)?;
// We read the result as a structure, with a flag and a pointer.
// The flag determines which libfunc branch should we jump to.
let result = entry.load(context, location, result_ptr, result_type)?;
let flag = entry.extract_value(context, location, result, flag_type, 0)?;
let payload_ptr = entry.extract_value(context, location, result, ptr_type, 1)?;
let branches_idxs = (0..branch_signatures.len()).collect_vec();
// We will build one block per branch + a default block, and will use the
// flag to determine to which block to jump to.
// We assume that the flag is within the number of branches
// So the default block will be unreachable.
let default_block = {
let block = helper.append_block(Block::new(&[]));
block.append_operation(llvm::unreachable(location));
block
};
// For each branch, we build a block that will build the return arguments.
let mut destinations = Vec::new();
for &branch_idx in &branches_idxs {
let block = helper.append_block(Block::new(&[]));
// We build all the required types.
let mut branch_types = Vec::new();
for branch_var in &branch_signatures[branch_idx].vars {
let branch_var_type = registry.build_type(context, helper, metadata, &branch_var.ty)?;
branch_types.push(branch_var_type);
}
// The runtime call payload will be interpreted as a structure with as
// many pointers as there are output variables.
let branch_type = llvm::r#type::r#struct(
context,
&(0..branch_types.len()).map(|_| ptr_type).collect_vec(),
false,
);
let branch_result = block.load(context, location, payload_ptr, branch_type)?;
// We load each pointer to get the actual value we want to return.
let mut branch_results = Vec::new();
for (var_idx, var_type) in branch_types.iter().enumerate() {
let var_ptr =
block.extract_value(context, location, branch_result, ptr_type, var_idx)?;
let var = block.load(context, location, var_ptr, *var_type)?;
branch_results.push(var);
}
// We jump to the target branch.
helper.br(block, branch_idx, &branch_results, location)?;
let operands: &[Value] = &[];
destinations.push((block, operands));
}
// Switch to the target block according to the flag.
entry.append_operation(cf::switch(
context,
&branches_idxs.iter().map(|&x| x as i64).collect_vec(),
flag,
flag_type,
(default_block, &[]),
&destinations[..],
location,
)?);
Ok(())
}
/// This function builds a fake call to a runtime variable.
///
/// Useful to trick MLIR into thinking that it cannot optimize an unimplemented feature.
///
/// This function is for debugging only, and should never be used.
#[allow(dead_code)]
pub fn build_mock_runtime_call<'c, 'a>(
context: &'c Context,
module: &Module,
block: &'a Block<'c>,
args: &[Value<'c, 'a>],
location: Location<'c>,
) -> Result<Value<'c, 'a>> {
let ptr_type = llvm::r#type::pointer(context, 0);
// First, declare the global if not declared.
// This should be added to the `RuntimeBindings` metadata, to ensure that
// it is declared once per module. Here we use a static for simplicity, but
// will fail if a single process is used to compile multiple modules.
static MOCK_RUNTIME_SYMBOL_DECLARED: AtomicBool = AtomicBool::new(false);
if !MOCK_RUNTIME_SYMBOL_DECLARED.swap(true, Ordering::Relaxed) {
module.body().append_operation(
ods::llvm::mlir_global(
context,
Region::new(),
TypeAttribute::new(ptr_type),
StringAttribute::new(context, "cairo_native__mock"),
Attribute::parse(context, "#llvm.linkage<weak>")
.ok_or(CoreLibfuncBuilderError::ParseAttributeError)?,
location,
)
.into(),
);
}
// Obtain a pointer to the global. The global would contain a pointer to a function.
let function_ptr_ptr = block.append_op_result(
ods::llvm::mlir_addressof(
context,
ptr_type,
FlatSymbolRefAttribute::new(context, "cairo_native__mock"),
location,
)
.into(),
)?;
// Load the function pointer, and call the function
let function_ptr = block.load(context, location, function_ptr_ptr, ptr_type)?;
let result = block.append_op_result(
OperationBuilder::new("llvm.call", location)
.add_operands(&[function_ptr])
.add_operands(args)
.add_results(&[llvm::r#type::pointer(context, 0)])
.build()?,
)?;
Ok(result)
}
| rust | Apache-2.0 | f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce | 2026-01-04T20:20:54.031924Z | false |
lambdaclass/cairo_native | https://github.com/lambdaclass/cairo_native/blob/f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce/src/ffi.rs | src/ffi.rs | //! # FFI Wrappers
//!
//! This is a "hotfix" for missing Rust interfaces to the C/C++ libraries we use, namely LLVM/MLIR
//! APIs that are missing from melior.
use crate::{
error::{panic::ToNativeAssertError, Error, Result},
statistics::Statistics,
utils::walk_ir::walk_llvm_instructions,
};
use llvm_sys::{
core::{
LLVMContextCreate, LLVMContextDispose, LLVMDisposeMemoryBuffer, LLVMDisposeMessage,
LLVMDisposeModule, LLVMGetBufferSize, LLVMGetBufferStart, LLVMGetFirstUse,
LLVMGetInstructionOpcode,
},
error::LLVMGetErrorMessage,
prelude::LLVMMemoryBufferRef,
target::{
LLVM_InitializeAllAsmParsers, LLVM_InitializeAllAsmPrinters, LLVM_InitializeAllTargetInfos,
LLVM_InitializeAllTargetMCs, LLVM_InitializeAllTargets,
},
target_machine::{
LLVMCodeGenFileType, LLVMCodeGenOptLevel, LLVMCodeModel, LLVMCreateTargetMachine,
LLVMDisposeTargetMachine, LLVMGetDefaultTargetTriple, LLVMGetHostCPUFeatures,
LLVMGetHostCPUName, LLVMGetTargetFromTriple, LLVMRelocMode,
LLVMTargetMachineEmitToMemoryBuffer, LLVMTargetRef,
},
transforms::pass_builder::{
LLVMCreatePassBuilderOptions, LLVMDisposePassBuilderOptions, LLVMRunPasses,
},
};
use melior::ir::{Module, Type, TypeLike};
use mlir_sys::{mlirLLVMStructTypeGetElementType, mlirTranslateModuleToLLVMIR};
use std::{
borrow::Cow,
ffi::{CStr, CString},
io::Write,
mem::MaybeUninit,
path::Path,
ptr::{addr_of_mut, null_mut},
sync::OnceLock,
time::Instant,
};
use tempfile::NamedTempFile;
use tracing::trace;
/// For any `!llvm.struct<...>` type, return the MLIR type of the field at the requested index.
pub fn get_struct_field_type_at<'c>(r#type: &Type<'c>, index: usize) -> Type<'c> {
assert!(r#type.is_llvm_struct_type());
unsafe {
Type::from_raw(mlirLLVMStructTypeGetElementType(
r#type.to_raw(),
index as isize,
))
}
}
/// Optimization levels.
#[derive(Clone, Copy, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub enum OptLevel {
None,
Less,
#[default]
Default,
Aggressive,
}
impl From<usize> for OptLevel {
fn from(value: usize) -> Self {
match value {
0 => OptLevel::None,
1 => OptLevel::Less,
2 => OptLevel::Default,
_ => OptLevel::Aggressive,
}
}
}
impl From<OptLevel> for usize {
fn from(val: OptLevel) -> Self {
match val {
OptLevel::None => 0,
OptLevel::Less => 1,
OptLevel::Default => 2,
OptLevel::Aggressive => 3,
}
}
}
impl From<u8> for OptLevel {
fn from(value: u8) -> Self {
match value {
0 => OptLevel::None,
1 => OptLevel::Less,
2 => OptLevel::Default,
_ => OptLevel::Aggressive,
}
}
}
/// Converts a MLIR module to a compile object, that can be linked with a linker.
pub fn module_to_object(
module: &Module<'_>,
opt_level: OptLevel,
stats: Option<&mut Statistics>,
) -> Result<Vec<u8>> {
static INITIALIZED: OnceLock<()> = OnceLock::new();
INITIALIZED.get_or_init(|| unsafe {
LLVM_InitializeAllTargets();
LLVM_InitializeAllTargetInfos();
LLVM_InitializeAllTargetMCs();
LLVM_InitializeAllAsmPrinters();
LLVM_InitializeAllAsmParsers();
});
unsafe {
let llvm_context = LLVMContextCreate();
let op = module.as_operation().to_raw();
let pre_mlir_to_llvm_instant = Instant::now();
let llvm_module = mlirTranslateModuleToLLVMIR(op, llvm_context as *mut _) as *mut _;
let mlir_to_llvm_time = pre_mlir_to_llvm_instant.elapsed().as_millis();
if let Some(&mut ref mut stats) = stats {
stats.compilation_mlir_to_llvm_time_ms = Some(mlir_to_llvm_time);
}
if let Some(&mut ref mut stats) = stats {
let mut llvmir_instruction_count = 0;
let mut llvmir_virtual_register_count = 0;
walk_llvm_instructions(llvm_module, |instruction| {
// Increase total instruction count.
llvmir_instruction_count += 1;
// Debug string looks like "LLVM{OP}".
let full_opcode = format!("{:?}", LLVMGetInstructionOpcode(instruction));
// Strip leading "LLVM".
let opcode = full_opcode
.strip_prefix("LLVM")
.map(str::to_string)
.unwrap_or(full_opcode);
// Update opcode frequency map.
*stats.llvmir_opcode_frequency.entry(opcode).or_insert(0) += 1;
// Increase virtual register count, only if the
// instruction value is used somewhere.
let first_use = LLVMGetFirstUse(instruction);
if !first_use.is_null() {
llvmir_virtual_register_count += 1;
}
});
stats.llvmir_instruction_count = Some(llvmir_instruction_count);
stats.llvmir_virtual_register_count = Some(llvmir_virtual_register_count)
}
let mut null = null_mut();
let mut error_buffer = addr_of_mut!(null);
let target_triple = LLVMGetDefaultTargetTriple();
let target_cpu = LLVMGetHostCPUName();
let target_cpu_features = LLVMGetHostCPUFeatures();
let mut target: MaybeUninit<LLVMTargetRef> = MaybeUninit::uninit();
if LLVMGetTargetFromTriple(target_triple, target.as_mut_ptr(), error_buffer) != 0 {
let error = CStr::from_ptr(*error_buffer);
let err = error.to_string_lossy().to_string();
LLVMDisposeMessage(*error_buffer);
Err(Error::LLVMCompileError(err))?;
} else if !(*error_buffer).is_null() {
LLVMDisposeMessage(*error_buffer);
error_buffer = addr_of_mut!(null);
}
let target = target.assume_init();
let machine = LLVMCreateTargetMachine(
target,
target_triple.cast(),
target_cpu.cast(),
target_cpu_features.cast(),
match opt_level {
OptLevel::None => LLVMCodeGenOptLevel::LLVMCodeGenLevelNone,
OptLevel::Less => LLVMCodeGenOptLevel::LLVMCodeGenLevelLess,
OptLevel::Default => LLVMCodeGenOptLevel::LLVMCodeGenLevelDefault,
OptLevel::Aggressive => LLVMCodeGenOptLevel::LLVMCodeGenLevelAggressive,
},
LLVMRelocMode::LLVMRelocPIC,
LLVMCodeModel::LLVMCodeModelDefault,
);
let opts = LLVMCreatePassBuilderOptions();
let opt = match opt_level {
OptLevel::None => 0,
OptLevel::Less => 1,
// slp-vectorizer pass did cause some issues, but after the change
// on function attributes it seems to not trigger them anymore.
// https://github.com/llvm/llvm-project/issues/107198
OptLevel::Default => 2,
OptLevel::Aggressive => 3,
};
let passes = CString::new(format!("default<O{opt}>"))
.to_native_assert_error("only fails if the hardcoded string contains a null byte")?;
let pre_llvm_passes_instant = Instant::now();
let error = LLVMRunPasses(llvm_module, passes.as_ptr(), machine, opts);
let llvm_passes_time = pre_llvm_passes_instant.elapsed().as_millis();
if let Some(&mut ref mut stats) = stats {
stats.compilation_llvm_passes_time_ms = Some(llvm_passes_time);
}
if !error.is_null() {
let msg = LLVMGetErrorMessage(error);
let msg = CStr::from_ptr(msg);
Err(Error::LLVMCompileError(msg.to_string_lossy().into_owned()))?;
}
LLVMDisposePassBuilderOptions(opts);
let mut out_buf: MaybeUninit<LLVMMemoryBufferRef> = MaybeUninit::uninit();
trace!("starting llvm to object compilation");
let pre_llvm_to_object_instant = Instant::now();
let ok = LLVMTargetMachineEmitToMemoryBuffer(
machine,
llvm_module,
LLVMCodeGenFileType::LLVMObjectFile,
error_buffer,
out_buf.as_mut_ptr(),
);
let llvm_to_object_time = pre_llvm_to_object_instant.elapsed().as_millis();
if let Some(&mut ref mut stats) = stats {
stats.compilation_llvm_to_object_time_ms = Some(llvm_to_object_time);
}
if ok != 0 {
let error = CStr::from_ptr(*error_buffer);
let err = error.to_string_lossy().to_string();
LLVMDisposeMessage(*error_buffer);
Err(Error::LLVMCompileError(err))?;
} else if !(*error_buffer).is_null() {
LLVMDisposeMessage(*error_buffer);
}
let out_buf = out_buf.assume_init();
let out_buf_start: *const u8 = LLVMGetBufferStart(out_buf).cast();
let out_buf_size = LLVMGetBufferSize(out_buf);
// keep it in rust side
let data = std::slice::from_raw_parts(out_buf_start, out_buf_size).to_vec();
LLVMDisposeMemoryBuffer(out_buf);
LLVMDisposeTargetMachine(machine);
LLVMDisposeModule(llvm_module);
LLVMContextDispose(llvm_context);
Ok(data)
}
}
/// Links the passed object into a shared library, stored on the given path.
///
/// The object bytes are written to a temporary file (the system linker cannot
/// read an object from stdin) and `ld` is invoked with platform-specific
/// arguments. On success the shared library is written to `output_filename`.
///
/// If `stats` is provided, the wall-clock time spent in the linker is recorded
/// in `compilation_linking_time_ms`.
///
/// # Errors
/// Returns `Error::LinkError` carrying the linker's stderr when `ld` exits
/// with a non-zero status; I/O errors from the temporary file are propagated.
pub fn object_to_shared_lib(
    object: &[u8],
    output_filename: &Path,
    stats: Option<&mut Statistics>,
) -> Result<()> {
    // linker seems to need a file and doesn't accept stdin
    let mut file = NamedTempFile::new()?;
    file.write_all(object)?;
    let file = file.into_temp_path();
    let file_path = file.display().to_string();
    let output_path = output_filename.display().to_string();
    if let Ok(x) = std::env::var("NATIVE_DEBUG_DUMP") {
        if x == "1" || x == "true" {
            // forget so the temp file is not deleted and the debugger can load it.
            // its still in a temp file directory so eventually the OS will delete it, but just not instantly.
            // todo: maybe remove it when exiting, for example using atexit.
            std::mem::forget(file);
        }
    }
    // Build the linker invocation. The argument sets differ per platform and
    // their order matters to `ld`, so they are kept as explicit lists.
    let args: Vec<Cow<'static, str>> = {
        #[cfg(target_os = "macos")]
        {
            let mut args: Vec<Cow<'static, str>> = vec![
                "-demangle".into(),
                "-no_deduplicate".into(),
                "-dynamic".into(),
                "-dylib".into(),
                "-L/usr/local/lib".into(),
                "-L/Library/Developer/CommandLineTools/SDKs/MacOSX.sdk/usr/lib".into(),
            ];
            args.extend([
                Cow::from(file_path),
                "-o".into(),
                Cow::from(output_path),
                "-lSystem".into(),
            ]);
            args
        }
        #[cfg(target_os = "linux")]
        {
            let mut args: Vec<Cow<'static, str>> = vec![
                "--hash-style=gnu".into(),
                "-shared".into(),
                "-L/lib/../lib64".into(),
                "-L/usr/lib/../lib64".into(),
            ];
            args.extend([
                "-o".into(),
                Cow::from(output_path),
                "-lc".into(),
                Cow::from(file_path),
            ]);
            args
        }
        #[cfg(target_os = "windows")]
        {
            unimplemented!()
        }
    };
    let mut linker = std::process::Command::new("ld");
    // Time only the external linker invocation.
    let pre_linking_instant = Instant::now();
    let proc = linker.args(args.iter().map(|x| x.as_ref())).output()?;
    let linking_time = pre_linking_instant.elapsed().as_millis();
    // `&mut ref mut` reborrows the inner `&mut Statistics` instead of moving it.
    if let Some(&mut ref mut stats) = stats {
        stats.compilation_linking_time_ms = Some(linking_time);
    }
    if proc.status.success() {
        Ok(())
    } else {
        // Surface the linker diagnostics to the caller.
        let msg = String::from_utf8_lossy(&proc.stderr);
        Err(Error::LinkError(msg.to_string()))
    }
}
/// Gets the target triple, which identifies the platform and ABI.
pub fn get_target_triple() -> String {
let target_triple = unsafe {
let value = LLVMGetDefaultTargetTriple();
CStr::from_ptr(value).to_string_lossy().into_owned()
};
target_triple
}
/// Gets the data layout reprrsentation as a string, to be given to the MLIR module.
/// LLVM uses this to know the proper alignments for the given sizes, etc.
/// This function gets the data layout of the host target triple.
pub fn get_data_layout_rep() -> Result<String> {
unsafe {
let mut null = null_mut();
let error_buffer = addr_of_mut!(null);
let target_triple = LLVMGetDefaultTargetTriple();
let target_cpu = LLVMGetHostCPUName();
let target_cpu_features = LLVMGetHostCPUFeatures();
let mut target: MaybeUninit<LLVMTargetRef> = MaybeUninit::uninit();
if LLVMGetTargetFromTriple(target_triple, target.as_mut_ptr(), error_buffer) != 0 {
let error = CStr::from_ptr(*error_buffer);
let err = error.to_string_lossy().to_string();
tracing::error!("error getting target triple: {}", err);
LLVMDisposeMessage(*error_buffer);
Err(Error::LLVMCompileError(err))?;
}
if !(*error_buffer).is_null() {
LLVMDisposeMessage(*error_buffer);
}
let target = target.assume_init();
let machine = LLVMCreateTargetMachine(
target,
target_triple.cast(),
target_cpu.cast(),
target_cpu_features.cast(),
LLVMCodeGenOptLevel::LLVMCodeGenLevelNone,
LLVMRelocMode::LLVMRelocDynamicNoPic,
LLVMCodeModel::LLVMCodeModelDefault,
);
let data_layout = llvm_sys::target_machine::LLVMCreateTargetDataLayout(machine);
let data_layout_str =
CStr::from_ptr(llvm_sys::target::LLVMCopyStringRepOfTargetData(data_layout));
Ok(data_layout_str.to_string_lossy().into_owned())
}
}
#[cfg(test)]
mod tests {
    use super::*;

    /// `OptLevel::Default` must be the default value and must be produced by
    /// the numeric value 2 from every supported integer source type.
    #[test]
    fn test_opt_level_default() {
        assert_eq!(OptLevel::default(), OptLevel::Default);
        assert_eq!(OptLevel::from(2usize), OptLevel::Default);
        assert_eq!(OptLevel::from(2u8), OptLevel::Default);
    }

    /// Integer <-> `OptLevel` conversions, including saturation of
    /// out-of-range numeric values to `Aggressive`.
    #[test]
    fn test_opt_level_conversion() {
        let cases = [
            (0, OptLevel::None),
            (1, OptLevel::Less),
            (2, OptLevel::Default),
            (3, OptLevel::Aggressive),
            (30, OptLevel::Aggressive),
        ];
        // From usize and from u8; anything above 3 saturates to Aggressive.
        for (value, level) in cases {
            assert_eq!(OptLevel::from(value as usize), level);
            assert_eq!(OptLevel::from(value as u8), level);
        }
        // Back-conversion into usize for the four canonical levels.
        for (value, level) in cases.into_iter().take(4) {
            assert_eq!(usize::from(level), value as usize);
        }
    }
}
| rust | Apache-2.0 | f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce | 2026-01-04T20:20:54.031924Z | false |
lambdaclass/cairo_native | https://github.com/lambdaclass/cairo_native/blob/f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce/src/utils.rs | src/utils.rs | //! # Various utilities
pub(crate) use self::{program_registry_ext::ProgramRegistryExt, range_ext::RangeExt};
use crate::{
error::Result as NativeResult, metadata::MetadataStorage, native_panic, types::TypeBuilder,
OptLevel,
};
use cairo_lang_runner::token_gas_cost;
use cairo_lang_sierra::{
extensions::{
core::{CoreLibfunc, CoreType},
gas::CostTokenType,
},
ids::{ConcreteTypeId, FunctionId},
program::{GenFunction, Program, StatementIdx},
program_registry::ProgramRegistry,
};
use melior::{
ir::Module,
pass::{self, PassManager},
Context, Error, ExecutionEngine,
};
use num_bigint::{BigInt, BigUint, Sign};
use serde::{Deserialize, Serialize};
use starknet_types_core::felt::Felt;
use std::sync::LazyLock;
use std::{
alloc::Layout,
borrow::Cow,
fmt::{self, Display},
};
use thiserror::Error;
pub mod mem_tracing;
mod program_registry_ext;
mod range_ext;
#[cfg(feature = "with-segfault-catcher")]
pub mod safe_runner;
pub mod sierra_gen;
pub mod testing;
pub mod trace_dump;
pub mod walk_ir;
/// File extension used for shared libraries on the host platform.
#[cfg(target_os = "macos")]
pub const SHARED_LIBRARY_EXT: &str = "dylib";
/// File extension used for shared libraries on the host platform.
#[cfg(target_os = "linux")]
pub const SHARED_LIBRARY_EXT: &str = "so";
/// The `felt252` prime modulo.
pub static PRIME: LazyLock<BigUint> = LazyLock::new(|| {
    "3618502788666131213697322783095070105623107215331596699973092056135872020481"
        .parse()
        .expect("hardcoded prime constant should be valid")
});
/// Half of the `felt252` prime modulo, rounded down: `(PRIME - 1) / 2`.
pub static HALF_PRIME: LazyLock<BigUint> = LazyLock::new(|| {
    "1809251394333065606848661391547535052811553607665798349986546028067936010240"
        .parse()
        .expect("hardcoded half prime constant should be valid")
});
/// Represents the gas cost of each cost token type
///
/// See `crate::metadata::gas` for more documentation.
///
/// Order matters, for the libfunc impl
/// https://github.com/starkware-libs/sequencer/blob/1b7252f8a30244d39614d7666aa113b81291808e/crates/blockifier/src/execution/entry_point_execution.rs#L208
#[derive(Debug, Clone, Copy, Deserialize, Serialize)]
#[repr(C)]
pub struct BuiltinCosts {
    // One field per `CostTokenType`. The declaration order defines both the C
    // layout (`repr(C)`) and the indices returned by `index_for_token_type`.
    pub r#const: u64,
    pub pedersen: u64,
    pub bitwise: u64,
    pub ecop: u64,
    pub poseidon: u64,
    pub add_mod: u64,
    pub mul_mod: u64,
}
impl BuiltinCosts {
    /// Maps a [`CostTokenType`] to its field position inside [`BuiltinCosts`],
    /// following the struct's declaration order.
    pub fn index_for_token_type(token_type: &CostTokenType) -> NativeResult<usize> {
        Ok(match token_type {
            CostTokenType::Const => 0,
            CostTokenType::Pedersen => 1,
            CostTokenType::Bitwise => 2,
            CostTokenType::EcOp => 3,
            CostTokenType::Poseidon => 4,
            CostTokenType::AddMod => 5,
            CostTokenType::MulMod => 6,
            _ => native_panic!("matched an unexpected CostTokenType which is not being used"),
        })
    }
}
impl Default for BuiltinCosts {
    /// Builds the cost table from the compiler-defined gas cost of each token.
    fn default() -> Self {
        let gas = |token_type| token_gas_cost(token_type) as u64;
        Self {
            r#const: gas(CostTokenType::Const),
            pedersen: gas(CostTokenType::Pedersen),
            bitwise: gas(CostTokenType::Bitwise),
            ecop: gas(CostTokenType::EcOp),
            poseidon: gas(CostTokenType::Poseidon),
            add_mod: gas(CostTokenType::AddMod),
            mul_mod: gas(CostTokenType::MulMod),
        }
    }
}
impl crate::arch::AbiArgument for BuiltinCosts {
    /// Serializes every cost field into `buffer`, in declaration order.
    fn to_bytes(
        &self,
        buffer: &mut Vec<u8>,
        find_dict_drop_override: impl Copy
            + Fn(
                &cairo_lang_sierra::ids::ConcreteTypeId,
            ) -> Option<extern "C" fn(*mut std::ffi::c_void)>,
    ) -> crate::error::Result<()> {
        for field_value in [
            self.r#const,
            self.pedersen,
            self.bitwise,
            self.ecop,
            self.poseidon,
            self.add_mod,
            self.mul_mod,
        ] {
            field_value.to_bytes(buffer, find_dict_drop_override)?;
        }
        Ok(())
    }
}
#[cfg(feature = "with-mem-tracing")]
#[allow(unused_imports)]
pub(crate) use self::mem_tracing::{
_wrapped_free as libc_free, _wrapped_malloc as libc_malloc, _wrapped_realloc as libc_realloc,
};
#[cfg(not(feature = "with-mem-tracing"))]
#[allow(unused_imports)]
pub(crate) use libc::{free as libc_free, malloc as libc_malloc, realloc as libc_realloc};
/// Generate a function name.
///
/// If the program includes function identifiers, return those. Otherwise return `f` followed by the
/// identifier number.
pub fn generate_function_name(
    function_id: &'_ FunctionId,
    is_for_contract_executor: bool,
) -> Cow<'_, str> {
    // The plain `f{id}` form; used whenever the debug name is absent or the
    // name is generated for a contract executor.
    let plain = || Cow::Owned(format!("f{}", function_id.id));
    if is_for_contract_executor {
        return plain();
    }
    // Generic functions can omit their type in the debug_name, so several
    // functions may share a debug_name; appending the id keeps names unique.
    match function_id.debug_name.as_deref() {
        Some(name) => Cow::Owned(format!("{}(f{})", mangle_name(name), function_id.id)),
        None => plain(),
    }
}
/// Mangles the given function name to ensure safe compilation.
///
/// The compiler generates debug names with symbols that are not compatible in
/// all environments. For example, the GNU linker `ld` doesn't support symbol
/// symbols with the `@` character.
///
/// TODO(#1507): Improve the name mangling algorithm to ensure that name
/// collisions are imposible.
pub fn mangle_name(name: &str) -> String {
    // A `char` pattern avoids the substring-search machinery of `&str`
    // patterns (clippy: `single_char_pattern`).
    name.replace('@', "at")
}
/// Decode an UTF-8 error message replacing invalid bytes with their hexadecimal representation, as
/// done by Python's `x.decode('utf-8', errors='backslashreplace')`.
pub fn decode_error_message(data: &[u8]) -> String {
    // `pos` tracks the byte offset of the item currently being decoded so the
    // offending byte can be looked up when the iterator reports an error.
    let mut pos = 0;
    utf8_iter::ErrorReportingUtf8Chars::new(data).fold(String::new(), |mut acc, ch| {
        match ch {
            Ok(ch) => {
                acc.push(ch);
                pos += ch.len_utf8();
            }
            Err(_) => {
                // Escape the invalid byte as `\xNN`.
                // NOTE(review): this assumes the iterator yields exactly one
                // error per invalid byte (hence `pos += 1`) — confirm against
                // the `utf8_iter` documentation.
                acc.push_str(&format!("\\x{:02x}", data[pos]));
                pos += 1;
            }
        };
        acc
    })
}
/// Return the layout for an integer of arbitrary width.
///
/// This assumes the platform's maximum (effective) alignment is 16 bytes, and that every integer
/// with a size in bytes of a power of two has the same alignment as its size.
pub fn get_integer_layout(width: u32) -> Layout {
    match width {
        0 => Layout::new::<()>(),
        1..=8 => Layout::new::<u8>(),
        9..=16 => Layout::new::<u16>(),
        17..=32 => Layout::new::<u32>(),
        33..=64 => Layout::new::<u64>(),
        65..=128 => Layout::new::<u128>(),
        // Wider integers: round the bit width up to whole bytes and cap the
        // alignment at 16. According to the docs this should never return an
        // error.
        _ => Layout::from_size_align((width as usize).next_multiple_of(8) >> 3, 16)
            .expect("layout size rounded up to the next multiple of 16 should never be greater than ISIZE::MAX"),
    }
}
/// Returns the given entry point if present.
pub fn find_entry_point<'a>(
    program: &'a Program,
    entry_point: &str,
) -> Option<&'a GenFunction<StatementIdx>> {
    // Match against the function's debug name; functions without one never match.
    program.funcs.iter().find(|function| {
        function
            .id
            .debug_name
            .as_deref()
            .is_some_and(|name| name == entry_point)
    })
}
/// Returns the given entry point if present.
pub fn find_entry_point_by_idx(
    program: &Program,
    entry_point_idx: usize,
) -> Option<&GenFunction<StatementIdx>> {
    // Match against the numeric function id.
    let target_id = entry_point_idx as u64;
    program
        .funcs
        .iter()
        .find(|function| function.id.id == target_id)
}
/// Given a string representing a function name, searches in the program for the id corresponding
/// to said function, and returns a reference to it.
#[track_caller]
pub fn find_function_id<'a>(program: &'a Program, function_name: &str) -> Option<&'a FunctionId> {
    program.funcs.iter().find_map(|function| {
        (function.id.debug_name.as_deref() == Some(function_name)).then_some(&function.id)
    })
}
/// Parse a numeric string into felt, wrapping negatives around the prime modulo.
pub fn felt252_str(value: &str) -> Felt {
    let number = value
        .parse::<BigInt>()
        .expect("value must be a digit number");
    // Negative inputs wrap to `PRIME - |value|`.
    let magnitude = if matches!(number.sign(), Sign::Minus) {
        &*PRIME - number.magnitude()
    } else {
        number.magnitude().clone()
    };
    magnitude.into()
}
/// Parse any type that can be a bigint to a felt that can be used in the cairo-native input.
pub fn felt252_bigint(value: impl Into<BigInt>) -> Felt {
let value: BigInt = value.into();
let value = match value.sign() {
Sign::Minus => Cow::Owned(&*PRIME - value.magnitude()),
_ => Cow::Borrowed(value.magnitude()),
};
value.as_ref().into()
}
/// Parse a short string into a felt that can be used in the cairo-native input.
///
/// Non-ASCII characters are silently discarded.
///
/// # Panics
/// Panics when more than 31 ASCII bytes remain after filtering.
pub fn felt252_short_str(value: &str) -> Felt {
    let values: Vec<_> = value
        .chars()
        .filter_map(|c| c.is_ascii().then_some(c as u8))
        .collect();
    // `< 32` allows at most 31 bytes. The previous message ("A felt can't
    // longer than 32 bytes") was ungrammatical and off by one.
    assert!(values.len() < 32, "A felt can't be longer than 31 bytes");
    Felt::from_bytes_be_slice(&values)
}
/// Creates the execution engine, with all symbols registered.
///
/// `_metadata` is currently unused; it is kept in the signature for interface
/// stability with the rest of the compilation pipeline.
pub fn create_engine(
    module: &Module,
    _metadata: &MetadataStorage,
    opt_level: OptLevel,
) -> ExecutionEngine {
    // Create the JIT engine.
    let engine = ExecutionEngine::new(module, opt_level.into(), &[], false);
    // Memory-tracing builds also hook the allocation wrappers into the engine.
    #[cfg(feature = "with-mem-tracing")]
    self::mem_tracing::register_bindings(&engine);
    engine
}
/// Runs the MLIR pass pipeline that lowers `module` to the LLVM dialect.
///
/// The verifier is enabled, so an invalid module makes the run fail.
pub fn run_pass_manager(context: &Context, module: &mut Module) -> Result<(), Error> {
    let pass_manager = PassManager::new(context);
    pass_manager.enable_verifier(true);
    pass_manager.add_pass(pass::transform::create_canonicalizer());
    pass_manager.add_pass(pass::conversion::create_scf_to_control_flow()); // needed because to_llvm doesn't include it.
    pass_manager.add_pass(pass::conversion::create_to_llvm());
    pass_manager.run(module)
}
/// Return a type that calls a closure when formatted using [Debug](std::fmt::Debug).
pub fn debug_with<F>(fmt: F) -> impl fmt::Debug
where
    F: Fn(&mut fmt::Formatter) -> fmt::Result,
{
    // Adapter that forwards `Debug` formatting straight to the stored closure.
    struct ClosureDebug<F>(F)
    where
        F: Fn(&mut fmt::Formatter) -> fmt::Result;

    impl<F> fmt::Debug for ClosureDebug<F>
    where
        F: Fn(&mut fmt::Formatter) -> fmt::Result,
    {
        fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
            (self.0)(formatter)
        }
    }

    ClosureDebug(fmt)
}
/// Edit: Copied from the std lib.
///
/// Returns the amount of padding (in bytes) that must follow `layout` so the
/// next address satisfies `align`.
///
/// e.g., if `layout.size()` is 9, then `padding_needed_for(&layout, 4)`
/// returns 3, the minimum padding needed to reach the next 4-aligned address
/// (assuming the allocation itself starts 4-aligned).
///
/// The return value has no meaning unless `align` is a power of two, and is
/// only useful when `align` does not exceed the alignment of the whole
/// allocation (e.g. when `align <= layout.align()`).
#[inline]
pub const fn padding_needed_for(layout: &Layout, align: usize) -> usize {
    let size = layout.size();
    // With `align` a power of two, `align - 1` is a mask of its low bits, so
    // `(size + align - 1) & !(align - 1)` rounds `size` up to a multiple of
    // `align`. All arithmetic is wrapping: should `size + align - 1` overflow,
    // the mask forces the rounded size to 0, and the returned padding brings
    // `size` to 0 modulo the word size — which still trivially satisfies the
    // alignment. Allocations that large would be rejected by the allocator
    // anyway.
    let mask = align.wrapping_sub(1);
    let rounded_size = size.wrapping_add(mask) & !mask;
    rounded_size.wrapping_sub(size)
}
/// Error returned by [`layout_repeat`] when the layout arithmetic overflows
/// or produces an invalid layout.
#[derive(Clone, PartialEq, Eq, Debug, Error)]
pub struct LayoutError;
impl Display for LayoutError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str("layout error")
    }
}
/// Copied from std.
///
/// Creates a layout describing the record for `n` instances of `layout`,
/// with a suitable amount of padding between each to ensure that each
/// instance is given its requested size and alignment. On success, returns
/// `(k, offs)` where `k` is the layout of the array and `offs` is the
/// distance between the start of each element in the array.
///
/// On arithmetic overflow, returns `LayoutError`.
//#[unstable(feature = "alloc_layout_extra", issue = "55724")]
#[inline]
pub fn layout_repeat(layout: &Layout, n: usize) -> Result<(Layout, usize), LayoutError> {
    // Rounding the size up to the alignment cannot overflow: `Layout`
    // guarantees that size, rounded up to align, does not exceed isize::MAX.
    let stride = layout.size() + padding_needed_for(layout, layout.align());
    let total_size = stride.checked_mul(n).ok_or(LayoutError)?;
    // Go through the checked constructor so the isize size limit is enforced.
    let array_layout =
        Layout::from_size_align(total_size, layout.align()).map_err(|_| LayoutError)?;
    Ok((array_layout, stride))
}
/// Returns the total layout size for the given types.
pub fn get_types_total_size(
    types_ids: &[ConcreteTypeId],
    registry: &ProgramRegistry<CoreType, CoreLibfunc>,
) -> crate::error::Result<usize> {
    // Sum each type's layout size, short-circuiting on the first error.
    types_ids.iter().try_fold(0, |total, type_id| {
        let layout = registry.get_type(type_id)?.layout(registry)?;
        Ok(total + layout.size())
    })
}
#[cfg(test)]
mod tests {
use std::{
fmt::{self, Formatter},
io::Write,
path::Path,
};
use super::Felt;
use crate::utils::{
debug_with, felt252_short_str, felt252_str, find_entry_point, find_entry_point_by_idx,
find_function_id, generate_function_name, get_integer_layout, testing::cairo_to_sierra,
};
use cairo_lang_sierra::{
ids::FunctionId,
program::{FunctionSignature, GenFunction, Program, StatementIdx},
};
// ==============================
// == TESTS: get_integer_layout
// ==============================
/// Ensures that the host's `u8` is compatible with its compiled counterpart.
#[test]
fn test_alignment_compatibility_u8() {
assert_eq!(get_integer_layout(8).align(), 1);
}
/// Ensures that the host's `u16` is compatible with its compiled counterpart.
#[test]
fn test_alignment_compatibility_u16() {
assert_eq!(get_integer_layout(16).align(), 2);
}
/// Ensures that the host's `u32` is compatible with its compiled counterpart.
#[test]
fn test_alignment_compatibility_u32() {
assert_eq!(get_integer_layout(32).align(), 4);
}
/// Ensures that the host's `u64` is compatible with its compiled counterpart.
#[test]
fn test_alignment_compatibility_u64() {
assert_eq!(get_integer_layout(64).align(), 8);
}
/// Ensures that the host's `u128` is compatible with its compiled counterpart.
#[test]
fn test_alignment_compatibility_u128() {
assert_eq!(get_integer_layout(128).align(), 16);
}
/// Ensures that the host's `u256` is compatible with its compiled counterpart.
#[test]
fn test_alignment_compatibility_u256() {
assert_eq!(get_integer_layout(256).align(), 16);
}
/// Ensures that the host's `u512` is compatible with its compiled counterpart.
#[test]
fn test_alignment_compatibility_u512() {
assert_eq!(get_integer_layout(512).align(), 16);
}
/// Ensures that the host's `Felt` is compatible with its compiled counterpart.
#[test]
fn test_alignment_compatibility_felt() {
assert_eq!(get_integer_layout(252).align(), 16);
}
// ==============================
// == TESTS: find_entry_point
// ==============================
#[test]
fn test_find_entry_point_with_empty_program() {
let program = Program {
type_declarations: vec![],
libfunc_declarations: vec![],
statements: vec![],
funcs: vec![],
};
let entry_point = find_entry_point(&program, "entry_point");
assert!(entry_point.is_none());
}
#[test]
fn test_entry_point_not_found() {
let program = Program {
type_declarations: vec![],
libfunc_declarations: vec![],
statements: vec![],
funcs: vec![GenFunction {
id: FunctionId {
id: 0,
debug_name: Some("not_entry_point".into()),
},
signature: FunctionSignature {
ret_types: vec![],
param_types: vec![],
},
params: vec![],
entry_point: StatementIdx(0),
}],
};
let entry_point = find_entry_point(&program, "entry_point");
assert!(entry_point.is_none());
}
#[test]
fn test_entry_point_found() {
let program = Program {
type_declarations: vec![],
libfunc_declarations: vec![],
statements: vec![],
funcs: vec![GenFunction {
id: FunctionId {
id: 0,
debug_name: Some("entry_point".into()),
},
signature: FunctionSignature {
ret_types: vec![],
param_types: vec![],
},
params: vec![],
entry_point: StatementIdx(0),
}],
};
let entry_point = find_entry_point(&program, "entry_point");
assert!(entry_point.is_some());
assert_eq!(entry_point.unwrap().id.id, 0);
}
// ====================================
// == TESTS: find_entry_point_by_idx
// ====================================
#[test]
fn test_find_entry_point_by_idx_with_empty_program() {
let program = Program {
type_declarations: vec![],
libfunc_declarations: vec![],
statements: vec![],
funcs: vec![],
};
let entry_point = find_entry_point_by_idx(&program, 0);
assert!(entry_point.is_none());
}
#[test]
fn test_entry_point_not_found_by_id() {
let program = Program {
type_declarations: vec![],
libfunc_declarations: vec![],
statements: vec![],
funcs: vec![GenFunction {
id: FunctionId {
id: 0,
debug_name: Some("some_name".into()),
},
signature: FunctionSignature {
ret_types: vec![],
param_types: vec![],
},
params: vec![],
entry_point: StatementIdx(0),
}],
};
let entry_point = find_entry_point_by_idx(&program, 1);
assert!(entry_point.is_none());
}
#[test]
fn test_entry_point_found_by_id() {
let program = Program {
type_declarations: vec![],
libfunc_declarations: vec![],
statements: vec![],
funcs: vec![GenFunction {
id: FunctionId {
id: 15,
debug_name: Some("some_name".into()),
},
signature: FunctionSignature {
ret_types: vec![],
param_types: vec![],
},
params: vec![],
entry_point: StatementIdx(0),
}],
};
let entry_point = find_entry_point_by_idx(&program, 15);
assert!(entry_point.is_some());
assert_eq!(entry_point.unwrap().id.id, 15);
}
#[test]
fn decode_error_message() {
// Checkout [issue 795](https://github.com/lambdaclass/cairo_native/issues/795) for context.
assert_eq!(
super::decode_error_message(&[
97, 114, 103, 101, 110, 116, 47, 109, 117, 108, 116, 105, 99, 97, 108, 108, 45,
102, 97, 105, 108, 101, 100, 3, 232, 78, 97, 116, 105, 118, 101, 32, 101, 120, 101,
99, 117, 116, 105, 111, 110, 32, 101, 114, 114, 111, 114, 58, 32, 69, 114, 114,
111, 114, 32, 97, 116, 32, 112, 99, 61, 48, 58, 49, 48, 52, 58, 10, 71, 111, 116,
32, 97, 110, 32, 101, 120, 99, 101, 112, 116, 105, 111, 110, 32, 119, 104, 105,
108, 101, 32, 101, 120, 101, 99, 117, 116, 105, 110, 103, 32, 97, 32, 104, 105,
110, 116, 58, 32, 69, 114, 114, 111, 114, 32, 97, 116, 32, 112, 99, 61, 48, 58, 49,
56, 52, 58, 10, 71, 111, 116, 32, 97, 110, 32, 101, 120, 99, 101, 112, 116, 105,
111, 110, 32, 119, 104, 105, 108, 101, 32, 101, 120, 101, 99, 117, 116, 105, 110,
103, 32, 97, 32, 104, 105, 110, 116, 58, 32, 69, 120, 99, 101, 101, 100, 101, 100,
32, 116, 104, 101, 32, 109, 97, 120, 105, 109, 117, 109, 32, 110, 117, 109, 98,
101, 114, 32, 111, 102, 32, 101, 118, 101, 110, 116, 115, 44, 32, 110, 117, 109,
98, 101, 114, 32, 101, 118, 101, 110, 116, 115, 58, 32, 49, 48, 48, 49, 44, 32,
109, 97, 120, 32, 110, 117, 109, 98, 101, 114, 32, 101, 118, 101, 110, 116, 115,
58, 32, 49, 48, 48, 48, 46, 10, 67, 97, 105, 114, 111, 32, 116, 114, 97, 99, 101,
98, 97, 99, 107, 32, 40, 109, 111, 115, 116, 32, 114, 101, 99, 101, 110, 116, 32,
99, 97, 108, 108, 32, 108, 97, 115, 116, 41, 58, 10, 85, 110, 107, 110, 111, 119,
110, 32, 108, 111, 99, 97, 116, 105, 111, 110, 32, 40, 112, 99, 61, 48, 58, 49, 52,
51, 52, 41, 10, 85, 110, 107, 110, 111, 119, 110, 32, 108, 111, 99, 97, 116, 105,
111, 110, 32, 40, 112, 99, 61, 48, 58, 49, 51, 57, 53, 41, 10, 85, 110, 107, 110,
111, 119, 110, 32, 108, 111, 99, 97, 116, 105, 111, 110, 32, 40, 112, 99, 61, 48,
58, 57, 53, 51, 41, 10, 85, 110, 107, 110, 111, 119, 110, 32, 108, 111, 99, 97,
116, 105, 111, 110, 32, 40, 112, 99, 61, 48, 58, 51, 51, 57, 41, 10, 10, 67, 97,
105, 114, 111, 32, 116, 114, 97, 99, 101, 98, 97, 99, 107, 32, 40, 109, 111, 115,
116, 32, 114, 101, 99, 101, 110, 116, 32, 99, 97, 108, 108, 32, 108, 97, 115, 116,
41, 58, 10, 85, 110, 107, 110, 111, 119, 110, 32, 108, 111, 99, 97, 116, 105, 111,
110, 32, 40, 112, 99, 61, 48, 58, 49, 54, 55, 56, 41, 10, 85, 110, 107, 110, 111,
119, 110, 32, 108, 111, 99, 97, 116, 105, 111, 110, 32, 40, 112, 99, 61, 48, 58,
49, 54, 54, 52, 41, 10
]),
"argent/multicall-failed\x03\\xe8Native execution error: Error at pc=0:104:\nGot an exception while executing a hint: Error at pc=0:184:\nGot an exception while executing a hint: Exceeded the maximum number of events, number events: 1001, max number events: 1000.\nCairo traceback (most recent call last):\nUnknown location (pc=0:1434)\nUnknown location (pc=0:1395)\nUnknown location (pc=0:953)\nUnknown location (pc=0:339)\n\nCairo traceback (most recent call last):\nUnknown location (pc=0:1678)\nUnknown location (pc=0:1664)\n",
);
}
// ==============================
// == TESTS: felt252_str
// ==============================
#[test]
#[should_panic(expected = "value must be a digit number")]
fn test_felt252_str_invalid_input() {
let value = "not_a_number";
felt252_str(value);
}
#[test]
fn test_felt252_str_positive_number() {
let value = "123";
let result = felt252_str(value);
assert_eq!(result, 123.into());
}
#[test]
fn test_felt252_str_negative_number() {
let value = "-123";
let result = felt252_str(value);
assert_eq!(
result,
Felt::from_dec_str(
"3618502788666131213697322783095070105623107215331596699973092056135872020358"
)
.unwrap()
);
}
#[test]
fn test_felt252_str_zero() {
let value = "0";
let result = felt252_str(value);
assert_eq!(result, Felt::ZERO);
}
// ==============================
// == TESTS: felt252_short_str
// ==============================
#[test]
fn test_felt252_short_str_short_numeric_string() {
let value = "12345";
let result = felt252_short_str(value);
assert_eq!(result, 211295614005u64.into());
}
#[test]
fn test_felt252_short_str_short_string_with_non_numeric_characters() {
let value = "hello";
let result = felt252_short_str(value);
assert_eq!(result, 448378203247u64.into());
}
#[test]
#[should_panic]
fn test_felt252_short_str_long_numeric_string() {
felt252_short_str("1234567890123456789012345678901234567890");
}
#[test]
fn test_felt252_short_str_empty_string() {
let value = "";
let result = felt252_short_str(value);
assert_eq!(result, Felt::ZERO);
}
#[test]
fn test_felt252_short_str_string_with_non_ascii_characters() {
let value = "h€llø";
let result = felt252_short_str(value);
assert_eq!(result, 6843500.into());
}
// ==============================
// == TESTS: debug_with
// ==============================
#[test]
fn test_debug_with_empty_closure() {
let closure = |_f: &mut Formatter| -> fmt::Result { Ok(()) };
let debug_wrapper = debug_with(closure);
assert_eq!(format!("{:?}", debug_wrapper), "");
}
#[test]
#[should_panic]
fn test_debug_with_error_closure() {
let closure = |_f: &mut Formatter| -> Result<(), fmt::Error> { Err(fmt::Error) };
let debug_wrapper = debug_with(closure);
let _ = format!("{:?}", debug_wrapper);
}
#[test]
fn test_debug_with_simple_closure() {
let closure = |f: &mut fmt::Formatter| write!(f, "Hello, world!");
let debug_wrapper = debug_with(closure);
assert_eq!(format!("{:?}", debug_wrapper), "Hello, world!");
}
#[test]
fn test_debug_with_complex_closure() {
let closure = |f: &mut fmt::Formatter| write!(f, "Name: William, Age: {}", 28);
let debug_wrapper = debug_with(closure);
assert_eq!(format!("{:?}", debug_wrapper), "Name: William, Age: 28");
}
    #[test]
    fn test_generate_function_name_debug_name() {
        // Outside the contract executor, the mangled debug name gets the id appended.
        let function_id = FunctionId {
            id: 123,
            debug_name: Some("function_name".into()),
        };
        assert_eq!(
            generate_function_name(&function_id, false),
            "function_name(f123)"
        );
    }

    #[test]
    fn test_generate_function_name_debug_name_for_contract_executor() {
        // For contract executors the plain `f{id}` form is always used.
        let function_id = FunctionId {
            id: 123,
            debug_name: Some("function_name".into()),
        };
        assert_eq!(generate_function_name(&function_id, true), "f123");
    }

    #[test]
    fn test_generate_function_name_without_debug_name() {
        // Without a debug name, the plain `f{id}` form is used.
        let function_id = FunctionId {
            id: 123,
            debug_name: None,
        };
        assert_eq!(generate_function_name(&function_id, false), "f123");
    }
}
#[test]
fn test_cairo_to_sierra_path() {
// Define the path to the cairo program.
let program_path = Path::new("programs/examples/hello.cairo");
// Compile the cairo program to sierra.
let sierra_program = cairo_to_sierra(program_path).unwrap();
// Define the entry point function for comparison.
let entry_point = "hello::hello::greet";
// Find the function ID of the entry point function in the sierra program.
let entry_point_id = find_function_id(&sierra_program, entry_point).unwrap();
// Assert that the debug name of the entry point function matches the expected value.
assert_eq!(
entry_point_id.debug_name,
Some("hello::hello::greet".into())
);
}
#[test]
fn test_cairo_to_sierra_source() {
// Define the content of the cairo program as a string.
let content = "type u8 = u8;";
// Create a named temporary file and write the content to it.
let mut file = tempfile::NamedTempFile::new().unwrap();
file.write_all(content.as_bytes()).unwrap();
// Get the path of the temporary file.
let file_path = file.path().to_path_buf();
// Compile the cairo program to sierra using the path of the temporary file.
let sierra_program = cairo_to_sierra(&file_path).unwrap();
// Assert that the sierra program has no library function declarations, statements, or functions.
assert!(sierra_program.libfunc_declarations.is_empty());
assert!(sierra_program.statements.is_empty());
assert!(sierra_program.funcs.is_empty());
// Assert that the debug name of the first type declaration matches the expected value.
assert_eq!(sierra_program.type_declarations.len(), 1);
assert_eq!(
sierra_program.type_declarations[0].id.debug_name,
Some("u8".into())
);
}
}
| rust | Apache-2.0 | f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce | 2026-01-04T20:20:54.031924Z | false |
lambdaclass/cairo_native | https://github.com/lambdaclass/cairo_native/blob/f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce/src/runtime.rs | src/runtime.rs | #![allow(non_snake_case)]
use crate::utils::BuiltinCosts;
use cairo_lang_sierra_gas::core_libfunc_cost::{
DICT_SQUASH_REPEATED_ACCESS_COST, DICT_SQUASH_UNIQUE_KEY_COST,
};
use itertools::Itertools;
use lazy_static::lazy_static;
use num_bigint::BigInt;
use num_traits::{ToPrimitive, Zero};
use rand::Rng;
use starknet_curve::curve_params::BETA;
use starknet_types_core::{
curve::{AffinePoint, ProjectivePoint},
felt::Felt,
hash::StarkHash,
};
use std::{
alloc::{dealloc, realloc, Layout},
cell::Cell,
collections::{hash_map::Entry, HashMap},
ffi::{c_int, c_void},
fs::File,
io::Write,
mem::{forget, ManuallyDrop},
ops::Shl,
os::fd::FromRawFd,
ptr,
rc::Rc,
};
use std::{ops::Mul, vec::IntoIter};
lazy_static! {
pub static ref HALF_PRIME: Felt = Felt::from_dec_str(
"1809251394333065606848661391547535052811553607665798349986546028067936010240"
)
.unwrap();
pub static ref DICT_GAS_REFUND_PER_ACCESS: u64 =
(DICT_SQUASH_UNIQUE_KEY_COST.cost() - DICT_SQUASH_REPEATED_ACCESS_COST.cost()) as u64;
}
/// Based on `cairo-lang-runner`'s implementation.
///
/// Source: <https://github.com/starkware-libs/cairo/blob/main/crates/cairo-lang-runner/src/casm_run/mod.rs#L1946-L1948>
///
/// # Safety
///
/// This function is intended to be called from MLIR, deals with pointers, and is therefore
/// definitely unsafe to use manually.
pub unsafe extern "C" fn cairo_native__libfunc__debug__print(
target_fd: i32,
data: *const [u8; 32],
len: u32,
) -> i32 {
// Avoid closing `stdout` on all branches.
let mut target = ManuallyDrop::new(File::from_raw_fd(target_fd));
let mut items = Vec::with_capacity(len as usize);
for i in 0..len as usize {
let mut data = *data.add(i);
data[31] &= 0x0F; // Filter out first 4 bits (they're outside an i252).
let value = Felt::from_bytes_le(&data);
items.push(value);
}
let value = format_for_debug(items.into_iter());
if write!(target, "{}", value).is_err() {
return 1;
};
0
}
/// Compute `pedersen(lhs, rhs)` and store it into `dst`.
///
/// All its operands need the values in big endian.
///
/// # Panics
///
/// This function will panic if either operand is out of range for a felt.
///
/// # Safety
///
/// This function is intended to be called from MLIR, deals with pointers, and is therefore
/// definitely unsafe to use manually.
pub unsafe extern "C" fn cairo_native__libfunc__pedersen(
dst: &mut [u8; 32],
lhs: &[u8; 32],
rhs: &[u8; 32],
) {
// Extract arrays from the pointers.
let mut lhs = *lhs;
let mut rhs = *rhs;
lhs[31] &= 0x0F; // Filter out first 4 bits (they're outside an i252).
rhs[31] &= 0x0F; // Filter out first 4 bits (they're outside an i252).
// Convert to FieldElement.
let lhs = Felt::from_bytes_le(&lhs);
let rhs = Felt::from_bytes_le(&rhs);
// Compute pedersen hash and copy the result into `dst`.
let res = starknet_types_core::hash::Pedersen::hash(&lhs, &rhs);
*dst = res.to_bytes_le();
}
/// Compute `hades_permutation(op0, op1, op2)` and replace the operands with the results.
///
/// All operands need the values in big endian.
///
/// # Panics
///
/// This function will panic if either operand is out of range for a felt.
///
/// # Safety
///
/// This function is intended to be called from MLIR, deals with pointers, and is therefore
/// definitely unsafe to use manually.
pub unsafe extern "C" fn cairo_native__libfunc__hades_permutation(
op0: &mut [u8; 32],
op1: &mut [u8; 32],
op2: &mut [u8; 32],
) {
op0[31] &= 0x0F; // Filter out first 4 bits (they're outside an i252).
op1[31] &= 0x0F; // Filter out first 4 bits (they're outside an i252).
op2[31] &= 0x0F; // Filter out first 4 bits (they're outside an i252).
// Convert to FieldElement.
let mut state = [
Felt::from_bytes_le(op0),
Felt::from_bytes_le(op1),
Felt::from_bytes_le(op2),
];
// Compute Poseidon permutation.
starknet_types_core::hash::Poseidon::hades_permutation(&mut state);
// Write back the results.
*op0 = state[0].to_bytes_le();
*op1 = state[1].to_bytes_le();
*op2 = state[2].to_bytes_le();
}
/// Felt252 type used in cairo native runtime
#[derive(Debug)]
pub struct FeltDict {
pub mappings: HashMap<[u8; 32], usize>,
pub layout: Layout,
pub elements: *mut (),
pub drop_fn: Option<extern "C" fn(*mut c_void)>,
pub count: u64,
}
impl Drop for FeltDict {
fn drop(&mut self) {
// Free the entries manually.
if let Some(drop_fn) = self.drop_fn {
for (_, &index) in self.mappings.iter() {
let value_ptr = unsafe {
self.elements
.byte_add(self.layout.pad_to_align().size() * index)
};
drop_fn(value_ptr.cast());
}
}
// Free the value data.
if !self.elements.is_null() {
unsafe {
dealloc(
self.elements.cast(),
Layout::from_size_align_unchecked(
self.layout.pad_to_align().size() * self.mappings.capacity(),
self.layout.align(),
),
)
};
}
}
}
/// Allocate a new dictionary.
///
/// # Safety
///
/// This function is intended to be called from MLIR, deals with pointers, and is therefore
/// definitely unsafe to use manually.
pub unsafe extern "C" fn cairo_native__dict_new(
size: u64,
align: u64,
drop_fn: Option<extern "C" fn(*mut c_void)>,
) -> *const FeltDict {
Rc::into_raw(Rc::new(FeltDict {
mappings: HashMap::default(),
layout: Layout::from_size_align_unchecked(size as usize, align as usize),
elements: ptr::null_mut(),
drop_fn,
count: 0,
}))
}
/// Free a dictionary using an optional callback to drop each element.
///
/// # Safety
///
/// This function is intended to be called from MLIR, deals with pointers, and is therefore
/// definitely unsafe to use manually.
// Note: Using `Option<extern "C" fn(*mut c_void)>` is ffi-safe thanks to Option's null
// pointer optimization. Check out
// https://doc.rust-lang.org/nomicon/ffi.html#the-nullable-pointer-optimization for more info.
pub unsafe extern "C" fn cairo_native__dict_drop(ptr: *const FeltDict) {
drop(Rc::from_raw(ptr));
}
/// Duplicate a dictionary using a provided callback to clone each element.
///
/// # Safety
///
/// This function is intended to be called from MLIR, deals with pointers, and is therefore
/// definitely unsafe to use manually.
pub unsafe extern "C" fn cairo_native__dict_dup(dict_ptr: *const FeltDict) -> *const FeltDict {
let old_dict = Rc::from_raw(dict_ptr);
let new_dict = Rc::clone(&old_dict);
forget(old_dict);
Rc::into_raw(new_dict)
}
/// Return a pointer to the entry's value pointer for a given key, inserting a null pointer if not
/// present. Increment the access count.
///
/// The null pointer will be either updated by `felt252_dict_entry_finalize` or removed (along with
/// everything else in the dict) by the entry's drop implementation.
///
/// # Safety
///
/// This function is intended to be called from MLIR, deals with pointers, and is therefore
/// definitely unsafe to use manually.
pub unsafe extern "C" fn cairo_native__dict_get(
dict_ptr: *const FeltDict,
key: &[u8; 32],
value_ptr: *mut *mut c_void,
) -> c_int {
let dict_rc = Rc::from_raw(dict_ptr);
// there may me multiple reference to the same dictionary (snapshots), but
// as snapshots cannot access the inner dictionary, then it is safe to modify it
// without cloning it.
let dict = Rc::as_ptr(&dict_rc)
.cast_mut()
.as_mut()
.expect("rc inner pointer should never be null");
let num_mappings = dict.mappings.len();
let has_capacity = num_mappings != dict.mappings.capacity();
let (is_present, index) = match dict.mappings.entry(*key) {
Entry::Occupied(entry) => (true, *entry.get()),
Entry::Vacant(entry) => {
entry.insert(num_mappings);
(false, num_mappings)
}
};
// Maybe realloc (conditions: !has_capacity && !is_present).
if !has_capacity && !is_present {
dict.elements = realloc(
dict.elements.cast(),
Layout::from_size_align_unchecked(
dict.layout.pad_to_align().size() * dict.mappings.len(),
dict.layout.align(),
),
dict.layout.pad_to_align().size() * dict.mappings.capacity(),
)
.cast();
}
*value_ptr = dict
.elements
.byte_add(dict.layout.pad_to_align().size() * index)
.cast();
dict.count += 1;
forget(dict_rc);
is_present as c_int
}
/// Simulates the felt252_dict_squash libfunc.
///
/// # Safety
///
/// This function is intended to be called from MLIR, deals with pointers, and is therefore
/// definitely unsafe to use manually.
pub unsafe extern "C" fn cairo_native__dict_squash(
dict_ptr: *const FeltDict,
range_check_ptr: &mut u64,
gas_ptr: &mut u64,
) {
let dict = Rc::from_raw(dict_ptr);
*gas_ptr +=
(dict.count.saturating_sub(dict.mappings.len() as u64)) * *DICT_GAS_REFUND_PER_ACCESS;
// Squashing a dictionary always uses the range check builtin at least twice.
// https://github.com/starkware-libs/cairo/blob/v2.12.0-dev.1/crates/cairo-lang-sierra-to-casm/src/invocations/felt252_dict.rs?plain=1#L131-L136
*range_check_ptr += 2;
let no_big_keys = dict
.mappings
.keys()
.map(Felt::from_bytes_le)
.all(|key| key < Felt::from(BigInt::from(1).shl(128)));
let number_of_keys = dict.mappings.len() as u64;
// How we update the range check depends on whether we have any big key or not.
// - If there are no big keys, every unique key increases the range check by 3.
// - If there are big keys:
// - the first unique key increases the range check by 2.
// - the remaining unique keys increase the range check by 6.
// The sierra-to-casm implementation calls the `SquashDict` after some initial validation.
// https://github.com/starkware-libs/cairo/blob/v2.12.0-dev.1/crates/cairo-lang-sierra-to-casm/src/invocations/felt252_dict.rs?plain=1#L159
//
// For each unique key, the `SquashDictInner` function is called, which
// loops over all accesses to that key. At the end, the function calls
// itself recursively until all keys have been iterated.
// If there are no big keys, the first range check usage is done by the
// caller of the inner function, which implies that it appears in two places:
// 1a. Once in `SquashDict`, right before calling the inner function for the first time.
// https://github.com/starkware-libs/cairo/blob/v2.12.0-dev.1/crates/cairo-lang-sierra-to-casm/src/invocations/felt252_dict.rs?plain=1#L326
// 1b. Once at the end of `SquashDictInner`, right before recursing.
// https://github.com/starkware-libs/cairo/blob/v2.12.0-dev.1/crates/cairo-lang-sierra-to-casm/src/invocations/felt252_dict.rs?plain=1#L507
if no_big_keys {
*range_check_ptr += number_of_keys;
}
// The next two range check usages are done always inside of the inner
// function (regardless of whether we have big keys or not).
// 2. https://github.com/starkware-libs/cairo/blob/v2.12.0-dev.1/crates/cairo-lang-sierra-to-casm/src/invocations/felt252_dict.rs?plain=1#L416
// 3. https://github.com/starkware-libs/cairo/blob/v2.12.0-dev.1/crates/cairo-lang-sierra-to-casm/src/invocations/felt252_dict.rs?plain=1#L480
*range_check_ptr += 2 * number_of_keys;
// If there are big keys, then we use the range check 4 additional times per key, except for the first key.
// https://github.com/starkware-libs/cairo/blob/v2.12.0-dev.1/crates/cairo-lang-sierra-to-casm/src/invocations/felt252_dict.rs#L669-L674
if !no_big_keys && number_of_keys > 1 {
*range_check_ptr += 4 * (number_of_keys - 1);
}
// For each non unique accessed key, we increase the range check an additional time.
// https://github.com/starkware-libs/cairo/blob/v2.12.0-dev.1/crates/cairo-lang-sierra-to-casm/src/invocations/felt252_dict.rs?plain=1#L602
*range_check_ptr += dict.count.saturating_sub(dict.mappings.len() as u64);
forget(dict);
}
/// Compute `ec_point_from_x_nz(x)` and store it.
///
/// # Panics
///
/// This function will panic if either operand is out of range for a felt.
///
/// # Safety
///
/// This function is intended to be called from MLIR, deals with pointers, and is therefore
/// definitely unsafe to use manually.
pub unsafe extern "C" fn cairo_native__libfunc__ec__ec_point_from_x_nz(
point_ptr: &mut [[u8; 32]; 2],
) -> bool {
point_ptr[0][31] &= 0x0F; // Filter out first 4 bits (they're outside an i252).
let x = Felt::from_bytes_le(&point_ptr[0]);
// https://github.com/starkware-libs/cairo/blob/aaad921bba52e729dc24ece07fab2edf09ccfa15/crates/cairo-lang-sierra-to-casm/src/invocations/ec.rs#L63
let x2 = x * x;
let x3 = x2 * x;
let alpha_x_plus_beta = x + BETA;
let rhs = x3 + alpha_x_plus_beta;
// https://github.com/starkware-libs/cairo/blob/9b603b88c2e5a98eec1bb8f323260b7765e94911/crates/cairo-lang-runner/src/casm_run/mod.rs#L1825
let y = rhs
.sqrt()
.unwrap_or_else(|| (Felt::THREE * rhs).sqrt().unwrap());
let y = y.min(-y);
match AffinePoint::new(x, y) {
Ok(point) => {
point_ptr[1] = point.y().to_bytes_le();
true
}
Err(_) => false,
}
}
/// Compute `ec_point_try_new_nz(x)`.
///
/// # Panics
///
/// This function will panic if either operand is out of range for a felt.
///
/// # Safety
///
/// This function is intended to be called from MLIR, deals with pointers, and is therefore
/// definitely unsafe to use manually.
pub unsafe extern "C" fn cairo_native__libfunc__ec__ec_point_try_new_nz(
point_ptr: &mut [[u8; 32]; 2],
) -> bool {
point_ptr[0][31] &= 0x0F; // Filter out first 4 bits (they're outside an i252).
point_ptr[1][31] &= 0x0F; // Filter out first 4 bits (they're outside an i252).
let x = Felt::from_bytes_le(&point_ptr[0]);
let y = Felt::from_bytes_le(&point_ptr[1]);
match AffinePoint::new(x, y) {
Ok(point) => {
point_ptr[0] = point.x().to_bytes_le();
point_ptr[1] = point.y().to_bytes_le();
true
}
Err(_) => false,
}
}
/// Compute `ec_state_init()` and store the state back.
///
/// # Safety
///
/// This function is intended to be called from MLIR, deals with pointers, and is therefore
/// definitely unsafe to use manually.
pub unsafe extern "C" fn cairo_native__libfunc__ec__ec_state_init(state_ptr: &mut [[u8; 32]; 4]) {
// https://github.com/starkware-libs/cairo/blob/aaad921bba52e729dc24ece07fab2edf09ccfa15/crates/cairo-lang-runner/src/casm_run/mod.rs#L1802
let mut rng = rand::rng();
let (random_x, random_y) = loop {
// Randominzing 31 bytes to make sure is in range.
let x_bytes: [u8; 31] = rng.random();
let random_x = Felt::from_bytes_be_slice(&x_bytes);
let random_y_squared = random_x * random_x * random_x + random_x + BETA;
if let Some(random_y) = random_y_squared.sqrt() {
break (random_x, random_y);
}
};
// We already made sure its a valid point.
let state = AffinePoint::new_unchecked(random_x, random_y);
state_ptr[0] = state.x().to_bytes_le();
state_ptr[1] = state.y().to_bytes_le();
state_ptr[2] = state_ptr[0];
state_ptr[3] = state_ptr[1];
}
/// Compute `ec_state_add(state, point)` and store the state back.
///
/// # Panics
///
/// This function will panic if either operand is out of range for a felt.
///
/// # Safety
///
/// This function is intended to be called from MLIR, deals with pointers, and is therefore
/// definitely unsafe to use manually.
pub unsafe extern "C" fn cairo_native__libfunc__ec__ec_state_add(
state_ptr: &mut [[u8; 32]; 4],
point_ptr: &[[u8; 32]; 2],
) {
state_ptr[0][31] &= 0x0F; // Filter out first 4 bits (they're outside an i252).
state_ptr[1][31] &= 0x0F; // Filter out first 4 bits (they're outside an i252).
let mut point_ptr = *point_ptr;
point_ptr[0][31] &= 0x0F; // Filter out first 4 bits (they're outside an i252).
point_ptr[1][31] &= 0x0F; // Filter out first 4 bits (they're outside an i252).
// We use unchecked methods because the inputs must already be valid points.
let mut state = ProjectivePoint::from_affine_unchecked(
Felt::from_bytes_le(&state_ptr[0]),
Felt::from_bytes_le(&state_ptr[1]),
);
let point = AffinePoint::new_unchecked(
Felt::from_bytes_le(&point_ptr[0]),
Felt::from_bytes_le(&point_ptr[1]),
);
state += &point;
let state = state.to_affine().unwrap();
state_ptr[0] = state.x().to_bytes_le();
state_ptr[1] = state.y().to_bytes_le();
}
/// Compute `ec_state_add_mul(state, scalar, point)` and store the state back.
///
/// # Panics
///
/// This function will panic if either operand is out of range for a felt.
///
/// # Safety
///
/// This function is intended to be called from MLIR, deals with pointers, and is therefore
/// definitely unsafe to use manually.
pub unsafe extern "C" fn cairo_native__libfunc__ec__ec_state_add_mul(
state_ptr: &mut [[u8; 32]; 4],
scalar_ptr: &[u8; 32],
point_ptr: &[[u8; 32]; 2],
) {
state_ptr[0][31] &= 0x0F; // Filter out first 4 bits (they're outside an i252).
state_ptr[1][31] &= 0x0F; // Filter out first 4 bits (they're outside an i252).
let mut point_ptr = *point_ptr;
point_ptr[0][31] &= 0x0F; // Filter out first 4 bits (they're outside an i252).
point_ptr[1][31] &= 0x0F; // Filter out first 4 bits (they're outside an i252).
let mut scalar_ptr = *scalar_ptr;
scalar_ptr[31] &= 0x0F; // Filter out first 4 bits (they're outside an i252).
// Here the points should already be checked as valid, so we can use unchecked.
let mut state = ProjectivePoint::from_affine_unchecked(
Felt::from_bytes_le(&state_ptr[0]),
Felt::from_bytes_le(&state_ptr[1]),
);
let point = ProjectivePoint::from_affine_unchecked(
Felt::from_bytes_le(&point_ptr[0]),
Felt::from_bytes_le(&point_ptr[1]),
);
let scalar = Felt::from_bytes_le(&scalar_ptr);
state += &point.mul(scalar);
let state = state.to_affine().unwrap();
state_ptr[0] = state.x().to_bytes_le();
state_ptr[1] = state.y().to_bytes_le();
}
/// Compute `ec_state_try_finalize_nz(state)` and store the result.
///
/// # Panics
///
/// This function will panic if either operand is out of range for a felt.
///
/// # Safety
///
/// This function is intended to be called from MLIR, deals with pointers, and is therefore
/// definitely unsafe to use manually.
pub unsafe extern "C" fn cairo_native__libfunc__ec__ec_state_try_finalize_nz(
point_ptr: &mut [[u8; 32]; 2],
state_ptr: &[[u8; 32]; 4],
) -> bool {
let mut state_ptr = *state_ptr;
state_ptr[0][31] &= 0x0F; // Filter out first 4 bits (they're outside an i252).
state_ptr[1][31] &= 0x0F; // Filter out first 4 bits (they're outside an i252).
state_ptr[2][31] &= 0x0F; // Filter out first 4 bits (they're outside an i252).
state_ptr[3][31] &= 0x0F; // Filter out first 4 bits (they're outside an i252).
// We use unchecked methods because the inputs must already be valid points.
let state = ProjectivePoint::from_affine_unchecked(
Felt::from_bytes_le(&state_ptr[0]),
Felt::from_bytes_le(&state_ptr[1]),
);
let random = ProjectivePoint::from_affine_unchecked(
Felt::from_bytes_le(&state_ptr[2]),
Felt::from_bytes_le(&state_ptr[3]),
);
if state.x() == random.x() && state.y() == random.y() {
false
} else {
let point = &state - &random;
let point = point.to_affine().unwrap();
point_ptr[0] = point.x().to_bytes_le();
point_ptr[1] = point.y().to_bytes_le();
true
}
}
thread_local! {
pub(crate) static BUILTIN_COSTS: Cell<BuiltinCosts> = const {
// These default values shouldn't be accessible, they will be overriden before entering
// compiled code.
Cell::new(BuiltinCosts {
r#const: 0,
pedersen: 0,
bitwise: 0,
ecop: 0,
poseidon: 0,
add_mod: 0,
mul_mod: 0,
})
};
}
/// Get the costs builtin from the internal thread local.
pub extern "C" fn cairo_native__get_costs_builtin() -> *const [u64; 7] {
BUILTIN_COSTS.with(|x| x.as_ptr()) as *const [u64; 7]
}
// Utility methods for the print runtime function
/// Formats the given felts as a debug string.
fn format_for_debug(mut felts: IntoIter<Felt>) -> String {
let mut items = Vec::new();
while let Some(item) = format_next_item(&mut felts) {
items.push(item);
}
if let [item] = &items[..] {
if item.is_string {
return item.item.clone();
}
}
items
.into_iter()
.map(|item| {
if item.is_string {
format!("{}\n", item.item)
} else {
format!("[DEBUG]\t{}\n", item.item)
}
})
.join("")
}
/// A formatted string representation of anything formattable (e.g. ByteArray, felt, short-string).
pub struct FormattedItem {
/// The formatted string representing the item.
item: String,
/// Whether the item is a string.
is_string: bool,
}
impl FormattedItem {
/// Returns the formatted item as is.
#[must_use]
pub fn get(self) -> String {
self.item
}
/// Wraps the formatted item with quote, if it's a string. Otherwise returns it as is.
#[must_use]
pub fn quote_if_string(self) -> String {
if self.is_string {
format!("\"{}\"", self.item)
} else {
self.item
}
}
}
pub const BYTE_ARRAY_MAGIC: &str =
"46a6158a16a947e5916b2a2ca68501a45e93d7110e81aa2d6438b1c57c879a3";
pub const BYTES_IN_WORD: usize = 31;
/// Formats a string or a short string / `felt252`. Returns the formatted string and a boolean
/// indicating whether it's a string. If can't format the item, returns None.
pub fn format_next_item<T>(values: &mut T) -> Option<FormattedItem>
where
T: Iterator<Item = Felt> + Clone,
{
let first_felt = values.next()?;
if first_felt == Felt::from_hex(BYTE_ARRAY_MAGIC).unwrap() {
if let Some(string) = try_format_string(values) {
return Some(FormattedItem {
item: string,
is_string: true,
});
}
}
Some(FormattedItem {
item: format_short_string(&first_felt),
is_string: false,
})
}
/// Formats a `Felt252`, as a short string if possible.
fn format_short_string(value: &Felt) -> String {
let hex_value = value.to_biguint();
match as_cairo_short_string(value) {
Some(as_string) => format!("{hex_value:#x} ('{as_string}')"),
None => format!("{hex_value:#x}"),
}
}
/// Tries to format a string, represented as a sequence of `Felt252`s.
/// If the sequence is not a valid serialization of a `ByteArray`, returns None and doesn't change the
/// given iterator (`values`).
fn try_format_string<T>(values: &mut T) -> Option<String>
where
T: Iterator<Item = Felt> + Clone,
{
// Clone the iterator and work with the clone. If the extraction of the string is successful,
// change the original iterator to the one we worked with. If not, continue with the
// original iterator at the original point.
let mut cloned_values_iter = values.clone();
let num_full_words = cloned_values_iter.next()?.to_usize()?;
let full_words = cloned_values_iter
.by_ref()
.take(num_full_words)
.collect_vec();
let pending_word = cloned_values_iter.next()?;
let pending_word_len = cloned_values_iter.next()?.to_usize()?;
let full_words_string = full_words
.into_iter()
.map(|word| as_cairo_short_string_ex(&word, BYTES_IN_WORD))
.collect::<Option<Vec<String>>>()?
.join("");
let pending_word_string = as_cairo_short_string_ex(&pending_word, pending_word_len)?;
// Extraction was successful, change the original iterator to the one we worked with.
*values = cloned_values_iter;
Some(format!("{full_words_string}{pending_word_string}"))
}
/// Converts a bigint representing a felt252 to a Cairo short-string.
#[must_use]
pub fn as_cairo_short_string(value: &Felt) -> Option<String> {
let mut as_string = String::default();
let mut is_end = false;
for byte in value.to_biguint().to_bytes_be() {
if byte == 0 {
is_end = true;
} else if is_end {
return None;
} else if byte.is_ascii_graphic() || byte.is_ascii_whitespace() {
as_string.push(byte as char);
} else {
return None;
}
}
Some(as_string)
}
/// Converts a bigint representing a felt252 to a Cairo short-string of the given length.
/// Nulls are allowed and length must be <= 31.
#[must_use]
pub fn as_cairo_short_string_ex(value: &Felt, length: usize) -> Option<String> {
if length == 0 {
return if value.is_zero() {
Some(String::new())
} else {
None
};
}
if length > 31 {
// A short string can't be longer than 31 bytes.
return None;
}
// We pass through biguint as felt252.to_bytes_be() does not trim leading zeros.
let bytes = value.to_biguint().to_bytes_be();
let bytes_len = bytes.len();
if bytes_len > length {
// `value` has more bytes than expected.
return None;
}
let mut as_string = String::new();
for byte in bytes {
if byte == 0 {
as_string.push_str(r"\0");
} else if byte.is_ascii_graphic() || byte.is_ascii_whitespace() {
as_string.push(byte as char);
} else {
as_string.push_str(format!(r"\x{:02x}", byte).as_str());
}
}
// `to_bytes_be` misses starting nulls. Prepend them as needed.
let missing_nulls = length - bytes_len;
as_string.insert_str(0, &r"\0".repeat(missing_nulls));
Some(as_string)
}
#[cfg(test)]
mod tests {
use super::*;
use std::{
env, fs,
io::{Read, Seek},
os::fd::AsRawFd,
};
pub fn felt252_short_str(value: &str) -> Felt {
let values: Vec<_> = value
.chars()
.filter_map(|c| c.is_ascii().then_some(c as u8))
.collect();
assert!(values.len() < 32);
Felt::from_bytes_be_slice(&values)
}
#[test]
fn test_debug_print() {
let dir = env::temp_dir();
fs::remove_file(dir.join("print.txt")).ok();
let mut file = File::create_new(dir.join("print.txt")).unwrap();
{
let fd = file.as_raw_fd();
let data = felt252_short_str("hello world");
let data = data.to_bytes_le();
unsafe { cairo_native__libfunc__debug__print(fd, &data, 1) };
}
file.seek(std::io::SeekFrom::Start(0)).unwrap();
let mut result = String::new();
file.read_to_string(&mut result).unwrap();
assert_eq!(
result,
"[DEBUG]\t0x68656c6c6f20776f726c64 ('hello world')\n"
);
}
#[test]
fn test_pederesen() {
let mut dst = [0; 32];
let lhs = Felt::from(1).to_bytes_le();
let rhs = Felt::from(3).to_bytes_le();
unsafe {
cairo_native__libfunc__pedersen(&mut dst, &lhs, &rhs);
}
assert_eq!(
dst,
[
84, 98, 174, 134, 3, 124, 237, 179, 166, 110, 159, 98, 170, 35, 83, 237, 130, 154,
236, 0, 205, 134, 200, 185, 39, 92, 0, 228, 132, 217, 130, 5
]
)
}
#[test]
fn test_hades_permutation() {
let mut op0 = Felt::from(1).to_bytes_le();
let mut op1 = Felt::from(1).to_bytes_le();
let mut op2 = Felt::from(1).to_bytes_le();
unsafe {
cairo_native__libfunc__hades_permutation(&mut op0, &mut op1, &mut op2);
}
assert_eq!(
Felt::from_bytes_le(&op0),
Felt::from_hex("0x4ebdde1149fcacbb41e4fc342432a48c97994fd045f432ad234ae9279269779")
.unwrap()
);
assert_eq!(
Felt::from_bytes_le(&op1),
Felt::from_hex("0x7f4cec57dd08b69414f7de7dffa230fc90fa3993673c422408af05831e0cc98")
.unwrap()
);
assert_eq!(
Felt::from_bytes_le(&op2),
Felt::from_hex("0x5b5d00fd09caade43caffe70527fa84d5d9cd51e22c2ce115693ecbb5854d6a")
.unwrap()
);
}
#[test]
fn test_dict() {
let dict = unsafe {
cairo_native__dict_new(size_of::<u64>() as u64, align_of::<u64>() as u64, None)
};
let key = Felt::ONE.to_bytes_le();
let mut ptr = ptr::null_mut::<u64>();
assert_eq!(
unsafe { cairo_native__dict_get(dict, &key, (&raw mut ptr).cast()) },
0,
);
assert!(!ptr.is_null());
unsafe { *ptr = 24 };
assert_eq!(
unsafe { cairo_native__dict_get(dict, &key, (&raw mut ptr).cast()) },
1,
);
assert!(!ptr.is_null());
assert_eq!(unsafe { *ptr }, 24);
unsafe { *ptr = 42 };
let mut range_check = 0;
let mut gas = 0;
unsafe { cairo_native__dict_squash(dict, &mut range_check, &mut gas) };
assert_eq!(gas, 4050);
let cloned_dict = unsafe { cairo_native__dict_dup(dict) };
unsafe { cairo_native__dict_drop(dict) };
assert_eq!(
unsafe { cairo_native__dict_get(cloned_dict, &key, (&raw mut ptr).cast()) },
1,
);
assert!(!ptr.is_null());
assert_eq!(unsafe { *ptr }, 42);
unsafe { cairo_native__dict_drop(cloned_dict) };
}
#[test]
fn test_ec__ec_point() {
let mut state = [
Felt::ZERO.to_bytes_le(),
Felt::ZERO.to_bytes_le(),
Felt::ZERO.to_bytes_le(),
Felt::ZERO.to_bytes_le(),
];
unsafe { cairo_native__libfunc__ec__ec_state_init(&mut state) };
let points: &mut [[u8; 32]; 2] = (&mut state[..2]).try_into().unwrap();
let result = unsafe { cairo_native__libfunc__ec__ec_point_try_new_nz(points) };
// point should be valid since it was made with state init
assert!(result);
}
#[test]
fn test_ec__ec_point_add() {
// Test values taken from starknet-rs
let mut state = [
Felt::from_dec_str(
"874739451078007766457464989774322083649278607533249481151382481072868806602",
)
.unwrap()
.to_bytes_le(),
Felt::from_dec_str(
"152666792071518830868575557812948353041420400780739481342941381225525861407",
)
.unwrap()
.to_bytes_le(),
Felt::from_dec_str(
"874739451078007766457464989774322083649278607533249481151382481072868806602",
)
.unwrap()
.to_bytes_le(),
Felt::from_dec_str(
"152666792071518830868575557812948353041420400780739481342941381225525861407",
)
.unwrap()
.to_bytes_le(),
];
let point = [
Felt::from_dec_str(
"874739451078007766457464989774322083649278607533249481151382481072868806602",
)
.unwrap()
.to_bytes_le(),
Felt::from_dec_str(
"152666792071518830868575557812948353041420400780739481342941381225525861407",
)
.unwrap()
.to_bytes_le(),
];
unsafe {
cairo_native__libfunc__ec__ec_state_add(&mut state, &point);
};
assert_eq!(
state[0],
Felt::from_dec_str(
"3324833730090626974525872402899302150520188025637965566623476530814354734325",
)
.unwrap()
.to_bytes_le()
);
assert_eq!(
state[1],
Felt::from_dec_str(
| rust | Apache-2.0 | f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce | 2026-01-04T20:20:54.031924Z | true |
lambdaclass/cairo_native | https://github.com/lambdaclass/cairo_native/blob/f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce/src/executor.rs | src/executor.rs | //! # Executors
//!
//! This module provides methods to execute the programs, either via JIT or compiled ahead
//! of time. It also provides a cache to avoid recompiling previously compiled programs.
pub use self::{aot::AotNativeExecutor, contract::AotContractExecutor, jit::JitNativeExecutor};
use crate::{
arch::{AbiArgument, ValueWithInfoWrapper},
error::{panic::ToNativeAssertError, Error},
execution_result::{
BuiltinStats, ExecutionResult, ADD_MOD_BUILTIN_SIZE, BITWISE_BUILTIN_SIZE,
EC_OP_BUILTIN_SIZE, MUL_MOD_BUILTIN_SIZE, PEDERSEN_BUILTIN_SIZE, POSEIDON_BUILTIN_SIZE,
RANGE_CHECK96_BUILTIN_SIZE, RANGE_CHECK_BUILTIN_SIZE, SEGMENT_ARENA_BUILTIN_SIZE,
},
native_panic,
runtime::BUILTIN_COSTS,
starknet::{handler::StarknetSyscallHandlerCallbacks, StarknetSyscallHandler},
types::TypeBuilder,
utils::{libc_free, BuiltinCosts, RangeExt},
values::Value,
};
use bumpalo::Bump;
use cairo_lang_sierra::{
extensions::{
circuit::CircuitTypeConcrete,
core::{CoreLibfunc, CoreType, CoreTypeConcrete},
starknet::StarknetTypeConcrete,
ConcreteType,
},
ids::ConcreteTypeId,
program::FunctionSignature,
program_registry::ProgramRegistry,
};
use libc::c_void;
use num_bigint::BigInt;
use num_traits::One;
use std::{alloc::Layout, arch::global_asm, ptr::NonNull};
mod aot;
mod contract;
mod jit;
#[cfg(target_arch = "aarch64")]
global_asm!(include_str!("arch/aarch64.s"));
#[cfg(target_arch = "x86_64")]
global_asm!(include_str!("arch/x86_64.s"));
extern "C" {
/// Invoke an AOT or JIT-compiled function.
///
/// The `ret_ptr` argument is only used when the first argument (the actual return pointer) is
/// unused. Used for u8, u16, u32, u64, u128 and felt252, but not for arrays, enums or structs.
#[cfg_attr(not(target_os = "macos"), link_name = "_invoke_trampoline")]
fn invoke_trampoline(
fn_ptr: *const c_void,
args_ptr: *const u64,
args_len: usize,
ret_ptr: *mut u64,
);
}
/// Internal method.
///
/// Invokes the given function by constructing the function call depending on the arguments given.
/// Usually calling a function requires knowing its signature at compile time, but we need to be
/// able to call any given function provided its signature (arguments and return type) at runtime,
/// to do so we have a "trampoline" in the given platform assembly (x86_64, aarch64) which
/// constructs the function call in place.
///
/// To pass the arguments, they are stored in an arena.
#[allow(clippy::too_many_arguments)]
fn invoke_dynamic(
    registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    function_ptr: *const c_void,
    function_signature: &FunctionSignature,
    args: &[Value],
    gas: u64,
    mut syscall_handler: Option<impl StarknetSyscallHandler>,
    find_dict_drop_override: impl Copy + Fn(&ConcreteTypeId) -> Option<extern "C" fn(*mut c_void)>,
) -> Result<ExecutionResult, Error> {
    tracing::info!("Invoking function with signature: {function_signature:?}.")_;
    // Arena backing the serialized argument values; it must stay alive for the
    // whole trampoline call since `invoke_data` holds pointers into it.
    let arena = Bump::new();
    // Flat byte buffer with every argument laid out as consecutive 64-bit words.
    let mut invoke_data = Vec::<u8>::new();
    // Generate return pointer (if necessary).
    //
    // Generated when either:
    // - There are more than one non-zst return values.
    // - All builtins except GasBuiltin and Starknet are ZST.
    // - The unit struct is a ZST.
    // - The return argument is complex.
    let mut ret_types_iter = function_signature
        .ret_types
        .iter()
        .filter_map(|id| {
            let type_info = match registry.get_type(id) {
                Ok(x) => x,
                Err(e) => return Some(Err(e.into())),
            };
            let is_zst = match type_info.is_zst(registry) {
                Ok(x) => x,
                Err(e) => return Some(Err(e)),
            };
            // Keep only return types that are not zero-sized builtins.
            Ok((!(type_info.is_builtin() && is_zst)).then_some(id)).transpose()
        })
        .collect::<Result<Vec<_>, _>>()?
        .into_iter()
        .peekable();
    let num_return_args = ret_types_iter.clone().count();
    // If there is more than one return value, or the return value is _complex_,
    // as defined by the architecture ABI, then we pass a return pointer as
    // the first argument to the program entrypoint.
    let mut return_ptr = if num_return_args > 1
        || ret_types_iter
            .peek()
            .map(|id| registry.get_type(id)?.is_complex(registry))
            .transpose()?
            == Some(true)
    {
        // The return pointer should be able to hold all the return values.
        let layout = ret_types_iter.try_fold(Layout::new::<()>(), |layout, id| {
            let type_info = registry.get_type(id)?;
            Result::<_, Error>::Ok(layout.extend(type_info.layout(registry)?)?.0)
        })?;
        let return_ptr = arena.alloc_layout(layout).cast::<()>();
        // The return pointer itself is the first (hidden) argument.
        return_ptr
            .as_ptr()
            .to_bytes(&mut invoke_data, |_| unreachable!())?;
        Some(return_ptr)
    } else {
        None
    };
    // The Cairo compiler doesn't specify that the cheatcode syscall needs the syscall handler,
    // so we must always allocate it in case it needs it, regardless of whether it's passed
    // as an argument to the entry point or not.
    let mut syscall_handler = syscall_handler
        .as_mut()
        .map(|syscall_handler| StarknetSyscallHandlerCallbacks::new(syscall_handler));
    // We only care for the previous syscall handler if we actually modify it
    #[cfg(feature = "with-cheatcode")]
    let syscall_handler_guard = syscall_handler
        .as_mut()
        .map(|syscall_handler| SyscallHandlerGuard::install(syscall_handler as *mut _));
    // We may be inside a recursive contract, save the possible saved builtin costs to restore it after our call.
    let builtin_costs = BuiltinCosts::default();
    let builtin_costs_guard = BuiltinCostsGuard::install(builtin_costs);
    // Generate argument list.
    let mut iter = args.iter();
    for item in function_signature.param_types.iter().filter_map(|type_id| {
        let type_info = match registry.get_type(type_id) {
            Ok(x) => x,
            Err(e) => return Some(Err(e.into())),
        };
        // Zero-sized parameters take no space in the invoke buffer; skip them.
        match type_info.is_zst(registry) {
            Ok(x) => (!x).then_some(Ok((type_id, type_info))),
            Err(e) => Some(Err(e)),
        }
    }) {
        let (type_id, type_info) = item?;
        // Process gas requirements and syscall handler.
        match type_info {
            CoreTypeConcrete::GasBuiltin(_) => {
                gas.to_bytes(&mut invoke_data, |_| unreachable!())?
            }
            CoreTypeConcrete::Starknet(StarknetTypeConcrete::System(_)) => {
                let syscall_handler = syscall_handler
                    .as_mut()
                    .to_native_assert_error("syscall handler should be available")?;
                (syscall_handler as *mut StarknetSyscallHandlerCallbacks<_>)
                    .to_bytes(&mut invoke_data, |_| unreachable!())?;
            }
            CoreTypeConcrete::BuiltinCosts(_) => {
                builtin_costs.to_bytes(&mut invoke_data, |_| unreachable!())?;
            }
            // Any other builtin is passed as a zero-initialized word.
            type_info if type_info.is_builtin() => {
                0u64.to_bytes(&mut invoke_data, |_| unreachable!())?
            }
            // Regular values are consumed from `args` in declaration order.
            type_info => ValueWithInfoWrapper {
                value: iter
                    .next()
                    .to_native_assert_error("entrypoint argument is missing")?,
                type_id,
                info: type_info,
                arena: &arena,
                registry,
            }
            .to_bytes(&mut invoke_data, find_dict_drop_override)?,
        }
    }
    // Pad invoke data to the 16 byte boundary to avoid segfaults.
    // REGISTER_BYTES is the size of the register-passed prefix; only the part
    // spilled past it needs the 16-byte alignment — presumably because it ends
    // up on the stack (see the trampoline assembly).
    #[cfg(target_arch = "aarch64")]
    const REGISTER_BYTES: usize = 64;
    #[cfg(target_arch = "x86_64")]
    const REGISTER_BYTES: usize = 48;
    if invoke_data.len() > REGISTER_BYTES {
        invoke_data.resize(
            REGISTER_BYTES + (invoke_data.len() - REGISTER_BYTES).next_multiple_of(16),
            0,
        );
    }
    // Invoke the trampoline.
    #[cfg(target_arch = "x86_64")]
    let mut ret_registers = [0; 2];
    #[cfg(target_arch = "aarch64")]
    let mut ret_registers = [0; 4];
    #[allow(unused_mut)]
    let mut run_trampoline = || unsafe {
        invoke_trampoline(
            function_ptr,
            invoke_data.as_ptr().cast(),
            // Length in 64-bit words, not bytes.
            invoke_data.len() >> 3,
            ret_registers.as_mut_ptr(),
        );
    };
    #[cfg(feature = "with-segfault-catcher")]
    crate::utils::safe_runner::run_safely(run_trampoline).map_err(Error::SafeRunner)?;
    #[cfg(not(feature = "with-segfault-catcher"))]
    run_trampoline();
    // Restore the previous syscall handler and builtin costs.
    #[cfg(feature = "with-cheatcode")]
    drop(syscall_handler_guard);
    drop(builtin_costs_guard);
    // Parse final gas.
    //
    // Reads a `T` out of `*ptr` (after aligning it for `T`) and advances the
    // pointer just past the value read.
    unsafe fn read_value<T>(ptr: &mut NonNull<()>) -> &T {
        let align_offset = ptr
            .cast::<u8>()
            .as_ptr()
            .align_offset(std::mem::align_of::<T>());
        let value_ptr = ptr.cast::<u8>().as_ptr().add(align_offset).cast::<T>();
        *ptr = NonNull::new_unchecked(value_ptr.add(1)).cast();
        &*value_ptr
    }
    let mut remaining_gas = None;
    let mut builtin_stats = BuiltinStats::default();
    for type_id in &function_signature.ret_types {
        let type_info = registry.get_type(type_id)?;
        match type_info {
            CoreTypeConcrete::GasBuiltin(_) => {
                remaining_gas = Some(match &mut return_ptr {
                    Some(return_ptr) => unsafe { *read_value::<u64>(return_ptr) },
                    None => {
                        // If there's no return ptr then the function only returned the gas. We don't
                        // need to bother with the syscall handler builtin.
                        ret_registers[0]
                    }
                });
            }
            CoreTypeConcrete::Starknet(StarknetTypeConcrete::System(_)) => {
                // Skip over the syscall handler pointer in the return area.
                if let Some(return_ptr) = &mut return_ptr {
                    unsafe {
                        let ptr = return_ptr.cast::<*mut ()>();
                        *return_ptr = NonNull::new_unchecked(ptr.as_ptr().add(1)).cast();
                    }
                }
            }
            _ if type_info.is_builtin() => {
                if !type_info.is_zst(registry)? {
                    if let CoreTypeConcrete::BuiltinCosts(_) = type_info {
                        // todo: should we use this value? See: https://github.com/lambdaclass/cairo_native/issues/1219
                        let _value = match &mut return_ptr {
                            Some(return_ptr) => unsafe { *read_value::<*mut u64>(return_ptr) },
                            None => ret_registers[0] as *mut u64,
                        };
                    } else {
                        // Each counter is divided by the builtin's cell size to obtain
                        // the number of uses.
                        let value = match &mut return_ptr {
                            Some(return_ptr) => unsafe { *read_value::<u64>(return_ptr) },
                            None => ret_registers[0],
                        } as usize;
                        match type_info {
                            CoreTypeConcrete::RangeCheck(_) => {
                                builtin_stats.range_check = value / RANGE_CHECK_BUILTIN_SIZE
                            }
                            CoreTypeConcrete::Pedersen(_) => {
                                builtin_stats.pedersen = value / PEDERSEN_BUILTIN_SIZE
                            }
                            CoreTypeConcrete::Bitwise(_) => {
                                builtin_stats.bitwise = value / BITWISE_BUILTIN_SIZE
                            }
                            CoreTypeConcrete::EcOp(_) => {
                                builtin_stats.ec_op = value / EC_OP_BUILTIN_SIZE
                            }
                            CoreTypeConcrete::Poseidon(_) => {
                                builtin_stats.poseidon = value / POSEIDON_BUILTIN_SIZE
                            }
                            CoreTypeConcrete::SegmentArena(_) => {
                                builtin_stats.segment_arena = value / SEGMENT_ARENA_BUILTIN_SIZE
                            }
                            CoreTypeConcrete::RangeCheck96(_) => {
                                builtin_stats.range_check96 = value / RANGE_CHECK96_BUILTIN_SIZE
                            }
                            CoreTypeConcrete::Circuit(CircuitTypeConcrete::AddMod(_)) => {
                                builtin_stats.add_mod = value / ADD_MOD_BUILTIN_SIZE
                            }
                            CoreTypeConcrete::Circuit(CircuitTypeConcrete::MulMod(_)) => {
                                builtin_stats.mul_mod = value / MUL_MOD_BUILTIN_SIZE
                            }
                            _ => native_panic!("given type should be a builtin: {type_id:?}"),
                        }
                    }
                }
            }
            // The first non-builtin return type marks the start of the actual
            // return value, handled below.
            _ => break,
        }
    }
    // Parse return values.
    let return_value = function_signature
        .ret_types
        .last()
        .and_then(|ret_type| {
            let type_info = match registry.get_type(ret_type) {
                Ok(x) => x,
                Err(e) => return Some(Err(e.into())),
            };
            if type_info.is_builtin() {
                // Only builtins were returned: fall through to the empty struct below.
                None
            } else {
                Some(parse_result(ret_type, registry, return_ptr, ret_registers))
            }
        })
        .transpose()?
        .unwrap_or_else(|| Value::Struct {
            fields: vec![],
            debug_name: None,
        });
    #[cfg(feature = "with-mem-tracing")]
    crate::utils::mem_tracing::report_stats();
    Ok(ExecutionResult {
        remaining_gas,
        return_value,
        builtin_stats,
    })
}
#[cfg(feature = "with-cheatcode")]
#[derive(Debug)]
struct SyscallHandlerGuard(*mut ());
#[cfg(feature = "with-cheatcode")]
impl SyscallHandlerGuard {
// NOTE: It is the caller's responsibility to ensure that the syscall handler is alive until the
// guard is dropped.
pub fn install<T>(value: *mut T) -> Self {
let previous_value = crate::starknet::SYSCALL_HANDLER_VTABLE.get();
let syscall_handler_ptr = value as *mut ();
crate::starknet::SYSCALL_HANDLER_VTABLE.set(syscall_handler_ptr);
Self(previous_value)
}
}
#[cfg(feature = "with-cheatcode")]
impl Drop for SyscallHandlerGuard {
fn drop(&mut self) {
crate::starknet::SYSCALL_HANDLER_VTABLE.set(self.0);
}
}
/// Guard that saves the active [`BuiltinCosts`] on creation and restores it on drop.
#[derive(Debug)]
struct BuiltinCostsGuard(BuiltinCosts);
impl BuiltinCostsGuard {
    /// Swaps `value` into the `BUILTIN_COSTS` slot, remembering the value it replaced.
    pub fn install(value: BuiltinCosts) -> Self {
        let previous = BUILTIN_COSTS.replace(value);
        Self(previous)
    }
}
impl Drop for BuiltinCostsGuard {
    fn drop(&mut self) {
        // Restore the costs that were active before `install`.
        BUILTIN_COSTS.set(self.0);
    }
}
/// Parses the result by reading from the return ptr the given type.
fn parse_result(
type_id: &ConcreteTypeId,
registry: &ProgramRegistry<CoreType, CoreLibfunc>,
mut return_ptr: Option<NonNull<()>>,
#[cfg(target_arch = "x86_64")] mut ret_registers: [u64; 2],
#[cfg(target_arch = "aarch64")] mut ret_registers: [u64; 4],
) -> Result<Value, Error> {
let type_info = registry.get_type(type_id)?;
// Align the pointer to the actual return value.
if let Some(return_ptr) = &mut return_ptr {
let layout = type_info.layout(registry)?;
let align_offset = return_ptr
.cast::<u8>()
.as_ptr()
.align_offset(layout.align());
*return_ptr = unsafe {
NonNull::new(return_ptr.cast::<u8>().as_ptr().add(align_offset))
.to_native_assert_error("return pointer should not be null")?
.cast()
};
}
match type_info {
CoreTypeConcrete::Array(_) => Ok(Value::from_ptr(
return_ptr.to_native_assert_error("return pointer should be valid")?,
type_id,
registry,
true,
)?),
CoreTypeConcrete::Box(info) => unsafe {
let ptr =
return_ptr.unwrap_or_else(|| NonNull::new_unchecked(ret_registers[0] as *mut ()));
let value = Value::from_ptr(ptr, &info.ty, registry, true)?;
libc_free(ptr.cast().as_ptr());
Ok(value)
},
CoreTypeConcrete::EcPoint(_) | CoreTypeConcrete::EcState(_) => Ok(Value::from_ptr(
return_ptr.to_native_assert_error("return pointer should be valid")?,
type_id,
registry,
true,
)?),
CoreTypeConcrete::Felt252(_)
| CoreTypeConcrete::Starknet(
StarknetTypeConcrete::ClassHash(_)
| StarknetTypeConcrete::ContractAddress(_)
| StarknetTypeConcrete::StorageAddress(_)
| StarknetTypeConcrete::StorageBaseAddress(_),
) => match return_ptr {
Some(return_ptr) => Ok(Value::from_ptr(return_ptr, type_id, registry, true)?),
None => {
#[cfg(target_arch = "x86_64")]
// Since x86_64's return values hold at most two different 64bit registers,
// everything bigger than u128 will be returned by memory, therefore making
// this branch is unreachable on that architecture.
return Err(Error::ParseAttributeError);
#[cfg(target_arch = "aarch64")]
Ok(Value::Felt252({
let data = unsafe {
std::mem::transmute::<&mut [u64; 4], &mut [u8; 32]>(&mut ret_registers)
};
data[31] &= 0x0F; // Filter out first 4 bits (they're outside an i252).
starknet_types_core::felt::Felt::from_bytes_le(data)
}))
}
},
CoreTypeConcrete::Bytes31(_) => match return_ptr {
Some(return_ptr) => Ok(Value::from_ptr(return_ptr, type_id, registry, true)?),
None => {
#[cfg(target_arch = "x86_64")]
// Since x86_64's return values hold at most two different 64bit registers,
// everything bigger than u128 will be returned by memory, therefore making
// this branch is unreachable on that architecture.
return Err(Error::ParseAttributeError);
#[cfg(target_arch = "aarch64")]
Ok(Value::Bytes31(unsafe {
*std::mem::transmute::<&[u64; 4], &[u8; 31]>(&ret_registers)
}))
}
},
CoreTypeConcrete::BoundedInt(info) => match return_ptr {
Some(return_ptr) => Ok(Value::from_ptr(return_ptr, type_id, registry, true)?),
None => {
let mut data = if info.range.offset_bit_width() <= 64 {
BigInt::from(ret_registers[0])
} else {
BigInt::from(((ret_registers[1] as u128) << 64) | ret_registers[0] as u128)
};
data &= (BigInt::one() << info.range.offset_bit_width()) - BigInt::one();
data += &info.range.lower;
Ok(Value::BoundedInt {
value: data.into(),
range: info.range.clone(),
})
}
},
CoreTypeConcrete::Uint8(_) => match return_ptr {
Some(return_ptr) => Ok(Value::Uint8(unsafe { *return_ptr.cast().as_ref() })),
None => Ok(Value::Uint8(ret_registers[0] as u8)),
},
CoreTypeConcrete::Uint16(_) => match return_ptr {
Some(return_ptr) => Ok(Value::Uint16(unsafe { *return_ptr.cast().as_ref() })),
None => Ok(Value::Uint16(ret_registers[0] as u16)),
},
CoreTypeConcrete::Uint32(_) => match return_ptr {
Some(return_ptr) => Ok(Value::Uint32(unsafe { *return_ptr.cast().as_ref() })),
None => Ok(Value::Uint32(ret_registers[0] as u32)),
},
CoreTypeConcrete::Uint64(_) => match return_ptr {
Some(return_ptr) => Ok(Value::Uint64(unsafe { *return_ptr.cast().as_ref() })),
None => Ok(Value::Uint64(ret_registers[0])),
},
CoreTypeConcrete::Uint128(_) => match return_ptr {
Some(return_ptr) => Ok(Value::Uint128(unsafe { *return_ptr.cast().as_ref() })),
None => Ok(Value::Uint128(
((ret_registers[1] as u128) << 64) | ret_registers[0] as u128,
)),
},
CoreTypeConcrete::Sint8(_) => match return_ptr {
Some(return_ptr) => Ok(Value::Sint8(unsafe { *return_ptr.cast().as_ref() })),
None => Ok(Value::Sint8(ret_registers[0] as i8)),
},
CoreTypeConcrete::Sint16(_) => match return_ptr {
Some(return_ptr) => Ok(Value::Sint16(unsafe { *return_ptr.cast().as_ref() })),
None => Ok(Value::Sint16(ret_registers[0] as i16)),
},
CoreTypeConcrete::Sint32(_) => match return_ptr {
Some(return_ptr) => Ok(Value::Sint32(unsafe { *return_ptr.cast().as_ref() })),
None => Ok(Value::Sint32(ret_registers[0] as i32)),
},
CoreTypeConcrete::Sint64(_) => match return_ptr {
Some(return_ptr) => Ok(Value::Uint64(unsafe { *return_ptr.cast().as_ref() })),
None => Ok(Value::Sint64(ret_registers[0] as i64)),
},
CoreTypeConcrete::Sint128(_) => match return_ptr {
Some(return_ptr) => Ok(Value::Uint128(unsafe { *return_ptr.cast().as_ref() })),
None => Ok(Value::Sint128(
((ret_registers[1] as i128) << 64) | ret_registers[0] as i128,
)),
},
CoreTypeConcrete::NonZero(info) => {
parse_result(&info.ty, registry, return_ptr, ret_registers)
}
CoreTypeConcrete::Nullable(info) => unsafe {
let ptr = return_ptr.map_or(ret_registers[0] as *mut (), |x| {
*x.cast::<*mut ()>().as_ref()
});
if ptr.is_null() {
Ok(Value::Null)
} else {
let ptr = NonNull::new_unchecked(ptr);
let value = Value::from_ptr(ptr, &info.ty, registry, true)?;
libc_free(ptr.as_ptr().cast());
Ok(value)
}
},
CoreTypeConcrete::Enum(info) => {
let (_, tag_layout, variant_layouts) =
crate::types::r#enum::get_layout_for_variants(registry, &info.variants)?;
let (tag, ptr) = if type_info.is_memory_allocated(registry)? || return_ptr.is_some() {
let ptr = return_ptr.to_native_assert_error("return pointer should be valid")?;
let tag = unsafe {
match tag_layout.size() {
0 => 0,
1 => *ptr.cast::<u8>().as_ref() as usize,
2 => *ptr.cast::<u16>().as_ref() as usize,
4 => *ptr.cast::<u32>().as_ref() as usize,
8 => *ptr.cast::<u64>().as_ref() as usize,
_ => return Err(Error::ParseAttributeError),
}
};
// Filter out bits that are not part of the enum's tag.
let tag = tag
& 1usize
.wrapping_shl(info.variants.len().next_power_of_two().trailing_zeros())
.wrapping_sub(1);
(
tag,
Ok(unsafe {
NonNull::new_unchecked(
ptr.cast::<u8>()
.as_ptr()
.add(tag_layout.extend(variant_layouts[tag])?.1),
)
.cast()
}),
)
} else {
match info.variants.len() {
0 | 1 => (0, Err(0)),
_ => (
match tag_layout.size() {
1 => ret_registers[0] as u8 as usize,
2 => ret_registers[0] as u16 as usize,
4 => ret_registers[0] as u32 as usize,
8 => ret_registers[0] as usize,
_ => return Err(Error::ParseAttributeError),
},
Err(1),
),
}
};
let value = match ptr {
Ok(ptr) => Box::new(Value::from_ptr(ptr, &info.variants[tag], registry, true)?),
Err(offset) => {
ret_registers.copy_within(offset.., 0);
Box::new(parse_result(
&info.variants[tag],
registry,
None,
ret_registers,
)?)
}
};
Ok(Value::Enum {
tag,
value,
debug_name: Some(type_info.info().long_id.to_string()),
})
}
CoreTypeConcrete::Struct(info) => {
if info.members.is_empty() {
Ok(Value::Struct {
fields: Vec::new(),
debug_name: Some(type_info.info().long_id.to_string()),
})
} else {
Ok(Value::from_ptr(
return_ptr.to_native_assert_error("return pointer should be valid")?,
type_id,
registry,
true,
)?)
}
}
CoreTypeConcrete::Felt252Dict(_) | CoreTypeConcrete::SquashedFelt252Dict(_) => unsafe {
let ptr = return_ptr
.unwrap_or_else(|| NonNull::new_unchecked((&raw mut ret_registers[0]) as *mut ()));
Ok(Value::from_ptr(ptr, type_id, registry, true)?)
},
CoreTypeConcrete::Snapshot(info) => {
parse_result(&info.ty, registry, return_ptr, ret_registers)
}
// Builtins are handled before the call to parse_result
// and should not be reached here.
CoreTypeConcrete::Bitwise(_)
| CoreTypeConcrete::Const(_)
| CoreTypeConcrete::EcOp(_)
| CoreTypeConcrete::GasBuiltin(_)
| CoreTypeConcrete::BuiltinCosts(_)
| CoreTypeConcrete::RangeCheck(_)
| CoreTypeConcrete::Pedersen(_)
| CoreTypeConcrete::Poseidon(_)
| CoreTypeConcrete::SegmentArena(_)
| CoreTypeConcrete::Starknet(StarknetTypeConcrete::System(_)) => {
native_panic!("builtins should have been handled before")
}
CoreTypeConcrete::Felt252DictEntry(_)
| CoreTypeConcrete::Span(_)
| CoreTypeConcrete::Uninitialized(_)
| CoreTypeConcrete::Coupon(_)
| CoreTypeConcrete::Starknet(_)
| CoreTypeConcrete::Uint128MulGuarantee(_)
| CoreTypeConcrete::Circuit(_)
| CoreTypeConcrete::RangeCheck96(_) => {
native_panic!("range check 96 not yet implemented as results")
}
// 2.9.0
CoreTypeConcrete::IntRange(_) => native_panic!("int range not yet implemented as results"),
// 2.11.1
CoreTypeConcrete::Blake(_) => native_panic!("blake not yet implemented as results"),
// 2.12.0
CoreTypeConcrete::QM31(_) => native_panic!("qm31 not yet implemented as results"),
CoreTypeConcrete::GasReserve(_) => {
native_panic!("gas reserve not yet implemented as results")
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::{
context::NativeContext, load_cairo, load_starknet, starknet_stub::StubSyscallHandler,
OptLevel,
};
use cairo_lang_sierra::program::Program;
use rstest::*;
use starknet_types_core::felt::Felt;
    /// Fixture: a minimal Cairo program with two entry points — `run_test`, which
    /// returns the constant `42`, and `get_block_hash`, which exercises a Starknet
    /// syscall. The Cairo source below is compiled by the `load_cairo!` macro.
    #[fixture]
    fn program() -> Program {
        let (_, program) = load_cairo! {
            use starknet::{SyscallResultTrait, get_block_hash_syscall};
            fn run_test() -> felt252 {
                42
            }
            fn get_block_hash() -> felt252 {
                get_block_hash_syscall(1).unwrap_syscall()
            }
        };
        program
    }
    /// Fixture: a minimal Starknet contract exposing a single view entry point
    /// (`get`) that returns the constant `42`. Compiled via `load_starknet!`.
    #[fixture]
    fn starknet_program() -> Program {
        let (_, program) = load_starknet! {
            #[starknet::interface]
            trait ISimpleStorage<TContractState> {
                fn get(self: @TContractState) -> u128;
            }
            #[starknet::contract]
            mod contract {
                #[storage]
                struct Storage {}
                #[abi(embed_v0)]
                impl ISimpleStorageImpl of super::ISimpleStorage<ContractState> {
                    fn get(self: @ContractState) -> u128 {
                        42
                    }
                }
            }
        };
        program
    }
#[rstest]
fn test_invoke_dynamic_aot_native_executor(program: Program) {
let native_context = NativeContext::new();
let module = native_context
.compile(&program, false, Some(Default::default()), None)
.expect("failed to compile context");
let executor = AotNativeExecutor::from_native_module(module, OptLevel::default()).unwrap();
// The first function in the program is `run_test`.
let entrypoint_function_id = &program.funcs.first().expect("should have a function").id;
let result = executor
.invoke_dynamic(entrypoint_function_id, &[], Some(u64::MAX))
.unwrap();
assert_eq!(result.return_value, Value::Felt252(Felt::from(42)));
}
#[rstest]
fn test_invoke_dynamic_jit_native_executor(program: Program) {
let native_context = NativeContext::new();
let module = native_context
.compile(&program, false, None, None)
.expect("failed to compile context");
let executor = JitNativeExecutor::from_native_module(module, OptLevel::default()).unwrap();
// The first function in the program is `run_test`.
let entrypoint_function_id = &program.funcs.first().expect("should have a function").id;
let result = executor
.invoke_dynamic(entrypoint_function_id, &[], Some(u64::MAX))
.unwrap();
assert_eq!(result.return_value, Value::Felt252(Felt::from(42)));
}
#[rstest]
fn test_invoke_contract_dynamic_aot(starknet_program: Program) {
let native_context = NativeContext::new();
let module = native_context
.compile(&starknet_program, false, Some(Default::default()), None)
.expect("failed to compile context");
let executor = AotNativeExecutor::from_native_module(module, OptLevel::default()).unwrap();
let entrypoint_function_id = &starknet_program
.funcs
.iter()
.find(|f| {
f.id.debug_name
.as_ref()
.map(|name| name.contains("__wrapper__ISimpleStorageImpl__get"))
.unwrap_or_default()
})
.expect("should have a function")
.id;
let result = executor
.invoke_contract_dynamic(
entrypoint_function_id,
&[],
Some(u64::MAX),
&mut StubSyscallHandler::default(),
)
.unwrap();
assert_eq!(result.return_values, vec![Felt::from(42)]);
}
#[rstest]
fn test_invoke_contract_dynamic_jit(starknet_program: Program) {
let native_context = NativeContext::new();
let module = native_context
.compile(&starknet_program, false, Some(Default::default()), None)
.expect("failed to compile context");
let executor = JitNativeExecutor::from_native_module(module, OptLevel::default()).unwrap();
let entrypoint_function_id = &starknet_program
.funcs
.iter()
.find(|f| {
f.id.debug_name
.as_ref()
.map(|name| name.contains("__wrapper__ISimpleStorageImpl__get"))
.unwrap_or_default()
})
.expect("should have a function")
.id;
let result = executor
.invoke_contract_dynamic(
entrypoint_function_id,
&[],
Some(u64::MAX),
&mut StubSyscallHandler::default(),
)
.unwrap();
| rust | Apache-2.0 | f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce | 2026-01-04T20:20:54.031924Z | true |
lambdaclass/cairo_native | https://github.com/lambdaclass/cairo_native/blob/f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce/src/cache.rs | src/cache.rs | pub use self::{aot::AotProgramCache, jit::JitProgramCache};
use std::hash::Hash;
pub mod aot;
pub mod jit;
/// A compiled-program cache keyed by `K`, wrapping either the ahead-of-time or
/// the just-in-time backend cache.
#[derive(Debug)]
pub enum ProgramCache<'a, K>
where
    K: PartialEq + Eq + Hash,
{
    /// Cache of AOT-compiled programs.
    Aot(AotProgramCache<'a, K>),
    /// Cache of JIT-compiled programs.
    Jit(JitProgramCache<'a, K>),
}
impl<'a, K> From<AotProgramCache<'a, K>> for ProgramCache<'a, K>
where
K: PartialEq + Eq + Hash,
{
fn from(value: AotProgramCache<'a, K>) -> Self {
Self::Aot(value)
}
}
impl<'a, K> From<JitProgramCache<'a, K>> for ProgramCache<'a, K>
where
K: PartialEq + Eq + Hash,
{
fn from(value: JitProgramCache<'a, K>) -> Self {
Self::Jit(value)
}
}
| rust | Apache-2.0 | f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce | 2026-01-04T20:20:54.031924Z | false |
lambdaclass/cairo_native | https://github.com/lambdaclass/cairo_native/blob/f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce/src/context.rs | src/context.rs | use crate::{
clone_option_mut,
error::{panic::ToNativeAssertError, Error},
ffi::{get_data_layout_rep, get_target_triple},
metadata::{gas::GasMetadata, runtime_bindings::RuntimeBindingsMeta, MetadataStorage},
module::NativeModule,
native_assert,
statistics::Statistics,
utils::{run_pass_manager, walk_ir::walk_mlir_operations},
};
use cairo_lang_sierra::{
extensions::core::{CoreLibfunc, CoreType},
program::Program,
program_registry::ProgramRegistry,
};
use cairo_lang_sierra_to_casm::metadata::MetadataComputationConfig;
use cairo_lang_sierra_type_size::ProgramRegistryInfo;
use llvm_sys::target::{
LLVM_InitializeAllAsmPrinters, LLVM_InitializeAllTargetInfos, LLVM_InitializeAllTargetMCs,
LLVM_InitializeAllTargets,
};
use melior::{
dialect::DialectRegistry,
ir::{
attribute::StringAttribute,
operation::{OperationBuilder, OperationPrintingFlags},
Attribute, AttributeLike, Block, Identifier, Location, Module, Region,
},
utility::{register_all_dialects, register_all_llvm_translations, register_all_passes},
Context,
};
use mlir_sys::{
mlirDisctinctAttrCreate, mlirLLVMDICompileUnitAttrGet, mlirLLVMDIFileAttrGet,
mlirLLVMDIModuleAttrGet, MlirLLVMDIEmissionKind_MlirLLVMDIEmissionKindFull,
MlirLLVMDINameTableKind_MlirLLVMDINameTableKindDefault,
};
use std::{sync::OnceLock, time::Instant};
/// Context of IRs, dialects and passes for Cairo programs compilation.
#[derive(Debug, Eq, PartialEq)]
pub struct NativeContext {
    // Owned MLIR context; modules compiled through this struct borrow from it.
    context: Context,
}
// SAFETY(review): `melior::Context` wraps a raw MLIR context; these impls assert it can
// be moved to and referenced from other threads. That depends on MLIR's threading
// guarantees for the context — TODO confirm against the MLIR/melior documentation.
unsafe impl Send for NativeContext {}
unsafe impl Sync for NativeContext {}
impl Default for NativeContext {
fn default() -> Self {
Self::new()
}
}
impl NativeContext {
    /// Creates a context with all dialects, passes and LLVM translations registered.
    pub fn new() -> Self {
        let context = initialize_mlir();
        Self { context }
    }
    /// Returns a reference to the underlying MLIR context.
    pub const fn context(&self) -> &Context {
        &self.context
    }
    /// Compiles a sierra program into MLIR and then lowers to LLVM.
    /// Returns the corresponding NativeModule struct.
    ///
    /// If `ignore_debug_names` is true then debug names will not be added to function names.
    /// Mainly useful for the ContractExecutor.
    pub fn compile(
        &'_ self,
        program: &Program,
        ignore_debug_names: bool,
        gas_metadata_config: Option<MetadataComputationConfig>,
        stats: Option<&mut Statistics>,
    ) -> Result<NativeModule<'_>, Error> {
        // Process-wide, one-time LLVM target initialization.
        static INITIALIZED: OnceLock<()> = OnceLock::new();
        INITIALIZED.get_or_init(|| unsafe {
            LLVM_InitializeAllTargets();
            LLVM_InitializeAllTargetInfos();
            LLVM_InitializeAllTargetMCs();
            LLVM_InitializeAllAsmPrinters();
            tracing::debug!("initialized llvm targets");
        });
        let target_triple = get_target_triple();
        // The module's single region starts with one empty block.
        let module_region = Region::new();
        module_region.append_block(Block::new(&[]));
        let data_layout_ret = &get_data_layout_rep()?;
        // Distinct attribute identifying the debug-info compile unit.
        // (`mlirDisctinctAttrCreate` is the FFI symbol's actual spelling in mlir-sys.)
        let di_unit_id = unsafe {
            let id = StringAttribute::new(&self.context, "compile_unit_id").to_raw();
            mlirDisctinctAttrCreate(id)
        };
        // Build the top-level `builtin.module` op carrying target triple, data layout
        // and DWARF-style debug-info attributes.
        let op = OperationBuilder::new(
            "builtin.module",
            Location::fused(
                &self.context,
                &[Location::new(&self.context, "program.sierra", 0, 0)],
                {
                    let file_attr = unsafe {
                        Attribute::from_raw(mlirLLVMDIFileAttrGet(
                            self.context.to_raw(),
                            StringAttribute::new(&self.context, "program.sierra").to_raw(),
                            StringAttribute::new(&self.context, "").to_raw(),
                        ))
                    };
                    unsafe {
                        let di_unit = mlirLLVMDICompileUnitAttrGet(
                            self.context.to_raw(),
                            di_unit_id,
                            0x1c, // rust
                            file_attr.to_raw(),
                            StringAttribute::new(&self.context, "cairo-native").to_raw(),
                            false,
                            MlirLLVMDIEmissionKind_MlirLLVMDIEmissionKindFull,
                            MlirLLVMDINameTableKind_MlirLLVMDINameTableKindDefault,
                        );
                        let context = &self.context;
                        let di_module = mlirLLVMDIModuleAttrGet(
                            context.to_raw(),
                            file_attr.to_raw(),
                            di_unit,
                            StringAttribute::new(context, "LLVMDialectModule").to_raw(),
                            StringAttribute::new(context, "").to_raw(),
                            StringAttribute::new(context, "").to_raw(),
                            StringAttribute::new(context, "").to_raw(),
                            0,
                            false,
                        );
                        Attribute::from_raw(di_module)
                    }
                },
            ),
        )
        .add_attributes(&[
            (
                Identifier::new(&self.context, "llvm.target_triple"),
                StringAttribute::new(&self.context, &target_triple).into(),
            ),
            (
                Identifier::new(&self.context, "llvm.data_layout"),
                StringAttribute::new(&self.context, data_layout_ret).into(),
            ),
        ])
        .add_regions([module_region])
        .build()?;
        native_assert!(op.verify(), "module operation should be valid");
        let mut module = Module::from_operation(op)
            .to_native_assert_error("value should be module operation")?;
        let mut metadata = MetadataStorage::new();
        // Make the runtime library available.
        metadata.insert(RuntimeBindingsMeta::default());
        // We assume that GasMetadata will be always present when the program uses the gas builtin.
        let gas_metadata = GasMetadata::new(
            program,
            &ProgramRegistryInfo::new(program)?,
            gas_metadata_config,
        )?;
        // Unwrapping here is not necessary since the insertion will only fail if there was
        // already some metadata of the same type.
        metadata.insert(gas_metadata);
        #[cfg(feature = "with-libfunc-profiling")]
        metadata.insert(crate::metadata::profiler::ProfilerMeta::new());
        // Create the Sierra program registry
        let registry = ProgramRegistry::<CoreType, CoreLibfunc>::new(program)?;
        // Lower Sierra to MLIR, timing the pass for the statistics report.
        let pre_sierra_to_mlir_instant = Instant::now();
        crate::compile(
            &self.context,
            &module,
            program,
            &registry,
            &mut metadata,
            unsafe { Attribute::from_raw(di_unit_id) },
            ignore_debug_names,
            clone_option_mut!(stats),
        )?;
        let sierra_to_mlir_time = pre_sierra_to_mlir_instant.elapsed().as_millis();
        // `Some(&mut ref mut stats)` reborrows the inner `&mut Statistics` without
        // consuming the `Option`, so `stats` remains usable below.
        if let Some(&mut ref mut stats) = stats {
            stats.compilation_sierra_to_mlir_time_ms = Some(sierra_to_mlir_time);
        }
        // Optional debug dump of the MLIR before running the pass manager.
        if let Ok(x) = std::env::var("NATIVE_DEBUG_DUMP") {
            if x == "1" || x == "true" {
                std::fs::write("dump-prepass.mlir", module.as_operation().to_string())?;
                std::fs::write(
                    "dump-prepass-debug-valid.mlir",
                    module.as_operation().to_string_with_flags(
                        OperationPrintingFlags::new().enable_debug_info(true, false),
                    )?,
                )?;
                std::fs::write(
                    "dump-prepass-debug-pretty.mlir",
                    module.as_operation().to_string_with_flags(
                        OperationPrintingFlags::new().enable_debug_info(true, false),
                    )?,
                )?;
            }
        }
        if let Some(&mut ref mut stats) = stats {
            let mut operations = 0;
            walk_mlir_operations(module.as_operation(), &mut |_| operations += 1);
            stats.mlir_operation_count = Some(operations)
        }
        // Run the MLIR pass pipeline (lowering towards the LLVM dialect), timed.
        let pre_mlir_passes_instant = Instant::now();
        run_pass_manager(&self.context, &mut module)?;
        let mlir_passes_time = pre_mlir_passes_instant.elapsed().as_millis();
        if let Some(&mut ref mut stats) = stats {
            stats.compilation_mlir_passes_time_ms = Some(mlir_passes_time);
        }
        // Optional debug dump of the MLIR after the pass manager.
        if let Ok(x) = std::env::var("NATIVE_DEBUG_DUMP") {
            if x == "1" || x == "true" {
                std::fs::write("dump.mlir", module.as_operation().to_string())?;
                std::fs::write(
                    "dump-debug-pretty.mlir",
                    module.as_operation().to_string_with_flags(
                        OperationPrintingFlags::new().enable_debug_info(true, false),
                    )?,
                )?;
                std::fs::write(
                    "dump-debug.mlir",
                    module.as_operation().to_string_with_flags(
                        OperationPrintingFlags::new().enable_debug_info(true, false),
                    )?,
                )?;
            }
        }
        Ok(NativeModule::new(module, registry, metadata))
    }
}
/// Initialize an MLIR context.
///
/// Registers all available dialects, passes and LLVM translations so the
/// returned context is ready for compilation.
pub fn initialize_mlir() -> Context {
    let context = Context::new();
    let dialects = DialectRegistry::new();
    register_all_dialects(&dialects);
    context.append_dialect_registry(&dialects);
    context.load_all_available_dialects();
    register_all_passes();
    register_all_llvm_translations(&context);
    context
}
| rust | Apache-2.0 | f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce | 2026-01-04T20:20:54.031924Z | false |
lambdaclass/cairo_native | https://github.com/lambdaclass/cairo_native/blob/f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce/src/docs.rs | src/docs.rs | //! # Cairo Native Compiler and Execution Engine
#![cfg(doc)]
// Each section below pulls one chapter of the book-style documentation (under `docs/`)
// into rustdoc. The empty modules exist only as anchors for the `#[doc]` includes,
// and `aquamarine` renders the mermaid diagrams embedded in those markdown files.
#[allow(clippy::needless_doctest_main)]
#[cfg_attr(doc, aquamarine::aquamarine)]
#[doc = include_str!("../docs/overview.md")]
pub mod section01 {}
#[cfg_attr(doc, aquamarine::aquamarine)]
#[doc = include_str!("../docs/compilation_walkthrough.md")]
pub mod section02 {}
#[cfg_attr(doc, aquamarine::aquamarine)]
#[doc = include_str!("../docs/execution_walkthrough.md")]
pub mod section03 {}
#[cfg_attr(doc, aquamarine::aquamarine)]
#[doc = include_str!("../docs/gas_builtin_accounting.md")]
pub mod section04 {}
#[cfg_attr(doc, aquamarine::aquamarine)]
#[doc = include_str!("../docs/implementing_libfuncs.md")]
pub mod section05 {}
#[cfg_attr(doc, aquamarine::aquamarine)]
#[doc = include_str!("../docs/debugging.md")]
pub mod section06 {}
#[cfg_attr(doc, aquamarine::aquamarine)]
#[doc = include_str!("../docs/sierra.md")]
pub mod section07 {}
#[cfg_attr(doc, aquamarine::aquamarine)]
#[doc = include_str!("../docs/mlir.md")]
pub mod section08 {}
| rust | Apache-2.0 | f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce | 2026-01-04T20:20:54.031924Z | false |
lambdaclass/cairo_native | https://github.com/lambdaclass/cairo_native/blob/f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce/src/metadata.rs | src/metadata.rs | //! # Code generation metadata
//!
//! The metadata is used for various stuff that would be otherwise difficult or plain impossible
//! with the current design, such as:
//! - Pass compile-time constants that would otherwise have to be hardcoded in various places (ex.
//! [PrimeModuloMeta](self::prime_modulo)).
//! - Declare FFI bindings to external libraries (ex.
//! [ReallocBindingsMeta](self::realloc_bindings)).
//! - Pass extra compilation info to the libfunc generators (ex.
//! [TailRecursionMeta](self::tail_recursion)).
use std::{
any::{Any, TypeId},
collections::{hash_map::Entry, HashMap},
};
// Concrete metadata kinds stored in `MetadataStorage`; see the module-level docs above for
// examples of how some of them are used.
pub mod auto_breakpoint;
pub mod debug_utils;
pub mod drop_overrides;
pub mod dup_overrides;
pub mod enum_snapshot_variants;
pub mod felt252_dict;
pub mod gas;
pub mod profiler;
pub mod realloc_bindings;
pub mod runtime_bindings;
pub mod tail_recursion;
pub mod trace_dump;
/// Metadata container.
///
/// Stores at most one metadata value per Rust type, keyed by [`TypeId`]. Access is always
/// type-driven: callers ask for some `T` and get the single stored `T`, if any.
#[cfg_attr(not(feature = "with-debug-utils"), derive(Default))]
#[derive(Debug)]
pub struct MetadataStorage {
    // Type-indexed storage. Invariant: the boxed value's type always matches its key.
    entries: HashMap<TypeId, Box<dyn Any>>,
}

impl MetadataStorage {
    /// Create an empty metadata container.
    pub fn new() -> Self {
        Self::default()
    }

    /// Insert some metadata and return a mutable reference.
    ///
    /// The insertion will fail if there is already some metadata with the same type, in which case
    /// it'll return `None`.
    pub fn insert<T>(&mut self, meta: T) -> Option<&mut T>
    where
        T: Any,
    {
        // Single probe through the entry API: `VacantEntry::insert` hands back a reference to
        // the just-inserted box, avoiding the second lookup the previous implementation did via
        // `get_mut` right after inserting.
        match self.entries.entry(TypeId::of::<T>()) {
            Entry::Vacant(e) => e.insert(Box::new(meta)).downcast_mut::<T>(),
            Entry::Occupied(_) => None,
        }
    }

    /// Remove some metadata and return its last value.
    ///
    /// The removal will fail if there is no metadata with the requested type, in which case it'll
    /// return `None`.
    pub fn remove<T>(&mut self) -> Option<T>
    where
        T: Any,
    {
        self.entries.remove(&TypeId::of::<T>()).map(|meta| {
            // Cannot fail thanks to the key/value type invariant.
            *(Box::<(dyn Any + 'static)>::downcast::<T>(meta).expect(
                "attempt to downcast a boxed value to a type which does not match the actual",
            ))
        })
    }

    /// Retrieve a reference to some metadata.
    ///
    /// The retrieval will fail if there is no metadata with the requested type, in which case it'll
    /// return `None`.
    pub fn get<T>(&self) -> Option<&T>
    where
        T: Any,
    {
        self.entries.get(&TypeId::of::<T>()).map(|meta| {
            meta.downcast_ref::<T>().expect(
                "attempt to downcast a boxed value to a type which does not match the actual",
            )
        })
    }

    /// Retrieve a mutable reference to some metadata.
    ///
    /// The retrieval will fail if there is no metadata with the requested type, in which case it'll
    /// return `None`.
    pub fn get_mut<T>(&mut self) -> Option<&mut T>
    where
        T: Any,
    {
        self.entries.get_mut(&TypeId::of::<T>()).map(|meta| {
            meta.downcast_mut::<T>()
                .expect("the given type does not match the actual")
        })
    }

    /// Retrieve a mutable reference to some metadata, inserting the value produced by `meta_gen`
    /// first if no metadata of type `T` exists yet.
    pub fn get_or_insert_with<T>(&mut self, meta_gen: impl FnOnce() -> T) -> &mut T
    where
        T: Any,
    {
        self.entries
            .entry(TypeId::of::<T>())
            .or_insert_with(|| Box::new(meta_gen()))
            .downcast_mut::<T>()
            .expect("the given type does not match the actual")
    }
}
#[cfg(feature = "with-debug-utils")]
impl Default for MetadataStorage {
    /// With debug utilities enabled, a fresh storage comes pre-seeded with `DebugUtils` metadata.
    fn default() -> Self {
        let mut storage = Self {
            entries: Default::default(),
        };
        storage.insert(debug_utils::DebugUtils::default());
        storage
    }
}
#[cfg(test)]
mod test {
    use super::{runtime_bindings::RuntimeBindingsMeta, *};

    #[test]
    fn runtime_library_insert_works() {
        // Inserting into an empty storage must succeed and hand back a reference.
        let mut storage = MetadataStorage::new();
        let inserted = storage.insert(RuntimeBindingsMeta::default());
        assert!(inserted.is_some());
    }

    #[test]
    fn runtime_library_insert_non_vacant() {
        let mut storage = MetadataStorage::new();
        // The first insertion of a given type succeeds...
        assert!(storage.insert(RuntimeBindingsMeta::default()).is_some());
        // ...but a second insertion of the same type is rejected.
        assert!(storage.insert(RuntimeBindingsMeta::default()).is_none());
    }
}
| rust | Apache-2.0 | f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce | 2026-01-04T20:20:54.031924Z | false |
lambdaclass/cairo_native | https://github.com/lambdaclass/cairo_native/blob/f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce/src/statistics.rs | src/statistics.rs | use cairo_lang_sierra::extensions::circuit::{CircuitInfo, GateOffsets};
use serde::Serialize;
use std::collections::BTreeMap;
use crate::{error::Result, native_panic};
/// A set of compilation statistics gathered during the compilation.
/// It should be completely filled at the end of the compilation.
///
/// All `Option` fields start as `None` and are expected to be set by the time compilation
/// finishes; `validate` checks exactly that.
#[derive(Default, Serialize)]
pub struct Statistics {
    /// Number of types defined in the Sierra code.
    pub sierra_type_count: Option<usize>,
    /// Number of libfuncs defined in the Sierra code.
    pub sierra_libfunc_count: Option<usize>,
    /// Number of statements contained in the Sierra code.
    pub sierra_statement_count: Option<usize>,
    /// Number of user functions defined in the Sierra code.
    pub sierra_func_count: Option<usize>,
    /// Stats of the declared types in Sierra.
    pub sierra_declared_types_stats: BTreeMap<u64, SierraDeclaredTypeStats>,
    /// Stats about params and return types of each Sierra function.
    pub sierra_func_stats: BTreeMap<u64, SierraFuncStats>,
    /// Number of statements for each distinct libfunc.
    pub sierra_libfunc_frequency: BTreeMap<String, u128>,
    /// Number of times each circuit gate is used.
    pub sierra_circuit_gates_count: CircuitGatesStats,
    /// Number of MLIR operations generated.
    pub mlir_operation_count: Option<u128>,
    /// Number of MLIR operations generated for each distinct libfunc.
    pub mlir_operations_by_libfunc: BTreeMap<String, u128>,
    /// Number of LLVMIR instructions generated.
    pub llvmir_instruction_count: Option<u128>,
    /// Number of LLVMIR virtual registers defined.
    pub llvmir_virtual_register_count: Option<u128>,
    /// Number of LLVMIR instructions for each distinct opcode.
    pub llvmir_opcode_frequency: BTreeMap<String, u128>,
    /// Total compilation time.
    pub compilation_total_time_ms: Option<u128>,
    /// Time spent at Sierra to MLIR.
    pub compilation_sierra_to_mlir_time_ms: Option<u128>,
    /// Time spent at MLIR passes.
    pub compilation_mlir_passes_time_ms: Option<u128>,
    /// Time spent at MLIR to LLVMIR translation.
    pub compilation_mlir_to_llvm_time_ms: Option<u128>,
    /// Time spent at LLVM passes.
    pub compilation_llvm_passes_time_ms: Option<u128>,
    /// Time spent at LLVM to object compilation.
    pub compilation_llvm_to_object_time_ms: Option<u128>,
    /// Time spent at linking the shared library.
    pub compilation_linking_time_ms: Option<u128>,
    /// Size of the compiled object.
    pub object_size_bytes: Option<usize>,
}
/// Statistics about a single Sierra function.
#[derive(Debug, Default, Serialize)]
pub struct SierraFuncStats {
    /// Total size of all the params.
    pub params_total_size: usize,
    /// Total size of all the return types.
    pub return_types_total_size: usize,
    /// Total number of times the function is called.
    pub times_called: usize,
}
/// Statistics for a single Sierra declared type.
#[derive(Debug, Default, Serialize)]
pub struct SierraDeclaredTypeStats {
    /// Layout size of the whole type.
    pub size: usize,
    /// Number of times the type is used as a param in a libfunc.
    pub as_param_count: usize,
}
/// Number of occurrences of each circuit gate kind in a program.
///
/// Filled in by `Statistics::add_circuit_gates`.
#[derive(Debug, Default, Serialize)]
pub struct CircuitGatesStats {
    add_gate_count: usize,
    sub_gate_count: usize,
    mul_gate_count: usize,
    inverse_gate_count: usize,
}
impl Statistics {
    /// Returns whether every mandatory (`Option`) statistic has been filled in.
    ///
    /// Should hold at the end of a successful compilation.
    pub fn validate(&self) -> bool {
        self.sierra_type_count.is_some()
            && self.sierra_libfunc_count.is_some()
            && self.sierra_statement_count.is_some()
            && self.sierra_func_count.is_some()
            && self.mlir_operation_count.is_some()
            && self.llvmir_instruction_count.is_some()
            && self.llvmir_virtual_register_count.is_some()
            && self.compilation_total_time_ms.is_some()
            && self.compilation_sierra_to_mlir_time_ms.is_some()
            && self.compilation_mlir_passes_time_ms.is_some()
            && self.compilation_mlir_to_llvm_time_ms.is_some()
            && self.compilation_llvm_passes_time_ms.is_some()
            && self.compilation_llvm_to_object_time_ms.is_some()
            && self.compilation_linking_time_ms.is_some()
            && self.object_size_bytes.is_some()
    }

    /// Counts the gates in a circuit. It uses the same algorithm used
    /// to evaluate the gates on a circuit when evaluating it.
    pub fn add_circuit_gates(&mut self, info: &CircuitInfo) -> Result<()> {
        // `known_gates[i]` tells whether slot `i`'s value would be known at this point of the
        // evaluation. Slot 0 holds the constant and the next `n_inputs` slots hold the circuit
        // inputs, so those are known from the start.
        let mut known_gates = vec![false; 1 + info.n_inputs + info.values.len()];
        known_gates[..=info.n_inputs].fill(true);

        let mut add_offsets = info.add_offsets.iter().peekable();
        let mut mul_offsets = info.mul_offsets.iter();

        loop {
            // Resolve as many add-family gates as possible before each mul-family gate.
            while let Some(&add_gate_offset) = add_offsets.peek() {
                // `bool` is `Copy`, so plain indexing suffices (previous code used `.to_owned()`).
                let lhs = known_gates[add_gate_offset.lhs];
                let rhs = known_gates[add_gate_offset.rhs];
                let output = known_gates[add_gate_offset.output];
                match (lhs, rhs, output) {
                    (true, true, false) => {
                        // ADD: both operands known, the output becomes known.
                        self.sierra_circuit_gates_count.add_gate_count += 1;
                        known_gates[add_gate_offset.output] = true;
                    }
                    (false, true, true) => {
                        // SUB: output and rhs known, lhs is the one being derived.
                        self.sierra_circuit_gates_count.sub_gate_count += 1;
                        known_gates[add_gate_offset.lhs] = true;
                    }
                    // No progress possible on this gate yet; try a mul gate first.
                    _ => break,
                }
                add_offsets.next();
            }

            if let Some(&GateOffsets { lhs, rhs, output }) = mul_offsets.next() {
                let lhs_value = known_gates[lhs];
                let rhs_value = known_gates[rhs];
                let output_value = known_gates[output];
                match (lhs_value, rhs_value, output_value) {
                    (true, true, false) => {
                        // MUL: both operands known, the output becomes known.
                        self.sierra_circuit_gates_count.mul_gate_count += 1;
                        known_gates[output] = true;
                    }
                    (false, true, true) => {
                        // INVERSE: output and rhs known, lhs is the one being derived.
                        self.sierra_circuit_gates_count.inverse_gate_count += 1;
                        known_gates[lhs] = true;
                    }
                    // It should never reach this point, since such a circuit would have failed
                    // in the compilation before. (Also fixes the "Imposible" typo.)
                    _ => native_panic!("Impossible circuit"),
                }
            } else {
                break;
            }
        }
        Ok(())
    }
}
/// Clones a variable of type `Option<&mut T>` without consuming self
///
/// # Example
///
/// The following example would fail to compile otherwise.
///
/// ```
/// # use cairo_native::clone_option_mut;
/// fn consume(v: Option<&mut Vec<u8>>) {}
///
/// let mut vec = Vec::new();
/// let option = Some(&mut vec);
/// consume(clone_option_mut!(option));
/// consume(option);
/// ```
#[macro_export]
macro_rules! clone_option_mut {
    ( $var:ident ) => {
        match $var {
            None => None,
            // `&mut ref mut s` reborrows the inner `&mut T` instead of moving it out of the
            // `Option`, which is what keeps the original variable usable afterwards.
            Some(&mut ref mut s) => Some(s),
        }
    };
}
| rust | Apache-2.0 | f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce | 2026-01-04T20:20:54.031924Z | false |
lambdaclass/cairo_native | https://github.com/lambdaclass/cairo_native/blob/f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce/src/arch.rs | src/arch.rs | use crate::{
error::Result,
native_panic,
starknet::{ArrayAbi, Secp256k1Point, Secp256r1Point},
types::TypeBuilder,
utils::libc_malloc,
values::Value,
};
use bumpalo::Bump;
use cairo_lang_sierra::{
extensions::{
core::{CoreLibfunc, CoreType, CoreTypeConcrete},
starknet::{secp256::Secp256PointTypeConcrete, StarknetTypeConcrete},
},
ids::ConcreteTypeId,
program_registry::ProgramRegistry,
};
use std::{
ffi::c_void,
ptr::{null, NonNull},
};
// Architecture-specific pieces of the ABI serialization (one module per supported target).
mod aarch64;
mod x86_64;
/// Implemented by all supported argument types.
pub trait AbiArgument {
    /// Serialize the argument into the buffer. This method should keep track of arch-dependent
    /// stuff like register vs stack allocation.
    ///
    /// `find_dict_drop_override` maps a concrete type id to an optional C-ABI drop function;
    /// implementations forward it unchanged to nested serializations.
    fn to_bytes(
        &self,
        buffer: &mut Vec<u8>,
        find_dict_drop_override: impl Copy + Fn(&ConcreteTypeId) -> Option<extern "C" fn(*mut c_void)>,
    ) -> Result<()>;
}
/// A wrapper that implements `AbiArgument` for `Value`s. It contains all the required stuff to
/// serialize all possible `Value`s.
pub struct ValueWithInfoWrapper<'a> {
    /// The runtime value being serialized.
    pub value: &'a Value,
    /// Sierra type id of `value`.
    pub type_id: &'a ConcreteTypeId,
    /// Concrete type info corresponding to `type_id`.
    pub info: &'a CoreTypeConcrete,
    /// Arena handed to `Value::to_ptr` for temporary representations.
    pub arena: &'a Bump,
    /// Program registry used to resolve nested types.
    pub registry: &'a ProgramRegistry<CoreType, CoreLibfunc>,
}
impl<'a> ValueWithInfoWrapper<'a> {
    /// Build a wrapper for a nested `value` of type `type_id`, reusing this wrapper's arena and
    /// registry. Fails if `type_id` is not present in the registry.
    ///
    /// NOTE(review): the `'b: 'a` bound ties the nested borrows to the outer lifetime; it looks
    /// stricter than necessary — confirm before relaxing.
    fn map<'b>(
        &'b self,
        value: &'b Value,
        type_id: &'b ConcreteTypeId,
    ) -> Result<ValueWithInfoWrapper<'b>>
    where
        'b: 'a,
    {
        Ok(Self {
            value,
            type_id,
            info: self.registry.get_type(type_id)?,
            arena: self.arena,
            registry: self.registry,
        })
    }
}
impl AbiArgument for ValueWithInfoWrapper<'_> {
    // Serializes `self.value` according to `self.info`. Compound values recurse through
    // `self.map(...)`; leaves delegate to the arch-specific `AbiArgument` impls.
    fn to_bytes(
        &self,
        buffer: &mut Vec<u8>,
        find_dict_drop_override: impl Copy + Fn(&ConcreteTypeId) -> Option<extern "C" fn(*mut c_void)>,
    ) -> Result<()> {
        match (self.value, self.info) {
            // Boxed values are passed as a pointer to a `malloc`ed copy of the arena-built
            // representation, so the callee can own (and eventually free) the allocation.
            (value, CoreTypeConcrete::Box(info)) => {
                let ptr = value.to_ptr(
                    self.arena,
                    self.registry,
                    self.type_id,
                    find_dict_drop_override,
                )?;
                let layout = self.registry.get_type(&info.ty)?.layout(self.registry)?;
                // SAFETY: `ptr` points to a valid value of `info.ty`, whose size is
                // `layout.size()`; the fresh heap buffer has that same size.
                let heap_ptr = unsafe {
                    let heap_ptr = libc_malloc(layout.size());
                    libc::memcpy(heap_ptr, ptr.as_ptr().cast(), layout.size());
                    heap_ptr
                };
                heap_ptr.to_bytes(buffer, find_dict_drop_override)?;
            }
            // Nullables behave like boxes, except `Value::Null` is encoded as a null pointer.
            (value, CoreTypeConcrete::Nullable(info)) => {
                if matches!(value, Value::Null) {
                    null::<()>().to_bytes(buffer, find_dict_drop_override)?;
                } else {
                    let ptr = value.to_ptr(
                        self.arena,
                        self.registry,
                        self.type_id,
                        find_dict_drop_override,
                    )?;
                    let layout = self.registry.get_type(&info.ty)?.layout(self.registry)?;
                    // SAFETY: same argument as the `Box` arm above.
                    let heap_ptr = unsafe {
                        let heap_ptr = libc_malloc(layout.size());
                        libc::memcpy(heap_ptr, ptr.as_ptr().cast(), layout.size());
                        heap_ptr
                    };
                    heap_ptr.to_bytes(buffer, find_dict_drop_override)?;
                }
            }
            // Transparent wrappers: serialize the inner type directly.
            (value, CoreTypeConcrete::NonZero(info) | CoreTypeConcrete::Snapshot(info)) => self
                .map(value, &info.ty)?
                .to_bytes(buffer, find_dict_drop_override)?,
            // Arrays are passed as their four ABI words: data pointer, then the
            // `since`/`until`/`capacity` indices.
            (Value::Array(_), CoreTypeConcrete::Array(_)) => {
                // TODO: Assert that `info.ty` matches all the values' types. See: https://github.com/lambdaclass/cairo_native/issues/1216
                let abi_ptr = self.value.to_ptr(
                    self.arena,
                    self.registry,
                    self.type_id,
                    find_dict_drop_override,
                )?;
                // SAFETY: `to_ptr` built a valid `ArrayAbi` representation for an array value.
                let abi = unsafe { abi_ptr.cast::<ArrayAbi<()>>().as_ref() };
                abi.ptr.to_bytes(buffer, find_dict_drop_override)?;
                abi.since.to_bytes(buffer, find_dict_drop_override)?;
                abi.until.to_bytes(buffer, find_dict_drop_override)?;
                abi.capacity.to_bytes(buffer, find_dict_drop_override)?;
            }
            (Value::BoundedInt { .. }, CoreTypeConcrete::BoundedInt(_)) => {
                // See: https://github.com/lambdaclass/cairo_native/issues/1217
                native_panic!("todo: implement AbiArgument for Value::BoundedInt case")
            }
            (Value::Bytes31(value), CoreTypeConcrete::Bytes31(_)) => {
                value.to_bytes(buffer, find_dict_drop_override)?
            }
            // EC points and states are serialized as their felt coordinates, in order.
            (Value::EcPoint(x, y), CoreTypeConcrete::EcPoint(_)) => {
                x.to_bytes(buffer, find_dict_drop_override)?;
                y.to_bytes(buffer, find_dict_drop_override)?;
            }
            (Value::EcState(x, y, x0, y0), CoreTypeConcrete::EcState(_)) => {
                x.to_bytes(buffer, find_dict_drop_override)?;
                y.to_bytes(buffer, find_dict_drop_override)?;
                x0.to_bytes(buffer, find_dict_drop_override)?;
                y0.to_bytes(buffer, find_dict_drop_override)?;
            }
            (Value::Enum { tag, value, .. }, CoreTypeConcrete::Enum(info)) => {
                if self.info.is_memory_allocated(self.registry)? {
                    // Memory-allocated enums are passed as a pointer to their representation.
                    let abi_ptr = self.value.to_ptr(
                        self.arena,
                        self.registry,
                        self.type_id,
                        find_dict_drop_override,
                    )?;
                    // SAFETY: for memory-allocated enums `to_ptr` stores a non-null pointer to
                    // the payload, which is what gets passed along.
                    let abi_ptr = unsafe { *abi_ptr.cast::<NonNull<()>>().as_ref() };
                    abi_ptr.as_ptr().to_bytes(buffer, find_dict_drop_override)?;
                } else {
                    // Otherwise write the discriminant first — omitted entirely when a single
                    // variant means zero tag bytes — followed by the active variant's payload.
                    match info
                        .variants
                        .len()
                        .next_power_of_two()
                        .trailing_zeros()
                        .div_ceil(8)
                    {
                        0 => {}
                        _ => (*tag as u64).to_bytes(buffer, find_dict_drop_override)?,
                    }
                    self.map(value, &info.variants[*tag])?
                        .to_bytes(buffer, find_dict_drop_override)?;
                }
            }
            // All felt-backed Starknet address/hash types share the felt252 representation.
            (
                Value::Felt252(value),
                CoreTypeConcrete::Felt252(_)
                | CoreTypeConcrete::Starknet(
                    StarknetTypeConcrete::ClassHash(_)
                    | StarknetTypeConcrete::ContractAddress(_)
                    | StarknetTypeConcrete::StorageAddress(_)
                    | StarknetTypeConcrete::StorageBaseAddress(_),
                ),
            ) => value.to_bytes(buffer, find_dict_drop_override)?,
            // Dictionaries are passed as a pointer to their runtime representation.
            (Value::Felt252Dict { .. }, CoreTypeConcrete::Felt252Dict(_)) => {
                // TODO: Assert that `info.ty` matches all the values' types.
                self.value
                    .to_ptr(
                        self.arena,
                        self.registry,
                        self.type_id,
                        find_dict_drop_override,
                    )?
                    .as_ptr()
                    .to_bytes(buffer, find_dict_drop_override)?
            }
            // Secp256 points: both coordinates followed by the point-at-infinity flag.
            (
                Value::Secp256K1Point(Secp256k1Point { x, y, is_infinity }),
                CoreTypeConcrete::Starknet(StarknetTypeConcrete::Secp256Point(
                    Secp256PointTypeConcrete::K1(_),
                )),
            )
            | (
                Value::Secp256R1Point(Secp256r1Point { x, y, is_infinity }),
                CoreTypeConcrete::Starknet(StarknetTypeConcrete::Secp256Point(
                    Secp256PointTypeConcrete::R1(_),
                )),
            ) => {
                x.to_bytes(buffer, find_dict_drop_override)?;
                y.to_bytes(buffer, find_dict_drop_override)?;
                is_infinity.to_bytes(buffer, find_dict_drop_override)?;
            }
            // Fixed-width integers delegate straight to the primitive impls.
            (Value::Sint128(value), CoreTypeConcrete::Sint128(_)) => {
                value.to_bytes(buffer, find_dict_drop_override)?
            }
            (Value::Sint16(value), CoreTypeConcrete::Sint16(_)) => {
                value.to_bytes(buffer, find_dict_drop_override)?
            }
            (Value::Sint32(value), CoreTypeConcrete::Sint32(_)) => {
                value.to_bytes(buffer, find_dict_drop_override)?
            }
            (Value::Sint64(value), CoreTypeConcrete::Sint64(_)) => {
                value.to_bytes(buffer, find_dict_drop_override)?
            }
            (Value::Sint8(value), CoreTypeConcrete::Sint8(_)) => {
                value.to_bytes(buffer, find_dict_drop_override)?
            }
            // Structs serialize their fields in declaration order.
            (Value::Struct { fields, .. }, CoreTypeConcrete::Struct(info)) => {
                fields
                    .iter()
                    .zip(&info.members)
                    .map(|(value, type_id)| self.map(value, type_id))
                    .try_for_each(|wrapper| wrapper?.to_bytes(buffer, find_dict_drop_override))?;
            }
            (Value::Uint128(value), CoreTypeConcrete::Uint128(_)) => {
                value.to_bytes(buffer, find_dict_drop_override)?
            }
            (Value::Uint16(value), CoreTypeConcrete::Uint16(_)) => {
                value.to_bytes(buffer, find_dict_drop_override)?
            }
            (Value::Uint32(value), CoreTypeConcrete::Uint32(_)) => {
                value.to_bytes(buffer, find_dict_drop_override)?
            }
            (Value::Uint64(value), CoreTypeConcrete::Uint64(_)) => {
                value.to_bytes(buffer, find_dict_drop_override)?
            }
            (Value::Uint8(value), CoreTypeConcrete::Uint8(_)) => {
                value.to_bytes(buffer, find_dict_drop_override)?
            }
            // The catchall includes all unreachable combinations, as well
            // as some combination that may be reachable, and haven't been
            // encountered yet. Adding support for additional input arguments
            // may require implementing this function for new combinations.
            _ => native_panic!(
                "abi argument unimplemented for ({:?}, {:?})",
                self.value,
                self.type_id
            ),
        }
        Ok(())
    }
}
| rust | Apache-2.0 | f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce | 2026-01-04T20:20:54.031924Z | false |
lambdaclass/cairo_native | https://github.com/lambdaclass/cairo_native/blob/f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce/src/compiler.rs | src/compiler.rs | //! # Compilation process
//!
//! A Sierra program is compiled one function at a time. Each function has a pre-entry block that
//! will be ran only once, even in tail-recursive functions. All libfuncs are intended to place
//! their stack-allocating operations there so as to not grow the stack when recursing.
//!
//! After the pre-entry block, there is an entry block, which is in charge of preparing the first
//! statement's arguments and jumping into it. From here on, all the statements'
//! [builders](crate::libfuncs::LibfuncBuilder) are invoked. Every libfunc builder must end its
//! execution calling a branch function from the helper, which will generate the operations required
//! to jump to next statements. This simplifies the branching design, especially when a libfunc has
//! multiple target branches.
//!
//! > Note: Libfunc builders must have a branching operation out into each possible branch, even if
//! > it's unreachable. This is required to keep the state consistent. More on that later.
//!
//! Some statements do require a special landing block. Those are the ones which are the branching
//! target of more than a single statement. In other words, if a statement can be reached (directly)
//! from more than a single place, it needs a landing block.
//!
//! The landing blocks are in charge of synchronizing the Sierra state. The state is just a
//! dictionary mapping variable ids to their values. Since the values can come from a single branch,
//! this landing block is required.
//!
//! In order to generate the libfuncs's blocks, all the libfunc's entry blocks are required. That is
//! why they are generated all beforehand. The order in which they are generated follows a
//! breadth-first ordering; that is, the compiler uses a [BFS algorithm]. This algorithm should
//! generate the libfuncs in the same order as they appear in Sierra. As expected, the algorithm
//! forks the path each time a branching libfunc is found, which dies once a return statement is
//! detected.
//!
//! ## Function nomenclature transforms
//!
//! When compiling from Cairo, or from a Sierra source with debug information (the `-r` flag on
//! `cairo-compile`), those identifiers are the function's exported symbol. However, Sierra programs
//! are not required to contain that information. In those cases, the
//! [`generate_function_name`] will generate a new symbol name based on its
//! function id.
//!
//! ## Tail-recursive functions
//!
//! Part of the tail-recursion handling algorithm is implemented here, but tail-recursive functions
//! are better explained in [their metadata section](crate::metadata::tail_recursion).
//!
//! [BFS algorithm]: https://en.wikipedia.org/wiki/Breadth-first_search
use crate::{
clone_option_mut,
debug::libfunc_to_name,
error::{panic::ToNativeAssertError, Error},
libfuncs::{BranchArg, LibfuncBuilder, LibfuncHelper},
metadata::{
gas::{GasCost, GasMetadata},
tail_recursion::TailRecursionMeta,
MetadataStorage,
},
native_assert, native_panic,
statistics::Statistics,
types::TypeBuilder,
utils::{generate_function_name, walk_ir::walk_mlir_block},
};
use bumpalo::Bump;
use cairo_lang_sierra::{
edit_state,
extensions::{
core::{CoreConcreteLibfunc, CoreLibfunc, CoreType},
ConcreteLibfunc,
},
ids::{ConcreteTypeId, VarId},
program::{Function, Invocation, Program, Statement, StatementIdx},
program_registry::ProgramRegistry,
};
use cairo_lang_utils::ordered_hash_map::OrderedHashMap;
use itertools::Itertools;
use melior::{
dialect::{
arith::CmpiPredicate,
cf, func, index,
llvm::{self, LoadStoreOptions},
memref,
},
helpers::{ArithBlockExt, BuiltinBlockExt, LlvmBlockExt},
ir::{
attribute::{
DenseI64ArrayAttribute, FlatSymbolRefAttribute, IntegerAttribute, StringAttribute,
TypeAttribute,
},
operation::OperationBuilder,
r#type::{FunctionType, IntegerType, MemRefType},
Attribute, AttributeLike, Block, BlockLike, BlockRef, Identifier, Location, Module, Region,
Type, Value,
},
Context,
};
use mlir_sys::{
mlirDisctinctAttrCreate, mlirLLVMDICompileUnitAttrGet, mlirLLVMDIFileAttrGet,
mlirLLVMDIModuleAttrGet, mlirLLVMDIModuleAttrGetScope, mlirLLVMDISubprogramAttrGet,
mlirLLVMDISubroutineTypeAttrGet, MlirLLVMDIEmissionKind_MlirLLVMDIEmissionKindFull,
MlirLLVMDINameTableKind_MlirLLVMDINameTableKindDefault,
};
use std::{
cell::Cell,
collections::{hash_map::Entry, BTreeMap, HashMap, HashSet},
ops::Deref,
};
/// The [BlockStorage] type is used to map each statement into its own entry block (on the right),
/// and its landing block (on the left) if required.
///
/// The landing block contains also the variable ids that must be present when jumping into it,
/// otherwise it's a compiler error due to an inconsistent variable state.
///
/// Keyed by the Sierra statement index.
type BlockStorage<'c, 'a> =
    HashMap<StatementIdx, (Option<(BlockRef<'c, 'a>, Vec<VarId>)>, BlockRef<'c, 'a>)>;
/// Run the compiler on a program. The compiled program is stored in the MLIR module.
///
/// The generics `TType` and `TLibfunc` contain the information required to generate the MLIR types
/// and statement operations. Most of the time you'll want to use the default ones, which are
/// [CoreType](cairo_lang_sierra::extensions::core::CoreType) and
/// [CoreLibfunc](cairo_lang_sierra::extensions::core::CoreLibfunc) respectively.
///
/// This function needs the program and the program's registry, which doesn't need to have AP
/// tracking information.
///
/// Additionally, it needs a reference to the MLIR context, the output module and the metadata
/// storage. The last one is passed externally so that stuff can be initialized if necessary.
#[allow(clippy::too_many_arguments)]
pub fn compile(
    context: &Context,
    module: &Module,
    program: &Program,
    registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    metadata: &mut MetadataStorage,
    di_compile_unit_id: Attribute,
    ignore_debug_names: bool,
    stats: Option<&mut Statistics>,
) -> Result<(), Error> {
    // Optionally dump the textual Sierra program for debugging.
    if std::env::var("NATIVE_DEBUG_DUMP").is_ok_and(|v| v == "1" || v == "true") {
        std::fs::write("program.sierra", program.to_string())?;
    }

    // Sierra programs have the following structure:
    //   1. Type declarations, one per line.
    //   2. Libfunc declarations, one per line.
    //   3. All the program statements, one per line.
    //   4. Function declarations, one per line.
    // The four sections are separated by a single blank line, hence the `+ 1` terms below when
    // computing the line at which the statements start.
    let type_section_len = program.type_declarations.len() + 1;
    let libfunc_section_len = program.libfunc_declarations.len() + 1;
    let sierra_stmt_start_offset = type_section_len + libfunc_section_len + 1;

    for function in &program.funcs {
        tracing::info!("Compiling function `{}`.", function.id);
        compile_func(
            context,
            module,
            registry,
            function,
            &program.statements,
            metadata,
            di_compile_unit_id,
            sierra_stmt_start_offset,
            ignore_debug_names,
            clone_option_mut!(stats),
        )?;
    }

    tracing::info!("The program was compiled successfully.");
    Ok(())
}
/// Compile a single Sierra function.
///
/// The function accepts a `Function` argument, which provides the function's entry point, signature
/// and name. Check out [compile](self::compile) for a description of the other arguments.
///
/// The [module docs](self) contain more information about the compilation process.
///
/// If [`ignore_debug_names`] is true, then the function name will always be `f{id}` without any debug name info if it ever exists.
#[allow(clippy::too_many_arguments)]
fn compile_func(
context: &Context,
module: &Module,
registry: &ProgramRegistry<CoreType, CoreLibfunc>,
function: &Function,
statements: &[Statement],
metadata: &mut MetadataStorage,
di_compile_unit_id: Attribute,
sierra_stmt_start_offset: usize,
ignore_debug_names: bool,
stats: Option<&mut Statistics>,
) -> Result<(), Error> {
let fn_location = Location::new(
context,
"program.sierra",
sierra_stmt_start_offset + function.entry_point.0,
0,
);
let region = Region::new();
let blocks_arena = Bump::new();
let mut arg_types = extract_types(
context,
module,
&function.signature.param_types,
registry,
metadata,
)
.collect::<Result<Vec<_>, _>>()?;
let mut return_types = extract_types(
context,
module,
&function.signature.ret_types,
registry,
metadata,
)
.collect::<Result<Vec<_>, _>>()?;
#[cfg(feature = "with-trace-dump")]
let mut var_types: HashMap<VarId, ConcreteTypeId> = HashMap::new();
// Replace memory-allocated arguments with pointers.
for (ty, type_info) in
arg_types
.iter_mut()
.zip(function.signature.param_types.iter().filter_map(|type_id| {
let type_info = match registry.get_type(type_id) {
Ok(x) => x,
Err(e) => return Some(Err(e.into())),
};
let is_zst = match type_info.is_zst(registry) {
Ok(x) => x,
Err(e) => return Some(Err(e)),
};
if type_info.is_builtin() && is_zst {
None
} else {
Some(Ok(type_info))
}
}))
{
let type_info = type_info?;
if type_info.is_memory_allocated(registry)? {
*ty = llvm::r#type::pointer(context, 0);
}
}
// Extract memory-allocated return types from return_types and insert them in arg_types as a
// pointer.
let return_type_infos = function
.signature
.ret_types
.iter()
.filter_map(|type_id| {
let type_info = match registry.get_type(type_id) {
Ok(x) => x,
Err(e) => return Some(Err(e.into())),
};
let is_zst = match type_info.is_zst(registry) {
Ok(x) => x,
Err(e) => return Some(Err(e)),
};
if type_info.is_builtin() && is_zst {
None
} else {
Some(Ok((type_id, type_info)))
}
})
.collect::<Result<Vec<_>, _>>()?;
// Possible values:
// None => Doesn't return anything.
// Some(false) => Has a complex return type.
// Some(true) => Has a manual return type which is in `arg_types[0]`.
let has_return_ptr = if return_type_infos.len() > 1 {
Some(false)
} else if return_type_infos
.first()
.map(|(_, type_info)| type_info.is_memory_allocated(registry))
.transpose()?
== Some(true)
{
assert_eq!(return_types.len(), 1);
return_types.remove(0);
arg_types.insert(0, llvm::r#type::pointer(context, 0));
Some(true)
} else {
None
};
let function_name = generate_function_name(&function.id, ignore_debug_names);
// Don't care about whether it is for the contract executor for inner impls
// so we don't have to pass the boolean to the function call libfunc.
let function_name_for_inner = generate_function_name(&function.id, false);
let di_subprogram = unsafe {
// Various DWARF debug attributes for this function.
// The unsafe is because this is a method not yet found in upstream LLVM nor melior, so
// we are using our own bindings to the C++ API.
let file_attr = Attribute::from_raw(mlirLLVMDIFileAttrGet(
context.to_raw(),
StringAttribute::new(context, "program.sierra").to_raw(),
StringAttribute::new(context, ".").to_raw(),
));
let compile_unit = {
Attribute::from_raw(mlirLLVMDICompileUnitAttrGet(
context.to_raw(),
di_compile_unit_id.to_raw(),
0x0002, // lang C (there is no language sierra in DWARF)
file_attr.to_raw(),
StringAttribute::new(context, "cairo-native").to_raw(),
false,
MlirLLVMDIEmissionKind_MlirLLVMDIEmissionKindFull,
MlirLLVMDINameTableKind_MlirLLVMDINameTableKindDefault,
))
};
let di_module = mlirLLVMDIModuleAttrGet(
context.to_raw(),
file_attr.to_raw(),
compile_unit.to_raw(),
StringAttribute::new(context, "LLVMDialectModule").to_raw(),
StringAttribute::new(context, "").to_raw(),
StringAttribute::new(context, "").to_raw(),
StringAttribute::new(context, "").to_raw(),
0,
false,
);
let module_scope = mlirLLVMDIModuleAttrGetScope(di_module);
Attribute::from_raw({
let id = mlirDisctinctAttrCreate(
StringAttribute::new(context, &format!("fn_{}", function.id.id)).to_raw(),
);
// Don't add argument types since its not useful, we only use the debugger for source locations.
let ty = mlirLLVMDISubroutineTypeAttrGet(
context.to_raw(),
0x0, // call conv: C
0,
std::ptr::null(),
);
mlirLLVMDISubprogramAttrGet(
context.to_raw(),
id,
module_scope,
file_attr.to_raw(),
StringAttribute::new(context, &function_name).to_raw(),
StringAttribute::new(context, &function_name).to_raw(),
file_attr.to_raw(),
(sierra_stmt_start_offset + function.entry_point.0) as u32,
(sierra_stmt_start_offset + function.entry_point.0) as u32,
0x8, // dwarf subprogram flag: definition
ty,
)
})
};
tracing::debug!("Generating function structure (region with blocks).");
let (entry_block, blocks, is_recursive) = generate_function_structure(
context,
module,
®ion,
registry,
function,
statements,
metadata,
sierra_stmt_start_offset,
)?;
tracing::debug!("Generating the function implementation.");
// Workaround for the `entry block of region may not have predecessors` error:
let pre_entry_block_args = arg_types
.iter()
.map(|ty| {
(
*ty,
Location::new(
context,
"program.sierra",
sierra_stmt_start_offset + function.entry_point.0,
0,
),
)
})
.collect::<Vec<_>>();
let pre_entry_block =
region.insert_block_before(entry_block, Block::new(&pre_entry_block_args));
let initial_state = edit_state::put_results(OrderedHashMap::<_, Value>::default(), {
let mut values = Vec::new();
let mut count = 0;
for param in &function.params {
let type_info = registry.get_type(¶m.ty)?;
let location = Location::new(
context,
"program.sierra",
sierra_stmt_start_offset + function.entry_point.0,
0,
);
values.push((
¶m.id,
if type_info.is_builtin() && type_info.is_zst(registry)? {
pre_entry_block
.append_operation(llvm::undef(
type_info.build(context, module, registry, metadata, ¶m.ty)?,
location,
))
.result(0)?
.into()
} else {
let value = entry_block.argument(count)?.into();
count += 1;
value
},
));
#[cfg(feature = "with-trace-dump")]
var_types.insert(param.id.clone(), param.ty.clone());
}
values.into_iter()
})?;
tracing::trace!("Implementing the entry block.");
entry_block.append_operation(cf::br(
&blocks[&function.entry_point].1,
&match &statements[function.entry_point.0] {
Statement::Invocation(x) => &x.args,
Statement::Return(x) => x,
}
.iter()
.map(|x| initial_state[x])
.collect::<Vec<_>>(),
{
Location::new(
context,
"program.sierra",
sierra_stmt_start_offset + function.entry_point.0,
0,
)
},
));
let mut tailrec_state = Option::<(Value, BlockRef)>::None;
foreach_statement_in_function::<_, Error>(
statements,
function.entry_point,
initial_state,
|statement_idx, mut state| {
if let Some(gas_metadata) = metadata.get::<GasMetadata>() {
let gas_cost = gas_metadata.get_gas_costs_for_statement(statement_idx);
metadata.remove::<GasCost>();
metadata.insert(GasCost(gas_cost));
}
let (landing_block, block) = &blocks[&statement_idx];
if let Some((landing_block, _)) = landing_block {
tracing::trace!("Implementing the statement {statement_idx}'s landing block.");
state = edit_state::put_results(
OrderedHashMap::default(),
state
.keys()
.sorted_by_key(|x| x.id)
.enumerate()
.map(|(idx, var_id)| Ok((var_id, landing_block.argument(idx)?.into())))
.collect::<Result<Vec<_>, Error>>()?
.into_iter(),
)?;
landing_block.append_operation(cf::br(
block,
&edit_state::take_args(
state.clone(),
match &statements[statement_idx.0] {
Statement::Invocation(x) => &x.args,
Statement::Return(x) => x,
}
.iter(),
)?
.1,
Location::name(
context,
&format!("landing_block(stmt_idx={})", statement_idx),
fn_location,
),
));
}
Ok(match &statements[statement_idx.0] {
Statement::Invocation(invocation) => {
tracing::trace!(
"Implementing the invocation statement at {statement_idx}: {}.",
invocation.libfunc_id
);
let location = Location::new(
context,
"program.sierra",
sierra_stmt_start_offset + statement_idx.0,
0,
);
#[cfg(feature = "with-debug-utils")]
{
// If this env var exists and is a valid statement, insert a debug trap before the libfunc call.
// Only on when using with-debug-utils feature.
if let Ok(x) = std::env::var("NATIVE_DEBUG_TRAP_AT_STMT") {
if x.eq_ignore_ascii_case(&statement_idx.0.to_string()) {
block.append_operation(
melior::dialect::ods::llvm::intr_debugtrap(context, location)
.into(),
);
}
}
}
let libfunc_name = if invocation.libfunc_id.debug_name.is_some() {
format!("{}(stmt_idx={})", invocation.libfunc_id, statement_idx)
} else {
let libf = registry.get_libfunc(&invocation.libfunc_id)?;
format!("{}(stmt_idx={})", libfunc_to_name(libf), statement_idx)
};
#[cfg(feature = "with-trace-dump")]
crate::utils::trace_dump::build_state_snapshot(
context,
registry,
module,
block,
location,
metadata,
statement_idx,
&state,
&var_types,
);
let (state, _) = edit_state::take_args(state, invocation.args.iter())?;
let libfunc = registry.get_libfunc(&invocation.libfunc_id)?;
if is_recursive {
if let Some(target) = libfunc.is_function_call() {
if target == &function.id && state.is_empty() {
let location = Location::name(
context,
&format!("recursion_counter({})", libfunc_name),
location,
);
let op0 = pre_entry_block.insert_operation(
0,
memref::alloca(
context,
MemRefType::new(Type::index(context), &[], None, None),
&[],
&[],
None,
location,
),
);
let op1 = pre_entry_block.insert_operation_after(
op0,
index::constant(
context,
IntegerAttribute::new(Type::index(context), 0),
location,
),
);
pre_entry_block.insert_operation_after(
op1,
memref::store(
op1.result(0)?.into(),
op0.result(0)?.into(),
&[],
location,
),
);
metadata
.insert(TailRecursionMeta::new(
op0.result(0)?.into(),
&entry_block,
))
.to_native_assert_error(
"tail recursion metadata shouldn't be inserted",
)?;
}
}
}
#[allow(unused_mut)]
let mut helper = LibfuncHelper {
module,
init_block: &pre_entry_block,
region: ®ion,
blocks_arena: &blocks_arena,
last_block: Cell::new(block),
branches: generate_branching_targets(
&blocks,
statements,
statement_idx,
invocation,
&state,
),
results: invocation
.branches
.iter()
.map(|x| vec![Cell::new(None); x.results.len()])
.collect::<Vec<_>>(),
#[cfg(feature = "with-libfunc-profiling")]
profiler: match libfunc {
CoreConcreteLibfunc::FunctionCall(_) => {
// Tail-recursive function calls are broken because a stack of timestamps is required,
// which would invalidate tail recursion. Also, since each libfunc is measured individually,
// it doesn't make sense to take function calls into account, therefore it's ignored on purpose.
None
}
_ => match metadata.remove::<crate::metadata::profiler::ProfilerMeta>()
{
Some(profiler_meta) => {
let t0 = profiler_meta
.measure_timestamp(context, block, location)?;
Some((profiler_meta, statement_idx, t0))
}
None => None,
},
},
};
libfunc.build(
context,
registry,
block,
Location::name(context, &libfunc_name, location),
&helper,
metadata,
)?;
// When statistics are enabled, we iterate from the start
// to the end block of the compiled libfunc, and count all the operations.
if let Some(&mut ref mut stats) = stats {
let mut operations = 0;
walk_mlir_block(*block, *helper.last_block.get(), &mut |_| operations += 1);
let name = libfunc_to_name(libfunc).to_string();
*stats.mlir_operations_by_libfunc.entry(name).or_insert(0) += operations;
}
native_assert!(
block.terminator().is_some(),
"libfunc {} had no terminator",
libfunc_name
);
#[cfg(feature = "with-libfunc-profiling")]
if let Some((profiler_meta, _, _)) = helper.profiler.take() {
metadata.insert(profiler_meta);
}
if let Some(tailrec_meta) = metadata.remove::<TailRecursionMeta>() {
if let Some(return_block) = tailrec_meta.return_target() {
tailrec_state = Some((tailrec_meta.depth_counter(), return_block));
}
}
#[cfg(feature = "with-trace-dump")]
for (branch_signature, branch_info) in
libfunc.branch_signatures().iter().zip(&invocation.branches)
{
for (var_info, var_id) in
branch_signature.vars.iter().zip(&branch_info.results)
{
var_types.insert(var_id.clone(), var_info.ty.clone());
}
}
StatementCompileResult::Processed(
invocation
.branches
.iter()
.zip(helper.results()?)
.map(|(branch_info, result_values)| {
native_assert!(
branch_info.results.len() == result_values.len(),
"Mismatched number of returned values from branch."
);
Ok(edit_state::put_results(
state.clone(),
branch_info.results.iter().zip(result_values.into_iter()),
)?)
})
.collect::<Result<_, Error>>()?,
)
}
Statement::Return(var_ids) => {
tracing::trace!("Implementing the return statement at {statement_idx}");
let location = Location::name(
context,
&format!("return(stmt_idx={})", statement_idx),
Location::new(
context,
"program.sierra",
sierra_stmt_start_offset + statement_idx.0,
0,
),
);
#[cfg(feature = "with-trace-dump")]
if !is_recursive || tailrec_state.is_some() {
crate::utils::trace_dump::build_state_snapshot(
context,
registry,
module,
block,
location,
metadata,
statement_idx,
&state,
&var_types,
);
}
let (_, mut values) = edit_state::take_args(state, var_ids.iter())?;
let mut block = *block;
if is_recursive {
match tailrec_state {
None => {
// If this block is reached it means that a return has been detected
// within a tail-recursive function before the recursive call has
// been generated. Since we don't have the return target block at
// this point we need to defer this return statement's generation.
return Ok(StatementCompileResult::Deferred);
}
Some((depth_counter, recursion_target)) => {
let location = Location::name(
context,
&format!("return(stmt_idx={}, tail_recursion)", statement_idx),
Location::new(
context,
"program.sierra",
sierra_stmt_start_offset + statement_idx.0,
0,
),
);
// Perform tail recursion.
let cont_block = region.insert_block_after(block, Block::new(&[]));
let depth_counter_value = block.append_op_result(memref::load(
depth_counter,
&[],
location,
))?;
let k0 = block.const_int_from_type(
context,
location,
0,
Type::index(context),
)?;
let is_zero_depth = block.append_op_result(index::cmp(
context,
CmpiPredicate::Eq,
depth_counter_value,
k0,
location,
))?;
let k1 = block.const_int_from_type(
context,
location,
1,
Type::index(context),
)?;
| rust | Apache-2.0 | f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce | 2026-01-04T20:20:54.031924Z | true |
lambdaclass/cairo_native | https://github.com/lambdaclass/cairo_native/blob/f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce/src/libfuncs/struct.rs | src/libfuncs/struct.rs | //! # Struct-related libfuncs
use super::LibfuncHelper;
use crate::{
error::Result,
libfuncs::r#box::{into_box, unbox},
metadata::{realloc_bindings::ReallocBindingsMeta, MetadataStorage},
native_panic,
types::TypeBuilder,
utils::ProgramRegistryExt,
};
use cairo_lang_sierra::{
extensions::{
core::{CoreLibfunc, CoreType, CoreTypeConcrete},
lib_func::SignatureOnlyConcreteLibfunc,
structure::{ConcreteStructBoxedDeconstructLibfunc, StructConcreteLibfunc},
ConcreteLibfunc,
},
ids::ConcreteTypeId,
program_registry::ProgramRegistry,
};
use itertools::Itertools;
use melior::{
dialect::llvm::{self},
helpers::{BuiltinBlockExt, LlvmBlockExt},
ir::{Block, BlockLike, Location, Value},
Context,
};
/// Select and call the correct libfunc builder function from the selector.
pub fn build<'ctx, 'this>(
context: &'ctx Context,
registry: &ProgramRegistry<CoreType, CoreLibfunc>,
entry: &'this Block<'ctx>,
location: Location<'ctx>,
helper: &LibfuncHelper<'ctx, 'this>,
metadata: &mut MetadataStorage,
selector: &StructConcreteLibfunc,
) -> Result<()> {
match selector {
StructConcreteLibfunc::Construct(info) => {
build_construct(context, registry, entry, location, helper, metadata, info)
}
StructConcreteLibfunc::Deconstruct(info)
| StructConcreteLibfunc::SnapshotDeconstruct(info) => {
build_deconstruct(context, registry, entry, location, helper, metadata, info)
}
StructConcreteLibfunc::BoxedDeconstruct(info) => {
build_boxed_deconstruct(context, registry, entry, location, helper, metadata, info)
}
}
}
/// Generate MLIR operations for the `struct_construct` libfunc.
///
/// Every libfunc parameter arrives as a block argument, in declaration order;
/// they are packed into a single struct value and returned on branch 0.
pub fn build_construct<'ctx, 'this>(
    context: &'ctx Context,
    registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, 'this>,
    metadata: &mut MetadataStorage,
    info: &SignatureOnlyConcreteLibfunc,
) -> Result<()> {
    // Collect one value per libfunc parameter.
    let member_count = info.param_signatures().len();
    let mut members = Vec::with_capacity(member_count);
    for arg_idx in 0..member_count {
        members.push(entry.argument(arg_idx)?.into());
    }

    let constructed = build_struct_value(
        context,
        registry,
        entry,
        location,
        helper,
        metadata,
        &info.branch_signatures()[0].vars[0].ty,
        &members,
    )?;

    helper.br(entry, 0, &[constructed], location)
}
/// Build an LLVM struct value from its already-evaluated member values.
///
/// Shared helper: used by `struct_construct` and by other libfuncs that need
/// to materialize a struct (e.g. the circuit output struct in `eval_circuit`).
/// The previous doc comment was copy-pasted from `build_construct` and
/// misdescribed this function.
///
/// Zero-sized members are skipped: LLVM fails when inserting zero-sized types
/// into a struct (see <https://github.com/llvm/llvm-project/issues/107198>),
/// and inserting a ZST is a noop anyway, so there is no point in building
/// that operation.
#[allow(clippy::too_many_arguments)]
pub fn build_struct_value<'ctx, 'this>(
    context: &'ctx Context,
    registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, 'this>,
    metadata: &mut MetadataStorage,
    struct_type: &ConcreteTypeId,
    fields: &[Value<'ctx, 'this>],
) -> Result<Value<'ctx, 'this>> {
    let struct_ty = registry.build_type(context, helper, metadata, struct_type)?;
    let struct_type = registry.get_type(struct_type)?;

    // For each member, whether it is a zero-sized type.
    let zst_fields: Vec<bool> = match struct_type {
        CoreTypeConcrete::Struct(info) => {
            // If the type is a Sierra struct, check each member individually.
            info.members
                .iter()
                .map(|member| registry.get_type(member)?.is_zst(registry))
                .try_collect()?
        }
        _ => {
            // There are many Sierra types represented as an LLVM struct that
            // are not of Sierra struct type. In those cases assume no member
            // is a ZST.
            vec![false; fields.len()]
        }
    };

    let mut accumulator = entry.append_op_result(llvm::undef(struct_ty, location))?;
    for (idx, field) in fields.iter().enumerate() {
        // Skip ZST members; inserting them would trip the LLVM bug above.
        if zst_fields[idx] {
            continue;
        }
        accumulator = entry.insert_value(context, location, accumulator, *field, idx)?
    }

    Ok(accumulator)
}
/// Generate MLIR operations for the `struct_deconstruct` libfunc.
///
/// Unpacks each member of the struct received as the first block argument and
/// returns all of them on branch 0, in declaration order.
pub fn build_deconstruct<'ctx, 'this>(
    context: &'ctx Context,
    registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, 'this>,
    metadata: &mut MetadataStorage,
    info: &SignatureOnlyConcreteLibfunc,
) -> Result<()> {
    let packed = entry.arg(0)?;
    let out_vars = &info.branch_signatures()[0].vars;

    let mut members = Vec::<Value>::with_capacity(out_vars.len());
    for (member_idx, var_info) in out_vars.iter().enumerate() {
        // Build the MLIR type for this member, then pull it out of the struct.
        let member_ty = registry
            .get_type(&var_info.ty)?
            .build(context, helper, registry, metadata, &var_info.ty)?;
        members.push(entry.extract_value(context, location, packed, member_ty, member_idx)?);
    }

    helper.br(entry, 0, &members, location)
}
/// Generate MLIR operations for the `struct_boxed_deconstruct` libfunc.
///
/// Receives a `Struct` inside a `Box` and returns a tuple containing each member
/// of the `Struct` wrapped inside a `Box`.
///
/// # Signature
///
/// ```cairo
/// struct MyStruct {
/// x: u8,
/// y: felt252,
/// }
///
/// extern fn struct_boxed_deconstruct<T>(
/// value: Box<T>
/// ) -> (Box<u8>, Box<felt252>) nopanic;
/// ```
pub fn build_boxed_deconstruct<'ctx, 'this>(
context: &'ctx Context,
registry: &ProgramRegistry<CoreType, CoreLibfunc>,
entry: &'this Block<'ctx>,
location: Location<'ctx>,
helper: &LibfuncHelper<'ctx, 'this>,
metadata: &mut MetadataStorage,
info: &ConcreteStructBoxedDeconstructLibfunc,
) -> Result<()> {
metadata.get_or_insert_with(|| ReallocBindingsMeta::new(context, helper));
// Unbox the container
let CoreTypeConcrete::Box(box_info) = registry.get_type(&info.param_signatures()[0].ty)? else {
native_panic!("Should receibe a Box type as argument");
};
let container = unbox(
context,
registry,
entry,
location,
helper,
metadata,
&box_info.ty,
)?;
let mut fields = Vec::<Value>::with_capacity(info.members.len());
for (i, member_type_id) in info.members.iter().enumerate() {
let type_info = registry.get_type(member_type_id)?;
let field_ty = type_info.build(context, helper, registry, metadata, member_type_id)?;
let member = entry.extract_value(context, location, container, field_ty, i)?;
let (_, member_layout) =
registry.build_type_with_layout(context, helper, metadata, member_type_id)?;
// Box the member
let member = into_box(context, entry, location, member, member_layout)?;
fields.push(member);
}
helper.br(entry, 0, &fields, location)
}
#[cfg(test)]
mod test {
    use crate::{jit_struct, load_cairo, utils::testing::run_program_assert_output, Value};
    use cairo_lang_sierra::program::Program;
    use lazy_static::lazy_static;

    lazy_static! {
        // Cairo program exercising `struct_boxed_deconstruct` against structs
        // with three fields, a single field, no fields, and snapshot members.
        // Each helper module redeclares the extern with the concrete return
        // tuple expected for its struct shape.
        static ref BOXED_DECONSTRUCT_PROGRAM: (String, Program) = load_cairo! {
            mod decons_3_fields {
                extern fn struct_boxed_deconstruct<T>(value: Box<T>) -> (Box<felt252>, Box<u8>, Box<u128>) nopanic;
            }
            mod decons_1_field {
                extern fn struct_boxed_deconstruct<T>(value: Box<T>) -> (Box<u8>,) nopanic;
            }
            mod decons_empty_struct {
                extern fn struct_boxed_deconstruct<T>(value: Box<T>) -> () nopanic;
            }
            mod decons_struct_snapshot {
                extern fn struct_boxed_deconstruct<T>(value: Box<T>) -> (Box<@felt252>, Box<@u8>, Box<@u128>) nopanic;
            }

            struct ThreeFields {
                x: felt252,
                y: u8,
                z: u128,
            }

            struct OneField {
                x: u8,
            }

            struct EmptyStruct { }

            fn deconstruct_struct_3_fields() -> (Box<felt252>, Box<u8>, Box<u128>) {
                decons_3_fields::struct_boxed_deconstruct(BoxTrait::new(ThreeFields {x: 2, y: 2, z: 2}))
            }

            fn deconstruct_struct_1_field() -> (Box<u8>,) {
                decons_1_field::struct_boxed_deconstruct(BoxTrait::new(OneField {x: 2}))
            }

            fn deconstruct_empty_struct() -> () {
                decons_empty_struct::struct_boxed_deconstruct(BoxTrait::new(EmptyStruct { }))
            }

            fn deconstruct_struct_snapshot() -> (Box<@felt252>, Box<@u8>, Box<@u128>) {
                decons_struct_snapshot::struct_boxed_deconstruct(BoxTrait::new(ThreeFields {x: 2, y: 2, z: 2}))
            }
        };
    }

    // Deconstructing a boxed 3-field struct must yield its members in
    // declaration order.
    #[test]
    fn boxed_deconstruct_3_fields() {
        run_program_assert_output(
            &BOXED_DECONSTRUCT_PROGRAM,
            "deconstruct_struct_3_fields",
            &[],
            jit_struct!(Value::Felt252(2.into()), Value::Uint8(2), Value::Uint128(2)),
        );
    }

    // Single-member struct: result is a one-element tuple.
    #[test]
    fn boxed_deconstruct_1_field() {
        run_program_assert_output(
            &BOXED_DECONSTRUCT_PROGRAM,
            "deconstruct_struct_1_field",
            &[],
            jit_struct!(Value::Uint8(2)),
        );
    }

    // Empty struct: deconstruction produces an empty tuple.
    #[test]
    fn boxed_deconstruct_empty_struct() {
        run_program_assert_output(
            &BOXED_DECONSTRUCT_PROGRAM,
            "deconstruct_empty_struct",
            &[],
            jit_struct!(),
        );
    }

    // Snapshot members deconstruct to the same values as the plain struct.
    #[test]
    fn boxed_deconstruct_struct_snapshot() {
        run_program_assert_output(
            &BOXED_DECONSTRUCT_PROGRAM,
            "deconstruct_struct_snapshot",
            &[],
            jit_struct!(Value::Felt252(2.into()), Value::Uint8(2), Value::Uint128(2)),
        );
    }
}
| rust | Apache-2.0 | f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce | 2026-01-04T20:20:54.031924Z | false |
lambdaclass/cairo_native | https://github.com/lambdaclass/cairo_native/blob/f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce/src/libfuncs/circuit.rs | src/libfuncs/circuit.rs | //! # Circuit libfuncs
//!
//! Relevant casm code: https://github.com/starkware-libs/cairo/blob/v2.10.0/crates/cairo-lang-sierra-to-casm/src/invocations/circuit.rs
use super::{increment_builtin_counter_by, LibfuncHelper};
use crate::{
error::{panic::ToNativeAssertError, Result, SierraAssertError},
execution_result::{ADD_MOD_BUILTIN_SIZE, MUL_MOD_BUILTIN_SIZE, RANGE_CHECK96_BUILTIN_SIZE},
libfuncs::r#struct::build_struct_value,
metadata::{
drop_overrides::DropOverridesMeta,
realloc_bindings::ReallocBindingsMeta,
runtime_bindings::{CircuitArithOperationType, RuntimeBindingsMeta},
MetadataStorage,
},
native_panic,
types::{circuit::build_u384_struct_type, TypeBuilder},
utils::{get_integer_layout, layout_repeat, ProgramRegistryExt},
};
use cairo_lang_sierra::{
extensions::{
circuit::{
self, CircuitConcreteLibfunc, CircuitTypeConcrete, ConcreteGetOutputLibFunc,
ConcreteU96LimbsLessThanGuaranteeVerifyLibfunc, MOD_BUILTIN_INSTANCE_SIZE, VALUE_SIZE,
},
core::{CoreLibfunc, CoreType, CoreTypeConcrete},
lib_func::{SignatureAndTypeConcreteLibfunc, SignatureOnlyConcreteLibfunc},
ConcreteLibfunc,
},
program_registry::ProgramRegistry,
};
use melior::{
dialect::{
arith::{self, CmpiPredicate},
cf, llvm,
},
helpers::{ArithBlockExt, BuiltinBlockExt, GepIndex, LlvmBlockExt},
ir::{r#type::IntegerType, Block, BlockLike, Location, Type, Value},
Context,
};
use num_traits::Signed;
/// Select and call the correct libfunc builder function from the selector.
pub fn build<'ctx, 'this>(
context: &'ctx Context,
registry: &ProgramRegistry<CoreType, CoreLibfunc>,
entry: &'this Block<'ctx>,
location: Location<'ctx>,
helper: &LibfuncHelper<'ctx, 'this>,
metadata: &mut MetadataStorage,
selector: &CircuitConcreteLibfunc,
) -> Result<()> {
match selector {
CircuitConcreteLibfunc::AddInput(info) => {
build_add_input(context, registry, entry, location, helper, metadata, info)
}
CircuitConcreteLibfunc::Eval(info) => {
build_eval(context, registry, entry, location, helper, metadata, info)
}
CircuitConcreteLibfunc::GetDescriptor(info) => {
build_get_descriptor(context, registry, entry, location, helper, metadata, info)
}
CircuitConcreteLibfunc::InitCircuitData(info) => {
build_init_circuit_data(context, registry, entry, location, helper, metadata, info)
}
CircuitConcreteLibfunc::GetOutput(info) => {
build_get_output(context, registry, entry, location, helper, metadata, info)
}
CircuitConcreteLibfunc::TryIntoCircuitModulus(info) => build_try_into_circuit_modulus(
context, registry, entry, location, helper, metadata, info,
),
CircuitConcreteLibfunc::FailureGuaranteeVerify(info) => build_failure_guarantee_verify(
context, registry, entry, location, helper, metadata, info,
),
CircuitConcreteLibfunc::IntoU96Guarantee(info) => {
build_into_u96_guarantee(context, registry, entry, location, helper, metadata, info)
}
CircuitConcreteLibfunc::U96SingleLimbLessThanGuaranteeVerify(info) => {
build_u96_single_limb_less_than_guarantee_verify(
context, registry, entry, location, helper, metadata, info,
)
}
CircuitConcreteLibfunc::U96GuaranteeVerify(info) => {
build_u96_guarantee_verify(context, registry, entry, location, helper, metadata, info)
}
CircuitConcreteLibfunc::U96LimbsLessThanGuaranteeVerify(info) => {
build_u96_limbs_less_than_guarantee_verify(
context, registry, entry, location, helper, metadata, info,
)
}
}
}
/// Generate MLIR operations for the `init_circuit_data` libfunc.
///
/// Charges the circuit's whole range-check-96 usage, heap-allocates the
/// inputs array, and returns an empty accumulator `(length, pointer)` on
/// branch 0 together with the updated builtin counter.
#[allow(clippy::too_many_arguments)]
fn build_init_circuit_data<'ctx, 'this>(
    context: &'ctx Context,
    registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, 'this>,
    metadata: &mut MetadataStorage,
    info: &SignatureAndTypeConcreteLibfunc,
) -> Result<()> {
    let circuit_info = match registry.get_type(&info.ty)? {
        CoreTypeConcrete::Circuit(CircuitTypeConcrete::Circuit(info)) => &info.circuit_info,
        _ => return Err(SierraAssertError::BadTypeInfo.into()),
    };

    // Increment the range-check-96 counter by the full circuit usage upfront.
    let rc96 = increment_builtin_counter_by(
        context,
        entry,
        location,
        entry.arg(0)?,
        circuit_info.rc96_usage(),
    )?;

    // Full array size: one u384 slot per circuit input.
    let slot_layout = get_integer_layout(384);
    let array_size_bytes = layout_repeat(&slot_layout, circuit_info.n_inputs)?
        .0
        .pad_to_align()
        .size();
    let array_size = entry.const_int(context, location, array_size_bytes, 64)?;

    // Heap-allocate the inputs array (realloc of a null pointer acts as malloc).
    let ptr_ty = llvm::r#type::pointer(context, 0);
    let null_ptr = entry.append_op_result(llvm::zero(ptr_ty, location))?;
    let inputs_ptr = entry.append_op_result(ReallocBindingsMeta::realloc(
        context,
        null_ptr,
        array_size,
        location,
    )?)?;

    // Build the accumulator struct, starting with zero filled inputs.
    let initial_len = entry.const_int(context, location, 0, 64)?;
    let accumulator_ty = &info.branch_signatures()[0].vars[1].ty;
    let accumulator = build_struct_value(
        context,
        registry,
        entry,
        location,
        helper,
        metadata,
        accumulator_ty,
        &[initial_len, inputs_ptr],
    )?;

    helper.br(entry, 0, &[rc96, accumulator], location)
}
/// Generate MLIR operations for the `add_circuit_input` libfunc.
///
/// Appends one `u384` input to the accumulator's inputs array. While inputs
/// are still missing, returns through branch 1 with the updated accumulator;
/// once the final input is stored, returns through branch 0 with the raw
/// inputs pointer.
#[allow(clippy::too_many_arguments)]
fn build_add_input<'ctx, 'this>(
    context: &'ctx Context,
    registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, 'this>,
    _metadata: &mut MetadataStorage,
    info: &SignatureAndTypeConcreteLibfunc,
) -> Result<()> {
    // Total number of inputs the circuit expects.
    let n_inputs = match registry.get_type(&info.ty)? {
        CoreTypeConcrete::Circuit(CircuitTypeConcrete::Circuit(info)) => info.circuit_info.n_inputs,
        _ => return Err(SierraAssertError::BadTypeInfo.into()),
    };
    // Accumulator struct: (filled length at index 0, inputs pointer at index 1).
    let accumulator: Value = entry.arg(0)?;

    // Get accumulator current length
    let current_length = entry.extract_value(
        context,
        location,
        accumulator,
        IntegerType::new(context, 64).into(),
        0,
    )?;

    // Calculate next length: next_length = current_length + 1
    let k1 = entry.const_int(context, location, 1, 64)?;
    let next_length = entry.addi(current_length, k1, location)?;

    // Insert next_length into accumulator
    let accumulator = entry.insert_value(context, location, accumulator, next_length, 0)?;

    // Get pointer to inputs array
    let inputs_ptr = entry.extract_value(
        context,
        location,
        accumulator,
        llvm::r#type::pointer(context, 0),
        1,
    )?;
    // Get pointer to next input to insert (the slot at the old length).
    let next_input_ptr = entry.gep(
        context,
        location,
        inputs_ptr,
        &[GepIndex::Value(current_length)],
        IntegerType::new(context, 384).into(),
    )?;

    // Interpret u384 struct (input) as u384 integer
    let u384_struct = entry.arg(1)?;
    let new_input = u384_struct_to_integer(context, entry, location, u384_struct)?;

    // Store the u384 into next input pointer
    entry.store(context, location, next_input_ptr, new_input)?;

    // Check if last_insert: next_length == number_of_inputs
    let n_inputs = entry.const_int(context, location, n_inputs, 64)?;
    let last_insert = entry.cmpi(
        context,
        arith::CmpiPredicate::Eq,
        next_length,
        n_inputs,
        location,
    )?;

    // Split control flow on whether this was the final input.
    let middle_insert_block = helper.append_block(Block::new(&[]));
    let last_insert_block = helper.append_block(Block::new(&[]));
    entry.append_operation(cf::cond_br(
        context,
        last_insert,
        last_insert_block,
        middle_insert_block,
        &[],
        &[],
        location,
    ));

    // If not last insert, then return accumulator (branch 1).
    {
        helper.br(middle_insert_block, 1, &[accumulator], location)?;
    }

    // If is last insert, then return accumulator.pointer (branch 0).
    {
        // Get pointer to inputs array
        let inputs_ptr = last_insert_block.extract_value(
            context,
            location,
            accumulator,
            llvm::r#type::pointer(context, 0),
            1,
        )?;
        helper.br(last_insert_block, 0, &[inputs_ptr], location)?;
    }
    Ok(())
}
/// Generate MLIR operations for the `try_into_circuit_modulus` libfunc.
///
/// Accepts the candidate modulus only when it is strictly greater than one:
/// branch 0 (success) receives the 384-bit modulus, branch 1 (failure)
/// receives nothing.
#[allow(clippy::too_many_arguments)]
fn build_try_into_circuit_modulus<'ctx, 'this>(
    context: &'ctx Context,
    _registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, 'this>,
    _metadata: &mut MetadataStorage,
    _info: &SignatureOnlyConcreteLibfunc,
) -> Result<()> {
    // Reinterpret the u384 limb struct as a single 384-bit integer.
    let candidate = u384_struct_to_integer(context, entry, location, entry.arg(0)?)?;

    // Valid moduli are unsigned-greater-than one.
    let one = entry.const_int(context, location, 1, 384)?;
    let is_valid = entry.cmpi(context, arith::CmpiPredicate::Ugt, candidate, one, location)?;

    helper.cond_br(
        context,
        entry,
        is_valid,
        [0, 1],
        [&[candidate], &[]],
        location,
    )
}
/// Generate MLIR operations for the `get_circuit_descriptor` libfunc.
///
/// The descriptor carries no runtime data, so this is effectively a no-op:
/// an undefined value of the descriptor type is materialized and returned.
#[allow(clippy::too_many_arguments)]
fn build_get_descriptor<'ctx, 'this>(
    context: &'ctx Context,
    registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, 'this>,
    metadata: &mut MetadataStorage,
    info: &SignatureAndTypeConcreteLibfunc,
) -> Result<()> {
    let descriptor_type_id = &info.branch_signatures()[0].vars[0].ty;
    let descriptor_ty = registry.build_type(context, helper, metadata, descriptor_type_id)?;
    let descriptor = entry.append_op_result(llvm::undef(descriptor_ty, location))?;
    helper.br(entry, 0, &[descriptor], location)
}
/// Generate MLIR operations for the `eval_circuit` libfunc.
///
/// Evaluates every circuit gate over the given inputs and modulus. On
/// success, branch 0 receives the updated add/mul builtin counters and the
/// outputs struct; on failure, branch 1 receives the counters plus
/// partial-outputs and failure-guarantee values (built as `undef`).
#[allow(clippy::too_many_arguments)]
fn build_eval<'ctx, 'this>(
    context: &'ctx Context,
    registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, 'this>,
    metadata: &mut MetadataStorage,
    info: &SignatureAndTypeConcreteLibfunc,
) -> Result<()> {
    let circuit_info = match registry.get_type(&info.ty)? {
        CoreTypeConcrete::Circuit(CircuitTypeConcrete::Circuit(info)) => &info.circuit_info,
        _ => return Err(SierraAssertError::BadTypeInfo.into()),
    };

    let add_mod = entry.arg(0)?;
    let mul_mod = entry.arg(1)?;
    // NOTE(review): argument 2 appears to be the circuit descriptor (no
    // runtime data) and is intentionally unused — confirm against signature.
    let circuit_data = entry.arg(3)?;
    let circuit_modulus = entry.arg(4)?;

    // Arguments 5 and 6 are used to build the gate 0 (with constant value 1).
    // let zero = entry.argument(5)?;
    // let one = entry.argument(6)?;

    // Always increase the add mod builtin pointer, regardless of the evaluation result.
    // https://github.com/starkware-libs/cairo/blob/v2.12.0-dev.1/crates/cairo-lang-sierra-to-casm/src/invocations/circuit.rs?plain=1#L257
    let add_mod = increment_builtin_counter_by(
        context,
        entry,
        location,
        add_mod,
        circuit_info.add_offsets.len() * ADD_MOD_BUILTIN_SIZE,
    )?;

    // Emit the gate computations; yields the success/error continuation
    // blocks and the MLIR value of every gate.
    let ([ok_block, err_block], gates) = build_gate_evaluation(
        context,
        entry,
        location,
        helper,
        metadata,
        circuit_info,
        circuit_data,
        circuit_modulus,
    )?;

    // Ok case
    {
        // We drop circuit_data, as it's consumed by this libfunc.
        DropOverridesMeta::invoke_override(
            context,
            registry,
            helper,
            helper.init_block(),
            ok_block,
            location,
            metadata,
            &info.signature.param_signatures[3].ty,
            circuit_data,
        )?;

        // Increase the mul mod builtin pointer by the number of evaluated gates.
        // If the evaluation succeeds, then we assume that every gate was evaluated.
        // https://github.com/starkware-libs/cairo/blob/v2.12.0-dev.1/crates/cairo-lang-sierra-to-casm/src/invocations/circuit.rs?plain=1#L261
        let mul_mod = increment_builtin_counter_by(
            context,
            ok_block,
            location,
            mul_mod,
            circuit_info.mul_offsets.len() * MUL_MOD_BUILTIN_SIZE,
        )?;

        // Calculate capacity for array: one u384 per circuit value.
        let outputs_capacity = circuit_info.values.len();
        let u384_integer_layout = get_integer_layout(384);
        let outputs_layout = layout_repeat(&u384_integer_layout, outputs_capacity)?.0;
        let outputs_capacity_bytes = outputs_layout.pad_to_align().size();
        let outputs_capacity_bytes_value =
            ok_block.const_int(context, location, outputs_capacity_bytes, 64)?;

        // Alloc memory for array.
        let ptr_ty = llvm::r#type::pointer(context, 0);
        let outputs_ptr = ok_block.append_op_result(llvm::zero(ptr_ty, location))?;
        let outputs_ptr = ok_block.append_op_result(ReallocBindingsMeta::realloc(
            context,
            outputs_ptr,
            outputs_capacity_bytes_value,
            location,
        )?)?;

        // Insert evaluated gates into the array.
        for (i, gate) in gates.into_iter().enumerate() {
            let value_ptr = ok_block.gep(
                context,
                location,
                outputs_ptr,
                &[GepIndex::Const(i as i32)],
                IntegerType::new(context, 384).into(),
            )?;
            ok_block.store(context, location, value_ptr, gate)?;
        }

        // The modulus travels alongside the outputs pointer in the result struct.
        let modulus_struct = u384_integer_to_struct(context, ok_block, location, circuit_modulus)?;

        // Build output struct
        let outputs_type_id = &info.branch_signatures()[0].vars[2].ty;
        let outputs = build_struct_value(
            context,
            registry,
            ok_block,
            location,
            helper,
            metadata,
            outputs_type_id,
            &[outputs_ptr, modulus_struct],
        )?;

        helper.br(ok_block, 0, &[add_mod, mul_mod, outputs], location)?;
    }

    // Error case
    {
        // We drop circuit_data, as it's consumed by this libfunc.
        DropOverridesMeta::invoke_override(
            context,
            registry,
            helper,
            helper.init_block(),
            err_block,
            location,
            metadata,
            &info.signature.param_signatures[3].ty,
            circuit_data,
        )?;

        // We only consider mul gates evaluated before failure.
        // Increase the mul mod builtin pointer by the number of evaluated gates.
        // As the evaluation failed, we read the number of evaluated gates from
        // the first argument of the error block.
        // https://github.com/starkware-libs/cairo/blob/v2.12.0-dev.1/crates/cairo-lang-sierra-to-casm/src/invocations/circuit.rs?plain=1#L261
        let mul_mod = {
            let mul_mod_usage = err_block.muli(
                err_block.arg(0)?,
                err_block.const_int(context, location, MUL_MOD_BUILTIN_SIZE, 64)?,
                location,
            )?;
            err_block.addi(mul_mod, mul_mod_usage, location)
        }?;

        // Error-branch partial/failure outputs are materialized as `undef`.
        let partial_type_id = &info.branch_signatures()[1].vars[2].ty;
        let partial = err_block.append_op_result(llvm::undef(
            registry.build_type(context, helper, metadata, partial_type_id)?,
            location,
        ))?;
        let failure_type_id = &info.branch_signatures()[1].vars[3].ty;
        let failure = err_block.append_op_result(llvm::undef(
            registry.build_type(context, helper, metadata, failure_type_id)?,
            location,
        ))?;

        helper.br(
            err_block,
            1,
            &[add_mod, mul_mod, partial, failure],
            location,
        )?;
    }
    Ok(())
}
/// Receives the circuit inputs, and builds the evaluation of the full circuit.
///
/// Returns two branches. The success block and the error block respectively.
/// - The success block receives nothing.
/// - The error block receives:
/// - The index of the first gate that could not be computed.
///
/// The evaluated gates are returned separately, as a vector of `MLIR` values.
/// Note that in the case of error, not all MLIR values are guaranteed to have been computed,
/// and should not be used carelessly.
///
/// TODO: Consider returning the evaluated gates through the block directly:
/// - As a pointer to a heap allocated array of gates.
/// - As a llvm struct/array of evaluated gates (its size could get really big).
/// - As arguments to the block (one argument per block).
///
/// The original Cairo hint evaluates all gates, even in case of failure.
/// This implementation exits on first error, as there is no need for the partial outputs yet.
#[allow(clippy::too_many_arguments)]
fn build_gate_evaluation<'ctx, 'this>(
context: &'this Context,
mut block: &'this Block<'ctx>,
location: Location<'ctx>,
helper: &LibfuncHelper<'ctx, 'this>,
metadata: &mut MetadataStorage,
circuit_info: &circuit::CircuitInfo,
circuit_data: Value<'ctx, 'ctx>,
circuit_modulus: Value<'ctx, 'ctx>,
) -> Result<([&'this Block<'ctx>; 2], Vec<Value<'ctx, 'ctx>>)> {
let runtime_bindings_meta = metadata
.get_mut::<RuntimeBindingsMeta>()
.to_native_assert_error("Unable to get the RuntimeBindingsMeta from MetadataStorage")?;
// Each gate is represented as a MLIR value, and identified by an offset in the gate vector.
// - `None` implies that the gate value *has not* been compiled yet.
// - `Some` implies that the gate values *has* already been compiled, and therefore can be safely used.
// Initially, some gate values are already known.
let mut gates = vec![None; 1 + circuit_info.n_inputs + circuit_info.values.len()];
// The first gate always has a value of 1. It is implicitly referred to by some gate offsets.
gates[0] = Some(block.const_int(context, location, 1, 384)?);
let u384_type = IntegerType::new(context, 384).into();
// The input gates are also known at the start. We take them from the `circuit_data` array.
for i in 0..circuit_info.n_inputs {
let value_ptr = block.gep(
context,
location,
circuit_data,
&[GepIndex::Const(i as i32)],
u384_type,
)?;
gates[i + 1] = Some(block.load(context, location, value_ptr, u384_type)?);
}
let err_block = helper.append_block(Block::new(&[(
IntegerType::new(context, 64).into(),
location,
)]));
let ok_block = helper.append_block(Block::new(&[]));
let mut add_offsets = circuit_info.add_offsets.iter().peekable();
let mut mul_offsets = circuit_info.mul_offsets.iter().enumerate();
// We loop until all gates have been solved
loop {
// We iterate the add gate offsets as long as we can
while let Some(&gate_offset) = add_offsets.peek() {
let lhs_value = gates[gate_offset.lhs].to_owned();
let rhs_value = gates[gate_offset.rhs].to_owned();
let output_value = gates[gate_offset.output].to_owned();
// Depending on the values known at the time, we can deduce if we are dealing with an ADD gate or a SUB gate.
match (lhs_value, rhs_value, output_value) {
// ADD: lhs + rhs = out
(Some(lhs_value), Some(rhs_value), None) => {
let value = runtime_bindings_meta.circuit_arith_operation(
context,
helper.module,
block,
location,
CircuitArithOperationType::Add,
lhs_value,
rhs_value,
circuit_modulus,
)?;
gates[gate_offset.output] = Some(value);
}
// SUB: lhs = out - rhs
(None, Some(rhs_value), Some(output_value)) => {
let value = runtime_bindings_meta.circuit_arith_operation(
context,
helper.module,
block,
location,
CircuitArithOperationType::Sub,
output_value,
rhs_value,
circuit_modulus,
)?;
gates[gate_offset.lhs] = Some(value);
}
// We can't solve this add gate yet, so we break from the loop
_ => break,
}
add_offsets.next();
}
// If we can't advance any more with add gate offsets, then we solve the next mul gate offset and go back to the start of the loop (solving add gate offsets).
if let Some((gate_offset_idx, gate_offset)) = mul_offsets.next() {
let lhs_value = gates[gate_offset.lhs].to_owned();
let rhs_value = gates[gate_offset.rhs].to_owned();
let output_value = gates[gate_offset.output].to_owned();
// Depending on the values known at the time, we can deduce if we are dealing with an MUL gate or a INV gate.
match (lhs_value, rhs_value, output_value) {
// MUL: lhs * rhs = out
(Some(lhs_value), Some(rhs_value), None) => {
let value = runtime_bindings_meta.circuit_arith_operation(
context,
helper.module,
block,
location,
CircuitArithOperationType::Mul,
lhs_value,
rhs_value,
circuit_modulus,
)?;
gates[gate_offset.output] = Some(value)
}
// INV: lhs = 1 / rhs
(None, Some(rhs_value), Some(_)) => {
// Apply egcd to find gcd and inverse
let euclidean_result = runtime_bindings_meta.extended_euclidean_algorithm(
context,
helper.module,
block,
location,
rhs_value,
circuit_modulus,
)?;
// Extract the values from the result struct
let gcd =
block.extract_value(context, location, euclidean_result, u384_type, 0)?;
let inverse =
block.extract_value(context, location, euclidean_result, u384_type, 1)?;
// if the gcd is not 1, then fail (a and b are not coprimes)
let one = block.const_int_from_type(context, location, 1, u384_type)?;
let gate_offset_idx_value = block.const_int_from_type(
context,
location,
gate_offset_idx,
IntegerType::new(context, 64).into(),
)?;
let has_inverse = block.cmpi(context, CmpiPredicate::Eq, gcd, one, location)?;
let has_inverse_block = helper.append_block(Block::new(&[]));
block.append_operation(cf::cond_br(
context,
has_inverse,
has_inverse_block,
err_block,
&[],
&[gate_offset_idx_value],
location,
));
block = has_inverse_block;
// if the inverse is negative, then add modulus
let zero = block.const_int_from_type(context, location, 0, u384_type)?;
let is_negative = block
.append_operation(arith::cmpi(
context,
CmpiPredicate::Slt,
inverse,
zero,
location,
))
.result(0)?
.into();
let wrapped_inverse = block.addi(inverse, circuit_modulus, location)?;
let inverse = block.append_op_result(arith::select(
is_negative,
wrapped_inverse,
inverse,
location,
))?;
gates[gate_offset.lhs] = Some(inverse);
}
// The imposibility to solve this mul gate offset would render the circuit unsolvable
_ => return Err(SierraAssertError::ImpossibleCircuit.into()),
}
} else {
// If there are no mul gate offsets left, then we have the finished evaluation.
break;
}
}
block.append_operation(cf::br(ok_block, &[], location));
// Validate all values have been calculated
// Should only fail if the circuit is not solvable (bad form)
let evaluated_gates = gates
.into_iter()
.skip(1 + circuit_info.n_inputs)
.collect::<Option<Vec<Value>>>()
.ok_or(SierraAssertError::ImpossibleCircuit)?;
Ok(([ok_block, err_block], evaluated_gates))
}
/// Generate MLIR operations for the `circuit_failure_guarantee_verify` libfunc.
///
/// Effectively a NOOP: it only charges the builtin counters and forwards an
/// undef guarantee value, since the guarantee carries no runtime data here.
#[allow(clippy::too_many_arguments)]
fn build_failure_guarantee_verify<'ctx, 'this>(
    context: &'ctx Context,
    registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, 'this>,
    metadata: &mut MetadataStorage,
    info: &SignatureOnlyConcreteLibfunc,
) -> Result<()> {
    // Account for the range check and mod builtin usage of this libfunc.
    let range_check =
        increment_builtin_counter_by(context, entry, location, entry.arg(0)?, 2 + VALUE_SIZE)?;
    let mod_builtin = increment_builtin_counter_by(
        context,
        entry,
        location,
        entry.arg(1)?,
        MOD_BUILTIN_INSTANCE_SIZE,
    )?;

    // Build an undef value of the guarantee type declared by the branch signature.
    let guarantee_ty_id = &info.branch_signatures()[0].vars[2].ty;
    let guarantee_ty = registry.build_type(context, helper, metadata, guarantee_ty_id)?;
    let guarantee = entry.append_op_result(llvm::undef(guarantee_ty, location))?;

    helper.br(entry, 0, &[range_check, mod_builtin, guarantee], location)
}
/// Generate MLIR operations for the `u96_limbs_less_than_guarantee_verify` libfunc.
///
/// Compares the most-significant limb of the guarantee's gate and modulus:
/// - If they differ, takes branch 1 carrying the difference.
/// - If they are equal, takes branch 0 carrying a new guarantee with one limb fewer.
#[allow(clippy::too_many_arguments)]
fn build_u96_limbs_less_than_guarantee_verify<'ctx, 'this>(
    context: &'ctx Context,
    registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, 'this>,
    metadata: &mut MetadataStorage,
    info: &ConcreteU96LimbsLessThanGuaranteeVerifyLibfunc,
) -> Result<()> {
    let guarantee = entry.arg(0)?;
    let limb_count = info.limb_count;
    let u96_type = IntegerType::new(context, 96).into();
    // The guarantee holds two limb arrays of `limb_count` u96 limbs each.
    let limb_struct_type = llvm::r#type::r#struct(context, &vec![u96_type; limb_count], false);
    // extract gate and modulus from input value
    let gate = entry.extract_value(context, location, guarantee, limb_struct_type, 0)?;
    let modulus = entry.extract_value(context, location, guarantee, limb_struct_type, 1)?;
    // extract last limb from gate and modulus
    let gate_last_limb = entry.extract_value(context, location, gate, u96_type, limb_count - 1)?;
    let modulus_last_limb =
        entry.extract_value(context, location, modulus, u96_type, limb_count - 1)?;
    // calculate diff between limbs
    let diff = entry.append_op_result(arith::subi(modulus_last_limb, gate_last_limb, location))?;
    let k0 = entry.const_int_from_type(context, location, 0, u96_type)?;
    let has_diff = entry.cmpi(context, CmpiPredicate::Ne, diff, k0, location)?;
    let diff_block = helper.append_block(Block::new(&[]));
    let next_block = helper.append_block(Block::new(&[]));
    entry.append_operation(cf::cond_br(
        context,
        has_diff,
        diff_block,
        next_block,
        &[],
        &[],
        location,
    ));
    {
        // if there is diff, return it
        helper.br(diff_block, 1, &[diff], location)?;
    }
    {
        // if there is no diff, build a new guarantee, skipping last limb
        let new_limb_struct_type =
            llvm::r#type::r#struct(context, &vec![u96_type; limb_count - 1], false);
        // Copy limbs [0, limb_count - 1) into the smaller limb struct.
        let new_gate = build_array_slice(
            context,
            next_block,
            location,
            gate,
            u96_type,
            new_limb_struct_type,
            0,
            limb_count - 1,
        )?;
        let new_modulus = build_array_slice(
            context,
            next_block,
            location,
            modulus,
            u96_type,
            new_limb_struct_type,
            0,
            limb_count - 1,
        )?;
        // Assemble the reduced guarantee using the branch's declared guarantee type.
        let guarantee_type_id = &info.branch_signatures()[0].vars[0].ty;
        let new_guarantee = build_struct_value(
            context,
            registry,
            next_block,
            location,
            helper,
            metadata,
            guarantee_type_id,
            &[new_gate, new_modulus],
        )?;
        helper.br(next_block, 0, &[new_guarantee], location)?;
    }
    Ok(())
}
/// Generate MLIR operations for the `u96_single_limb_less_than_guarantee_verify`
/// libfunc: forwards the difference between the single modulus limb and the
/// single gate limb.
fn build_u96_single_limb_less_than_guarantee_verify<'ctx, 'this>(
    context: &'ctx Context,
    _registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, 'this>,
    _metadata: &mut MetadataStorage,
    _info: &SignatureOnlyConcreteLibfunc,
) -> Result<()> {
    let guarantee_value = entry.arg(0)?;
    let limb_ty = IntegerType::new(context, 96).into();
    // This libfunc will always receive gate and modulus with a single limb.
    let single_limb_struct_ty = llvm::r#type::r#struct(context, &[limb_ty; 1], false);

    // Unpack the gate and the modulus from the guarantee.
    let gate = entry.extract_value(
        context,
        location,
        guarantee_value,
        single_limb_struct_ty,
        0,
    )?;
    let modulus = entry.extract_value(
        context,
        location,
        guarantee_value,
        single_limb_struct_ty,
        1,
    )?;

    // Take the only limb out of each value.
    let gate_limb = entry.extract_value(context, location, gate, limb_ty, 0)?;
    let modulus_limb = entry.extract_value(context, location, modulus, limb_ty, 0)?;

    // Calculate the difference between the limbs and forward it.
    let limb_diff = entry.append_op_result(arith::subi(modulus_limb, gate_limb, location))?;
    helper.br(entry, 0, &[limb_diff], location)
}
/// Generate MLIR operations for the `get_circuit_output` libfunc.
#[allow(clippy::too_many_arguments)]
fn build_get_output<'ctx, 'this>(
context: &'ctx Context,
registry: &ProgramRegistry<CoreType, CoreLibfunc>,
entry: &'this Block<'ctx>,
location: Location<'ctx>,
helper: &LibfuncHelper<'ctx, 'this>,
metadata: &mut MetadataStorage,
info: &ConcreteGetOutputLibFunc,
) -> Result<()> {
let circuit_info = match registry.get_type(&info.circuit_ty)? {
CoreTypeConcrete::Circuit(CircuitTypeConcrete::Circuit(info)) => &info.circuit_info,
_ => return Err(SierraAssertError::BadTypeInfo.into()),
};
let output_type_id = &info.output_ty;
let u384_type = IntegerType::new(context, 384).into();
let output_offset_idx = *circuit_info
.values
.get(output_type_id)
.ok_or(SierraAssertError::BadTypeInfo)?;
let output_idx = output_offset_idx - circuit_info.n_inputs - 1;
let outputs = entry.arg(0)?;
let circuit_ptr = entry.extract_value(
context,
location,
outputs,
llvm::r#type::pointer(context, 0),
0,
)?;
let modulus_struct = entry.extract_value(
context,
location,
outputs,
build_u384_struct_type(context),
1,
)?;
let output_integer_ptr = entry.gep(
context,
location,
circuit_ptr,
&[GepIndex::Const(output_idx as i32)],
u384_type,
)?;
| rust | Apache-2.0 | f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce | 2026-01-04T20:20:54.031924Z | true |
lambdaclass/cairo_native | https://github.com/lambdaclass/cairo_native/blob/f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce/src/libfuncs/array.rs | src/libfuncs/array.rs | //! # Array libfuncs
use super::LibfuncHelper;
use crate::{
error::{Error, Result, SierraAssertError},
metadata::{
drop_overrides::DropOverridesMeta, dup_overrides::DupOverridesMeta,
realloc_bindings::ReallocBindingsMeta, MetadataStorage,
},
native_assert,
types::array::calc_data_prefix_offset,
utils::ProgramRegistryExt,
};
use cairo_lang_sierra::{
extensions::{
array::{ArrayConcreteLibfunc, ConcreteMultiPopLibfunc},
core::{CoreLibfunc, CoreType, CoreTypeConcrete},
lib_func::{SignatureAndTypeConcreteLibfunc, SignatureOnlyConcreteLibfunc},
types::InfoAndTypeConcreteType,
},
program_registry::ProgramRegistry,
};
use melior::{
dialect::{
arith::{self, CmpiPredicate},
cf, llvm, ods, scf,
},
helpers::{ArithBlockExt, BuiltinBlockExt, GepIndex, LlvmBlockExt},
ir::{
attribute::IntegerAttribute, r#type::IntegerType, Block, BlockLike, Location, Region, Value,
},
Context,
};
use std::alloc::Layout;
/// Select and call the correct libfunc builder function from the selector.
pub fn build<'ctx, 'this>(
context: &'ctx Context,
registry: &ProgramRegistry<CoreType, CoreLibfunc>,
entry: &'this Block<'ctx>,
location: Location<'ctx>,
helper: &LibfuncHelper<'ctx, 'this>,
metadata: &mut MetadataStorage,
selector: &ArrayConcreteLibfunc,
) -> Result<()> {
match selector {
ArrayConcreteLibfunc::New(info) => {
build_new(context, registry, entry, location, helper, metadata, info)
}
ArrayConcreteLibfunc::SpanFromTuple(info) => {
build_span_from_tuple(context, registry, entry, location, helper, metadata, info)
}
ArrayConcreteLibfunc::TupleFromSpan(info) => {
build_tuple_from_span(context, registry, entry, location, helper, metadata, info)
}
ArrayConcreteLibfunc::Append(info) => {
build_append(context, registry, entry, location, helper, metadata, info)
}
ArrayConcreteLibfunc::PopFront(info) => build_pop::<false, false>(
context,
registry,
entry,
location,
helper,
metadata,
PopInfo::Single(info),
),
ArrayConcreteLibfunc::PopFrontConsume(info) => build_pop::<true, false>(
context,
registry,
entry,
location,
helper,
metadata,
PopInfo::Single(info),
),
ArrayConcreteLibfunc::Get(info) => {
build_get(context, registry, entry, location, helper, metadata, info)
}
ArrayConcreteLibfunc::Slice(info) => {
build_slice(context, registry, entry, location, helper, metadata, info)
}
ArrayConcreteLibfunc::Len(info) => {
build_len(context, registry, entry, location, helper, metadata, info)
}
ArrayConcreteLibfunc::SnapshotPopFront(info) => build_pop::<false, false>(
context,
registry,
entry,
location,
helper,
metadata,
PopInfo::Single(info),
),
ArrayConcreteLibfunc::SnapshotPopBack(info) => build_pop::<false, true>(
context,
registry,
entry,
location,
helper,
metadata,
PopInfo::Single(info),
),
ArrayConcreteLibfunc::SnapshotMultiPopFront(info) => build_pop::<false, false>(
context,
registry,
entry,
location,
helper,
metadata,
PopInfo::Multi(info),
),
ArrayConcreteLibfunc::SnapshotMultiPopBack(info) => build_pop::<false, true>(
context,
registry,
entry,
location,
helper,
metadata,
PopInfo::Multi(info),
),
}
}
/// Builds a new array with no initial capacity.
///
/// # Cairo Signature
///
/// ```cairo
/// extern fn array_new<T>() -> Array<T> nopanic;
/// ```
pub fn build_new<'ctx, 'this>(
    context: &'ctx Context,
    _registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, 'this>,
    _metadata: &mut MetadataStorage,
    _info: &SignatureOnlyConcreteLibfunc,
) -> Result<()> {
    let pointer_ty = llvm::r#type::pointer(context, 0);
    let length_ty = IntegerType::new(context, 32).into();
    // Array representation: (data pointer, start, end, capacity).
    let array_ty = llvm::r#type::r#struct(
        context,
        &[pointer_ty, length_ty, length_ty, length_ty],
        false,
    );

    // An empty array is a null data pointer with start, end and capacity all zero.
    let null_data_ptr = entry.append_op_result(llvm::zero(pointer_ty, location))?;
    let zero = entry.const_int_from_type(context, location, 0, length_ty)?;

    let array_value = entry.append_op_result(llvm::undef(array_ty, location))?;
    let array_value = entry.insert_values(
        context,
        location,
        array_value,
        &[null_data_ptr, zero, zero, zero],
    )?;

    helper.br(entry, 0, &[array_value], location)
}
/// Builds a span (a cairo native array) from a boxed tuple of same-type elements.
///
/// Note: The `&info.ty` field has the entire `[T; N]` tuple. It is not the `T` in `Array<T>`.
///
/// # Cairo Signature
///
/// ```cairo
/// extern fn span_from_tuple<T, impl Info: FixedSizedArrayInfo<T>>(
///     struct_like: Box<@T>
/// ) -> @Array<Info::Element> nopanic;
/// ```
pub fn build_span_from_tuple<'ctx, 'this>(
    context: &'ctx Context,
    registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, 'this>,
    metadata: &mut MetadataStorage,
    info: &SignatureAndTypeConcreteLibfunc,
) -> Result<()> {
    // Make sure the realloc/free bindings are available before emitting calls to them.
    metadata.get_or_insert_with(|| ReallocBindingsMeta::new(context, helper));
    // The number of array elements equals the tuple's member count.
    let tuple_len = {
        let CoreTypeConcrete::Struct(info) = registry.get_type(&info.ty)? else {
            return Err(Error::SierraAssert(SierraAssertError::BadTypeInfo));
        };
        info.members.len()
    };
    let ptr_ty = llvm::r#type::pointer(context, 0);
    let len_ty = IntegerType::new(context, 32).into();
    let (_, tuple_layout) = registry.build_type_with_layout(context, helper, metadata, &info.ty)?;
    // Total allocation = element data size + the data prefix that precedes it.
    let array_len_bytes = tuple_layout.pad_to_align().size();
    let array_len_bytes_with_offset = entry.const_int(
        context,
        location,
        array_len_bytes + calc_data_prefix_offset(tuple_layout),
        64,
    )?;
    let array_len_bytes = entry.const_int(context, location, array_len_bytes, 64)?;
    let array_len = entry.const_int_from_type(context, location, tuple_len, len_ty)?;
    let k0 = entry.const_int_from_type(context, location, 0, len_ty)?;
    let k1 = entry.const_int_from_type(context, location, 1, len_ty)?;
    // Allocate space for the array.
    let allocation_ptr = entry.append_op_result(llvm::zero(ptr_ty, location))?;
    let allocation_ptr = entry.append_op_result(ReallocBindingsMeta::realloc(
        context,
        allocation_ptr,
        array_len_bytes_with_offset,
        location,
    )?)?;
    // Write the array data prefix: two u32 values, initialized here to 1
    // (presumably the reference counter — see its use in `build_append`) and
    // the array length.
    let data_prefix = entry.append_op_result(llvm::undef(
        llvm::r#type::r#struct(context, &[len_ty, len_ty], false),
        location,
    ))?;
    let data_prefix = entry.insert_values(context, location, data_prefix, &[k1, array_len])?;
    entry.store(context, location, allocation_ptr, data_prefix)?;
    // The array's data pointer points just past the prefix.
    let array_ptr = entry.gep(
        context,
        location,
        allocation_ptr,
        &[GepIndex::Const(calc_data_prefix_offset(tuple_layout) as i32)],
        IntegerType::new(context, 8).into(),
    )?;
    // Move the data into the array and free the original tuple. Since the tuple and the array are
    // represented the same way, a simple memcpy is enough.
    entry.memcpy(
        context,
        location,
        entry.argument(0)?.into(),
        array_ptr,
        array_len_bytes,
    );
    entry.append_operation(ReallocBindingsMeta::free(
        context,
        entry.argument(0)?.into(),
        location,
    )?);
    // Build the array representation.
    let k8 = entry.const_int(context, location, 8, 64)?;
    // The array value stores a pointer-to-pointer to the data; allocate that cell.
    let array_ptr_ptr =
        entry.append_op_result(llvm::zero(llvm::r#type::pointer(context, 0), location))?;
    let array_ptr_ptr: Value<'ctx, '_> = entry.append_op_result(ReallocBindingsMeta::realloc(
        context,
        array_ptr_ptr,
        k8,
        location,
    )?)?;
    entry.store(context, location, array_ptr_ptr, array_ptr)?;
    // Assemble (data pointer pointer, start = 0, end = len, capacity = len).
    let value = entry.append_op_result(llvm::undef(
        llvm::r#type::r#struct(context, &[ptr_ty, len_ty, len_ty, len_ty], false),
        location,
    ))?;
    let value = entry.insert_values(
        context,
        location,
        value,
        &[array_ptr_ptr, k0, array_len, array_len],
    )?;
    helper.br(entry, 0, &[value], location)
}
/// Builds a tuple (struct) from a span (a cairo native array).
///
/// Note: The `&info.ty` field has the entire `[T; N]` tuple. It is not the `T` in `Array<T>`.
/// The tuple size `N` must match the span length.
///
/// # Cairo Signature
///
/// ```cairo
/// fn tuple_from_span<T, impl Info: FixedSizedArrayInfo<T>>(span: @Array<Info::Element>) -> Option<@Box<T>> nopanic;
/// ```
pub fn build_tuple_from_span<'ctx, 'this>(
    context: &'ctx Context,
    registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, 'this>,
    metadata: &mut MetadataStorage,
    info: &SignatureAndTypeConcreteLibfunc,
) -> Result<()> {
    // Make sure the realloc/free bindings are available before emitting calls to them.
    metadata.get_or_insert_with(|| ReallocBindingsMeta::new(context, helper));
    // Unwrap Snapshot<Array<T>> down to the element type id `T`.
    let elem_id = {
        let CoreTypeConcrete::Snapshot(info) =
            registry.get_type(&info.signature.param_signatures[0].ty)?
        else {
            return Err(Error::SierraAssert(SierraAssertError::BadTypeInfo));
        };
        let CoreTypeConcrete::Array(info) = registry.get_type(&info.ty)? else {
            return Err(Error::SierraAssert(SierraAssertError::BadTypeInfo));
        };
        &info.ty
    };
    // The expected span length is the tuple's member count.
    let tuple_len_const = {
        let CoreTypeConcrete::Struct(param) = registry.get_type(&info.ty)? else {
            return Err(Error::SierraAssert(SierraAssertError::BadTypeInfo));
        };
        param.members.len()
    };
    let ptr_ty = llvm::r#type::pointer(context, 0);
    let len_ty = IntegerType::new(context, 32).into();
    let (_, elem_layout) = registry.build_type_with_layout(context, helper, metadata, elem_id)?;
    let (tuple_ty, tuple_layout) =
        registry.build_type_with_layout(context, helper, metadata, &info.ty)?;
    // Array representation: (data pointer pointer, start, end, capacity).
    let array_ptr_ptr =
        entry.extract_value(context, location, entry.argument(0)?.into(), ptr_ty, 0)?;
    let array_start =
        entry.extract_value(context, location, entry.argument(0)?.into(), len_ty, 1)?;
    let array_end = entry.extract_value(context, location, entry.argument(0)?.into(), len_ty, 2)?;
    let array_len = entry.append_op_result(arith::subi(array_end, array_start, location))?;
    let tuple_len = entry.const_int_from_type(context, location, tuple_len_const, len_ty)?;
    let len_matches = entry.append_op_result(arith::cmpi(
        context,
        CmpiPredicate::Eq,
        array_len,
        tuple_len,
        location,
    ))?;
    // Ensure the tuple's length matches the array's.
    let valid_block = helper.append_block(Block::new(&[]));
    let error_block = helper.append_block(Block::new(&[]));
    entry.append_operation(cf::cond_br(
        context,
        len_matches,
        valid_block,
        error_block,
        &[],
        &[],
        location,
    ));
    // Ensure the type's clone and drop implementations are registered.
    registry.build_type(
        context,
        helper,
        metadata,
        &info.signature.param_signatures[0].ty,
    )?;
    // Branch for when the lengths match:
    {
        // Allocate the output tuple.
        let value_size = valid_block.const_int(context, location, tuple_layout.size(), 64)?;
        let value = valid_block.append_op_result(llvm::zero(ptr_ty, location))?;
        let value = valid_block.append_op_result(ReallocBindingsMeta::realloc(
            context, value, value_size, location,
        )?)?;
        // Byte offset of the span's first element: start index * element stride.
        let array_start_offset = valid_block.append_op_result(arith::extui(
            array_start,
            IntegerType::new(context, 64).into(),
            location,
        ))?;
        let array_start_offset = valid_block.append_op_result(arith::muli(
            array_start_offset,
            valid_block.const_int(context, location, elem_layout.pad_to_align().size(), 64)?,
            location,
        ))?;
        let array_ptr = valid_block.load(context, location, array_ptr_ptr, ptr_ty)?;
        let array_data_start_ptr = valid_block.gep(
            context,
            location,
            array_ptr,
            &[GepIndex::Value(array_start_offset)],
            IntegerType::new(context, 8).into(),
        )?;
        // Check if the array is shared.
        let is_shared = is_shared(context, valid_block, location, array_ptr_ptr, elem_layout)?;
        valid_block.append_operation(scf::r#if(
            is_shared,
            &[],
            {
                // When the array is shared, we should clone the entire data.
                let region = Region::new();
                let block = region.append_block(Block::new(&[]));
                if DupOverridesMeta::is_overriden(metadata, &info.ty) {
                    let src_ptr = array_data_start_ptr;
                    let dst_ptr = value;
                    let value = block.load(context, location, src_ptr, tuple_ty)?;
                    // Invoke the tuple's clone mechanism, which will take care of copying or
                    // cloning each item in the array.
                    let values = DupOverridesMeta::invoke_override(
                        context,
                        registry,
                        helper,
                        helper.init_block(),
                        &block,
                        location,
                        metadata,
                        &info.ty,
                        value,
                    )?;
                    // Write back both duplicates: original location and new tuple.
                    block.store(context, location, src_ptr, values.0)?;
                    block.store(context, location, dst_ptr, values.1)?;
                } else {
                    // No clone override: the elements are trivially copyable.
                    block.memcpy(context, location, array_data_start_ptr, value, value_size)
                }
                // Drop the original array (by decreasing its reference counter).
                DropOverridesMeta::invoke_override(
                    context,
                    registry,
                    helper,
                    helper.init_block(),
                    &block,
                    location,
                    metadata,
                    &info.signature.param_signatures[0].ty,
                    entry.argument(0)?.into(),
                )?;
                block.append_operation(scf::r#yield(&[], location));
                region
            },
            {
                // When the array is not shared, we can just move the data to the new tuple and free
                // the array.
                let region = Region::new();
                let block = region.append_block(Block::new(&[]));
                block.memcpy(context, location, array_data_start_ptr, value, value_size);
                // NOTE: If the target tuple has no elements, and the array is not shared, then we
                // would attempt to free 0xfffffffffffffff0. This is not possible and disallowed by
                // the Cairo compiler.
                // TODO: Drop elements before array_start and between array_end and max length.
                // The allocation starts at the data prefix, before the data pointer.
                let data_ptr = block.gep(
                    context,
                    location,
                    array_ptr,
                    &[GepIndex::Const(
                        -(calc_data_prefix_offset(elem_layout) as i32),
                    )],
                    IntegerType::new(context, 8).into(),
                )?;
                block.append_operation(ReallocBindingsMeta::free(context, data_ptr, location)?);
                block.append_operation(ReallocBindingsMeta::free(
                    context,
                    array_ptr_ptr,
                    location,
                )?);
                block.append_operation(scf::r#yield(&[], location));
                region
            },
            location,
        ));
        helper.br(valid_block, 0, &[value], location)?;
    }
    {
        // When there's a length mismatch, just consume (drop) the array.
        DropOverridesMeta::invoke_override(
            context,
            registry,
            helper,
            helper.init_block(),
            error_block,
            location,
            metadata,
            &info.signature.param_signatures[0].ty,
            entry.argument(0)?.into(),
        )?;
        helper.br(error_block, 1, &[], location)
    }
}
/// Generate MLIR operations for the `array_append` libfunc.
///
/// Grows the backing allocation when needed, stores the new element at the end
/// index, then bumps the end index and the stored max-length field.
pub fn build_append<'ctx, 'this>(
    context: &'ctx Context,
    registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, 'this>,
    metadata: &mut MetadataStorage,
    info: &SignatureAndTypeConcreteLibfunc,
) -> Result<()> {
    // Make sure the realloc/free bindings are available before emitting calls to them.
    metadata.get_or_insert_with(|| ReallocBindingsMeta::new(context, helper));
    let self_ty = registry.build_type(
        context,
        helper,
        metadata,
        &info.signature.param_signatures[0].ty,
    )?;
    let ptr_ty = llvm::r#type::pointer(context, 0);
    let len_ty = IntegerType::new(context, 32).into();
    let (_, elem_layout) = registry.build_type_with_layout(context, helper, metadata, &info.ty)?;
    let elem_stride = entry.const_int(context, location, elem_layout.pad_to_align().size(), 64)?;
    // Growth policy: new_capacity = max(8, min(2 * capacity, 1024) + capacity).
    // Returns (new capacity in elements, new data size in bytes).
    fn compute_next_capacity<'ctx, 'this>(
        context: &'ctx Context,
        block: &'this Block<'ctx>,
        location: Location<'ctx>,
        elem_stride: Value<'ctx, 'this>,
        array_capacity: Value<'ctx, 'this>,
    ) -> Result<(Value<'ctx, 'this>, Value<'ctx, 'this>)> {
        let len_ty = IntegerType::new(context, 32).into();
        let k1 = block.const_int_from_type(context, location, 1, len_ty)?;
        let k8 = block.const_int_from_type(context, location, 8, len_ty)?;
        let k1024 = block.const_int_from_type(context, location, 1024, len_ty)?;
        let realloc_len = block.append_op_result(arith::shli(array_capacity, k1, location))?;
        let realloc_len = block.append_op_result(arith::minui(realloc_len, k1024, location))?;
        let realloc_len =
            block.append_op_result(arith::addi(realloc_len, array_capacity, location))?;
        let realloc_len = block.append_op_result(arith::maxui(realloc_len, k8, location))?;
        // Convert the element count into a byte size.
        let realloc_size = block.append_op_result(arith::extui(
            realloc_len,
            IntegerType::new(context, 64).into(),
            location,
        ))?;
        let realloc_size =
            block.append_op_result(arith::muli(realloc_size, elem_stride, location))?;
        Result::Ok((realloc_len, realloc_size))
    }
    let data_prefix_size = calc_data_prefix_offset(elem_layout);
    let array_capacity =
        entry.extract_value(context, location, entry.argument(0)?.into(), len_ty, 3)?;
    // A zero capacity means there is no allocation yet.
    let k0 = entry.const_int_from_type(context, location, 0, len_ty)?;
    let is_empty = entry.cmpi(context, CmpiPredicate::Eq, array_capacity, k0, location)?;
    let array_obj = entry.append_op_result(scf::r#if(
        is_empty,
        &[self_ty],
        {
            // First append: create the initial allocation (prefix + data).
            let region = Region::new();
            let block = region.append_block(Block::new(&[]));
            let (array_capacity, realloc_len) =
                compute_next_capacity(context, &block, location, elem_stride, array_capacity)?;
            let data_prefix_size_value =
                block.const_int(context, location, data_prefix_size, 64)?;
            let realloc_len = block.addi(realloc_len, data_prefix_size_value, location)?;
            let null_ptr = block.append_op_result(llvm::zero(ptr_ty, location))?;
            let array_ptr = block.append_op_result(ReallocBindingsMeta::realloc(
                context,
                null_ptr,
                realloc_len,
                location,
            )?)?;
            // Initialize the prefix: first u32 set to 1 (presumably the reference
            // counter — see `is_shared` usage elsewhere), second u32 (max length) to 0.
            let k1 = block.const_int_from_type(context, location, 1, len_ty)?;
            block.store(context, location, array_ptr, k1)?;
            let max_len_ptr = block.gep(
                context,
                location,
                array_ptr,
                &[GepIndex::Const(size_of::<u32>() as i32)],
                IntegerType::new(context, 8).into(),
            )?;
            block.store(context, location, max_len_ptr, k0)?;
            // The array's data pointer points just past the prefix.
            let array_ptr = block.gep(
                context,
                location,
                array_ptr,
                &[GepIndex::Const(data_prefix_size as i32)],
                IntegerType::new(context, 8).into(),
            )?;
            // Allocate the pointer-to-pointer cell holding the data pointer.
            let k8 = block.const_int(context, location, 8, 64)?;
            let array_ptr_ptr = block.append_op_result(ReallocBindingsMeta::realloc(
                context, null_ptr, k8, location,
            )?)?;
            block.store(context, location, array_ptr_ptr, array_ptr)?;
            let array_obj = entry.argument(0)?.into();
            let array_obj = block.insert_value(context, location, array_obj, array_ptr_ptr, 0)?;
            let array_obj = block.insert_value(context, location, array_obj, array_capacity, 3)?;
            block.append_operation(scf::r#yield(&[array_obj], location));
            region
        },
        {
            // The array already has an allocation.
            let region = Region::new();
            let block = region.append_block(Block::new(&[]));
            let array_end =
                block.extract_value(context, location, entry.argument(0)?.into(), len_ty, 2)?;
            let has_space = block.cmpi(
                context,
                CmpiPredicate::Ult,
                array_end,
                array_capacity,
                location,
            )?;
            let array_obj = block.append_op_result(scf::r#if(
                has_space,
                &[self_ty],
                {
                    // Enough capacity left: reuse the array unchanged.
                    let region = Region::new();
                    let block = region.append_block(Block::new(&[]));
                    block.append_operation(scf::r#yield(&[entry.argument(0)?.into()], location));
                    region
                },
                {
                    // Full: grow the allocation (including the prefix) in place.
                    let region = Region::new();
                    let block = region.append_block(Block::new(&[]));
                    let (array_capacity, realloc_len) = compute_next_capacity(
                        context,
                        &block,
                        location,
                        elem_stride,
                        array_capacity,
                    )?;
                    let data_prefix_size_value =
                        block.const_int(context, location, data_prefix_size, 64)?;
                    let realloc_len = block.addi(realloc_len, data_prefix_size_value, location)?;
                    let array_ptr_ptr = block.extract_value(
                        context,
                        location,
                        entry.argument(0)?.into(),
                        ptr_ty,
                        0,
                    )?;
                    let array_ptr = block.load(context, location, array_ptr_ptr, ptr_ty)?;
                    // Step back to the allocation start (the prefix) before reallocating.
                    let array_ptr = block.gep(
                        context,
                        location,
                        array_ptr,
                        &[GepIndex::Const(-(data_prefix_size as i32))],
                        IntegerType::new(context, 8).into(),
                    )?;
                    let array_ptr = block.append_op_result(ReallocBindingsMeta::realloc(
                        context,
                        array_ptr,
                        realloc_len,
                        location,
                    )?)?;
                    // Re-derive the data pointer past the prefix and store it back.
                    let array_ptr = block.gep(
                        context,
                        location,
                        array_ptr,
                        &[GepIndex::Const(data_prefix_size as i32)],
                        IntegerType::new(context, 8).into(),
                    )?;
                    block.store(context, location, array_ptr_ptr, array_ptr)?;
                    let array_obj = block.insert_value(
                        context,
                        location,
                        entry.argument(0)?.into(),
                        array_capacity,
                        3,
                    )?;
                    block.append_operation(scf::r#yield(&[array_obj], location));
                    region
                },
                location,
            ))?;
            block.append_operation(scf::r#yield(&[array_obj], location));
            region
        },
        location,
    ))?;
    let array_ptr_ptr = entry.extract_value(context, location, array_obj, ptr_ty, 0)?;
    let array_ptr = entry.load(context, location, array_ptr_ptr, ptr_ty)?;
    // Insert the value.
    let target_offset = entry.extract_value(context, location, array_obj, len_ty, 2)?;
    let target_offset = entry.extui(
        target_offset,
        IntegerType::new(context, 64).into(),
        location,
    )?;
    let target_offset = entry.muli(target_offset, elem_stride, location)?;
    let target_ptr = entry.gep(
        context,
        location,
        array_ptr,
        &[GepIndex::Value(target_offset)],
        IntegerType::new(context, 8).into(),
    )?;
    entry.store(context, location, target_ptr, entry.argument(1)?.into())?;
    // Update array.
    let k1 = entry.const_int_from_type(context, location, 1, len_ty)?;
    let array_end = entry.extract_value(context, location, array_obj, len_ty, 2)?;
    let array_end = entry.addi(array_end, k1, location)?;
    let array_obj = entry.insert_value(context, location, array_obj, array_end, 2)?;
    // Update max length.
    // The max-length field is the second u32 of the data prefix, located
    // `data_prefix_offset - sizeof(u32)` bytes before the data pointer.
    let max_len_ptr = entry.gep(
        context,
        location,
        array_ptr,
        &[GepIndex::Const(
            -((crate::types::array::calc_data_prefix_offset(elem_layout) - size_of::<u32>())
                as i32),
        )],
        IntegerType::new(context, 8).into(),
    )?;
    entry.store(context, location, max_len_ptr, array_end)?;
    helper.br(entry, 0, &[array_obj], location)
}
/// Argument variant passed to `build_pop`, which is shared between the
/// single-element (`pop_front`, `pop_front_consume`, `snapshot_pop_*`) and the
/// multi-element (`snapshot_multi_pop_*`) libfuncs.
#[derive(Clone, Copy)]
enum PopInfo<'a> {
    /// Pop exactly one element from the array.
    Single(&'a SignatureAndTypeConcreteLibfunc),
    /// Pop as many elements as the popped tuple type has members.
    Multi(&'a ConcreteMultiPopLibfunc),
}
/// Generate MLIR operations for the `array_pop_*` libfuncs.
///
/// Template arguments:
/// - Consume: Whether to consume or not the array on failure.
/// - Reverse: False for front-popping, true for back-popping.
///
/// The `info` argument contains how many items to pop.
fn build_pop<'ctx, 'this, const CONSUME: bool, const REVERSE: bool>(
context: &'ctx Context,
registry: &ProgramRegistry<CoreType, CoreLibfunc>,
entry: &'this Block<'ctx>,
location: Location<'ctx>,
helper: &LibfuncHelper<'ctx, 'this>,
metadata: &mut MetadataStorage,
info: PopInfo,
) -> Result<()> {
metadata.get_or_insert_with(|| ReallocBindingsMeta::new(context, helper));
let ptr_ty = llvm::r#type::pointer(context, 0);
let len_ty = IntegerType::new(context, 32).into();
let (self_ty, elem_ty, array_obj, extract_len, branch_values) = match info {
PopInfo::Single(info) => (
&info.signature.param_signatures[0].ty,
&info.ty,
entry.argument(0)?.into(),
1,
Vec::new(),
),
PopInfo::Multi(ConcreteMultiPopLibfunc {
popped_ty,
signature,
}) => {
let range_check = super::increment_builtin_counter(
context,
entry,
location,
entry.argument(0)?.into(),
)?;
let CoreTypeConcrete::Snapshot(InfoAndTypeConcreteType { ty, .. }) =
registry.get_type(&signature.param_signatures[1].ty)?
else {
return Err(Error::SierraAssert(SierraAssertError::BadTypeInfo));
};
let CoreTypeConcrete::Array(InfoAndTypeConcreteType { ty, .. }) =
registry.get_type(ty)?
else {
return Err(Error::SierraAssert(SierraAssertError::BadTypeInfo));
};
let CoreTypeConcrete::Struct(info) = registry.get_type(popped_ty)? else {
return Err(Error::SierraAssert(SierraAssertError::BadTypeInfo));
};
native_assert!(
info.members.iter().all(|member_ty| member_ty == ty),
"output struct type should match the array's type"
);
(
&signature.param_signatures[1].ty,
ty,
entry.argument(1)?.into(),
info.members.len(),
vec![range_check],
)
}
};
let (elem_type, elem_layout) =
registry.build_type_with_layout(context, helper, metadata, elem_ty)?;
let array_start = entry.extract_value(context, location, array_obj, len_ty, 1)?;
let array_end = entry.extract_value(context, location, array_obj, len_ty, 2)?;
let extract_len_value = entry.const_int_from_type(context, location, extract_len, len_ty)?;
let array_len = entry.append_op_result(arith::subi(array_end, array_start, location))?;
let has_enough_data = entry.cmpi(
context,
CmpiPredicate::Ule,
extract_len_value,
array_len,
location,
)?;
let valid_block = helper.append_block(Block::new(&[]));
let error_block = helper.append_block(Block::new(&[]));
entry.append_operation(cf::cond_br(
context,
has_enough_data,
valid_block,
error_block,
&[],
&[],
location,
));
{
let mut branch_values = branch_values.clone();
let array_ptr_ptr = valid_block.extract_value(context, location, array_obj, ptr_ty, 0)?;
let array_ptr = valid_block.load(context, location, array_ptr_ptr, ptr_ty)?;
let elem_stride =
valid_block.const_int(context, location, elem_layout.pad_to_align().size(), 64)?;
// Extract pointer and update bounds.
let (array_obj, source_ptr) = if REVERSE {
let array_end = valid_block.append_op_result(arith::subi(
array_end,
extract_len_value,
location,
))?;
let array_obj = valid_block.insert_value(context, location, array_obj, array_end, 2)?;
// Compute data offset (elem_stride * array_end) and GEP.
let data_offset =
valid_block.extui(array_end, IntegerType::new(context, 64).into(), location)?;
let data_offset = valid_block.muli(elem_stride, data_offset, location)?;
let data_ptr = valid_block.gep(
context,
location,
array_ptr,
&[GepIndex::Value(data_offset)],
IntegerType::new(context, 8).into(),
)?;
(array_obj, data_ptr)
} else {
// Compute data offset (elem_stride * array_end) and GEP.
let data_offset =
valid_block.extui(array_start, IntegerType::new(context, 64).into(), location)?;
let data_offset = valid_block.muli(elem_stride, data_offset, location)?;
let data_ptr = valid_block.gep(
context,
location,
array_ptr,
&[GepIndex::Value(data_offset)],
IntegerType::new(context, 8).into(),
)?;
let array_start = valid_block.append_op_result(arith::addi(
array_start,
extract_len_value,
location,
))?;
let array_obj =
valid_block.insert_value(context, location, array_obj, array_start, 1)?;
(array_obj, data_ptr)
};
// Allocate output pointer.
let target_size = valid_block.const_int(
context,
location,
elem_layout.pad_to_align().size() * extract_len,
64,
)?;
let target_ptr = valid_block
.append_op_result(llvm::zero(llvm::r#type::pointer(context, 0), location))?;
| rust | Apache-2.0 | f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce | 2026-01-04T20:20:54.031924Z | true |
lambdaclass/cairo_native | https://github.com/lambdaclass/cairo_native/blob/f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce/src/libfuncs/coupon.rs | src/libfuncs/coupon.rs | //! # Coupon libfuncs
//!
//! A coupon prepays the cost of running a function so it can later be invoked through
//! `coupon_call`. Natively compiled code has no runtime representation for coupons, so these
//! libfuncs are (almost) no-ops.
use super::LibfuncHelper;
use crate::{error::Result, metadata::MetadataStorage, utils::ProgramRegistryExt};
use cairo_lang_sierra::{
extensions::{
core::{CoreLibfunc, CoreType},
coupon::CouponConcreteLibfunc,
function_call::SignatureAndFunctionConcreteLibfunc,
ConcreteLibfunc,
},
program_registry::ProgramRegistry,
};
use melior::{
dialect::llvm,
helpers::BuiltinBlockExt,
ir::{Block, Location},
Context,
};
/// Generate MLIR operations for the `coupon` libfunc family.
///
/// Coupons have no runtime representation in native code, so both variants
/// lower to (almost) nothing.
pub fn build<'ctx, 'this>(
    context: &'ctx Context,
    registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, 'this>,
    metadata: &mut MetadataStorage,
    selector: &CouponConcreteLibfunc,
) -> Result<()> {
    match selector {
        // Buying a coupon prepays the cost of running a function (everything
        // except the `call` and `ret` instructions); the coupon can later be
        // redeemed through `coupon_call` to run the function for free.
        CouponConcreteLibfunc::Buy(info) => build_buy(
            context, registry, entry, location, helper, metadata, info,
        ),
        // Refunding an unused coupon adds the function's cost back to the gas
        // wallet.
        CouponConcreteLibfunc::Refund(info) => build_refund(
            context, registry, entry, location, helper, metadata, info,
        ),
    }
}
/// Generate MLIR operations for the `coupon_buy` libfunc.
///
/// A coupon carries no runtime data, so its value is materialized as an
/// `undef` of the coupon's type.
pub fn build_buy<'ctx, 'this>(
    context: &'ctx Context,
    registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, 'this>,
    metadata: &mut MetadataStorage,
    info: &SignatureAndFunctionConcreteLibfunc,
) -> Result<()> {
    // Should the function's gas cost ever be needed here, it can be obtained
    // through the gas metadata:
    //   let gas = metadata.get::<GasMetadata>().ok_or(Error::MissingMetadata)?;
    //   let gas_cost = gas.initial_required_gas(&info.function.id);
    let coupon_ty = registry.build_type(
        context,
        helper,
        metadata,
        &info.branch_signatures()[0].vars[0].ty,
    )?;
    let coupon_value = entry.append_op_result(llvm::undef(coupon_ty, location))?;
    helper.br(entry, 0, &[coupon_value], location)
}
/// Generate MLIR operations for the `coupon_refund` libfunc.
///
/// Nothing happens at runtime: the refund only affects Sierra gas accounting,
/// so this jumps straight to the target branch with no values.
pub fn build_refund<'ctx, 'this>(
    _context: &'ctx Context,
    _registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, 'this>,
    _metadata: &mut MetadataStorage,
    _info: &SignatureAndFunctionConcreteLibfunc,
) -> Result<()> {
    // Should the function's gas cost ever be needed here, it can be obtained
    // through the gas metadata:
    //   let gas = metadata.get::<GasMetadata>().ok_or(Error::MissingMetadata)?;
    //   let gas_cost = gas.initial_required_gas(&info.function.id);
    helper.br(entry, 0, &[], location)
}
| rust | Apache-2.0 | f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce | 2026-01-04T20:20:54.031924Z | false |
lambdaclass/cairo_native | https://github.com/lambdaclass/cairo_native/blob/f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce/src/libfuncs/box.rs | src/libfuncs/box.rs | //! # Box libfuncs
//!
//! A heap allocated value, which is internally a pointer that can't be null.
use std::alloc::Layout;
use super::LibfuncHelper;
use crate::{
error::Result,
metadata::{realloc_bindings::ReallocBindingsMeta, MetadataStorage},
native_panic,
types::TypeBuilder,
utils::ProgramRegistryExt,
};
use cairo_lang_sierra::{
extensions::{
boxing::BoxConcreteLibfunc,
core::{CoreLibfunc, CoreType},
lib_func::SignatureAndTypeConcreteLibfunc,
},
ids::ConcreteTypeId,
program_registry::ProgramRegistry,
};
use melior::{
dialect::{
llvm::{self, r#type::pointer, LoadStoreOptions},
ods,
},
helpers::{ArithBlockExt, BuiltinBlockExt},
ir::{attribute::IntegerAttribute, r#type::IntegerType, Block, BlockLike, Location, Value},
Context,
};
/// Route a `Box` libfunc to the builder implementing it.
pub fn build<'ctx, 'this>(
    context: &'ctx Context,
    registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, 'this>,
    metadata: &mut MetadataStorage,
    selector: &BoxConcreteLibfunc,
) -> Result<()> {
    match selector {
        BoxConcreteLibfunc::Into(info) => build_into_box(
            context, registry, entry, location, helper, metadata, info,
        ),
        BoxConcreteLibfunc::Unbox(info) => build_unbox(
            context, registry, entry, location, helper, metadata, info,
        ),
        // Snapshotting a box doesn't change its runtime representation, so the
        // value is simply forwarded.
        BoxConcreteLibfunc::ForwardSnapshot(info) => super::build_noop::<1, false>(
            context,
            registry,
            entry,
            location,
            helper,
            metadata,
            &info.signature.param_signatures,
        ),
        BoxConcreteLibfunc::LocalInto(_) => native_panic!("implement box_local_into"),
    }
}
/// Generate MLIR operations for the `into_box` libfunc.
///
/// Heap-allocates storage for the operand, stores it there, and forwards the
/// resulting (non-null) pointer to the only branch.
pub fn build_into_box<'ctx, 'this>(
    context: &'ctx Context,
    registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, 'this>,
    metadata: &mut MetadataStorage,
    info: &SignatureAndTypeConcreteLibfunc,
) -> Result<()> {
    // Ensure the realloc/free bindings are declared, using the same
    // `get_or_insert_with` idiom as `build_unbox`.
    metadata.get_or_insert_with(|| ReallocBindingsMeta::new(context, helper));

    // The allocation size/alignment is derived from the boxed type's layout.
    let inner_type = registry.get_type(&info.ty)?;
    let inner_layout = inner_type.layout(registry)?;

    let ptr = into_box(context, entry, location, entry.arg(0)?, inner_layout)?;

    helper.br(entry, 0, &[ptr], location)
}
/// Heap-allocates storage for `inner_val`, writes the value there, and returns
/// the pointer to the boxed value.
pub fn into_box<'ctx, 'this>(
    context: &'ctx Context,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    inner_val: Value<'ctx, 'this>,
    inner_layout: Layout,
) -> Result<Value<'ctx, 'this>> {
    // Allocation size, padded so the value's alignment is respected.
    let alloc_size =
        entry.const_int(context, location, inner_layout.pad_to_align().size(), 64)?;

    // Start from a null pointer so `realloc` behaves like `malloc`.
    let null_ptr = entry
        .append_operation(ods::llvm::mlir_zero(context, pointer(context, 0), location).into())
        .result(0)?
        .into();
    let box_ptr = entry
        .append_operation(ReallocBindingsMeta::realloc(
            context, null_ptr, alloc_size, location,
        )?)
        .result(0)?
        .into();

    // Store the value with its natural alignment.
    let align_attr = IntegerAttribute::new(
        IntegerType::new(context, 64).into(),
        inner_layout.align() as i64,
    );
    entry.append_operation(llvm::store(
        context,
        inner_val,
        box_ptr,
        location,
        LoadStoreOptions::new().align(Some(align_attr)),
    ));

    Ok(box_ptr)
}
/// Generate MLIR operations for the `unbox` libfunc.
pub fn build_unbox<'ctx, 'this>(
    context: &'ctx Context,
    registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, 'this>,
    metadata: &mut MetadataStorage,
    info: &SignatureAndTypeConcreteLibfunc,
) -> Result<()> {
    // The `free` binding must exist so `unbox` can release the allocation.
    metadata.get_or_insert_with(|| ReallocBindingsMeta::new(context, helper));

    let unboxed = unbox(
        context, registry, entry, location, helper, metadata, &info.ty,
    )?;
    helper.br(entry, 0, &[unboxed], location)
}
/// Reads the value stored inside a `Box` (first block argument) and releases
/// the box's heap allocation.
pub fn unbox<'ctx, 'this>(
    context: &'ctx Context,
    registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, 'this>,
    metadata: &mut MetadataStorage,
    inner_ty_id: &ConcreteTypeId,
) -> Result<Value<'ctx, 'this>> {
    let (inner_type, inner_layout) =
        registry.build_type_with_layout(context, helper, metadata, inner_ty_id)?;

    let box_ptr = entry.arg(0)?;

    // Load the boxed value from the heap, honoring its alignment.
    let align_attr = IntegerAttribute::new(
        IntegerType::new(context, 64).into(),
        inner_layout.align() as i64,
    );
    let inner_value = entry
        .append_operation(llvm::load(
            context,
            box_ptr,
            inner_type,
            location,
            LoadStoreOptions::new().align(Some(align_attr)),
        ))
        .result(0)?
        .into();

    // The value has been copied out, so the allocation can be released.
    entry.append_operation(ReallocBindingsMeta::free(context, box_ptr, location)?);

    Ok(inner_value)
}
#[cfg(test)]
mod test {
    use crate::{load_cairo, utils::testing::run_program_assert_output, values::Value};

    // Boxing a value and unboxing it again must round-trip the original value.
    #[test]
    fn run_box_unbox() {
        let program = load_cairo! {
            use box::BoxTrait;
            use box::BoxImpl;

            fn run_test() -> u32 {
                let x: u32 = 2_u32;
                let box_x: Box<u32> = BoxTrait::new(x);
                box_x.unbox()
            }
        };

        run_program_assert_output(&program, "run_test", &[], Value::Uint32(2));
    }

    // Returning a box directly: the runtime yields the inner value.
    #[test]
    fn run_box() {
        let program = load_cairo! {
            use box::BoxTrait;
            use box::BoxImpl;

            fn run_test() -> Box<u32> {
                let x: u32 = 2_u32;
                let box_x: Box<u32> = BoxTrait::new(x);
                box_x
            }
        };

        run_program_assert_output(&program, "run_test", &[], Value::Uint32(2));
    }

    // Round-trip a single-variant enum with a payload through a box.
    #[test]
    fn box_unbox_stack_allocated_enum_single() {
        let program = load_cairo! {
            use core::box::BoxTrait;

            enum MyEnum {
                A: felt252,
            }

            fn run_test() -> MyEnum {
                let x = BoxTrait::new(MyEnum::A(1234));
                x.unbox()
            }
        };

        run_program_assert_output(
            &program,
            "run_test",
            &[],
            Value::Enum {
                tag: 0,
                value: Box::new(Value::Felt252(1234.into())),
                debug_name: None,
            },
        );
    }

    // Round-trip a C-like enum (payload-less variants), first variant.
    #[test]
    fn box_unbox_stack_allocated_enum_c() {
        let program = load_cairo! {
            use core::box::BoxTrait;

            enum MyEnum {
                A: (),
                B: (),
            }

            fn run_test() -> MyEnum {
                let x = BoxTrait::new(MyEnum::A);
                x.unbox()
            }
        };

        run_program_assert_output(
            &program,
            "run_test",
            &[],
            Value::Enum {
                tag: 0,
                value: Box::new(Value::Struct {
                    fields: Vec::new(),
                    debug_name: None,
                }),
                debug_name: None,
            },
        );
    }

    // Round-trip a C-like enum, second variant (tag 1).
    #[test]
    fn box_unbox_stack_allocated_enum_c2() {
        let program = load_cairo! {
            use core::box::BoxTrait;

            enum MyEnum {
                A: (),
                B: (),
            }

            fn run_test() -> MyEnum {
                let x = BoxTrait::new(MyEnum::B);
                x.unbox()
            }
        };

        run_program_assert_output(
            &program,
            "run_test",
            &[],
            Value::Enum {
                tag: 1,
                value: Box::new(Value::Struct {
                    fields: Vec::new(),
                    debug_name: None,
                }),
                debug_name: None,
            },
        );
    }

    // Round-trip a mixed-payload enum (felt252 / u128) through a box.
    #[test]
    fn box_unbox_stack_allocated_enum() {
        let program = load_cairo! {
            use core::box::BoxTrait;

            enum MyEnum {
                A: felt252,
                B: u128,
            }

            fn run_test() -> MyEnum {
                let x = BoxTrait::new(MyEnum::A(1234));
                x.unbox()
            }
        };

        run_program_assert_output(
            &program,
            "run_test",
            &[],
            Value::Enum {
                tag: 0,
                value: Box::new(Value::Felt252(1234.into())),
                debug_name: None,
            },
        );
    }
}
| rust | Apache-2.0 | f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce | 2026-01-04T20:20:54.031924Z | false |
lambdaclass/cairo_native | https://github.com/lambdaclass/cairo_native/blob/f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce/src/libfuncs/dup.rs | src/libfuncs/dup.rs | //! # State value duplication libfunc
//!
//! Most types are trivial and don't need any clone (or rather, they will be cloned automatically by
//! MLIR). For those types, this libfunc is a no-op.
//!
//! However, types like an array need special handling.
use super::LibfuncHelper;
use crate::{
error::Result,
metadata::{dup_overrides::DupOverridesMeta, MetadataStorage},
};
use cairo_lang_sierra::{
extensions::{
core::{CoreLibfunc, CoreType},
lib_func::SignatureOnlyConcreteLibfunc,
},
program_registry::ProgramRegistry,
};
use melior::{
helpers::BuiltinBlockExt,
ir::{Block, Location},
Context,
};
/// Generate MLIR operations for the `dup` libfunc.
///
/// Trivially-copyable values need no work, but the Cairo compiler still emits
/// `dup` for some non-trivially-copyable types (e.g. it won't generate a clone
/// implementation for `Box<T>`), so the per-type dup override is invoked here.
pub fn build<'ctx, 'this>(
    context: &'ctx Context,
    registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, 'this>,
    metadata: &mut MetadataStorage,
    info: &SignatureOnlyConcreteLibfunc,
) -> Result<()> {
    // Delegate the actual duplication to the type's registered override.
    let (original, duplicate) = DupOverridesMeta::invoke_override(
        context,
        registry,
        helper,
        helper.init_block(),
        entry,
        location,
        metadata,
        &info.signature.param_signatures[0].ty,
        entry.arg(0)?,
    )?;
    helper.br(entry, 0, &[original, duplicate], location)
}
| rust | Apache-2.0 | f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce | 2026-01-04T20:20:54.031924Z | false |
lambdaclass/cairo_native | https://github.com/lambdaclass/cairo_native/blob/f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce/src/libfuncs/int_range.rs | src/libfuncs/int_range.rs | //! # Int range libfuncs
use super::LibfuncHelper;
use crate::{
error::Result, metadata::MetadataStorage, types::TypeBuilder, utils::ProgramRegistryExt,
};
use cairo_lang_sierra::{
extensions::{
core::{CoreLibfunc, CoreType},
lib_func::SignatureOnlyConcreteLibfunc,
range::IntRangeConcreteLibfunc,
ConcreteLibfunc,
},
program_registry::ProgramRegistry,
};
use melior::{
dialect::{
arith::{self, CmpiPredicate},
ods,
},
helpers::{ArithBlockExt, BuiltinBlockExt, LlvmBlockExt},
ir::{Block, Location},
Context,
};
use num_bigint::BigInt;
/// Route an `int_range` libfunc to the builder implementing it.
pub fn build<'ctx, 'this>(
    context: &'ctx Context,
    registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, 'this>,
    metadata: &mut MetadataStorage,
    selector: &IntRangeConcreteLibfunc,
) -> Result<()> {
    match selector {
        IntRangeConcreteLibfunc::TryNew(info) => build_int_range_try_new(
            context, registry, entry, location, helper, metadata, info,
        ),
        IntRangeConcreteLibfunc::PopFront(info) => build_int_range_pop_front(
            context, registry, entry, location, helper, metadata, info,
        ),
    }
}
/// Generate MLIR operations for the `int_range_try_new` libfunc.
///
/// Builds an `IntRange<T>` value `[x, y)` when `x <= y`; for invalid bounds,
/// the empty range `[y, y)` is built instead and the failure branch is taken.
pub fn build_int_range_try_new<'ctx, 'this>(
    context: &'ctx Context,
    registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, 'this>,
    metadata: &mut MetadataStorage,
    info: &SignatureOnlyConcreteLibfunc,
) -> Result<()> {
    // The sierra-to-casm compiler uses the range check builtin a total of 1 time.
    // https://github.com/starkware-libs/cairo/blob/v2.12.0-dev.1/crates/cairo-lang-sierra-to-casm/src/invocations/range.rs?plain=1#L24
    let range_check = super::increment_builtin_counter(context, entry, location, entry.arg(0)?)?;

    let x = entry.arg(1)?;
    let y = entry.arg(2)?;
    let range_ty = registry.build_type(
        context,
        helper,
        metadata,
        &info.branch_signatures()[0].vars[1].ty,
    )?;
    let inner = registry.get_type(&info.param_signatures()[1].ty)?;

    // The bounds comparison must honor the inner type's signedness, which is
    // derived from whether its value range extends below zero.
    let inner_range = inner.integer_range(registry)?;
    let is_valid = if inner_range.lower < BigInt::ZERO {
        entry.cmpi(context, CmpiPredicate::Sle, x, y, location)?
    } else {
        entry.cmpi(context, CmpiPredicate::Ule, x, y, location)?
    };

    let range =
        entry.append_op_result(ods::llvm::mlir_undef(context, range_ty, location).into())?;
    // if the range is not valid, return the empty range [y, y)
    let x_val = entry.append_op_result(arith::select(is_valid, x, y, location))?;
    let range = entry.insert_values(context, location, range, &[x_val, y])?;

    // Both targets receive the same values (builtin + range); the condition
    // only selects between branch 0 (taken when `is_valid`) and branch 1.
    helper.cond_br(
        context,
        entry,
        is_valid,
        [0, 1],
        [&[range_check, range], &[range_check, range]],
        location,
    )
}
/// Generate MLIR operations for the `int_range_pop_front` libfunc.
///
/// Pops the first element of an `IntRange<T>`: on success, yields the advanced
/// range `[x + 1, y)` together with the popped value `x`.
pub fn build_int_range_pop_front<'ctx, 'this>(
    context: &'ctx Context,
    registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, 'this>,
    metadata: &mut MetadataStorage,
    info: &SignatureOnlyConcreteLibfunc,
) -> Result<()> {
    let range = entry.arg(0)?;
    // The element type comes from branch 1's signature, whose vars are the
    // updated range and the popped value.
    let inner_ty = registry.build_type(
        context,
        helper,
        metadata,
        &info.branch_signatures()[1].vars[1].ty,
    )?;
    let inner = registry.get_type(&info.branch_signatures()[1].vars[1].ty)?;
    let x = entry.extract_value(context, location, range, inner_ty, 0)?;
    let k1 = entry.const_int_from_type(context, location, 1, inner_ty)?;
    let x_p_1 = entry.addi(x, k1, location)?;
    let y = entry.extract_value(context, location, range, inner_ty, 1)?;

    // The emptiness check must honor the inner type's signedness (signed iff
    // its value range extends below zero).
    let inner_range = inner.integer_range(registry)?;
    let is_valid = if inner_range.lower < BigInt::ZERO {
        entry.cmpi(context, CmpiPredicate::Slt, x, y, location)?
    } else {
        entry.cmpi(context, CmpiPredicate::Ult, x, y, location)?
    };

    // Advance the range's start to x + 1 (only observed on the non-empty path).
    let range = entry.insert_value(context, location, range, x_p_1, 0)?;

    // When `x < y`, take branch 1 with the advanced range and popped value;
    // otherwise take branch 0 (range exhausted) with no values.
    helper.cond_br(
        context,
        entry,
        is_valid,
        [1, 0],
        [&[range, x], &[]],
        location,
    )
}
#[cfg(test)]
mod test {
    use crate::{
        jit_enum, jit_struct, load_cairo, utils::testing::run_program_assert_output, values::Value,
    };
    use cairo_lang_sierra::program::Program;
    use lazy_static::lazy_static;

    // Cairo program exercising `int_range_try_new` through its extern
    // declaration; `unwrap` panics when the bounds are invalid.
    lazy_static! {
        static ref INT_RANGE_TRY_NEW: (String, Program) = load_cairo! {
            pub extern type IntRange<T>;
            impl IntRangeDrop<T> of Drop<IntRange<T>>;
            pub extern fn int_range_try_new<T>(
                x: T, y: T
            ) -> Result<IntRange<T>, IntRange<T>> implicits(core::RangeCheck) nopanic;

            fn run_test(lhs: u64, rhs: u64) -> IntRange<u64> {
                int_range_try_new(lhs, rhs).unwrap()
            }
        };
    }

    // Valid bounds (2 <= 4) must produce the tag-0 variant holding [2, 4).
    #[test]
    fn int_range_try_new() {
        run_program_assert_output(
            &INT_RANGE_TRY_NEW,
            "run_test",
            &[2u64.into(), 4u64.into()],
            jit_enum!(
                0,
                jit_struct!(Value::IntRange {
                    x: Box::new(2u64.into()),
                    y: Box::new(4u64.into()),
                })
            ),
        );
    }
}
| rust | Apache-2.0 | f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce | 2026-01-04T20:20:54.031924Z | false |
lambdaclass/cairo_native | https://github.com/lambdaclass/cairo_native/blob/f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce/src/libfuncs/bounded_int.rs | src/libfuncs/bounded_int.rs | //! # Bounded int libfuncs
use super::LibfuncHelper;
use crate::{
error::{panic::ToNativeAssertError, Result},
execution_result::RANGE_CHECK_BUILTIN_SIZE,
metadata::MetadataStorage,
native_assert,
types::TypeBuilder,
utils::RangeExt,
};
use cairo_lang_sierra::{
extensions::{
bounded_int::{
BoundedIntConcreteLibfunc, BoundedIntConstrainConcreteLibfunc,
BoundedIntDivRemAlgorithm, BoundedIntDivRemConcreteLibfunc,
BoundedIntTrimConcreteLibfunc,
},
core::{CoreLibfunc, CoreType},
lib_func::SignatureOnlyConcreteLibfunc,
utils::Range,
ConcreteLibfunc,
},
program_registry::ProgramRegistry,
};
use melior::{
dialect::{
arith::{self, CmpiPredicate},
cf,
},
helpers::{ArithBlockExt, BuiltinBlockExt},
ir::{r#type::IntegerType, Block, BlockLike, Location, Value, ValueLike},
Context,
};
use num_bigint::{BigInt, Sign};
/// Select and call the correct libfunc builder function from the selector.
pub fn build<'ctx, 'this>(
context: &'ctx Context,
registry: &ProgramRegistry<CoreType, CoreLibfunc>,
entry: &'this Block<'ctx>,
location: Location<'ctx>,
helper: &LibfuncHelper<'ctx, 'this>,
metadata: &mut MetadataStorage,
selector: &BoundedIntConcreteLibfunc,
) -> Result<()> {
match selector {
BoundedIntConcreteLibfunc::Add(info) => {
build_add(context, registry, entry, location, helper, metadata, info)
}
BoundedIntConcreteLibfunc::Sub(info) => {
build_sub(context, registry, entry, location, helper, metadata, info)
}
BoundedIntConcreteLibfunc::Mul(info) => {
build_mul(context, registry, entry, location, helper, metadata, info)
}
BoundedIntConcreteLibfunc::DivRem(info) => {
build_div_rem(context, registry, entry, location, helper, metadata, info)
}
BoundedIntConcreteLibfunc::Constrain(info) => {
build_constrain(context, registry, entry, location, helper, metadata, info)
}
BoundedIntConcreteLibfunc::TrimMin(info) | BoundedIntConcreteLibfunc::TrimMax(info) => {
build_trim(context, registry, entry, location, helper, metadata, info)
}
BoundedIntConcreteLibfunc::IsZero(info) => {
build_is_zero(context, registry, entry, location, helper, metadata, info)
}
BoundedIntConcreteLibfunc::WrapNonZero(info) => {
build_wrap_non_zero(context, registry, entry, location, helper, metadata, info)
}
}
}
/// Generate MLIR operations for the `bounded_int_add` libfunc.
///
/// # Cairo Signature
///
/// ```cairo
/// extern fn bounded_int_add<Lhs, Rhs, impl H: AddHelper<Lhs, Rhs>>(
///     lhs: Lhs, rhs: Rhs,
/// ) -> H::Result nopanic;
/// ```
///
/// A number X as a `BoundedInt` is internally represented as an offset Xd from the lower bound Xo.
/// So X = Xo + Xd.
///
/// Since we want to get C = A + B, we can translate this to
/// Co + Cd = Ao + Ad + Bo + Bd. Where Ao, Bo and Co represent the lower bound
/// of the ranges in the `BoundedInt` and Ad, Bd and Cd represent the offsets. Since
/// we also know that Co = Ao + Bo we can simplify the equation to Cd = Ad + Bd.
#[allow(clippy::too_many_arguments)]
fn build_add<'ctx, 'this>(
    context: &'ctx Context,
    registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, 'this>,
    _metadata: &mut MetadataStorage,
    info: &SignatureOnlyConcreteLibfunc,
) -> Result<()> {
    let lhs_value = entry.arg(0)?;
    let rhs_value = entry.arg(1)?;

    // Extract the ranges for the operands and the destination type.
    let lhs_ty = registry.get_type(&info.signature.param_signatures[0].ty)?;
    let rhs_ty = registry.get_type(&info.signature.param_signatures[1].ty)?;
    let lhs_range = lhs_ty.integer_range(registry)?;
    let rhs_range = rhs_ty.integer_range(registry)?;
    let dst_range = registry
        .get_type(&info.signature.branch_signatures[0].vars[0].ty)?
        .integer_range(registry)?;

    // Extract the bit width. Bounded ints are stored as offsets from their
    // lower bound, so they use the offset width; plain integers use the width
    // of their zero-based range.
    let lhs_width = if lhs_ty.is_bounded_int(registry)? {
        lhs_range.offset_bit_width()
    } else {
        lhs_range.zero_based_bit_width()
    };
    let rhs_width = if rhs_ty.is_bounded_int(registry)? {
        rhs_range.offset_bit_width()
    } else {
        rhs_range.zero_based_bit_width()
    };
    let dst_width = dst_range.offset_bit_width();

    // Compute type: one extra bit over the widest operand so the addition
    // cannot wrap.
    let compute_width = lhs_width.max(rhs_width) + 1;
    let compute_ty = IntegerType::new(context, compute_width).into();

    // Widen both operands to the compute type. Bounded-int offsets and values
    // whose lower bound is non-negative are unsigned (zero-extend); anything
    // that can be negative is sign-extended.
    let lhs_value = if compute_width > lhs_width {
        if lhs_range.lower.sign() != Sign::Minus || lhs_ty.is_bounded_int(registry)? {
            entry.extui(lhs_value, compute_ty, location)?
        } else {
            entry.extsi(lhs_value, compute_ty, location)?
        }
    } else {
        lhs_value
    };
    let rhs_value = if compute_width > rhs_width {
        if rhs_range.lower.sign() != Sign::Minus || rhs_ty.is_bounded_int(registry)? {
            entry.extui(rhs_value, compute_ty, location)?
        } else {
            entry.extsi(rhs_value, compute_ty, location)?
        }
    } else {
        rhs_value
    };

    // Perform the addition (Cd = Ad + Bd), then resize the result to the
    // destination's offset width.
    let res_value = entry.addi(lhs_value, rhs_value, location)?;
    let res_value = if compute_width > dst_width {
        entry.trunci(
            res_value,
            IntegerType::new(context, dst_width).into(),
            location,
        )?
    } else if compute_width < dst_width {
        entry.extui(
            res_value,
            IntegerType::new(context, dst_width).into(),
            location,
        )?
    } else {
        res_value
    };

    helper.br(entry, 0, &[res_value], location)
}
/// Generate MLIR operations for the `bounded_int_sub` libfunc.
///
/// # Cairo Signature
/// ```cairo
/// extern fn bounded_int_sub<Lhs, Rhs, impl H: SubHelper<Lhs, Rhs>>(
///     lhs: Lhs, rhs: Rhs,
/// ) -> H::Result nopanic;
/// ```
///
/// A number X as a `BoundedInt` is internally represented as an offset Xd from the lower bound Xo.
/// So X = Xo + Xd.
///
/// Since we want to get C = A - B, we can translate this to
/// Co + Cd = (Ao + Ad) - (Bo + Bd). Where Ao, Bo and Co represent the lower bound
/// of the ranges in the `BoundedInt` and Ad, Bd and Cd represent the offsets.
/// Rearranging gives Cd = (Ad - Bd) + (Ao - Bo - Co), where the second term is
/// a compile-time constant.
#[allow(clippy::too_many_arguments)]
fn build_sub<'ctx, 'this>(
    context: &'ctx Context,
    registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, 'this>,
    _metadata: &mut MetadataStorage,
    info: &SignatureOnlyConcreteLibfunc,
) -> Result<()> {
    let lhs_value = entry.arg(0)?;
    let rhs_value = entry.arg(1)?;

    // Extract the ranges for the operands and the destination type.
    let lhs_ty = registry.get_type(&info.signature.param_signatures[0].ty)?;
    let rhs_ty = registry.get_type(&info.signature.param_signatures[1].ty)?;
    let lhs_range = lhs_ty.integer_range(registry)?;
    let rhs_range = rhs_ty.integer_range(registry)?;
    let dst_range = registry
        .get_type(&info.signature.branch_signatures[0].vars[0].ty)?
        .integer_range(registry)?;

    // Extract the bit width. Bounded ints are stored as offsets from their
    // lower bound, so they use the offset width; plain integers use the width
    // of their zero-based range.
    let lhs_width = if lhs_ty.is_bounded_int(registry)? {
        lhs_range.offset_bit_width()
    } else {
        lhs_range.zero_based_bit_width()
    };
    let rhs_width = if rhs_ty.is_bounded_int(registry)? {
        rhs_range.offset_bit_width()
    } else {
        rhs_range.zero_based_bit_width()
    };
    let dst_width = dst_range.offset_bit_width();

    // Compile-time correction term (Ao - Bo - Co). The compute type must hold
    // both operands and this constant, plus one extra bit so the arithmetic
    // cannot wrap.
    let compile_time_val = lhs_range.lower.clone() - rhs_range.lower.clone() - dst_range.lower;
    let compile_time_val_width = u32::try_from(compile_time_val.bits())?;
    let compute_width = lhs_width.max(rhs_width).max(compile_time_val_width) + 1;
    let compute_ty = IntegerType::new(context, compute_width).into();

    // Widen both operands to the compute type: zero-extend bounded-int offsets
    // and non-negative values, sign-extend anything that can be negative.
    let lhs_value = if compute_width > lhs_width {
        if lhs_range.lower.sign() != Sign::Minus || lhs_ty.is_bounded_int(registry)? {
            entry.extui(lhs_value, compute_ty, location)?
        } else {
            entry.extsi(lhs_value, compute_ty, location)?
        }
    } else {
        lhs_value
    };
    let rhs_value = if compute_width > rhs_width {
        if rhs_range.lower.sign() != Sign::Minus || rhs_ty.is_bounded_int(registry)? {
            entry.extui(rhs_value, compute_ty, location)?
        } else {
            entry.extsi(rhs_value, compute_ty, location)?
        }
    } else {
        rhs_value
    };

    let compile_time_val =
        entry.const_int_from_type(context, location, compile_time_val, compute_ty)?;
    // First we do -> intermediate_res = Ad - Bd
    let res_value = entry.subi(lhs_value, rhs_value, location)?;
    // Then we do -> intermediate_res += (Ao - Bo - Co)
    let res_value = entry.addi(res_value, compile_time_val, location)?;

    // Resize the result to the destination's offset width.
    let res_value = if compute_width > dst_width {
        entry.trunci(
            res_value,
            IntegerType::new(context, dst_width).into(),
            location,
        )?
    } else if compute_width < dst_width {
        entry.extui(
            res_value,
            IntegerType::new(context, dst_width).into(),
            location,
        )?
    } else {
        res_value
    };

    helper.br(entry, 0, &[res_value], location)
}
/// Generate MLIR operations for the `bounded_int_mul` libfunc.
///
/// Unlike add/sub, the multiplication is performed on actual values:
/// bounded-int operands (stored as offsets from their lower bound) are first
/// shifted back to their real value, multiplied, and the result is then
/// re-encoded as an offset from the destination range's lower bound.
#[allow(clippy::too_many_arguments)]
fn build_mul<'ctx, 'this>(
    context: &'ctx Context,
    registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, 'this>,
    _metadata: &mut MetadataStorage,
    info: &SignatureOnlyConcreteLibfunc,
) -> Result<()> {
    let lhs_value = entry.arg(0)?;
    let rhs_value = entry.arg(1)?;

    // Extract the ranges for the operands and the result type.
    let lhs_ty = registry.get_type(&info.signature.param_signatures[0].ty)?;
    let rhs_ty = registry.get_type(&info.signature.param_signatures[1].ty)?;
    let lhs_range = lhs_ty.integer_range(registry)?;
    let rhs_range = rhs_ty.integer_range(registry)?;
    let dst_range = registry
        .get_type(&info.signature.branch_signatures[0].vars[0].ty)?
        .integer_range(registry)?;

    // Bounded ints are stored as offsets from their lower bound, so they use
    // the offset width; plain integers use the width of their zero-based range.
    let lhs_width = if lhs_ty.is_bounded_int(registry)? {
        lhs_range.offset_bit_width()
    } else {
        lhs_range.zero_based_bit_width()
    };
    let rhs_width = if rhs_ty.is_bounded_int(registry)? {
        rhs_range.offset_bit_width()
    } else {
        rhs_range.zero_based_bit_width()
    };

    // Calculate the computation range: wide enough for both operands, the
    // destination, and zero (so lower-bound offsets can be applied safely).
    let compute_range = Range {
        lower: (&lhs_range.lower)
            .min(&rhs_range.lower)
            .min(&dst_range.lower)
            .min(&BigInt::ZERO)
            .clone(),
        upper: (&lhs_range.upper)
            .max(&rhs_range.upper)
            .max(&dst_range.upper)
            .clone(),
    };
    let compute_ty = IntegerType::new(context, compute_range.zero_based_bit_width()).into();

    // Extend operands into the computation range.
    native_assert!(
        compute_range.offset_bit_width() >= lhs_width,
        "the lhs_range bit_width must be less or equal than the compute_range"
    );
    native_assert!(
        compute_range.offset_bit_width() >= rhs_width,
        "the rhs_range bit_width must be less or equal than the compute_range"
    );
    let lhs_value = if compute_range.zero_based_bit_width() > lhs_width {
        if lhs_range.lower.sign() != Sign::Minus || lhs_ty.is_bounded_int(registry)? {
            entry.extui(lhs_value, compute_ty, location)?
        } else {
            entry.extsi(lhs_value, compute_ty, location)?
        }
    } else {
        lhs_value
    };
    let rhs_value = if compute_range.zero_based_bit_width() > rhs_width {
        if rhs_range.lower.sign() != Sign::Minus || rhs_ty.is_bounded_int(registry)? {
            entry.extui(rhs_value, compute_ty, location)?
        } else {
            entry.extsi(rhs_value, compute_ty, location)?
        }
    } else {
        rhs_value
    };

    // Offset the operands so they hold their actual value (offset + lower
    // bound) before multiplying.
    let lhs_value = if lhs_ty.is_bounded_int(registry)? && lhs_range.lower != BigInt::ZERO {
        let lhs_offset =
            entry.const_int_from_type(context, location, lhs_range.lower, compute_ty)?;
        entry.addi(lhs_value, lhs_offset, location)?
    } else {
        lhs_value
    };
    let rhs_value = if rhs_ty.is_bounded_int(registry)? && rhs_range.lower != BigInt::ZERO {
        let rhs_offset =
            entry.const_int_from_type(context, location, rhs_range.lower, compute_ty)?;
        entry.addi(rhs_value, rhs_offset, location)?
    } else {
        rhs_value
    };

    // Compute the operation.
    let res_value = entry.muli(lhs_value, rhs_value, location)?;

    // Re-encode the result as an offset from the destination's lower bound,
    // then truncate it to the output type's width.
    let res_offset = dst_range.lower.clone();
    let res_value = if res_offset != BigInt::ZERO {
        let res_offset = entry.const_int_from_type(context, location, res_offset, compute_ty)?;
        entry.append_op_result(arith::subi(res_value, res_offset, location))?
    } else {
        res_value
    };
    let res_value = if dst_range.offset_bit_width() < compute_range.zero_based_bit_width() {
        entry.trunci(
            res_value,
            IntegerType::new(context, dst_range.offset_bit_width()).into(),
            location,
        )?
    } else {
        res_value
    };

    helper.br(entry, 0, &[res_value], location)
}
/// Builds the `bounded_int_div_rem` libfunc, which divides a non negative
/// integer by a positive integer (non zero), returning the quotient and
/// the remainder as bounded ints.
///
/// # Signature
///
/// ```cairo
/// extern fn bounded_int_div_rem<Lhs, Rhs, impl H: DivRemHelper<Lhs, Rhs>>(
///     lhs: Lhs, rhs: NonZero<Rhs>,
/// ) -> (H::DivT, H::RemT) implicits(RangeCheck) nopanic;
/// ```
///
/// The input arguments can be both regular integers or bounded ints.
fn build_div_rem<'ctx, 'this>(
    context: &'ctx Context,
    registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, 'this>,
    _metadata: &mut MetadataStorage,
    info: &BoundedIntDivRemConcreteLibfunc,
) -> Result<()> {
    // Argument 0 is the range check builtin; 1 and 2 are the operands.
    let lhs_value = entry.arg(1)?;
    let rhs_value = entry.arg(2)?;
    // Extract the ranges for the operands and the result types.
    let lhs_ty = registry.get_type(&info.param_signatures()[1].ty)?;
    let rhs_ty = registry.get_type(&info.param_signatures()[2].ty)?;
    let lhs_range = lhs_ty.integer_range(registry)?;
    let rhs_range = rhs_ty.integer_range(registry)?;
    let div_range = registry
        .get_type(&info.branch_signatures()[0].vars[1].ty)?
        .integer_range(registry)?;
    let rem_range = registry
        .get_type(&info.branch_signatures()[0].vars[2].ty)?
        .integer_range(registry)?;
    // Bounded ints are stored offset by their lower bound, so they use the
    // width of the offset range; plain integers use the zero-based width.
    let lhs_width = if lhs_ty.is_bounded_int(registry)? {
        lhs_range.offset_bit_width()
    } else {
        lhs_range.zero_based_bit_width()
    };
    let rhs_width = if rhs_ty.is_bounded_int(registry)? {
        rhs_range.offset_bit_width()
    } else {
        rhs_range.zero_based_bit_width()
    };
    // The algorithm selection only affects the range check counter below;
    // the division itself is emitted directly as MLIR arith ops.
    let div_rem_algorithm = BoundedIntDivRemAlgorithm::try_new(&lhs_range, &rhs_range)
        .to_native_assert_error(&format!(
            "div_rem of ranges: lhs = {:#?} and rhs= {:#?} is not supported yet",
            &lhs_range, &rhs_range
        ))?;
    // Calculate the computation range. The operands are non negative, so the
    // computation range starts at zero.
    let compute_range = Range {
        lower: BigInt::ZERO,
        upper: (&lhs_range.upper).max(&rhs_range.upper).clone(),
    };
    let compute_ty = IntegerType::new(context, compute_range.zero_based_bit_width()).into();
    // Zero-extend operands into the computation range.
    native_assert!(
        compute_range.offset_bit_width() >= lhs_width,
        "the lhs_range bit_width must be less or equal than the compute_range"
    );
    native_assert!(
        compute_range.offset_bit_width() >= rhs_width,
        "the rhs_range bit_width must be less or equal than the compute_range"
    );
    // Unsigned extension for bounded ints (already offset into a
    // non-negative representation) and non-negative ranges; signed otherwise.
    let lhs_value = if compute_range.zero_based_bit_width() > lhs_width {
        if lhs_range.lower.sign() != Sign::Minus || lhs_ty.is_bounded_int(registry)? {
            entry.extui(lhs_value, compute_ty, location)?
        } else {
            entry.extsi(lhs_value, compute_ty, location)?
        }
    } else {
        lhs_value
    };
    let rhs_value = if compute_range.zero_based_bit_width() > rhs_width {
        if rhs_range.lower.sign() != Sign::Minus || rhs_ty.is_bounded_int(registry)? {
            entry.extui(rhs_value, compute_ty, location)?
        } else {
            entry.extsi(rhs_value, compute_ty, location)?
        }
    } else {
        rhs_value
    };
    // Offset the operands so that they are compatible with the operation:
    // bounded ints store `value - lower`, so add `lower` back to recover the
    // actual value.
    let lhs_value = if lhs_ty.is_bounded_int(registry)? && lhs_range.lower != BigInt::ZERO {
        let lhs_offset =
            entry.const_int_from_type(context, location, lhs_range.lower, compute_ty)?;
        entry.addi(lhs_value, lhs_offset, location)?
    } else {
        lhs_value
    };
    let rhs_value = if rhs_ty.is_bounded_int(registry)? && rhs_range.lower != BigInt::ZERO {
        let rhs_offset =
            entry.const_int_from_type(context, location, rhs_range.lower, compute_ty)?;
        entry.addi(rhs_value, rhs_offset, location)?
    } else {
        rhs_value
    };
    // Compute the operation (unsigned: both operands are non negative here).
    let div_value = entry.append_op_result(arith::divui(lhs_value, rhs_value, location))?;
    let rem_value = entry.append_op_result(arith::remui(lhs_value, rhs_value, location))?;
    // Offset the quotient into the output type's representation.
    // (The comparison borrows `div_range.lower` directly; the clone is only
    // needed where the value is consumed by `const_int_from_type`.)
    let div_value = if div_range.lower != BigInt::ZERO {
        let div_offset =
            entry.const_int_from_type(context, location, div_range.lower.clone(), compute_ty)?;
        entry.append_op_result(arith::subi(div_value, div_offset, location))?
    } else {
        div_value
    };
    native_assert!(
        rem_range.lower == BigInt::ZERO,
        "The remainder range lower bound should be zero"
    );
    // Truncate to the output type
    let div_value = if div_range.offset_bit_width() < compute_range.zero_based_bit_width() {
        entry.trunci(
            div_value,
            IntegerType::new(context, div_range.offset_bit_width()).into(),
            location,
        )?
    } else {
        div_value
    };
    let rem_value = if rem_range.offset_bit_width() < compute_range.zero_based_bit_width() {
        entry.trunci(
            rem_value,
            IntegerType::new(context, rem_range.offset_bit_width()).into(),
            location,
        )?
    } else {
        rem_value
    };
    // Increment the range check builtin by the number of times the
    // sierra-to-casm compiler would have used it for the chosen algorithm:
    // - `KnownSmallRhs`: 3 uses.
    //   https://github.com/starkware-libs/cairo/blob/v2.12.0-dev.1/crates/cairo-lang-sierra-to-casm/src/invocations/int/bounded.rs#L100
    // - `KnownSmallQuotient` / `KnownSmallLhs`: 4 uses (one extra).
    //   https://github.com/starkware-libs/cairo/blob/v2.12.0-dev.1/crates/cairo-lang-sierra-to-casm/src/invocations/int/bounded.rs#L129
    //   https://github.com/starkware-libs/cairo/blob/v2.12.0-dev.1/crates/cairo-lang-sierra-to-casm/src/invocations/int/bounded.rs#L157
    let range_check = match div_rem_algorithm {
        BoundedIntDivRemAlgorithm::KnownSmallRhs => crate::libfuncs::increment_builtin_counter_by(
            context,
            entry,
            location,
            entry.arg(0)?,
            3 * RANGE_CHECK_BUILTIN_SIZE,
        )?,
        BoundedIntDivRemAlgorithm::KnownSmallQuotient { .. }
        | BoundedIntDivRemAlgorithm::KnownSmallLhs { .. } => {
            crate::libfuncs::increment_builtin_counter_by(
                context,
                entry,
                location,
                entry.arg(0)?,
                4 * RANGE_CHECK_BUILTIN_SIZE,
            )?
        }
    };
    helper.br(entry, 0, &[range_check, div_value, rem_value], location)
}
/// Generate MLIR operations for the `bounded_int_constrain` libfunc.
///
/// Compares the input value against `info.boundary` and branches to the
/// first target (value narrowed to the "lower" output range) when
/// `value < boundary`, or to the second target (narrowed to the "upper"
/// output range) otherwise. The range check builtin (argument 0) is
/// incremented once and forwarded on both branches.
fn build_constrain<'ctx, 'this>(
    context: &'ctx Context,
    registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, 'this>,
    _metadata: &mut MetadataStorage,
    info: &BoundedIntConstrainConcreteLibfunc,
) -> Result<()> {
    let range_check = super::increment_builtin_counter(context, entry, location, entry.arg(0)?)?;
    let src_value: Value = entry.arg(1)?;
    let src_ty = registry.get_type(&info.param_signatures()[1].ty)?;
    let src_range = src_ty.integer_range(registry)?;
    // Bounded ints are stored offset by their lower bound, so they use the
    // width of the offset range; plain integers use the zero-based width.
    let src_width = if src_ty.is_bounded_int(registry)? {
        src_range.offset_bit_width()
    } else {
        src_range.zero_based_bit_width()
    };
    // Output ranges for the two result branches.
    let lower_range = registry
        .get_type(&info.branch_signatures()[0].vars[1].ty)?
        .integer_range(registry)?;
    let upper_range = registry
        .get_type(&info.branch_signatures()[1].vars[1].ty)?
        .integer_range(registry)?;
    // For bounded ints the stored value is `value - lower`, so the boundary
    // must be shifted by the same offset before the comparison.
    let boundary = if src_ty.is_bounded_int(registry)? {
        entry.const_int_from_type(
            context,
            location,
            info.boundary.clone() - src_range.lower.clone(),
            src_value.r#type(),
        )?
    } else {
        entry.const_int_from_type(context, location, info.boundary.clone(), src_value.r#type())?
    };
    // Bounded-int representations and non-negative ranges compare unsigned;
    // a range that can be negative needs a signed comparison.
    let cmpi_predicate =
        if src_ty.is_bounded_int(registry)? || src_range.lower.sign() != Sign::Minus {
            CmpiPredicate::Ult
        } else {
            CmpiPredicate::Slt
        };
    let is_lower = entry.cmpi(context, cmpi_predicate, src_value, boundary, location)?;
    let lower_block = helper.append_block(Block::new(&[]));
    let upper_block = helper.append_block(Block::new(&[]));
    entry.append_operation(cf::cond_br(
        context,
        is_lower,
        lower_block,
        upper_block,
        &[],
        &[],
        location,
    ));
    // value < boundary: re-offset into the lower output range and truncate.
    {
        let res_value = if src_range.lower != lower_range.lower {
            let lower_offset = &lower_range.lower - &src_range.lower;
            let lower_offset = lower_block.const_int_from_type(
                context,
                location,
                lower_offset,
                src_value.r#type(),
            )?;
            lower_block.append_op_result(arith::subi(src_value, lower_offset, location))?
        } else {
            src_value
        };
        let res_value = if src_width > lower_range.offset_bit_width() {
            lower_block.trunci(
                res_value,
                IntegerType::new(context, lower_range.offset_bit_width()).into(),
                location,
            )?
        } else {
            res_value
        };
        helper.br(lower_block, 0, &[range_check, res_value], location)?;
    }
    // value >= boundary: re-offset into the upper output range and truncate.
    {
        let res_value = if src_range.lower != upper_range.lower {
            let upper_offset = &upper_range.lower - &src_range.lower;
            let upper_offset = upper_block.const_int_from_type(
                context,
                location,
                upper_offset,
                src_value.r#type(),
            )?;
            upper_block.append_op_result(arith::subi(src_value, upper_offset, location))?
        } else {
            src_value
        };
        let res_value = if src_width > upper_range.offset_bit_width() {
            upper_block.trunci(
                res_value,
                IntegerType::new(context, upper_range.offset_bit_width()).into(),
                location,
            )?
        } else {
            res_value
        };
        helper.br(upper_block, 1, &[range_check, res_value], location)?;
    }
    Ok(())
}
/// Makes a downcast of a type `T` to `BoundedInt<T::MIN, T::MAX - 1>`
/// or `BoundedInt<T::MIN + 1, T::MAX>` where `T` can be any type of signed
/// or unsigned integer.
///
/// Branch 0 is taken (with no payload) when the input equals the trimmed
/// value; branch 1 carries the narrowed value otherwise.
///
/// ```cairo
/// extern fn bounded_int_trim<T, const TRIMMED_VALUE: felt252, impl H: TrimHelper<T, TRIMMED_VALUE>>(
///     value: T,
/// ) -> core::internal::OptionRev<H::Target> nopanic;
/// ```
fn build_trim<'ctx, 'this>(
    context: &'ctx Context,
    registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, 'this>,
    metadata: &mut MetadataStorage,
    info: &BoundedIntTrimConcreteLibfunc,
) -> Result<()> {
    let value: Value = entry.arg(0)?;
    let src_ty = registry.get_type(&info.param_signatures()[0].ty)?;
    let dst_ty = registry.get_type(&info.branch_signatures()[1].vars[0].ty)?;
    // Bounded ints are stored offset by their lower bound, so the trimmed
    // value must be shifted into the same representation before comparing.
    let trimmed_value = if src_ty.is_bounded_int(registry)? {
        entry.const_int_from_type(
            context,
            location,
            info.trimmed_value.clone() - src_ty.integer_range(registry)?.lower,
            value.r#type(),
        )?
    } else {
        entry.const_int_from_type(
            context,
            location,
            info.trimmed_value.clone(),
            value.r#type(),
        )?
    };
    let is_invalid = entry.cmpi(context, CmpiPredicate::Eq, value, trimmed_value, location)?;
    // Re-offset the value into the destination bounded-int representation.
    // These ops are emitted unconditionally, but their result is only
    // forwarded on the valid branch (1).
    let offset = if src_ty.is_bounded_int(registry)? {
        dst_ty.integer_range(registry)?.lower - src_ty.integer_range(registry)?.lower
    } else {
        dst_ty.integer_range(registry)?.lower
    };
    let value = entry.append_op_result(arith::subi(
        value,
        entry.const_int_from_type(context, location, offset, value.r#type())?,
        location,
    ))?;
    // Truncate to the (possibly narrower) destination type.
    let value = entry.trunci(
        value,
        dst_ty.build(
            context,
            helper,
            registry,
            metadata,
            &info.branch_signatures()[1].vars[0].ty,
        )?,
        location,
    )?;
    // Branch 0: value equals the trimmed value (invalid). Branch 1: valid.
    helper.cond_br(
        context,
        entry,
        is_invalid,
        [0, 1],
        [&[], &[value]],
        location,
    )
}
/// Generate MLIR operations for the `bounded_int_is_zero` libfunc.
///
/// Branches to the first target (no payload) when the value is zero, and to
/// the second target otherwise, forwarding the original value.
fn build_is_zero<'ctx, 'this>(
    context: &'ctx Context,
    registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, 'this>,
    _metadata: &mut MetadataStorage,
    info: &SignatureOnlyConcreteLibfunc,
) -> Result<()> {
    let src_value: Value = entry.arg(0)?;
    let src_ty = registry.get_type(&info.signature.param_signatures[0].ty)?;
    let src_range = src_ty.integer_range(registry)?;
    // The check only makes sense when the range actually contains zero.
    native_assert!(
        src_range.lower <= BigInt::ZERO && BigInt::ZERO < src_range.upper,
        "value can never be zero"
    );
    // Bounded ints are stored offset by their lower bound, so "zero" is
    // represented as `0 - lower`.
    let k0 = if src_ty.is_bounded_int(registry)? {
        // We can do the subtraction since the lower bound of the bounded int
        // will always be less or equal than 0 (asserted above).
        entry.const_int_from_type(context, location, 0 - src_range.lower, src_value.r#type())?
    } else {
        entry.const_int_from_type(context, location, 0, src_value.r#type())?
    };
    let src_is_zero = entry.cmpi(context, CmpiPredicate::Eq, src_value, k0, location)?;
    // Branch 0: value is zero (no payload). Branch 1: non-zero, forward it.
    helper.cond_br(
        context,
        entry,
        src_is_zero,
        [0, 1],
        [&[], &[src_value]],
        location,
    )
}
/// Generate MLIR operations for the `bounded_int_wrap_non_zero` libfunc.
///
/// Wrapping into `NonZero` is a runtime no-op: the value's representation is
/// unchanged and it is simply forwarded. The range is checked at compile
/// time to guarantee it cannot contain zero.
fn build_wrap_non_zero<'ctx, 'this>(
    context: &'ctx Context,
    registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, 'this>,
    metadata: &mut MetadataStorage,
    info: &SignatureOnlyConcreteLibfunc,
) -> Result<()> {
    let src_range = registry
        .get_type(&info.signature.param_signatures[0].ty)?
        .integer_range(registry)?;
    // The whole range must lie strictly above or strictly below zero.
    native_assert!(
        src_range.lower > BigInt::ZERO || BigInt::ZERO >= src_range.upper,
        "value must not be zero"
    );
    // Forward the single argument unchanged to branch 0.
    super::build_noop::<1, false>(
        context,
        registry,
        entry,
        location,
        helper,
        metadata,
        &info.signature.param_signatures,
    )
}
#[cfg(test)]
mod test {
use cairo_lang_sierra::program::Program;
use cairo_vm::Felt252;
use lazy_static::lazy_static;
use test_case::test_case;
use crate::{
jit_enum, jit_panic_byte_array, jit_struct, load_cairo,
utils::testing::{run_program, run_program_assert_output},
Value,
};
lazy_static! {
static ref TEST_MUL_PROGRAM: (String, Program) = load_cairo! {
#[feature("bounded-int-utils")]
use core::internal::bounded_int::{self, BoundedInt, MulHelper, mul, UnitInt};
impl MulHelperBI_m128x127_BI_m128x127 of MulHelper<BoundedInt<-128, 127>, BoundedInt<-128, 127>> {
type Result = BoundedInt<-16256, 16384>;
}
impl MulHelperBI_0x128_BI_0x128 of MulHelper<BoundedInt<0, 128>, BoundedInt<0, 128>> {
type Result = BoundedInt<0, 16384>;
}
impl MulHelperBI_1x31_BI_1x1 of MulHelper<BoundedInt<1, 31>, BoundedInt<1, 1>> {
type Result = BoundedInt<1, 31>;
}
impl MulHelperBI_m1x31_BI_m1xm1 of MulHelper<BoundedInt<-1, 31>, BoundedInt<-1, -1>> {
type Result = BoundedInt<-31, 1>;
}
impl MulHelperBI_31x31_BI_1x1 of MulHelper<BoundedInt<31, 31>, BoundedInt<1, 1>> {
type Result = BoundedInt<31, 31>;
}
impl MulHelperBI_m10x0_BI_0x100 of MulHelper<BoundedInt<-100, 0>, BoundedInt<0, 100>> {
type Result = BoundedInt<-10000, 0>;
}
impl MulHelperBI_1x1_BI_1x1 of MulHelper<BoundedInt<1, 1>, BoundedInt<1, 1>> {
type Result = BoundedInt<1, 1>;
}
impl MulHelperBI_m5x5_UI_2 of MulHelper<BoundedInt<-5, 5>, UnitInt<2>> {
type Result = BoundedInt<-10, 10>;
}
fn bi_m128x127_times_bi_m128x127(a: felt252, b: felt252) -> BoundedInt<-16256, 16384> {
let a: BoundedInt<-128, 127> = a.try_into().unwrap();
let b: BoundedInt<-128, 127> = b.try_into().unwrap();
mul(a,b)
}
fn bi_0x128_times_bi_0x128(a: felt252, b: felt252) -> BoundedInt<0, 16384> {
let a: BoundedInt<0, 128> = a.try_into().unwrap();
let b: BoundedInt<0, 128> = b.try_into().unwrap();
mul(a,b)
}
fn bi_1x31_times_bi_1x1(a: felt252, b: felt252) -> BoundedInt<1, 31> {
let a: BoundedInt<1, 31> = a.try_into().unwrap();
let b: BoundedInt<1, 1> = b.try_into().unwrap();
mul(a,b)
}
fn bi_m1x31_times_bi_m1xm1(a: felt252, b: felt252) -> BoundedInt<-31, 1> {
let a: BoundedInt<-1, 31> = a.try_into().unwrap();
let b: BoundedInt<-1, -1> = b.try_into().unwrap();
mul(a,b)
}
fn bi_31x31_times_bi_1x1(a: felt252, b: felt252) -> BoundedInt<31, 31> {
let a: BoundedInt<31, 31> = a.try_into().unwrap();
let b: BoundedInt<1, 1> = b.try_into().unwrap();
mul(a,b)
}
fn bi_m100x0_times_bi_0x100(a: felt252, b: felt252) -> BoundedInt<-10000, 0> {
let a: BoundedInt<-100, 0> = a.try_into().unwrap();
let b: BoundedInt<0, 100> = b.try_into().unwrap();
mul(a,b)
}
fn bi_1x1_times_bi_1x1(a: felt252, b: felt252) -> BoundedInt<1, 1> {
let a: BoundedInt<1, 1> = a.try_into().unwrap();
let b: BoundedInt<1, 1> = b.try_into().unwrap();
mul(a,b)
}
fn bi_m5x5_times_ui_2(a: felt252, b: felt252) -> BoundedInt<-10, 10> {
let a: BoundedInt<-5, 5> = a.try_into().unwrap();
let b: UnitInt<2> = b.try_into().unwrap();
mul(a,b)
}
| rust | Apache-2.0 | f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce | 2026-01-04T20:20:54.031924Z | true |
lambdaclass/cairo_native | https://github.com/lambdaclass/cairo_native/blob/f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce/src/libfuncs/pedersen.rs | src/libfuncs/pedersen.rs | //! # Pedersen hashing libfuncs
//!
use super::LibfuncHelper;
use crate::{
error::{panic::ToNativeAssertError, Result},
execution_result::PEDERSEN_BUILTIN_SIZE,
metadata::{runtime_bindings::RuntimeBindingsMeta, MetadataStorage},
utils::{get_integer_layout, ProgramRegistryExt},
};
use cairo_lang_sierra::{
extensions::{
core::{CoreLibfunc, CoreType},
lib_func::SignatureOnlyConcreteLibfunc,
pedersen::PedersenConcreteLibfunc,
ConcreteLibfunc,
},
program_registry::ProgramRegistry,
};
use melior::{
helpers::{ArithBlockExt, BuiltinBlockExt, LlvmBlockExt},
ir::{r#type::IntegerType, Block, Location},
Context,
};
/// Select and call the correct libfunc builder function from the selector.
pub fn build<'ctx, 'this>(
    context: &'ctx Context,
    registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, 'this>,
    metadata: &mut MetadataStorage,
    selector: &PedersenConcreteLibfunc,
) -> Result<()> {
    match selector {
        // `PedersenHash` is currently the only Pedersen libfunc variant.
        PedersenConcreteLibfunc::PedersenHash(info) => {
            build_pedersen(context, registry, entry, location, helper, metadata, info)
        }
    }
}
/// Generate MLIR operations for the `pedersen` hash libfunc.
///
/// Block arguments: 0 = pedersen builtin counter, 1 = lhs felt252,
/// 2 = rhs felt252. Branch 0 receives the updated builtin counter and the
/// hash result.
pub fn build_pedersen<'ctx>(
    context: &'ctx Context,
    registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'ctx Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, '_>,
    metadata: &mut MetadataStorage,
    info: &SignatureOnlyConcreteLibfunc,
) -> Result<()> {
    // Fail fast if the runtime bindings are missing; the binding is fetched
    // again below at its point of use (`registry.build_type` in between also
    // needs `metadata` mutably).
    metadata
        .get_mut::<RuntimeBindingsMeta>()
        .to_native_assert_error("runtime library should be available")?;
    // The sierra-to-casm compiler uses the pedersen builtin a total of 1 time.
    // https://github.com/starkware-libs/cairo/blob/v2.12.0-dev.1/crates/cairo-lang-sierra-to-casm/src/invocations/pedersen.rs?plain=1#L23
    let pedersen_builtin = super::increment_builtin_counter_by(
        context,
        entry,
        location,
        entry.arg(0)?,
        PEDERSEN_BUILTIN_SIZE,
    )?;
    let felt252_ty =
        registry.build_type(context, helper, metadata, &info.param_signatures()[1].ty)?;
    let i256_ty = IntegerType::new(context, 256).into();
    let layout_i256 = get_integer_layout(256);
    let lhs = entry.arg(1)?;
    let rhs = entry.arg(2)?;
    // We must extend to i256 because bswap must be an even number of bytes.
    // Stack slots (allocated in the init block) for both operands and result.
    let lhs_ptr = helper
        .init_block()
        .alloca1(context, location, i256_ty, layout_i256.align())?;
    let rhs_ptr = helper
        .init_block()
        .alloca1(context, location, i256_ty, layout_i256.align())?;
    let dst_ptr = helper
        .init_block()
        .alloca1(context, location, i256_ty, layout_i256.align())?;
    let lhs_i256 = entry.extui(lhs, i256_ty, location)?;
    let rhs_i256 = entry.extui(rhs, i256_ty, location)?;
    entry.store(context, location, lhs_ptr, lhs_i256)?;
    entry.store(context, location, rhs_ptr, rhs_i256)?;
    let runtime_bindings = metadata
        .get_mut::<RuntimeBindingsMeta>()
        .to_native_assert_error("runtime library should be available")?;
    // The runtime computes the hash through pointers: *dst = pedersen(*lhs, *rhs).
    runtime_bindings
        .libfunc_pedersen(context, helper, entry, dst_ptr, lhs_ptr, rhs_ptr, location)?;
    // Narrow the i256 result back down to the felt252 representation.
    let result = entry.load(context, location, dst_ptr, i256_ty)?;
    let result = entry.trunci(result, felt252_ty, location)?;
    helper.br(entry, 0, &[pedersen_builtin, result], location)
}
#[cfg(test)]
mod test {
    use crate::{load_cairo, utils::testing::run_program_assert_output};
    use starknet_types_core::felt::Felt;

    /// Hashes two small felts and checks the result against a known
    /// reference Pedersen hash value.
    #[test]
    fn run_pedersen() {
        let program = load_cairo!(
            use core::pedersen::pedersen;

            fn run_test(a: felt252, b: felt252) -> felt252 {
                pedersen(a, b)
            }
        );

        run_program_assert_output(
            &program,
            "run_test",
            &[Felt::from(2).into(), Felt::from(4).into()],
            Felt::from_dec_str(
                "2178161520066714737684323463974044933282313051386084149915030950231093462467",
            )
            .unwrap()
            .into(),
        );
    }
}
| rust | Apache-2.0 | f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce | 2026-01-04T20:20:54.031924Z | false |
lambdaclass/cairo_native | https://github.com/lambdaclass/cairo_native/blob/f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce/src/libfuncs/felt252_dict_entry.rs | src/libfuncs/felt252_dict_entry.rs | //! # `Felt` dictionary entry libfuncs
use super::LibfuncHelper;
use crate::{
error::{Error, Result},
metadata::{runtime_bindings::RuntimeBindingsMeta, MetadataStorage},
types::TypeBuilder,
utils::ProgramRegistryExt,
};
use cairo_lang_sierra::{
extensions::{
core::{CoreLibfunc, CoreType},
felt252_dict::Felt252DictEntryConcreteLibfunc,
lib_func::SignatureAndTypeConcreteLibfunc,
ConcreteLibfunc,
},
program_registry::ProgramRegistry,
};
use melior::{
dialect::{llvm, scf},
helpers::{ArithBlockExt, BuiltinBlockExt, LlvmBlockExt},
ir::{r#type::IntegerType, Block, BlockLike, Location, Region},
Context,
};
use std::cell::Cell;
/// Select and call the correct libfunc builder function from the selector.
pub fn build<'ctx, 'this>(
    context: &'ctx Context,
    registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, 'this>,
    metadata: &mut MetadataStorage,
    selector: &Felt252DictEntryConcreteLibfunc,
) -> Result<()> {
    match selector {
        // `felt252_dict_entry_get`: fetch (or create) an entry for a key.
        Felt252DictEntryConcreteLibfunc::Get(info) => {
            build_get(context, registry, entry, location, helper, metadata, info)
        }
        // `felt252_dict_entry_finalize`: write the new value and return the dict.
        Felt252DictEntryConcreteLibfunc::Finalize(info) => {
            build_finalize(context, registry, entry, location, helper, metadata, info)
        }
    }
}
/// The felt252_dict_entry_get libfunc receives the dictionary and the key and
/// returns the associated dict entry, along with its value.
///
/// The dict entry also contains a pointer to the dictionary.
///
/// If the key doesn't yet exist, it is created and the type's default value is returned.
///
/// # Cairo Signature
///
/// ```cairo
/// fn felt252_dict_entry_get<T>(dict: Felt252Dict<T>, key: felt252) -> (Felt252DictEntry<T>, T) nopanic;
/// ```
pub fn build_get<'ctx, 'this>(
    context: &'ctx Context,
    registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, 'this>,
    metadata: &mut MetadataStorage,
    info: &SignatureAndTypeConcreteLibfunc,
) -> Result<()> {
    // MLIR types for the key, the returned entry struct, and the value.
    let (key_ty, key_layout) = registry.build_type_with_layout(
        context,
        helper,
        metadata,
        &info.param_signatures()[1].ty,
    )?;
    let entry_ty = registry.build_type(
        context,
        helper,
        metadata,
        &info.branch_signatures()[0].vars[0].ty,
    )?;
    let concrete_value_type = registry.get_type(&info.ty)?;
    let value_ty = concrete_value_type.build(context, helper, registry, metadata, &info.ty)?;
    let dict_ptr = entry.arg(0)?;
    let entry_key = entry.arg(1)?;
    // The runtime's `dict_get` takes the key by pointer, so spill it to a
    // stack slot allocated in the function's init block.
    let entry_key_ptr =
        helper
            .init_block()
            .alloca1(context, location, key_ty, key_layout.align())?;
    entry.store(context, location, entry_key_ptr, entry_key)?;
    // `dict_get` returns whether the key was present, plus a pointer to the
    // (possibly freshly created) value slot.
    let (is_present, value_ptr) = metadata
        .get_mut::<RuntimeBindingsMeta>()
        .ok_or(Error::MissingMetadata)?
        .dict_get(context, helper, entry, dict_ptr, entry_key_ptr, location)?;
    let is_present = entry.trunci(is_present, IntegerType::new(context, 1).into(), location)?;
    let value = entry.append_op_result(scf::r#if(
        is_present,
        &[value_ty],
        {
            let region = Region::new();
            let block = region.append_block(Block::new(&[]));
            // If the entry is present we can load the current value.
            let value = block.load(context, location, value_ptr, value_ty)?;
            block.append_operation(scf::r#yield(&[value], location));
            region
        },
        {
            let region = Region::new();
            let block = region.append_block(Block::new(&[]));
            // `build_default` expects a `LibfuncHelper`, so construct a local
            // one whose region/last block point into this `scf.if` branch.
            let helper = LibfuncHelper {
                module: helper.module,
                init_block: helper.init_block,
                region: &region,
                blocks_arena: helper.blocks_arena,
                last_block: Cell::new(&block),
                branches: Vec::new(),
                results: Vec::new(),
                #[cfg(feature = "with-libfunc-profiling")]
                profiler: helper.profiler.clone(),
            };
            // When the entry is vacant we need to create the default value.
            let value = concrete_value_type.build_default(
                context, registry, &block, location, &helper, metadata, &info.ty,
            )?;
            block.append_operation(scf::r#yield(&[value], location));
            region
        },
        location,
    ))?;
    // Assemble the `Felt252DictEntry<T>` struct: (dict pointer, value slot).
    let dict_entry = entry.append_op_result(llvm::undef(entry_ty, location))?;
    let dict_entry = entry.insert_values(context, location, dict_entry, &[dict_ptr, value_ptr])?;
    // The `Felt252DictEntry<T>` holds both the `Felt252Dict<T>` and the pointer to the space where
    // the new value will be written when the entry is finalized. If the entry were to be dropped
    // (without being consumed by the finalizer), which shouldn't be possible under normal
    // conditions, and the type `T` requires a custom drop implementation (ex. arrays, dicts...),
    // it'll cause undefined behavior because when the value is moved out of the dictionary (on
    // `get`), the memory it occupied is not modified because we're expecting it to be overwritten
    // by the finalizer (in other words, the extracted element will be dropped twice).
    helper.br(entry, 0, &[dict_entry, value], location)
}
/// The felt252_dict_entry_finalize libfunc writes the new value through the
/// entry's value pointer and gives back the dictionary itself.
///
/// # Cairo Signature
///
/// ```cairo
/// fn felt252_dict_entry_finalize<T>(dict_entry: Felt252DictEntry<T>, new_value: T) -> Felt252Dict<T> nopanic;
/// ```
pub fn build_finalize<'ctx, 'this>(
    context: &'ctx Context,
    _registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, 'this>,
    _metadata: &mut MetadataStorage,
    _info: &SignatureAndTypeConcreteLibfunc,
) -> Result<()> {
    // The entry struct (see `crate::types::felt252_dict_entry`) packs the
    // dictionary pointer (index 0) and the value slot pointer (index 1).
    let entry_struct = entry.arg(0)?;
    let value = entry.arg(1)?;
    let ptr_ty = llvm::r#type::pointer(context, 0);
    let dict_ptr = entry.extract_value(context, location, entry_struct, ptr_ty, 0)?;
    let value_slot = entry.extract_value(context, location, entry_struct, ptr_ty, 1)?;
    // Overwrite the slot reserved by `felt252_dict_entry_get`, then return
    // the dictionary pointer.
    entry.store(context, location, value_slot, value)?;
    helper.br(entry, 0, &[dict_ptr], location)
}
#[cfg(test)]
mod test {
    use crate::{
        jit_dict, load_cairo,
        utils::testing::{run_program, run_program_assert_output},
    };

    /// A single insert followed by a get must return the inserted value.
    #[test]
    fn run_dict_insert() {
        let program = load_cairo!(
            use traits::Default;
            use dict::Felt252DictTrait;

            fn run_test() -> u32 {
                let mut dict: Felt252Dict<u32> = Default::default();
                dict.insert(2, 1_u32);
                dict.get(2)
            }
        );

        run_program_assert_output(&program, "run_test", &[], 1u32.into());
    }

    /// A large key must round-trip just like a small one.
    #[test]
    fn run_dict_insert_big() {
        let program = load_cairo!(
            use traits::Default;
            use dict::Felt252DictTrait;

            fn run_test() -> u64 {
                let mut dict: Felt252Dict<u64> = Default::default();
                dict.insert(200000000, 4_u64);
                dict.get(200000000)
            }
        );

        run_program_assert_output(&program, "run_test", &[], 4u64.into());
    }

    /// Returning the dictionary itself must expose the inserted entry.
    #[test]
    fn run_dict_insert_ret_dict() {
        let program = load_cairo!(
            use traits::Default;
            use dict::Felt252DictTrait;

            fn run_test() -> Felt252Dict<u32> {
                let mut dict: Felt252Dict<u32> = Default::default();
                dict.insert(2, 1_u32);
                dict
            }
        );

        run_program_assert_output(
            &program,
            "run_test",
            &[],
            jit_dict!(
                2 => 1u32
            ),
        );
    }

    /// Many inserts followed by a get of the last key must stay correct.
    #[test]
    fn run_dict_insert_multiple() {
        let program = load_cairo!(
            use traits::Default;
            use dict::Felt252DictTrait;

            fn run_test() -> u32 {
                let mut dict: Felt252Dict<u32> = Default::default();
                dict.insert(2, 1_u32);
                dict.insert(3, 1_u32);
                dict.insert(4, 1_u32);
                dict.insert(5, 1_u32);
                dict.insert(6, 1_u32);
                dict.insert(7, 1_u32);
                dict.insert(8, 1_u32);
                dict.insert(9, 1_u32);
                dict.insert(10, 1_u32);
                dict.insert(11, 1_u32);
                dict.insert(12, 1_u32);
                dict.insert(13, 1_u32);
                dict.insert(14, 1_u32);
                dict.insert(15, 1_u32);
                dict.insert(16, 1_u32);
                dict.insert(17, 1_u32);
                dict.insert(18, 1345432_u32);
                dict.get(18)
            }
        );

        run_program_assert_output(&program, "run_test", &[], 1345432_u32.into());
    }

    /// Exercises inserting after a snapshot has been taken and dropped;
    /// only checks that execution completes without crashing.
    #[test]
    fn run_dict_clone_ptr_update() {
        let program = load_cairo!(
            use core::dict::Felt252Dict;

            fn run_test() {
                let mut dict: Felt252Dict<u64> = Default::default();
                let snapshot = @dict;
                dict.insert(1, 1);
                drop(snapshot);
                dict.insert(2, 2);
            }
        );

        run_program(&program, "run_test", &[]);
    }
}
}
| rust | Apache-2.0 | f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce | 2026-01-04T20:20:54.031924Z | false |
lambdaclass/cairo_native | https://github.com/lambdaclass/cairo_native/blob/f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce/src/libfuncs/gas_reserve.rs | src/libfuncs/gas_reserve.rs | use crate::libfuncs::LibfuncHelper;
use crate::{error::Result, metadata::MetadataStorage};
use cairo_lang_sierra::extensions::lib_func::SignatureOnlyConcreteLibfunc;
use cairo_lang_sierra::{
extensions::{
core::{CoreLibfunc, CoreType},
gas_reserve::GasReserveConcreteLibfunc,
},
program_registry::ProgramRegistry,
};
use melior::dialect::arith::{self, CmpiPredicate};
use melior::helpers::{ArithBlockExt, BuiltinBlockExt};
use melior::ir::r#type::IntegerType;
use melior::{
ir::{Block, Location},
Context,
};
/// Select and call the correct libfunc builder function from the selector.
pub fn build<'ctx, 'this>(
    context: &'ctx Context,
    registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, 'this>,
    metadata: &mut MetadataStorage,
    selector: &GasReserveConcreteLibfunc,
) -> Result<()> {
    match selector {
        // `gas_reserve_create`: carve a reserve out of the gas builtin.
        GasReserveConcreteLibfunc::Create(info) => {
            build_gas_reserve_create(context, registry, entry, location, helper, metadata, info)
        }
        // `gas_reserve_utilize`: return the reserved gas to the builtin.
        GasReserveConcreteLibfunc::Utilize(info) => {
            build_gas_reserve_utilize(context, registry, entry, location, helper, metadata, info)
        }
    }
}
/// Generate MLIR operations for the `gas_reserve_create` libfunc.
///
/// Block arguments: 0 = range check builtin, 1 = gas builtin (u64),
/// 2 = requested amount (u128). On success (branch 0) forwards the range
/// check, the remaining gas and the reserve (equal to `amount`); on failure
/// (branch 1) forwards the range check and the untouched gas counter.
///
/// # Cairo Signature
///
/// ```cairo
/// pub extern fn gas_reserve_create(
///     amount: u128,
/// ) -> Option<GasReserve> implicits(RangeCheck, GasBuiltin) nopanic;
/// ```
fn build_gas_reserve_create<'ctx, 'this>(
    context: &'ctx Context,
    _registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, 'this>,
    _metadata: &mut MetadataStorage,
    _info: &SignatureOnlyConcreteLibfunc,
) -> Result<()> {
    let range_check = super::increment_builtin_counter(context, entry, location, entry.arg(0)?)?;
    let current_gas = entry.arg(1)?; // u64
    let amount = entry.arg(2)?; // u128
    // Widen the gas counter to u128 so it can be compared with `amount`.
    let amount_ty = IntegerType::new(context, 128).into();
    let current_gas_128 = entry.append_op_result(arith::extui(current_gas, amount_ty, location))?;
    let enough_gas = entry.cmpi(
        context,
        CmpiPredicate::Uge,
        current_gas_128,
        amount,
        location,
    )?;
    let gas_builtin_ty = IntegerType::new(context, 64).into();
    // The subtraction is emitted unconditionally, but its result is only
    // forwarded on the success branch, where `current_gas >= amount` holds.
    let spare_gas_128 = entry.append_op_result(arith::subi(current_gas_128, amount, location))?;
    let spare_gas =
        entry.append_op_result(arith::trunci(spare_gas_128, gas_builtin_ty, location))?;
    helper.cond_br(
        context,
        entry,
        enough_gas,
        [0, 1],
        [
            &[range_check, spare_gas, amount],
            &[range_check, current_gas],
        ],
        location,
    )
}
/// Generate MLIR operations for the `gas_reserve_utilize` libfunc.
///
/// Returns the reserved gas back to the gas builtin: the u128 reserve is
/// narrowed to the u64 gas counter width and added to it.
///
/// # Cairo Signature
///
/// ```cairo
/// pub extern fn gas_reserve_utilize(reserve: GasReserve) implicits(GasBuiltin) nopanic;
/// ```
fn build_gas_reserve_utilize<'ctx, 'this>(
    context: &'ctx Context,
    _registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, 'this>,
    _metadata: &mut MetadataStorage,
    _info: &SignatureOnlyConcreteLibfunc,
) -> Result<()> {
    // Argument 0 is the u64 gas builtin; argument 1 is the u128 reserve.
    let gas_builtin = entry.arg(0)?;
    let reserve = entry.arg(1)?;
    let gas_builtin_ty = IntegerType::new(context, 64).into();
    let reserve_as_u64 = entry.trunci(reserve, gas_builtin_ty, location)?;
    let restored_gas = entry.addi(gas_builtin, reserve_as_u64, location)?;
    helper.br(entry, 0, &[restored_gas], location)
}
#[cfg(test)]
mod test {
    use crate::{load_cairo, utils::testing::run_program, Value};

    /// Tests `gas_reserve_create`, both directly and through the difference
    /// in available gas around creating (and then releasing) a reserve.
    #[test]
    fn run_create() {
        let program = load_cairo!(
            use core::gas::{GasReserve, gas_reserve_create, gas_reserve_utilize};

            fn run_test_1() -> Option<GasReserve> {
                gas_reserve_create(100)
            }

            fn run_test_2(amount: u128) -> u128 {
                let initial_gas = core::testing::get_available_gas();
                let reserve = gas_reserve_create(amount).unwrap();
                let final_gas = core::testing::get_available_gas();
                gas_reserve_utilize(reserve);
                initial_gas - final_gas
            }
        );

        // `run_test_1` must return `Some(reserve)` holding the requested
        // amount. Use `let else` + panic so an unexpected return shape fails
        // the test instead of silently skipping the assertions (the previous
        // `if let` form passed vacuously on a mismatch).
        let result = run_program(&program, "run_test_1", &[]).return_value;
        let Value::Enum { tag, value, .. } = result else {
            panic!("expected an enum return value");
        };
        assert_eq!(tag, 0);
        assert_eq!(value, Box::new(Value::Uint128(100)));

        for gas_amount in [100u128, 700] {
            let result =
                run_program(&program, "run_test_2", &[Value::Uint128(gas_amount)]).return_value;
            let Value::Enum { tag, value, .. } = result else {
                panic!("expected an enum return value");
            };
            assert_eq!(tag, 0);
            let Value::Struct { fields, .. } = *value else {
                panic!("expected a struct payload");
            };
            // Creating the reserve must consume exactly `gas_amount` gas.
            assert_eq!(fields[0], Value::Uint128(gas_amount));
        }
    }

    /// Tests that `gas_reserve_utilize` returns the reserved gas, leaving the
    /// available gas unchanged overall.
    #[test]
    fn run_utilize() {
        let program = load_cairo!(
            use core::gas::{GasReserve, gas_reserve_create, gas_reserve_utilize};

            fn run_test(amount: u128) -> u128 {
                let initial_gas = core::testing::get_available_gas();
                let reserve = gas_reserve_create(amount).unwrap();
                gas_reserve_utilize(reserve);
                let final_gas = core::testing::get_available_gas();
                initial_gas - final_gas
            }
        );

        for gas_amount in [10u128, 1000] {
            let result =
                run_program(&program, "run_test", &[Value::Uint128(gas_amount)]).return_value;
            let Value::Enum { tag, value, .. } = result else {
                panic!("expected an enum return value");
            };
            assert_eq!(tag, 0);
            let Value::Struct { fields, .. } = *value else {
                panic!("expected a struct payload");
            };
            // Create + utilize must be gas-neutral.
            assert_eq!(fields[0], Value::Uint128(0));
        }
    }
}
| rust | Apache-2.0 | f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce | 2026-01-04T20:20:54.031924Z | false |
lambdaclass/cairo_native | https://github.com/lambdaclass/cairo_native/blob/f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce/src/libfuncs/bool.rs | src/libfuncs/bool.rs | //! # Boolean libfuncs
use super::LibfuncHelper;
use crate::{
error::{panic::ToNativeAssertError, Result},
metadata::MetadataStorage,
types::TypeBuilder,
utils::ProgramRegistryExt,
};
use cairo_lang_sierra::{
extensions::{
boolean::BoolConcreteLibfunc,
core::{CoreLibfunc, CoreType},
lib_func::SignatureOnlyConcreteLibfunc,
ConcreteLibfunc,
},
program_registry::ProgramRegistry,
};
use melior::{
dialect::{arith, llvm},
helpers::{ArithBlockExt, BuiltinBlockExt, LlvmBlockExt},
ir::{r#type::IntegerType, Block, Location},
Context,
};
/// Select and call the correct libfunc builder function from the selector.
pub fn build<'ctx, 'this>(
    context: &'ctx Context,
    registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, 'this>,
    metadata: &mut MetadataStorage,
    selector: &BoolConcreteLibfunc,
) -> Result<()> {
    // The three binary operations share one builder parameterized by the
    // operation to apply; `not` and the felt252 conversion have dedicated
    // builders and return early.
    let (info, bin_op) = match selector {
        BoolConcreteLibfunc::And(info) => (info, BoolOp::And),
        BoolConcreteLibfunc::Xor(info) => (info, BoolOp::Xor),
        BoolConcreteLibfunc::Or(info) => (info, BoolOp::Or),
        BoolConcreteLibfunc::Not(info) => {
            return build_bool_not(context, registry, entry, location, helper, metadata, info);
        }
        BoolConcreteLibfunc::ToFelt252(info) => {
            return build_bool_to_felt252(
                context, registry, entry, location, helper, metadata, info,
            );
        }
    };

    build_bool_binary(
        context, registry, entry, location, helper, metadata, info, bin_op,
    )
}
/// Selector for the shared binary `bool` builder: chooses which MLIR integer
/// operation `build_bool_binary` applies to the enum tags.
#[derive(Debug, Clone, Copy)]
enum BoolOp {
    And,
    Xor,
    Or,
}
/// Generate MLIR operations for the `bool_and_impl`, `bool_xor_impl` and
/// `bool_or_impl` libfuncs; the operation is selected through `bin_op`.
///
/// Cairo's `bool` is an enum with unit-payload variants, so the operation can
/// be applied directly to the enum tags and the payload left undefined.
#[allow(clippy::too_many_arguments)]
fn build_bool_binary<'ctx, 'this>(
    context: &'ctx Context,
    registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, 'this>,
    metadata: &mut MetadataStorage,
    info: &SignatureOnlyConcreteLibfunc,
    bin_op: BoolOp,
) -> Result<()> {
    let enum_ty = registry.get_type(&info.param_signatures()[0].ty)?;

    // Tag width in bits: log2 of the variant count, rounded up.
    let tag_bits = enum_ty
        .variants()
        .to_native_assert_error("bool is a enum and has variants")?
        .len()
        .next_power_of_two()
        .trailing_zeros();
    let tag_ty = IntegerType::new(context, tag_bits).into();

    let lhs = entry.arg(0)?;
    let rhs = entry.arg(1)?;

    // Operate on the tags only; the payload carries no data.
    let lhs_tag = entry.extract_value(context, location, lhs, tag_ty, 0)?;
    let rhs_tag = entry.extract_value(context, location, rhs, tag_ty, 0)?;

    let new_tag_value = match bin_op {
        BoolOp::And => entry.append_op_result(arith::andi(lhs_tag, rhs_tag, location))?,
        BoolOp::Xor => entry.append_op_result(arith::xori(lhs_tag, rhs_tag, location))?,
        BoolOp::Or => entry.append_op_result(arith::ori(lhs_tag, rhs_tag, location))?,
    };

    // Rebuild the result enum: an undef value with only the tag field set.
    let res = entry.append_op_result(llvm::undef(
        enum_ty.build(
            context,
            helper,
            registry,
            metadata,
            &info.param_signatures()[0].ty,
        )?,
        location,
    ))?;
    let res = entry.insert_value(context, location, res, new_tag_value, 0)?;

    helper.br(entry, 0, &[res], location)
}
/// Generate MLIR operations for the `bool_not_impl` libfunc.
///
/// Negation is implemented as XOR-ing the enum tag with 1; the unit payload
/// is left undefined.
pub fn build_bool_not<'ctx, 'this>(
    context: &'ctx Context,
    registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, 'this>,
    metadata: &mut MetadataStorage,
    info: &SignatureOnlyConcreteLibfunc,
) -> Result<()> {
    let bool_ty_id = &info.param_signatures()[0].ty;
    let enum_ty = registry.get_type(bool_ty_id)?;

    // Tag width in bits: log2 of the variant count, rounded up.
    let tag_bits = enum_ty
        .variants()
        .to_native_assert_error("bool is a enum and has variants")?
        .len()
        .next_power_of_two()
        .trailing_zeros();
    let tag_ty = IntegerType::new(context, tag_bits).into();

    let input = entry.arg(0)?;
    let tag = entry.extract_value(context, location, input, tag_ty, 0)?;

    let one = entry.const_int_from_type(context, location, 1, tag_ty)?;
    let negated_tag = entry.append_op_result(arith::xori(tag, one, location))?;

    let output = entry.append_op_result(llvm::undef(
        enum_ty.build(context, helper, registry, metadata, bool_ty_id)?,
        location,
    ))?;
    let output = entry.insert_value(context, location, output, negated_tag, 0)?;

    helper.br(entry, 0, &[output], location)
}
/// Generate MLIR operations for the `bool_to_felt252` libfunc.
///
/// Zero-extends the `bool` enum tag (0 or 1) into a felt252 value.
pub fn build_bool_to_felt252<'ctx, 'this>(
    context: &'ctx Context,
    registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, 'this>,
    metadata: &mut MetadataStorage,
    info: &SignatureOnlyConcreteLibfunc,
) -> Result<()> {
    let enum_ty = registry.get_type(&info.param_signatures()[0].ty)?;
    let felt252_ty = registry.build_type(
        context,
        helper,
        metadata,
        &info.branch_signatures()[0].vars[0].ty,
    )?;

    // Tag width in bits: log2 of the variant count, rounded up.
    let tag_bits = enum_ty
        .variants()
        .to_native_assert_error("bool is a enum and has variants")?
        .len()
        .next_power_of_two()
        .trailing_zeros();
    let tag_ty = IntegerType::new(context, tag_bits).into();

    let value = entry.arg(0)?;
    let tag_value = entry.extract_value(context, location, value, tag_ty, 0)?;

    // The tag is already 0 or 1, so zero-extension yields the felt252.
    let result = entry.extui(tag_value, felt252_ty, location)?;

    helper.br(entry, 0, &[result], location)
}
#[cfg(test)]
mod test {
    use crate::{jit_enum, jit_struct, load_cairo, utils::testing::run_program, values::Value};

    // In these tests a Cairo `bool` is encoded as an enum over the unit
    // struct: `jit_enum!(0, jit_struct!())` is `false` and
    // `jit_enum!(1, jit_struct!())` is `true`.

    #[test]
    fn run_not() {
        let program = load_cairo!(
            use array::ArrayTrait;

            fn run_test(a: bool) -> bool {
                !a
            }
        );

        let result = run_program(&program, "run_test", &[jit_enum!(0, jit_struct!())]).return_value;
        assert_eq!(result, jit_enum!(1, jit_struct!()));

        let result = run_program(&program, "run_test", &[jit_enum!(1, jit_struct!())]).return_value;
        assert_eq!(result, jit_enum!(0, jit_struct!()));
    }

    // Exhaustive truth table for `&&`.
    #[test]
    fn run_and() {
        let program = load_cairo!(
            use array::ArrayTrait;

            fn run_test(a: bool, b: bool) -> bool {
                a && b
            }
        );

        let result = run_program(
            &program,
            "run_test",
            &[jit_enum!(1, jit_struct!()), jit_enum!(1, jit_struct!())],
        )
        .return_value;
        assert_eq!(result, jit_enum!(1, jit_struct!()));

        let result = run_program(
            &program,
            "run_test",
            &[jit_enum!(1, jit_struct!()), jit_enum!(0, jit_struct!())],
        )
        .return_value;
        assert_eq!(result, jit_enum!(0, jit_struct!()));

        let result = run_program(
            &program,
            "run_test",
            &[jit_enum!(0, jit_struct!()), jit_enum!(1, jit_struct!())],
        )
        .return_value;
        assert_eq!(result, jit_enum!(0, jit_struct!()));

        let result = run_program(
            &program,
            "run_test",
            &[jit_enum!(0, jit_struct!()), jit_enum!(0, jit_struct!())],
        )
        .return_value;
        assert_eq!(result, jit_enum!(0, jit_struct!()));
    }

    // Exhaustive truth table for `^`.
    #[test]
    fn run_xor() {
        let program = load_cairo!(
            use array::ArrayTrait;

            fn run_test(a: bool, b: bool) -> bool {
                a ^ b
            }
        );

        let result = run_program(
            &program,
            "run_test",
            &[jit_enum!(1, jit_struct!()), jit_enum!(1, jit_struct!())],
        )
        .return_value;
        assert_eq!(result, jit_enum!(0, jit_struct!()));

        let result = run_program(
            &program,
            "run_test",
            &[jit_enum!(1, jit_struct!()), jit_enum!(0, jit_struct!())],
        )
        .return_value;
        assert_eq!(result, jit_enum!(1, jit_struct!()));

        let result = run_program(
            &program,
            "run_test",
            &[jit_enum!(0, jit_struct!()), jit_enum!(1, jit_struct!())],
        )
        .return_value;
        assert_eq!(result, jit_enum!(1, jit_struct!()));

        let result = run_program(
            &program,
            "run_test",
            &[jit_enum!(0, jit_struct!()), jit_enum!(0, jit_struct!())],
        )
        .return_value;
        assert_eq!(result, jit_enum!(0, jit_struct!()));
    }

    // Exhaustive truth table for `||`.
    #[test]
    fn run_or() {
        let program = load_cairo!(
            use array::ArrayTrait;

            fn run_test(a: bool, b: bool) -> bool {
                a || b
            }
        );

        let result = run_program(
            &program,
            "run_test",
            &[jit_enum!(1, jit_struct!()), jit_enum!(1, jit_struct!())],
        )
        .return_value;
        assert_eq!(result, jit_enum!(1, jit_struct!()));

        let result = run_program(
            &program,
            "run_test",
            &[jit_enum!(1, jit_struct!()), jit_enum!(0, jit_struct!())],
        )
        .return_value;
        assert_eq!(result, jit_enum!(1, jit_struct!()));

        let result = run_program(
            &program,
            "run_test",
            &[jit_enum!(0, jit_struct!()), jit_enum!(1, jit_struct!())],
        )
        .return_value;
        assert_eq!(result, jit_enum!(1, jit_struct!()));

        let result = run_program(
            &program,
            "run_test",
            &[jit_enum!(0, jit_struct!()), jit_enum!(0, jit_struct!())],
        )
        .return_value;
        assert_eq!(result, jit_enum!(0, jit_struct!()));
    }

    // `true` maps to felt252 1, `false` to felt252 0.
    #[test]
    fn bool_to_felt252() {
        let program = load_cairo!(
            fn run_test(a: bool) -> felt252 {
                bool_to_felt252(a)
            }
        );

        let result = run_program(&program, "run_test", &[jit_enum!(1, jit_struct!())]).return_value;
        assert_eq!(result, Value::Felt252(1.into()));

        let result = run_program(&program, "run_test", &[jit_enum!(0, jit_struct!())]).return_value;
        assert_eq!(result, Value::Felt252(0.into()));
    }
}
| rust | Apache-2.0 | f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce | 2026-01-04T20:20:54.031924Z | false |
lambdaclass/cairo_native | https://github.com/lambdaclass/cairo_native/blob/f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce/src/libfuncs/nullable.rs | src/libfuncs/nullable.rs | //! # Nullable libfuncs
//!
//! Like a Box but it can be null.
use super::LibfuncHelper;
use crate::{error::Result, metadata::MetadataStorage};
use cairo_lang_sierra::{
extensions::{
core::{CoreLibfunc, CoreType},
lib_func::{SignatureAndTypeConcreteLibfunc, SignatureOnlyConcreteLibfunc},
nullable::NullableConcreteLibfunc,
},
program_registry::ProgramRegistry,
};
use melior::{
dialect::{cf, llvm::r#type::pointer, ods},
helpers::BuiltinBlockExt,
ir::{
attribute::IntegerAttribute, operation::OperationBuilder, r#type::IntegerType, Block,
BlockLike, Identifier, Location,
},
Context,
};
/// Select and call the correct libfunc builder function from the selector.
pub fn build<'ctx, 'this>(
    context: &'ctx Context,
    registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, 'this>,
    metadata: &mut MetadataStorage,
    selector: &NullableConcreteLibfunc,
) -> Result<()> {
    match selector {
        // Snapshot-forwarding and box-to-nullable conversion are identity
        // operations at runtime: the value is passed through unchanged.
        NullableConcreteLibfunc::ForwardSnapshot(info)
        | NullableConcreteLibfunc::NullableFromBox(info) => super::build_noop::<1, false>(
            context,
            registry,
            entry,
            location,
            helper,
            metadata,
            &info.signature.param_signatures,
        ),
        NullableConcreteLibfunc::Null(info) => {
            build_null(context, registry, entry, location, helper, metadata, info)
        }
        NullableConcreteLibfunc::MatchNullable(info) => {
            build_match_nullable(context, registry, entry, location, helper, metadata, info)
        }
    }
}
/// Generate MLIR operations for the `null` libfunc.
///
/// A nullable is represented as a raw pointer, so `null` is simply the LLVM
/// null pointer constant.
#[allow(clippy::too_many_arguments)]
fn build_null<'ctx, 'this>(
    context: &'ctx Context,
    _registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, 'this>,
    _metadata: &mut MetadataStorage,
    _info: &SignatureOnlyConcreteLibfunc,
) -> Result<()> {
    let null_ptr_op = ods::llvm::mlir_zero(context, pointer(context, 0), location);
    let null_ptr = entry.append_op_result(null_ptr_op.into())?;

    helper.br(entry, 0, &[null_ptr], location)
}
/// Generate MLIR operations for the `match_nullable` libfunc.
///
/// Compares the nullable (a raw pointer) against the null pointer and
/// branches: branch 0 (`Null`) carries no values, branch 1 (`NotNull`)
/// forwards the pointer itself.
#[allow(clippy::too_many_arguments)]
fn build_match_nullable<'ctx, 'this>(
    context: &'ctx Context,
    _registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, 'this>,
    _metadata: &mut MetadataStorage,
    _info: &SignatureAndTypeConcreteLibfunc,
) -> Result<()> {
    let arg = entry.arg(0)?;

    let nullptr = entry
        .append_op_result(ods::llvm::mlir_zero(context, pointer(context, 0), location).into())?;

    // `llvm.icmp` with predicate 0 ("eq"): true iff the argument is null.
    let is_null_ptr = entry.append_op_result(
        OperationBuilder::new("llvm.icmp", location)
            .add_operands(&[arg, nullptr])
            .add_attributes(&[(
                Identifier::new(context, "predicate"),
                IntegerAttribute::new(IntegerType::new(context, 64).into(), 0).into(),
            )])
            .add_results(&[IntegerType::new(context, 1).into()])
            .build()?,
    )?;

    let block_is_null = helper.append_block(Block::new(&[]));
    let block_is_not_null = helper.append_block(Block::new(&[]));

    entry.append_operation(cf::cond_br(
        context,
        is_null_ptr,
        block_is_null,
        block_is_not_null,
        &[],
        &[],
        location,
    ));

    // Branch 0: Null. Branch 1: NotNull with the (non-null) pointer payload.
    helper.br(block_is_null, 0, &[], location)?;
    helper.br(block_is_not_null, 1, &[arg], location)?;

    Ok(())
}
#[cfg(test)]
mod test {
    use crate::{
        jit_enum, jit_struct, load_cairo, utils::testing::run_program_assert_output, values::Value,
    };

    #[test]
    fn run_null() {
        let program = load_cairo!(
            use nullable::null;
            use nullable::match_nullable;
            use nullable::FromNullableResult;
            use nullable::nullable_from_box;
            use box::BoxTrait;

            fn run_test() {
                let _a: Nullable<u8> = null();
            }
        );

        // The program returns unit (an empty struct).
        run_program_assert_output(&program, "run_test", &[], jit_struct!());
    }

    #[test]
    fn run_null_jit() {
        let program = load_cairo!(
            use nullable::null;
            use nullable::match_nullable;
            use nullable::FromNullableResult;
            use nullable::nullable_from_box;
            use box::BoxTrait;

            fn run_test() -> Nullable<u8> {
                let a: Nullable<u8> = null();
                a
            }
        );

        // A null nullable surfaces as `Value::Null`.
        run_program_assert_output(&program, "run_test", &[], Value::Null);
    }

    #[test]
    fn run_not_null() {
        let program = load_cairo!(
            use nullable::null;
            use nullable::match_nullable;
            use nullable::FromNullableResult;
            use nullable::nullable_from_box;
            use box::BoxTrait;

            fn run_test(x: u8) -> u8 {
                let b: Box<u8> = BoxTrait::new(x);
                let c = if x == 0 {
                    null()
                } else {
                    nullable_from_box(b)
                };
                let d = match match_nullable(c) {
                    FromNullableResult::Null(_) => 99_u8,
                    FromNullableResult::NotNull(value) => value.unbox()
                };
                d
            }
        );

        // Non-zero input takes the NotNull path; zero takes the Null path.
        run_program_assert_output(&program, "run_test", &[4u8.into()], 4u8.into());
        run_program_assert_output(&program, "run_test", &[0u8.into()], 99u8.into());
    }

    // Regression test: dereferencing a snapshot of a nullable must preserve
    // the null/not-null state through the round trip.
    #[test]
    fn match_snapshot_nullable_clone_bug() {
        let program = load_cairo! {
            use core::{NullableTrait, match_nullable, null, nullable::FromNullableResult};

            fn run_test(x: Option<u8>) -> Option<u8> {
                let a = match x {
                    Option::Some(x) => @NullableTrait::new(x),
                    Option::None(_) => @null::<u8>(),
                };
                let b = *a;
                match match_nullable(b) {
                    FromNullableResult::Null(_) => Option::None(()),
                    FromNullableResult::NotNull(x) => Option::Some(x.unbox()),
                }
            }
        };

        run_program_assert_output(
            &program,
            "run_test",
            &[jit_enum!(0, 42u8.into())],
            jit_enum!(0, 42u8.into()),
        );
        run_program_assert_output(
            &program,
            "run_test",
            &[jit_enum!(
                1,
                Value::Struct {
                    fields: Vec::new(),
                    debug_name: None
                }
            )],
            jit_enum!(
                1,
                Value::Struct {
                    fields: Vec::new(),
                    debug_name: None
                }
            ),
        );
    }
}
| rust | Apache-2.0 | f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce | 2026-01-04T20:20:54.031924Z | false |
lambdaclass/cairo_native | https://github.com/lambdaclass/cairo_native/blob/f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce/src/libfuncs/debug.rs | src/libfuncs/debug.rs | //! # Debug libfuncs
// Printable: 9-13, 27, 32, 33-126
// is_ascii_graphic() -> 33-126
// is_ascii_whitespace():
// U+0009 HORIZONTAL TAB
// U+000A LINE FEED
// U+000C FORM FEED
// U+000D CARRIAGE RETURN.
// U+0020 SPACE
use super::LibfuncHelper;
use crate::{
error::{panic::ToNativeAssertError, Result},
metadata::{
drop_overrides::DropOverridesMeta, runtime_bindings::RuntimeBindingsMeta, MetadataStorage,
},
utils::ProgramRegistryExt,
};
use cairo_lang_sierra::{
extensions::{
core::{CoreLibfunc, CoreType},
debug::DebugConcreteLibfunc,
lib_func::SignatureOnlyConcreteLibfunc,
},
program_registry::ProgramRegistry,
};
use melior::{
dialect::{arith, cf, llvm},
helpers::{ArithBlockExt, BuiltinBlockExt, LlvmBlockExt},
ir::{r#type::IntegerType, Block, Location},
Context,
};
/// Select and call the correct libfunc builder function from the selector.
pub fn build<'ctx, 'this>(
    context: &'ctx Context,
    registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, 'this>,
    metadata: &mut MetadataStorage,
    selector: &DebugConcreteLibfunc,
) -> Result<()> {
    // `Print` is the only debug libfunc, so the pattern is irrefutable.
    let DebugConcreteLibfunc::Print(info) = selector;
    build_print(context, registry, entry, location, helper, metadata, info)
}
/// Generate MLIR operations for the `print` debug libfunc.
///
/// Extracts the felt252 array's data pointer and bounds, delegates the actual
/// printing to the runtime library, drops the consumed array value, and
/// asserts that the runtime call succeeded.
pub fn build_print<'ctx, 'this>(
    context: &'ctx Context,
    registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, 'this>,
    metadata: &mut MetadataStorage,
    info: &SignatureOnlyConcreteLibfunc,
) -> Result<()> {
    // File descriptor 1: stdout.
    let stdout_fd = entry.const_int(context, location, 1, 32)?;

    // Array fields used here: 0 = data pointer, 1 = start offset,
    // 2 = end offset (both u32, in elements).
    let values_ptr = entry.extract_value(
        context,
        location,
        entry.arg(0)?,
        llvm::r#type::pointer(context, 0),
        0,
    )?;
    let values_start = entry.extract_value(
        context,
        location,
        entry.arg(0)?,
        IntegerType::new(context, 32).into(),
        1,
    )?;
    let values_end = entry.extract_value(
        context,
        location,
        entry.arg(0)?,
        IntegerType::new(context, 32).into(),
        2,
    )?;

    let runtime_bindings = metadata
        .get_mut::<RuntimeBindingsMeta>()
        .to_native_assert_error("runtime library should be available")?;

    let values_len = entry.append_op_result(arith::subi(values_end, values_start, location))?;
    // Advance the data pointer by `start` felt252 (i252) slots so it points at
    // the first live element.
    let values_ptr = {
        let values_start =
            entry.extui(values_start, IntegerType::new(context, 64).into(), location)?;

        entry.append_op_result(llvm::get_element_ptr_dynamic(
            context,
            values_ptr,
            &[values_start],
            IntegerType::new(context, 252).into(),
            llvm::r#type::pointer(context, 0),
            location,
        ))?
    };

    let return_code = runtime_bindings.libfunc_debug_print(
        context, helper, entry, stdout_fd, values_ptr, values_len, location,
    )?;

    // The libfunc consumes the array; invoke its drop implementation.
    let input_ty = &info.signature.param_signatures[0].ty;
    registry.build_type(context, helper, metadata, input_ty)?;
    DropOverridesMeta::invoke_override(
        context,
        registry,
        helper,
        helper.init_block(),
        entry,
        location,
        metadata,
        input_ty,
        entry.arg(0)?,
    )?;

    // A non-zero return code means the runtime failed to print.
    let k0 = entry.const_int(context, location, 0, 32)?;
    let return_code_is_ok =
        entry.cmpi(context, arith::CmpiPredicate::Eq, return_code, k0, location)?;
    cf::assert(
        context,
        return_code_is_ok,
        "Print libfunc invocation failed.",
        location,
    );

    helper.br(entry, 0, &[], location)
}
| rust | Apache-2.0 | f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce | 2026-01-04T20:20:54.031924Z | false |
lambdaclass/cairo_native | https://github.com/lambdaclass/cairo_native/blob/f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce/src/libfuncs/starknet.rs | src/libfuncs/starknet.rs | //! # Starknet libfuncs
use super::LibfuncHelper;
use crate::{
error::{Error, Result},
ffi::get_struct_field_type_at,
metadata::{drop_overrides::DropOverridesMeta, MetadataStorage},
starknet::handler::StarknetSyscallHandlerCallbacks,
utils::{get_integer_layout, ProgramRegistryExt, PRIME},
};
use cairo_lang_sierra::{
extensions::{
consts::SignatureAndConstConcreteLibfunc,
core::{CoreLibfunc, CoreType},
lib_func::SignatureOnlyConcreteLibfunc,
starknet::{testing::TestingConcreteLibfunc, StarknetConcreteLibfunc},
ConcreteLibfunc,
},
program_registry::ProgramRegistry,
};
use melior::{
dialect::{
arith::{self, CmpiPredicate},
llvm::{self, r#type::pointer, LoadStoreOptions},
},
helpers::{ArithBlockExt, BuiltinBlockExt, GepIndex, LlvmBlockExt},
ir::{
attribute::DenseI64ArrayAttribute, operation::OperationBuilder, r#type::IntegerType,
Attribute, Block, BlockLike, Location, Type, ValueLike,
},
Context,
};
use num_bigint::Sign;
use std::alloc::Layout;
mod secp256;
mod testing;
/// Select and call the correct libfunc builder function from the selector.
pub fn build<'ctx, 'this>(
    context: &'ctx Context,
    registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, 'this>,
    metadata: &mut MetadataStorage,
    selector: &StarknetConcreteLibfunc,
) -> Result<()> {
    match selector {
        // These conversions are all no-ops at runtime: the wrapped value is
        // forwarded unchanged.
        StarknetConcreteLibfunc::ClassHashToFelt252(info)
        | StarknetConcreteLibfunc::ContractAddressToFelt252(info)
        | StarknetConcreteLibfunc::StorageAddressFromBase(info)
        | StarknetConcreteLibfunc::StorageAddressToFelt252(info)
        | StarknetConcreteLibfunc::Sha256StateHandleInit(info)
        | StarknetConcreteLibfunc::Sha256StateHandleDigest(info) => super::build_noop::<1, false>(
            context,
            registry,
            entry,
            location,
            helper,
            metadata,
            &info.signature.param_signatures,
        ),
        StarknetConcreteLibfunc::CallContract(info) => {
            build_call_contract(context, registry, entry, location, helper, metadata, info)
        }
        StarknetConcreteLibfunc::ClassHashConst(info) => {
            build_class_hash_const(context, registry, entry, location, helper, metadata, info)
        }
        StarknetConcreteLibfunc::ClassHashTryFromFelt252(info) => {
            build_class_hash_try_from_felt252(
                context, registry, entry, location, helper, metadata, info,
            )
        }
        StarknetConcreteLibfunc::ContractAddressConst(info) => {
            build_contract_address_const(context, registry, entry, location, helper, metadata, info)
        }
        StarknetConcreteLibfunc::ContractAddressTryFromFelt252(info) => {
            build_contract_address_try_from_felt252(
                context, registry, entry, location, helper, metadata, info,
            )
        }
        StarknetConcreteLibfunc::StorageRead(info) => {
            build_storage_read(context, registry, entry, location, helper, metadata, info)
        }
        StarknetConcreteLibfunc::StorageWrite(info) => {
            build_storage_write(context, registry, entry, location, helper, metadata, info)
        }
        StarknetConcreteLibfunc::StorageBaseAddressConst(info) => build_storage_base_address_const(
            context, registry, entry, location, helper, metadata, info,
        ),
        StarknetConcreteLibfunc::StorageBaseAddressFromFelt252(info) => {
            build_storage_base_address_from_felt252(
                context, registry, entry, location, helper, metadata, info,
            )
        }
        StarknetConcreteLibfunc::StorageAddressFromBaseAndOffset(info) => {
            build_storage_address_from_base_and_offset(
                context, registry, entry, location, helper, metadata, info,
            )
        }
        StarknetConcreteLibfunc::StorageAddressTryFromFelt252(info) => {
            build_storage_address_try_from_felt252(
                context, registry, entry, location, helper, metadata, info,
            )
        }
        StarknetConcreteLibfunc::EmitEvent(info) => {
            build_emit_event(context, registry, entry, location, helper, metadata, info)
        }
        StarknetConcreteLibfunc::GetBlockHash(info) => {
            build_get_block_hash(context, registry, entry, location, helper, metadata, info)
        }
        StarknetConcreteLibfunc::GetClassHashAt(info) => {
            build_get_class_hash_at(context, registry, entry, location, helper, metadata, info)
        }
        StarknetConcreteLibfunc::GetExecutionInfo(info) => {
            build_get_execution_info(context, registry, entry, location, helper, metadata, info)
        }
        StarknetConcreteLibfunc::GetExecutionInfoV2(info) => {
            build_get_execution_info_v2(context, registry, entry, location, helper, metadata, info)
        }
        StarknetConcreteLibfunc::GetExecutionInfoV3(info) => {
            build_get_execution_info_v3(context, registry, entry, location, helper, metadata, info)
        }
        StarknetConcreteLibfunc::Deploy(info) => {
            build_deploy(context, registry, entry, location, helper, metadata, info)
        }
        StarknetConcreteLibfunc::Keccak(info) => {
            build_keccak(context, registry, entry, location, helper, metadata, info)
        }
        StarknetConcreteLibfunc::LibraryCall(info) => {
            build_library_call(context, registry, entry, location, helper, metadata, info)
        }
        StarknetConcreteLibfunc::ReplaceClass(info) => {
            build_replace_class(context, registry, entry, location, helper, metadata, info)
        }
        StarknetConcreteLibfunc::SendMessageToL1(info) => {
            build_send_message_to_l1(context, registry, entry, location, helper, metadata, info)
        }
        // Secp256 curve operations have their own submodule dispatcher.
        StarknetConcreteLibfunc::Secp256(selector) => self::secp256::build(
            context, registry, entry, location, helper, metadata, selector,
        ),
        StarknetConcreteLibfunc::Sha256ProcessBlock(info) => build_sha256_process_block_syscall(
            context, registry, entry, location, helper, metadata, info,
        ),
        StarknetConcreteLibfunc::MetaTxV0(info) => {
            build_meta_tx_v0(context, registry, entry, location, helper, metadata, info)
        }
        #[cfg(feature = "with-cheatcode")]
        StarknetConcreteLibfunc::Testing(TestingConcreteLibfunc::Cheatcode(info)) => {
            self::testing::build(context, registry, entry, location, helper, metadata, info)
        }
        #[cfg(not(feature = "with-cheatcode"))]
        StarknetConcreteLibfunc::Testing(TestingConcreteLibfunc::Cheatcode(_)) => {
            crate::native_panic!(
                "feature 'with-cheatcode' is required to compile with cheatcode syscall"
            )
        }
    }
}
/// Generate MLIR operations for the `call_contract` syscall libfunc.
///
/// Marshals the arguments into stack allocations, invokes the `CALL_CONTRACT`
/// callback through the syscall handler's function-pointer table, then loads
/// the result enum and the updated gas counter and branches to the success
/// (branch 0) or error (branch 1) target.
pub fn build_call_contract<'ctx, 'this>(
    context: &'ctx Context,
    registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, 'this>,
    metadata: &mut MetadataStorage,
    info: &SignatureOnlyConcreteLibfunc,
) -> Result<()> {
    // Extract self pointer.
    let ptr = entry.load(
        context,
        location,
        entry.arg(1)?,
        llvm::r#type::pointer(context, 0),
    )?;

    // Allocate space for the return value.
    // The result is a two-variant enum (Ok payload / Err payload); compute the
    // layout of both variants up front.
    let (result_layout, (result_tag_ty, result_tag_layout), variant_tys) =
        crate::types::r#enum::get_type_for_variants(
            context,
            helper,
            registry,
            metadata,
            &[
                info.branch_signatures()[0].vars[2].ty.clone(),
                info.branch_signatures()[1].vars[2].ty.clone(),
            ],
        )?;

    let result_ptr = helper.init_block().alloca1(
        context,
        location,
        llvm::r#type::r#struct(
            context,
            &[
                result_tag_ty,
                llvm::r#type::array(
                    IntegerType::new(context, 8).into(),
                    (result_layout.size() - 1).try_into()?,
                ),
            ],
            false,
        ),
        result_layout.align(),
    )?;

    // Allocate space and write the current gas.
    let (gas_ty, gas_layout) = registry.build_type_with_layout(
        context,
        helper,
        metadata,
        &info.param_signatures()[0].ty,
    )?;
    let gas_builtin_ptr =
        helper
            .init_block()
            .alloca1(context, location, gas_ty, gas_layout.align())?;
    entry.append_operation(llvm::store(
        context,
        entry.arg(0)?,
        gas_builtin_ptr,
        location,
        LoadStoreOptions::default(),
    ));

    // Allocate `address` argument and write the value.
    let address_arg_ptr =
        helper
            .init_block()
            .alloca_int(context, location, 252, get_integer_layout(252).align())?;
    entry.append_operation(llvm::store(
        context,
        entry.arg(2)?,
        address_arg_ptr,
        location,
        LoadStoreOptions::default(),
    ));

    // Allocate `entry_point_selector` argument and write the value.
    let entry_point_selector_arg_ptr =
        helper
            .init_block()
            .alloca_int(context, location, 252, get_integer_layout(252).align())?;
    entry.append_operation(llvm::store(
        context,
        entry.arg(3)?,
        entry_point_selector_arg_ptr,
        location,
        LoadStoreOptions::default(),
    ));

    // Allocate `calldata` argument and write the value.
    let calldata_arg_ty = llvm::r#type::r#struct(
        context,
        &[llvm::r#type::r#struct(
            context,
            &[
                llvm::r#type::pointer(context, 0), // ptr to felt
                IntegerType::new(context, 32).into(),
                IntegerType::new(context, 32).into(),
                IntegerType::new(context, 32).into(),
            ],
            false,
        )],
        false,
    );
    let calldata_arg_ptr = helper.init_block().alloca1(
        context,
        location,
        calldata_arg_ty,
        get_integer_layout(64).align(),
    )?;
    entry.store(context, location, calldata_arg_ptr, entry.arg(4)?)?;

    // Extract function pointer.
    let fn_ptr = entry.gep(
        context,
        location,
        entry.arg(1)?,
        &[GepIndex::Const(
            StarknetSyscallHandlerCallbacks::<()>::CALL_CONTRACT.try_into()?,
        )],
        pointer(context, 0),
    )?;
    let fn_ptr = entry.load(context, location, fn_ptr, llvm::r#type::pointer(context, 0))?;

    // Indirect call into the syscall handler; results are written through the
    // out-pointers allocated above.
    entry.append_operation(
        OperationBuilder::new("llvm.call", location)
            .add_operands(&[
                fn_ptr,
                result_ptr,
                ptr,
                gas_builtin_ptr,
                address_arg_ptr,
                entry_point_selector_arg_ptr,
                calldata_arg_ptr,
            ])
            .build()?,
    );

    let result = entry.load(
        context,
        location,
        result_ptr,
        llvm::r#type::r#struct(
            context,
            &[
                result_tag_ty,
                llvm::r#type::array(
                    IntegerType::new(context, 8).into(),
                    (result_layout.size() - 1).try_into()?,
                ),
            ],
            false,
        ),
    )?;
    let result_tag = entry.extract_value(
        context,
        location,
        result,
        IntegerType::new(context, 1).into(),
        0,
    )?;

    // Load both variant payloads from past the (padded) tag; only the one
    // matching the tag is meaningful.
    let payload_ok = {
        let ptr = entry.gep(
            context,
            location,
            result_ptr,
            &[GepIndex::Const(
                result_tag_layout.extend(variant_tys[0].1)?.1.try_into()?,
            )],
            IntegerType::new(context, 8).into(),
        )?;
        entry.load(context, location, ptr, variant_tys[0].0)?
    };
    let payload_err = {
        let ptr = entry.gep(
            context,
            location,
            result_ptr,
            &[GepIndex::Const(
                result_tag_layout.extend(variant_tys[1].1)?.1.try_into()?,
            )],
            IntegerType::new(context, 8).into(),
        )?;
        entry.load(context, location, ptr, variant_tys[1].0)?
    };

    // The handler may have consumed gas; read the updated counter back.
    let remaining_gas = entry.load(
        context,
        location,
        gas_builtin_ptr,
        IntegerType::new(context, 64).into(),
    )?;

    // Tag 0 => success branch 0 with the Ok payload; tag 1 => branch 1 with
    // the error payload.
    helper.cond_br(
        context,
        entry,
        result_tag,
        [1, 0],
        [
            &[remaining_gas, entry.arg(1)?, payload_err],
            &[remaining_gas, entry.arg(1)?, payload_ok],
        ],
        location,
    )
}
/// Generate MLIR operations for the `class_hash_const` libfunc.
///
/// Materializes the constant as a 252-bit integer, mapping negative Sierra
/// constants into the canonical `[0, PRIME)` felt range.
pub fn build_class_hash_const<'ctx, 'this>(
    context: &'ctx Context,
    _registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, 'this>,
    _metadata: &mut MetadataStorage,
    info: &SignatureAndConstConcreteLibfunc,
) -> Result<()> {
    let canonical = if matches!(info.c.sign(), Sign::Minus) {
        &*PRIME - info.c.magnitude()
    } else {
        info.c.magnitude().clone()
    };
    let class_hash = entry.const_int(context, location, canonical, 252)?;

    helper.br(entry, 0, &[class_hash], location)
}
/// Generate MLIR operations for the `class_hash_try_from_felt252` libfunc.
///
/// Branch 0 (success) carries the value when it is below the valid upper
/// bound; branch 1 (failure) carries only the range check builtin.
pub fn build_class_hash_try_from_felt252<'ctx, 'this>(
    context: &'ctx Context,
    _registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, 'this>,
    _metadata: &mut MetadataStorage,
    _info: &SignatureOnlyConcreteLibfunc,
) -> Result<()> {
    // The sierra-to-casm compiler uses the range check builtin a total of 3 times.
    // https://github.com/starkware-libs/cairo/blob/v2.12.0-dev.1/crates/cairo-lang-sierra-to-casm/src/invocations/misc.rs?plain=1#L266
    let range_check =
        super::increment_builtin_counter_by(context, entry, location, entry.arg(0)?, 3)?;

    let value = entry.arg(1)?;

    // Exclusive upper bound for valid class hashes (this constant is 2^251).
    let limit = entry.append_op_result(arith::constant(
        context,
        Attribute::parse(
            context,
            "3618502788666131106986593281521497120414687020801267626233049500247285301248 : i252",
        )
        .ok_or(Error::ParseAttributeError)?,
        location,
    ))?;
    let is_in_range = entry.cmpi(context, CmpiPredicate::Ult, value, limit, location)?;

    helper.cond_br(
        context,
        entry,
        is_in_range,
        [0, 1],
        [&[range_check, value], &[range_check]],
        location,
    )
}
/// Generate MLIR operations for the `contract_address_const` libfunc.
///
/// Emits the constant as a 252-bit integer, wrapping negative Sierra
/// constants around the felt prime so the value lands in `[0, PRIME)`.
pub fn build_contract_address_const<'ctx, 'this>(
    context: &'ctx Context,
    _registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, 'this>,
    _metadata: &mut MetadataStorage,
    info: &SignatureAndConstConcreteLibfunc,
) -> Result<()> {
    let magnitude = info.c.magnitude();
    let address = match info.c.sign() {
        Sign::Minus => &*PRIME - magnitude,
        Sign::NoSign | Sign::Plus => magnitude.clone(),
    };
    let address = entry.const_int(context, location, address, 252)?;

    helper.br(entry, 0, &[address], location)
}
/// Generate MLIR operations for the `contract_address_try_from_felt252`
/// libfunc.
///
/// Branch 0 (success) carries the value when it is below the valid upper
/// bound; branch 1 (failure) carries only the range check builtin.
pub fn build_contract_address_try_from_felt252<'ctx, 'this>(
    context: &'ctx Context,
    _registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, 'this>,
    _metadata: &mut MetadataStorage,
    _info: &SignatureOnlyConcreteLibfunc,
) -> Result<()> {
    // The sierra-to-casm compiler uses the range check builtin a total of 3 times.
    // https://github.com/starkware-libs/cairo/blob/v2.12.0-dev.1/crates/cairo-lang-sierra-to-casm/src/invocations/misc.rs?plain=1#L266
    let range_check =
        super::increment_builtin_counter_by(context, entry, location, entry.arg(0)?, 3)?;

    let value = entry.arg(1)?;

    // Exclusive upper bound for valid contract addresses (this constant is
    // 2^251).
    let limit = entry.append_op_result(arith::constant(
        context,
        Attribute::parse(
            context,
            "3618502788666131106986593281521497120414687020801267626233049500247285301248 : i252",
        )
        .ok_or(Error::ParseAttributeError)?,
        location,
    ))?;
    let is_in_range = entry.cmpi(context, CmpiPredicate::Ult, value, limit, location)?;

    helper.cond_br(
        context,
        entry,
        is_in_range,
        [0, 1],
        [&[range_check, value], &[range_check]],
        location,
    )
}
pub fn build_storage_read<'ctx, 'this>(
context: &'ctx Context,
registry: &ProgramRegistry<CoreType, CoreLibfunc>,
entry: &'this Block<'ctx>,
location: Location<'ctx>,
helper: &LibfuncHelper<'ctx, 'this>,
metadata: &mut MetadataStorage,
info: &SignatureOnlyConcreteLibfunc,
) -> Result<()> {
// Extract self pointer.
let ptr = entry.load(
context,
location,
entry.arg(1)?,
llvm::r#type::pointer(context, 0),
)?;
// Allocate space for the return value.
let (result_layout, (result_tag_ty, result_tag_layout), variant_tys) =
crate::types::r#enum::get_type_for_variants(
context,
helper,
registry,
metadata,
&[
info.branch_signatures()[0].vars[2].ty.clone(),
info.branch_signatures()[1].vars[2].ty.clone(),
],
)?;
let result_ptr = helper.init_block().alloca1(
context,
location,
llvm::r#type::r#struct(
context,
&[
result_tag_ty,
llvm::r#type::array(
IntegerType::new(context, 8).into(),
(result_layout.size() - 1).try_into()?,
),
],
false,
),
result_layout.align(),
)?;
// Allocate space and write the current gas.
let (gas_ty, gas_layout) = registry.build_type_with_layout(
context,
helper,
metadata,
&info.param_signatures()[0].ty,
)?;
let gas_builtin_ptr =
helper
.init_block()
.alloca1(context, location, gas_ty, gas_layout.align())?;
entry.store(context, location, gas_builtin_ptr, entry.arg(0)?)?;
// Allocate `address` argument and write the value.
let address_arg_ptr =
helper
.init_block()
.alloca_int(context, location, 252, get_integer_layout(252).align())?;
entry.store(context, location, address_arg_ptr, entry.arg(3)?)?;
// Extract function pointer.
let fn_ptr = entry.gep(
context,
location,
entry.arg(1)?,
&[GepIndex::Const(
StarknetSyscallHandlerCallbacks::<()>::STORAGE_READ.try_into()?,
)],
pointer(context, 0),
)?;
let fn_ptr = entry.load(context, location, fn_ptr, llvm::r#type::pointer(context, 0))?;
entry.append_operation(
OperationBuilder::new("llvm.call", location)
.add_operands(&[
fn_ptr,
result_ptr,
ptr,
gas_builtin_ptr,
entry.arg(2)?,
address_arg_ptr,
])
.build()?,
);
let result = entry.load(
context,
location,
result_ptr,
llvm::r#type::r#struct(
context,
&[
result_tag_ty,
llvm::r#type::array(
IntegerType::new(context, 8).into(),
(result_layout.size() - 1).try_into()?,
),
],
false,
),
)?;
let result_tag = entry.extract_value(
context,
location,
result,
IntegerType::new(context, 1).into(),
0,
)?;
let payload_ok = {
let ptr = entry.gep(
context,
location,
result_ptr,
&[GepIndex::Const(
result_tag_layout.extend(variant_tys[0].1)?.1.try_into()?,
)],
IntegerType::new(context, 8).into(),
)?;
entry.load(context, location, ptr, variant_tys[0].0)?
};
let payload_err = {
let ptr = entry.gep(
context,
location,
result_ptr,
&[GepIndex::Const(
result_tag_layout.extend(variant_tys[1].1)?.1.try_into()?,
)],
IntegerType::new(context, 8).into(),
)?;
entry.load(context, location, ptr, variant_tys[1].0)?
};
let remaining_gas = entry.load(context, location, gas_builtin_ptr, gas_ty)?;
helper.cond_br(
context,
entry,
result_tag,
[1, 0],
[
&[remaining_gas, entry.arg(1)?, payload_err],
&[remaining_gas, entry.arg(1)?, payload_ok],
],
location,
)
}
pub fn build_storage_write<'ctx, 'this>(
context: &'ctx Context,
registry: &ProgramRegistry<CoreType, CoreLibfunc>,
entry: &'this Block<'ctx>,
location: Location<'ctx>,
helper: &LibfuncHelper<'ctx, 'this>,
metadata: &mut MetadataStorage,
info: &SignatureOnlyConcreteLibfunc,
) -> Result<()> {
// Extract self pointer.
let ptr = entry.load(
context,
location,
entry.arg(1)?,
llvm::r#type::pointer(context, 0),
)?;
// Allocate space for the return value.
let (result_layout, (result_tag_ty, result_tag_layout), variant_tys) =
crate::types::r#enum::get_type_for_variants(
context,
helper,
registry,
metadata,
&[
// The branch is deliberately duplicated because:
// - There is no `[0].vars[2]` (it returns `()`).
// - We need a variant to make the length be 2.
// - It requires a `ConcreteTypeId`, we can't pass an MLIR type.
info.branch_signatures()[1].vars[2].ty.clone(),
info.branch_signatures()[1].vars[2].ty.clone(),
],
)?;
let result_ptr = helper.init_block().alloca1(
context,
location,
llvm::r#type::r#struct(
context,
&[
result_tag_ty,
llvm::r#type::array(
IntegerType::new(context, 8).into(),
(result_layout.size() - 1).try_into()?,
),
],
false,
),
result_layout.align(),
)?;
// Allocate space and write the current gas.
let (gas_ty, gas_layout) = registry.build_type_with_layout(
context,
helper,
metadata,
&info.param_signatures()[0].ty,
)?;
let gas_builtin_ptr =
helper
.init_block()
.alloca1(context, location, gas_ty, gas_layout.align())?;
entry.store(context, location, gas_builtin_ptr, entry.arg(0)?)?;
// Allocate `address` argument and write the value.
let address_arg_ptr =
helper
.init_block()
.alloca_int(context, location, 252, get_integer_layout(252).align())?;
entry.store(context, location, address_arg_ptr, entry.arg(3)?)?;
// Allocate `value` argument and write the value.
let value_arg_ptr =
helper
.init_block()
.alloca_int(context, location, 252, get_integer_layout(252).align())?;
entry.store(context, location, value_arg_ptr, entry.arg(4)?)?;
let fn_ptr = entry.gep(
context,
location,
entry.arg(1)?,
&[GepIndex::Const(
StarknetSyscallHandlerCallbacks::<()>::STORAGE_WRITE.try_into()?,
)],
pointer(context, 0),
)?;
let fn_ptr = entry.load(context, location, fn_ptr, llvm::r#type::pointer(context, 0))?;
entry.append_operation(
OperationBuilder::new("llvm.call", location)
.add_operands(&[
fn_ptr,
result_ptr,
ptr,
gas_builtin_ptr,
entry.arg(2)?,
address_arg_ptr,
value_arg_ptr,
])
.build()?,
);
let result = entry.load(
context,
location,
result_ptr,
llvm::r#type::r#struct(
context,
&[
result_tag_ty,
llvm::r#type::array(
IntegerType::new(context, 8).into(),
(result_layout.size() - 1).try_into()?,
),
],
false,
),
)?;
let result_tag = entry.extract_value(
context,
location,
result,
IntegerType::new(context, 1).into(),
0,
)?;
let payload_ok = {
let ptr = entry.gep(
context,
location,
result_ptr,
&[GepIndex::Const(
result_tag_layout.extend(variant_tys[0].1)?.1.try_into()?,
)],
IntegerType::new(context, 8).into(),
)?;
entry.load(context, location, ptr, variant_tys[0].0)?
};
let payload_err = {
let ptr = entry.gep(
context,
location,
result_ptr,
&[GepIndex::Const(
result_tag_layout.extend(variant_tys[1].1)?.1.try_into()?,
)],
IntegerType::new(context, 8).into(),
)?;
entry.load(context, location, ptr, variant_tys[1].0)?
};
let remaining_gas = entry.load(context, location, gas_builtin_ptr, gas_ty)?;
helper.cond_br(
context,
entry,
result_tag,
[1, 0],
[
&[remaining_gas, entry.arg(1)?, payload_err],
&[remaining_gas, entry.arg(1)?, payload_ok],
],
location,
)
}
pub fn build_storage_base_address_const<'ctx, 'this>(
context: &'ctx Context,
_registry: &ProgramRegistry<CoreType, CoreLibfunc>,
entry: &'this Block<'ctx>,
location: Location<'ctx>,
helper: &LibfuncHelper<'ctx, 'this>,
_metadata: &mut MetadataStorage,
info: &SignatureAndConstConcreteLibfunc,
) -> Result<()> {
let value = entry.const_int(
context,
location,
match info.c.sign() {
Sign::Minus => &*PRIME - info.c.magnitude(),
_ => info.c.magnitude().clone(),
},
252,
)?;
helper.br(entry, 0, &[value], location)
}
pub fn build_storage_base_address_from_felt252<'ctx, 'this>(
context: &'ctx Context,
_registry: &ProgramRegistry<CoreType, CoreLibfunc>,
entry: &'this Block<'ctx>,
location: Location<'ctx>,
helper: &LibfuncHelper<'ctx, 'this>,
_metadata: &mut MetadataStorage,
_info: &SignatureOnlyConcreteLibfunc,
) -> Result<()> {
// The sierra-to-casm compiler uses the range check builtin a total of 3 times.
// https://github.com/starkware-libs/cairo/blob/v2.12.0-dev.1/crates/cairo-lang-sierra-to-casm/src/invocations/starknet/storage.rs?plain=1#L30
let range_check =
super::increment_builtin_counter_by(context, entry, location, entry.arg(0)?, 3)?;
let k_limit = entry.append_op_result(arith::constant(
context,
Attribute::parse(
context,
"3618502788666131106986593281521497120414687020801267626233049500247285300992 : i252",
)
.ok_or(Error::ParseAttributeError)?,
location,
))?;
let limited_value = entry.append_op_result(arith::subi(entry.arg(1)?, k_limit, location))?;
let is_within_limit = entry.cmpi(
context,
CmpiPredicate::Ult,
entry.arg(1)?,
k_limit,
location,
)?;
let value = entry.append_op_result(arith::select(
is_within_limit,
entry.arg(1)?,
limited_value,
location,
))?;
helper.br(entry, 0, &[range_check, value], location)
}
pub fn build_storage_address_from_base_and_offset<'ctx, 'this>(
_context: &'ctx Context,
_registry: &ProgramRegistry<CoreType, CoreLibfunc>,
entry: &'this Block<'ctx>,
location: Location<'ctx>,
helper: &LibfuncHelper<'ctx, 'this>,
_metadata: &mut MetadataStorage,
_info: &SignatureOnlyConcreteLibfunc,
) -> Result<()> {
let offset = entry.extui(entry.arg(1)?, entry.argument(0)?.r#type(), location)?;
let addr = entry.addi(entry.arg(0)?, offset, location)?;
helper.br(entry, 0, &[addr], location)
}
pub fn build_storage_address_try_from_felt252<'ctx, 'this>(
context: &'ctx Context,
_registry: &ProgramRegistry<CoreType, CoreLibfunc>,
entry: &'this Block<'ctx>,
location: Location<'ctx>,
helper: &LibfuncHelper<'ctx, 'this>,
_metadata: &mut MetadataStorage,
_info: &SignatureOnlyConcreteLibfunc,
) -> Result<()> {
// The sierra-to-casm compiler uses the range check builtin a total of 3 times.
// https://github.com/starkware-libs/cairo/blob/v2.12.0-dev.1/crates/cairo-lang-sierra-to-casm/src/invocations/misc.rs?plain=1#L266
let range_check =
super::increment_builtin_counter_by(context, entry, location, entry.arg(0)?, 3)?;
let value = entry.arg(1)?;
let limit = entry.append_op_result(arith::constant(
context,
Attribute::parse(
context,
"3618502788666131106986593281521497120414687020801267626233049500247285301248 : i252",
)
.ok_or(Error::ParseAttributeError)?,
location,
))?;
let is_in_range = entry.cmpi(context, CmpiPredicate::Ult, value, limit, location)?;
helper.cond_br(
context,
entry,
is_in_range,
[0, 1],
[&[range_check, value], &[range_check]],
location,
)
}
pub fn build_emit_event<'ctx, 'this>(
context: &'ctx Context,
registry: &ProgramRegistry<CoreType, CoreLibfunc>,
entry: &'this Block<'ctx>,
location: Location<'ctx>,
helper: &LibfuncHelper<'ctx, 'this>,
metadata: &mut MetadataStorage,
info: &SignatureOnlyConcreteLibfunc,
) -> Result<()> {
// Extract self pointer.
let ptr = entry.load(
context,
location,
entry.arg(1)?,
llvm::r#type::pointer(context, 0),
)?;
// Allocate space for the return value.
let (result_layout, (result_tag_ty, result_tag_layout), variant_tys) =
crate::types::r#enum::get_type_for_variants(
context,
helper,
registry,
metadata,
&[
// The branch is deliberately duplicated because:
// - There is no `[0].vars[2]` (it returns `()`).
// - We need a variant to make the length be 2.
// - It requires a `ConcreteTypeId`, we can't pass an MLIR type.
info.branch_signatures()[1].vars[2].ty.clone(),
info.branch_signatures()[1].vars[2].ty.clone(),
],
)?;
let result_ptr = helper.init_block().alloca1(
context,
location,
llvm::r#type::r#struct(
context,
&[
result_tag_ty,
llvm::r#type::array(
IntegerType::new(context, 8).into(),
(result_layout.size() - 1).try_into()?,
),
],
false,
),
result_layout.align(),
)?;
// Allocate space and write the current gas.
let (gas_ty, gas_layout) = registry.build_type_with_layout(
context,
helper,
metadata,
&info.param_signatures()[0].ty,
)?;
let gas_builtin_ptr =
helper
.init_block()
.alloca1(context, location, gas_ty, gas_layout.align())?;
entry.append_operation(llvm::store(
context,
entry.arg(0)?,
gas_builtin_ptr,
location,
LoadStoreOptions::default(),
));
// Allocate `keys` argument and write the value.
let keys_arg_ptr = helper.init_block().alloca1(
context,
location,
llvm::r#type::r#struct(
context,
&[llvm::r#type::r#struct(
context,
&[
llvm::r#type::pointer(context, 0), // ptr to felt
IntegerType::new(context, 32).into(),
IntegerType::new(context, 32).into(),
IntegerType::new(context, 32).into(),
],
false,
)],
false,
),
get_integer_layout(64).align(),
)?;
entry.store(context, location, keys_arg_ptr, entry.arg(2)?)?;
// Allocate `data` argument and write the value.
| rust | Apache-2.0 | f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce | 2026-01-04T20:20:54.031924Z | true |
lambdaclass/cairo_native | https://github.com/lambdaclass/cairo_native/blob/f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce/src/libfuncs/drop.rs | src/libfuncs/drop.rs | //! # `AP` tracking libfuncs
//!
//! Most types are trivial and don't need dropping (or rather, they will be dropped automatically
//! by MLIR). For those types, this libfunc is a no-op.
//!
//! However, types like an array need manual dropping.
use super::LibfuncHelper;
use crate::{
error::Result,
metadata::{drop_overrides::DropOverridesMeta, MetadataStorage},
utils::ProgramRegistryExt,
};
use cairo_lang_sierra::{
extensions::{
core::{CoreLibfunc, CoreType},
lib_func::SignatureOnlyConcreteLibfunc,
},
program_registry::ProgramRegistry,
};
use melior::{
helpers::BuiltinBlockExt,
ir::{Block, Location},
Context,
};
/// Generate MLIR operations for the `drop` libfunc.
pub fn build<'ctx, 'this>(
context: &'ctx Context,
registry: &ProgramRegistry<CoreType, CoreLibfunc>,
entry: &'this Block<'ctx>,
location: Location<'ctx>,
helper: &LibfuncHelper<'ctx, 'this>,
metadata: &mut MetadataStorage,
info: &SignatureOnlyConcreteLibfunc,
) -> Result<()> {
registry.build_type(
context,
helper,
metadata,
&info.signature.param_signatures[0].ty,
)?;
DropOverridesMeta::invoke_override(
context,
registry,
helper,
helper.init_block(),
entry,
location,
metadata,
&info.signature.param_signatures[0].ty,
entry.arg(0)?,
)?;
helper.br(entry, 0, &[], location)
}
| rust | Apache-2.0 | f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce | 2026-01-04T20:20:54.031924Z | false |
lambdaclass/cairo_native | https://github.com/lambdaclass/cairo_native/blob/f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce/src/libfuncs/gas.rs | src/libfuncs/gas.rs | //! # Gas management libfuncs
use super::LibfuncHelper;
use crate::{
error::{panic::ToNativeAssertError, Error, Result},
metadata::{gas::GasCost, runtime_bindings::RuntimeBindingsMeta, MetadataStorage},
native_panic,
utils::BuiltinCosts,
};
use cairo_lang_sierra::{
extensions::{
core::{CoreLibfunc, CoreType},
gas::GasConcreteLibfunc,
lib_func::SignatureOnlyConcreteLibfunc,
},
program_registry::ProgramRegistry,
};
use melior::{
dialect::{arith::CmpiPredicate, ods},
helpers::{ArithBlockExt, BuiltinBlockExt, GepIndex, LlvmBlockExt},
ir::{r#type::IntegerType, Block, Location, Value},
Context,
};
/// Select and call the correct libfunc builder function from the selector.
pub fn build<'ctx, 'this>(
context: &'ctx Context,
registry: &ProgramRegistry<CoreType, CoreLibfunc>,
entry: &'this Block<'ctx>,
location: Location<'ctx>,
helper: &LibfuncHelper<'ctx, 'this>,
metadata: &mut MetadataStorage,
selector: &GasConcreteLibfunc,
) -> Result<()> {
match selector {
GasConcreteLibfunc::WithdrawGas(info) => {
build_withdraw_gas(context, registry, entry, location, helper, metadata, info)
}
GasConcreteLibfunc::RedepositGas(info) => {
build_redeposit_gas(context, registry, entry, location, helper, metadata, info)
}
GasConcreteLibfunc::GetAvailableGas(info) => {
build_get_available_gas(context, registry, entry, location, helper, metadata, info)
}
GasConcreteLibfunc::BuiltinWithdrawGas(info) => {
build_builtin_withdraw_gas(context, registry, entry, location, helper, metadata, info)
}
GasConcreteLibfunc::GetBuiltinCosts(info) => {
build_get_builtin_costs(context, registry, entry, location, helper, metadata, info)
}
GasConcreteLibfunc::GetUnspentGas(_) => {
native_panic!("Implement GetUnspentGas libfunc");
}
}
}
/// Generate MLIR operations for the `get_available_gas` libfunc.
pub fn build_get_available_gas<'ctx, 'this>(
context: &'ctx Context,
_registry: &ProgramRegistry<CoreType, CoreLibfunc>,
entry: &'this Block<'ctx>,
location: Location<'ctx>,
helper: &LibfuncHelper<'ctx, 'this>,
_metadata: &mut MetadataStorage,
_info: &SignatureOnlyConcreteLibfunc,
) -> Result<()> {
let i128_ty = IntegerType::new(context, 128).into();
let gas_u128 = entry.extui(entry.arg(0)?, i128_ty, location)?;
// The gas is returned as u128 on the second arg.
helper.br(entry, 0, &[entry.arg(0)?, gas_u128], location)
}
/// Generate MLIR operations for the `withdraw_gas` libfunc.
pub fn build_withdraw_gas<'ctx, 'this>(
context: &'ctx Context,
_registry: &ProgramRegistry<CoreType, CoreLibfunc>,
entry: &'this Block<'ctx>,
location: Location<'ctx>,
helper: &LibfuncHelper<'ctx, 'this>,
metadata: &mut MetadataStorage,
_info: &SignatureOnlyConcreteLibfunc,
) -> Result<()> {
let range_check = super::increment_builtin_counter(context, entry, location, entry.arg(0)?)?;
let current_gas = entry.arg(1)?;
let builtin_ptr = {
let runtime = metadata
.get_mut::<RuntimeBindingsMeta>()
.ok_or(Error::MissingMetadata)?;
runtime
.get_costs_builtin(context, helper, entry, location)?
.result(0)?
.into()
};
let gas_cost = metadata
.get::<GasCost>()
.to_native_assert_error("withdraw_gas should always have a gas cost")?
.clone();
let total_gas_cost_value =
build_calculate_gas_cost(context, entry, location, gas_cost, builtin_ptr)?;
let is_enough = entry.cmpi(
context,
CmpiPredicate::Uge,
current_gas,
total_gas_cost_value,
location,
)?;
let resulting_gas = entry.append_op_result(
ods::llvm::intr_usub_sat(context, current_gas, total_gas_cost_value, location).into(),
)?;
helper.cond_br(
context,
entry,
is_enough,
[0, 1],
[&[range_check, resulting_gas], &[range_check, current_gas]],
location,
)
}
/// Returns the unused gas to the remaining
///
/// ```cairo
/// extern fn redeposit_gas() implicits(GasBuiltin) nopanic;
/// ```
pub fn build_redeposit_gas<'ctx, 'this>(
context: &'ctx Context,
_registry: &ProgramRegistry<CoreType, CoreLibfunc>,
entry: &'this Block<'ctx>,
location: Location<'ctx>,
helper: &LibfuncHelper<'ctx, 'this>,
metadata: &mut MetadataStorage,
_info: &SignatureOnlyConcreteLibfunc,
) -> Result<()> {
let current_gas = entry.arg(0)?;
let gas_cost = metadata
.get::<GasCost>()
.to_native_assert_error("redeposit_gas should always have a gas cost")?
.clone();
let builtin_ptr = {
let runtime = metadata
.get_mut::<RuntimeBindingsMeta>()
.ok_or(Error::MissingMetadata)?;
runtime
.get_costs_builtin(context, helper, entry, location)?
.result(0)?
.into()
};
let total_gas_cost_value =
build_calculate_gas_cost(context, entry, location, gas_cost, builtin_ptr)?;
let resulting_gas = entry.append_op_result(
ods::llvm::intr_uadd_sat(context, current_gas, total_gas_cost_value, location).into(),
)?;
helper.br(entry, 0, &[resulting_gas], location)
}
/// Generate MLIR operations for the `withdraw_gas_all` libfunc.
pub fn build_builtin_withdraw_gas<'ctx, 'this>(
context: &'ctx Context,
_registry: &ProgramRegistry<CoreType, CoreLibfunc>,
entry: &'this Block<'ctx>,
location: Location<'ctx>,
helper: &LibfuncHelper<'ctx, 'this>,
metadata: &mut MetadataStorage,
_info: &SignatureOnlyConcreteLibfunc,
) -> Result<()> {
let range_check = super::increment_builtin_counter(context, entry, location, entry.arg(0)?)?;
let current_gas = entry.arg(1)?;
let builtin_ptr = entry.arg(2)?;
let gas_cost = metadata
.get::<GasCost>()
.to_native_assert_error("builtin_withdraw_gas should always have a gas cost")?
.clone();
let total_gas_cost_value =
build_calculate_gas_cost(context, entry, location, gas_cost, builtin_ptr)?;
let is_enough = entry.cmpi(
context,
CmpiPredicate::Uge,
current_gas,
total_gas_cost_value,
location,
)?;
let resulting_gas = entry.append_op_result(
ods::llvm::intr_usub_sat(context, current_gas, total_gas_cost_value, location).into(),
)?;
helper.cond_br(
context,
entry,
is_enough,
[0, 1],
[&[range_check, resulting_gas], &[range_check, current_gas]],
location,
)
}
/// Generate MLIR operations for the `get_builtin_costs` libfunc.
pub fn build_get_builtin_costs<'ctx, 'this>(
context: &'ctx Context,
_registry: &ProgramRegistry<CoreType, CoreLibfunc>,
entry: &'this Block<'ctx>,
location: Location<'ctx>,
helper: &LibfuncHelper<'ctx, 'this>,
metadata: &mut MetadataStorage,
_info: &SignatureOnlyConcreteLibfunc,
) -> Result<()> {
// Get the ptr to the global, holding a ptr to the list.
let builtin_ptr = {
let runtime = metadata
.get_mut::<RuntimeBindingsMeta>()
.ok_or(Error::MissingMetadata)?;
runtime
.get_costs_builtin(context, helper, entry, location)?
.result(0)?
.into()
};
helper.br(entry, 0, &[builtin_ptr], location)
}
/// Calculate the current gas cost, given the constant `GasCost` configuration,
/// and the current `BuiltinCosts` pointer.
pub fn build_calculate_gas_cost<'c, 'b>(
context: &'c Context,
block: &'b Block<'c>,
location: Location<'c>,
gas_cost: GasCost,
builtin_ptr: Value<'c, 'b>,
) -> Result<Value<'c, 'b>> {
let u64_type: melior::ir::Type = IntegerType::new(context, 64).into();
let mut total_gas_cost = block.const_int_from_type(context, location, 0, u64_type)?;
// For each gas cost entry
for (token_count, token_type) in &gas_cost.0 {
if *token_count == 0 {
continue;
}
let token_count = block.const_int_from_type(context, location, *token_count, u64_type)?;
// Calculate the index of the token type in the builtin costs array
let token_costs_index = block.const_int_from_type(
context,
location,
BuiltinCosts::index_for_token_type(token_type)?,
u64_type,
)?;
// Index the builtin costs array
let token_cost_ptr = block.gep(
context,
location,
builtin_ptr,
&[GepIndex::Value(token_costs_index)],
u64_type,
)?;
let token_cost = block.load(context, location, token_cost_ptr, u64_type)?;
// Multiply the number of tokens by the cost of each token
let gas_cost = block.muli(token_count, token_cost, location)?;
total_gas_cost = block.addi(total_gas_cost, gas_cost, location)?;
}
Ok(total_gas_cost)
}
#[cfg(test)]
mod test {
use crate::{load_cairo, utils::testing::run_program};
#[test]
fn run_withdraw_gas() {
#[rustfmt::skip]
let program = load_cairo!(
use gas::withdraw_gas;
fn run_test() {
let mut i = 10;
loop {
if i == 0 {
break;
}
match withdraw_gas() {
Option::Some(()) => {
i = i - 1;
},
Option::None(()) => {
break;
}
};
i = i - 1;
}
}
);
let result = run_program(&program, "run_test", &[]);
assert_eq!(result.remaining_gas, Some(18446744073709545465));
}
}
| rust | Apache-2.0 | f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce | 2026-01-04T20:20:54.031924Z | false |
lambdaclass/cairo_native | https://github.com/lambdaclass/cairo_native/blob/f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce/src/libfuncs/mem.rs | src/libfuncs/mem.rs | //! # Memory-related libfuncs
//!
//! Natively compiled code doesn't need this kind of memory tracking because it has no notion of the
//! segments. Because of this, all of the memory-related libfuncs here are no-ops.
use super::LibfuncHelper;
use crate::{error::Result, metadata::MetadataStorage, utils::ProgramRegistryExt};
use cairo_lang_sierra::{
extensions::{
core::{CoreLibfunc, CoreType},
lib_func::{SignatureAndTypeConcreteLibfunc, SignatureOnlyConcreteLibfunc},
mem::MemConcreteLibfunc,
ConcreteLibfunc,
},
program_registry::ProgramRegistry,
};
use melior::{
dialect::llvm,
helpers::BuiltinBlockExt,
ir::{Block, Location},
Context,
};
/// Select and call the correct libfunc builder function from the selector.
pub fn build<'ctx, 'this>(
context: &'ctx Context,
registry: &ProgramRegistry<CoreType, CoreLibfunc>,
entry: &'this Block<'ctx>,
location: Location<'ctx>,
helper: &LibfuncHelper<'ctx, 'this>,
metadata: &mut MetadataStorage,
selector: &MemConcreteLibfunc,
) -> Result<()> {
match selector {
MemConcreteLibfunc::AllocLocal(info) => {
build_alloc_local(context, registry, entry, location, helper, metadata, info)
}
MemConcreteLibfunc::StoreLocal(info) => {
build_store_local(context, registry, entry, location, helper, metadata, info)
}
MemConcreteLibfunc::FinalizeLocals(info) => super::build_noop::<0, false>(
context,
registry,
entry,
location,
helper,
metadata,
&info.signature.param_signatures,
),
MemConcreteLibfunc::Rename(SignatureOnlyConcreteLibfunc { signature })
| MemConcreteLibfunc::StoreTemp(SignatureAndTypeConcreteLibfunc { signature, .. }) => {
super::build_noop::<1, false>(
context,
registry,
entry,
location,
helper,
metadata,
&signature.param_signatures,
)
}
}
}
/// Generate MLIR operations for the `alloc_local` libfunc.
pub fn build_alloc_local<'ctx, 'this>(
context: &'ctx Context,
registry: &ProgramRegistry<CoreType, CoreLibfunc>,
entry: &'this Block<'ctx>,
location: Location<'ctx>,
helper: &LibfuncHelper<'ctx, 'this>,
metadata: &mut MetadataStorage,
info: &SignatureAndTypeConcreteLibfunc,
) -> Result<()> {
let target_type = registry.build_type(
context,
helper,
metadata,
&info.branch_signatures()[0].vars[0].ty,
)?;
let value = entry.append_op_result(llvm::undef(target_type, location))?;
helper.br(entry, 0, &[value], location)
}
/// Generate MLIR operations for the `store_local` libfunc.
pub fn build_store_local<'ctx, 'this>(
_context: &'ctx Context,
_registry: &ProgramRegistry<CoreType, CoreLibfunc>,
entry: &'this Block<'ctx>,
location: Location<'ctx>,
helper: &LibfuncHelper<'ctx, 'this>,
_metadata: &mut MetadataStorage,
_info: &SignatureAndTypeConcreteLibfunc,
) -> Result<()> {
helper.br(entry, 0, &[entry.arg(1)?], location)
}
| rust | Apache-2.0 | f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce | 2026-01-04T20:20:54.031924Z | false |
lambdaclass/cairo_native | https://github.com/lambdaclass/cairo_native/blob/f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce/src/libfuncs/uint256.rs | src/libfuncs/uint256.rs | //! # `u256`-related libfuncs
use super::{increment_builtin_counter_conditionally_by, LibfuncHelper};
use crate::{error::Result, metadata::MetadataStorage, utils::ProgramRegistryExt};
use cairo_lang_sierra::{
extensions::{
core::{CoreLibfunc, CoreType},
int::unsigned256::Uint256Concrete,
lib_func::SignatureOnlyConcreteLibfunc,
ConcreteLibfunc,
},
program_registry::ProgramRegistry,
};
use melior::{
dialect::{
arith::{self, CmpiPredicate},
llvm, ods, scf,
},
helpers::BuiltinBlockExt,
ir::{
attribute::{DenseI64ArrayAttribute, IntegerAttribute},
operation::OperationBuilder,
r#type::IntegerType,
Block, BlockLike, Location, Region, Value,
},
Context,
};
/// Select and call the correct libfunc builder function from the selector.
pub fn build<'ctx, 'this>(
context: &'ctx Context,
registry: &ProgramRegistry<CoreType, CoreLibfunc>,
entry: &'this Block<'ctx>,
location: Location<'ctx>,
helper: &LibfuncHelper<'ctx, 'this>,
metadata: &mut MetadataStorage,
selector: &Uint256Concrete,
) -> Result<()> {
match selector {
Uint256Concrete::Divmod(info) => {
build_divmod(context, registry, entry, location, helper, metadata, info)
}
Uint256Concrete::IsZero(info) => {
build_is_zero(context, registry, entry, location, helper, metadata, info)
}
Uint256Concrete::SquareRoot(info) => {
build_square_root(context, registry, entry, location, helper, metadata, info)
}
Uint256Concrete::InvModN(info) => build_u256_guarantee_inv_mod_n(
context, registry, entry, location, helper, metadata, info,
),
}
}
/// Generate MLIR operations for the `u256_safe_divmod` libfunc.
pub fn build_divmod<'ctx, 'this>(
context: &'ctx Context,
registry: &ProgramRegistry<CoreType, CoreLibfunc>,
entry: &'this Block<'ctx>,
location: Location<'ctx>,
helper: &LibfuncHelper<'ctx, 'this>,
metadata: &mut MetadataStorage,
info: &SignatureOnlyConcreteLibfunc,
) -> Result<()> {
// The sierra-to-casm compiler uses the range check builtin a total of 6 times.
// https://github.com/starkware-libs/cairo/blob/v2.12.0-dev.1/crates/cairo-lang-sierra-to-casm/src/invocations/int/unsigned256.rs?plain=1#L47
let range_check =
super::increment_builtin_counter_by(context, entry, location, entry.arg(0)?, 6)?;
let i128_ty = IntegerType::new(context, 128).into();
let i256_ty = IntegerType::new(context, 256).into();
let guarantee_type =
registry.build_type(context, helper, metadata, &info.output_types()[0][3])?;
let lhs_struct: Value = entry.arg(1)?;
let rhs_struct: Value = entry.arg(2)?;
let lhs_lo = entry
.append_operation(llvm::extract_value(
context,
lhs_struct,
DenseI64ArrayAttribute::new(context, &[0]),
i128_ty,
location,
))
.result(0)?
.into();
let lhs_hi = entry
.append_operation(llvm::extract_value(
context,
lhs_struct,
DenseI64ArrayAttribute::new(context, &[1]),
i128_ty,
location,
))
.result(0)?
.into();
let rhs_lo = entry
.append_operation(llvm::extract_value(
context,
rhs_struct,
DenseI64ArrayAttribute::new(context, &[0]),
i128_ty,
location,
))
.result(0)?
.into();
let rhs_hi = entry
.append_operation(llvm::extract_value(
context,
rhs_struct,
DenseI64ArrayAttribute::new(context, &[1]),
i128_ty,
location,
))
.result(0)?
.into();
let lhs_lo = entry
.append_operation(arith::extui(lhs_lo, i256_ty, location))
.result(0)?
.into();
let lhs_hi = entry
.append_operation(arith::extui(lhs_hi, i256_ty, location))
.result(0)?
.into();
let rhs_lo = entry
.append_operation(arith::extui(rhs_lo, i256_ty, location))
.result(0)?
.into();
let rhs_hi = entry
.append_operation(arith::extui(rhs_hi, i256_ty, location))
.result(0)?
.into();
let k128 = entry
.append_operation(arith::constant(
context,
IntegerAttribute::new(i256_ty, 128).into(),
location,
))
.result(0)?
.into();
let lhs_hi = entry
.append_operation(arith::shli(lhs_hi, k128, location))
.result(0)?
.into();
let rhs_hi = entry
.append_operation(arith::shli(rhs_hi, k128, location))
.result(0)?
.into();
let lhs = entry
.append_operation(arith::ori(lhs_hi, lhs_lo, location))
.result(0)?
.into();
let rhs = entry
.append_operation(arith::ori(rhs_hi, rhs_lo, location))
.result(0)?
.into();
let result_div = entry
.append_operation(arith::divui(lhs, rhs, location))
.result(0)?
.into();
let result_rem = entry
.append_operation(arith::remui(lhs, rhs, location))
.result(0)?
.into();
let result_div_lo = entry
.append_operation(arith::trunci(result_div, i128_ty, location))
.result(0)?
.into();
let result_div_hi = entry
.append_operation(arith::shrui(result_div, k128, location))
.result(0)?
.into();
let result_div_hi = entry
.append_operation(arith::trunci(result_div_hi, i128_ty, location))
.result(0)?
.into();
let result_rem_lo = entry
.append_operation(arith::trunci(result_rem, i128_ty, location))
.result(0)?
.into();
let result_rem_hi = entry
.append_operation(arith::shrui(result_rem, k128, location))
.result(0)?
.into();
let result_rem_hi = entry
.append_operation(arith::trunci(result_rem_hi, i128_ty, location))
.result(0)?
.into();
let result_div = entry
.append_operation(llvm::undef(
llvm::r#type::r#struct(context, &[i128_ty, i128_ty], false),
location,
))
.result(0)?
.into();
let result_div = entry
.append_operation(llvm::insert_value(
context,
result_div,
DenseI64ArrayAttribute::new(context, &[0]),
result_div_lo,
location,
))
.result(0)?
.into();
let result_div = entry
.append_operation(llvm::insert_value(
context,
result_div,
DenseI64ArrayAttribute::new(context, &[1]),
result_div_hi,
location,
))
.result(0)?
.into();
let result_rem = entry
.append_operation(llvm::undef(
llvm::r#type::r#struct(context, &[i128_ty, i128_ty], false),
location,
))
.result(0)?
.into();
let result_rem = entry
.append_operation(llvm::insert_value(
context,
result_rem,
DenseI64ArrayAttribute::new(context, &[0]),
result_rem_lo,
location,
))
.result(0)?
.into();
let result_rem = entry
.append_operation(llvm::insert_value(
context,
result_rem,
DenseI64ArrayAttribute::new(context, &[1]),
result_rem_hi,
location,
))
.result(0)?
.into();
let op = entry.append_operation(llvm::undef(guarantee_type, location));
let guarantee = op.result(0)?.into();
helper.br(
entry,
0,
&[range_check, result_div, result_rem, guarantee],
location,
)
}
/// Generate MLIR operations for the `u256_is_zero` libfunc.
///
/// Takes the `u256` argument to branch 0 (empty payload) when it is zero, or
/// to branch 1 (forwarding the original value) when it is not.
pub fn build_is_zero<'ctx, 'this>(
    context: &'ctx Context,
    _registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, 'this>,
    _metadata: &mut MetadataStorage,
    _info: &SignatureOnlyConcreteLibfunc,
) -> Result<()> {
    let i128_ty = IntegerType::new(context, 128).into();

    // A u256 is represented as an LLVM struct of two i128 limbs: [lo, hi].
    let value = entry.arg(0)?;

    let limb_lo = entry
        .append_operation(llvm::extract_value(
            context,
            value,
            DenseI64ArrayAttribute::new(context, &[0]),
            i128_ty,
            location,
        ))
        .result(0)?
        .into();
    let limb_hi = entry
        .append_operation(llvm::extract_value(
            context,
            value,
            DenseI64ArrayAttribute::new(context, &[1]),
            i128_ty,
            location,
        ))
        .result(0)?
        .into();

    let zero = entry
        .append_operation(arith::constant(
            context,
            IntegerAttribute::new(i128_ty, 0).into(),
            location,
        ))
        .result(0)?
        .into();

    // The whole 256-bit value is zero iff both limbs are zero.
    let lo_is_zero = entry
        .append_operation(arith::cmpi(
            context,
            CmpiPredicate::Eq,
            limb_lo,
            zero,
            location,
        ))
        .result(0)?
        .into();
    let hi_is_zero = entry
        .append_operation(arith::cmpi(
            context,
            CmpiPredicate::Eq,
            limb_hi,
            zero,
            location,
        ))
        .result(0)?
        .into();
    let is_zero = entry
        .append_operation(arith::andi(lo_is_zero, hi_is_zero, location))
        .result(0)?
        .into();

    helper.cond_br(
        context,
        entry,
        is_zero,
        [0, 1],
        [&[], &[value]],
        location,
    )
}
/// Generate MLIR operations for the `u256_sqrt` libfunc.
///
/// Computes the integer square root of a 256-bit unsigned value using a
/// bit-pair (shift-and-test) loop and returns it as a `u128`.
pub fn build_square_root<'ctx, 'this>(
    context: &'ctx Context,
    _registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, 'this>,
    _metadata: &mut MetadataStorage,
    _info: &SignatureOnlyConcreteLibfunc,
) -> Result<()> {
    // The sierra-to-casm compiler uses the range check builtin a total of 7 times.
    // https://github.com/starkware-libs/cairo/blob/v2.12.0-dev.1/crates/cairo-lang-sierra-to-casm/src/invocations/int/unsigned256.rs?plain=1#L189
    let range_check =
        super::increment_builtin_counter_by(context, entry, location, entry.arg(0)?, 7)?;

    let i128_ty = IntegerType::new(context, 128).into();
    let i256_ty = IntegerType::new(context, 256).into();

    // The u256 argument is an LLVM struct of two i128 limbs: [lo, hi].
    let arg_struct = entry.arg(1)?;
    let arg_lo = entry
        .append_operation(llvm::extract_value(
            context,
            arg_struct,
            DenseI64ArrayAttribute::new(context, &[0]),
            i128_ty,
            location,
        ))
        .result(0)?
        .into();
    let arg_hi = entry
        .append_operation(llvm::extract_value(
            context,
            arg_struct,
            DenseI64ArrayAttribute::new(context, &[1]),
            i128_ty,
            location,
        ))
        .result(0)?
        .into();

    // Zero-extend both limbs to i256 and recombine: value = (hi << 128) | lo.
    let arg_lo = entry
        .append_operation(arith::extui(arg_lo, i256_ty, location))
        .result(0)?
        .into();
    let arg_hi = entry
        .append_operation(arith::extui(arg_hi, i256_ty, location))
        .result(0)?
        .into();
    let k128 = entry
        .append_operation(arith::constant(
            context,
            IntegerAttribute::new(i256_ty, 128).into(),
            location,
        ))
        .result(0)?
        .into();
    let arg_hi = entry
        .append_operation(arith::shli(arg_hi, k128, location))
        .result(0)?
        .into();
    let arg_value = entry
        .append_operation(arith::ori(arg_hi, arg_lo, location))
        .result(0)?
        .into();

    let k1 = entry
        .append_operation(arith::constant(
            context,
            IntegerAttribute::new(i256_ty, 1).into(),
            location,
        ))
        .result(0)?
        .into();
    // 0 and 1 are their own square roots; skip the loop entirely for them.
    let is_small = entry
        .append_operation(arith::cmpi(
            context,
            CmpiPredicate::Ule,
            arg_value,
            k1,
            location,
        ))
        .result(0)?
        .into();

    let result = entry
        .append_operation(scf::r#if(
            is_small,
            &[i256_ty],
            {
                let region = Region::new();
                let block = region.append_block(Block::new(&[]));
                block.append_operation(scf::r#yield(&[arg_value], location));
                region
            },
            {
                let region = Region::new();
                let block = region.append_block(Block::new(&[]));

                // Bit width of i256. Note: appended to `entry` (not `block`)
                // so the constant dominates the nested loop regions below.
                let k256 = entry
                    .append_operation(arith::constant(
                        context,
                        IntegerAttribute::new(i256_ty, 256).into(),
                        location,
                    ))
                    .result(0)?
                    .into();
                let leading_zeros = block
                    .append_operation(
                        ods::llvm::intr_ctlz(
                            context,
                            i256_ty,
                            arg_value,
                            IntegerAttribute::new(IntegerType::new(context, 1).into(), 1),
                            location,
                        )
                        .into(),
                    )
                    .result(0)?
                    .into();

                // Number of significant bits: 256 - clz(value).
                let num_bits = block
                    .append_operation(arith::subi(k256, leading_zeros, location))
                    .result(0)?
                    .into();

                // Round the bit count up to an even number; the loop consumes
                // the input two bits per iteration.
                let shift_amount = block
                    .append_operation(arith::addi(num_bits, k1, location))
                    .result(0)?
                    .into();

                let parity_mask = block
                    .append_operation(arith::constant(
                        context,
                        IntegerAttribute::new(i256_ty, -2).into(),
                        location,
                    ))
                    .result(0)?
                    .into();
                let shift_amount = block
                    .append_operation(arith::andi(shift_amount, parity_mask, location))
                    .result(0)?
                    .into();

                let k0 = block
                    .append_operation(arith::constant(
                        context,
                        IntegerAttribute::new(i256_ty, 0).into(),
                        location,
                    ))
                    .result(0)?
                    .into();

                // Loop state: (partial result, remaining shift amount).
                let result = block
                    .append_operation(scf::r#while(
                        &[k0, shift_amount],
                        &[i256_ty, i256_ty],
                        {
                            let region = Region::new();
                            let block = region.append_block(Block::new(&[
                                (i256_ty, location),
                                (i256_ty, location),
                            ]));

                            let result = block
                                .append_operation(arith::shli(block.arg(0)?, k1, location))
                                .result(0)?
                                .into();
                            let large_candidate = block
                                .append_operation(arith::xori(result, k1, location))
                                .result(0)?
                                .into();
                            let large_candidate_squared = block
                                .append_operation(arith::muli(
                                    large_candidate,
                                    large_candidate,
                                    location,
                                ))
                                .result(0)?
                                .into();

                            let threshold = block
                                .append_operation(arith::shrui(arg_value, block.arg(1)?, location))
                                .result(0)?
                                .into();
                            // Shifting an i256 right by its full bit width
                            // (256) is poison in MLIR; substitute 0 there.
                            let threshold_is_poison = block
                                .append_operation(arith::cmpi(
                                    context,
                                    CmpiPredicate::Eq,
                                    block.arg(1)?,
                                    k256,
                                    location,
                                ))
                                .result(0)?
                                .into();
                            let threshold = block
                                .append_operation(
                                    OperationBuilder::new("arith.select", location)
                                        .add_operands(&[threshold_is_poison, k0, threshold])
                                        .add_results(&[i256_ty])
                                        .build()?,
                                )
                                .result(0)?
                                .into();
                            // Keep the larger candidate if its square still
                            // fits under the shifted input.
                            let is_in_range = block
                                .append_operation(arith::cmpi(
                                    context,
                                    CmpiPredicate::Ule,
                                    large_candidate_squared,
                                    threshold,
                                    location,
                                ))
                                .result(0)?
                                .into();
                            let result = block
                                .append_operation(
                                    OperationBuilder::new("arith.select", location)
                                        .add_operands(&[is_in_range, large_candidate, result])
                                        .add_results(&[i256_ty])
                                        .build()?,
                                )
                                .result(0)?
                                .into();

                            let k2 = block
                                .append_operation(arith::constant(
                                    context,
                                    IntegerAttribute::new(i256_ty, 2).into(),
                                    location,
                                ))
                                .result(0)?
                                .into();
                            let shift_amount = block
                                .append_operation(arith::subi(block.arg(1)?, k2, location))
                                .result(0)?
                                .into();

                            let should_continue = block
                                .append_operation(arith::cmpi(
                                    context,
                                    CmpiPredicate::Sge,
                                    shift_amount,
                                    k0,
                                    location,
                                ))
                                .result(0)?
                                .into();
                            block.append_operation(scf::condition(
                                should_continue,
                                &[result, shift_amount],
                                location,
                            ));

                            region
                        },
                        {
                            let region = Region::new();
                            let block = region.append_block(Block::new(&[
                                (i256_ty, location),
                                (i256_ty, location),
                            ]));
                            block.append_operation(scf::r#yield(
                                &[block.arg(0)?, block.arg(1)?],
                                location,
                            ));
                            region
                        },
                        location,
                    ))
                    .result(0)?
                    .into();

                block.append_operation(scf::r#yield(&[result], location));

                region
            },
            location,
        ))
        .result(0)?
        .into();

    // sqrt(u256::MAX) < 2^128, so the result always fits in an i128.
    let result = entry
        .append_operation(arith::trunci(result, i128_ty, location))
        .result(0)?
        .into();

    helper.br(entry, 0, &[range_check, result], location)
}
/// Generate MLIR operations for the `u256_guarantee_inv_mod_n` libfunc.
///
/// Runs an extended-Euclidean-style loop over `lhs` and `rhs` to compute a
/// candidate inverse of `lhs` modulo `rhs`, then branches on whether the
/// inverse exists and is non-zero.
pub fn build_u256_guarantee_inv_mod_n<'ctx, 'this>(
    context: &'ctx Context,
    registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, 'this>,
    metadata: &mut MetadataStorage,
    info: &SignatureOnlyConcreteLibfunc,
) -> Result<()> {
    let i128_ty = IntegerType::new(context, 128).into();
    let i256_ty = IntegerType::new(context, 256).into();

    // Both u256 arguments are LLVM structs of two i128 limbs: [lo, hi].
    let lhs_struct = entry.arg(1)?;
    let lhs_lo = entry
        .append_operation(llvm::extract_value(
            context,
            lhs_struct,
            DenseI64ArrayAttribute::new(context, &[0]),
            i128_ty,
            location,
        ))
        .result(0)?
        .into();
    let lhs_hi = entry
        .append_operation(llvm::extract_value(
            context,
            lhs_struct,
            DenseI64ArrayAttribute::new(context, &[1]),
            i128_ty,
            location,
        ))
        .result(0)?
        .into();
    let rhs_struct = entry.arg(2)?;
    let rhs_lo = entry
        .append_operation(llvm::extract_value(
            context,
            rhs_struct,
            DenseI64ArrayAttribute::new(context, &[0]),
            i128_ty,
            location,
        ))
        .result(0)?
        .into();
    let rhs_hi = entry
        .append_operation(llvm::extract_value(
            context,
            rhs_struct,
            DenseI64ArrayAttribute::new(context, &[1]),
            i128_ty,
            location,
        ))
        .result(0)?
        .into();

    // Zero-extend the limbs to i256 and recombine: value = (hi << 128) | lo.
    let lhs_lo = entry
        .append_operation(arith::extui(lhs_lo, i256_ty, location))
        .result(0)?
        .into();
    let lhs_hi = entry
        .append_operation(arith::extui(lhs_hi, i256_ty, location))
        .result(0)?
        .into();
    let rhs_lo = entry
        .append_operation(arith::extui(rhs_lo, i256_ty, location))
        .result(0)?
        .into();
    let rhs_hi = entry
        .append_operation(arith::extui(rhs_hi, i256_ty, location))
        .result(0)?
        .into();

    let k128 = entry
        .append_operation(arith::constant(
            context,
            IntegerAttribute::new(i256_ty, 128).into(),
            location,
        ))
        .result(0)?
        .into();
    let lhs_hi = entry
        .append_operation(arith::shli(lhs_hi, k128, location))
        .result(0)?
        .into();
    let rhs_hi = entry
        .append_operation(arith::shli(rhs_hi, k128, location))
        .result(0)?
        .into();

    let lhs = entry
        .append_operation(arith::ori(lhs_hi, lhs_lo, location))
        .result(0)?
        .into();
    let rhs = entry
        .append_operation(arith::ori(rhs_hi, rhs_lo, location))
        .result(0)?
        .into();

    let k0 = entry
        .append_operation(arith::constant(
            context,
            IntegerAttribute::new(i256_ty, 0).into(),
            location,
        ))
        .result(0)?
        .into();
    let k1 = entry
        .append_operation(arith::constant(
            context,
            IntegerAttribute::new(i256_ty, 1).into(),
            location,
        ))
        .result(0)?
        .into();

    // Extended-Euclid loop, starting from (lhs, rhs, 1, 0). Each iteration
    // computes q = arg1 / arg0 and folds it into the remainder and the
    // Bézout coefficient; it stops when the new remainder `c` hits zero.
    let result = entry.append_operation(scf::r#while(
        &[lhs, rhs, k1, k0],
        &[i256_ty, i256_ty, i256_ty, i256_ty],
        {
            let region = Region::new();
            let block = region.append_block(Block::new(&[
                (i256_ty, location),
                (i256_ty, location),
                (i256_ty, location),
                (i256_ty, location),
            ]));

            let q = block
                .append_operation(arith::divui(block.arg(1)?, block.arg(0)?, location))
                .result(0)?
                .into();
            let q_c = block
                .append_operation(arith::muli(q, block.arg(0)?, location))
                .result(0)?
                .into();
            let c = block
                .append_operation(arith::subi(block.arg(1)?, q_c, location))
                .result(0)?
                .into();
            let q_uc = block
                .append_operation(arith::muli(q, block.arg(2)?, location))
                .result(0)?
                .into();
            let u_c = block
                .append_operation(arith::subi(block.arg(3)?, q_uc, location))
                .result(0)?
                .into();

            let should_continue = block
                .append_operation(arith::cmpi(context, CmpiPredicate::Ne, c, k0, location))
                .result(0)?
                .into();
            block.append_operation(scf::condition(
                should_continue,
                &[c, block.arg(0)?, u_c, block.arg(2)?],
                location,
            ));

            region
        },
        {
            let region = Region::new();
            let block = region.append_block(Block::new(&[
                (i256_ty, location),
                (i256_ty, location),
                (i256_ty, location),
                (i256_ty, location),
            ]));
            block.append_operation(scf::r#yield(
                &[block.arg(0)?, block.arg(1)?, block.arg(2)?, block.arg(3)?],
                location,
            ));
            region
        },
        location,
    ));

    let inv = result.result(3)?.into();
    // Normalize the coefficient into [0, rhs): add `rhs` when it came out
    // negative (interpreted as a signed i256), otherwise reduce it mod `rhs`.
    let inv = entry
        .append_operation(scf::r#if(
            entry
                .append_operation(arith::cmpi(context, CmpiPredicate::Slt, inv, k0, location))
                .result(0)?
                .into(),
            &[i256_ty],
            {
                let region = Region::new();
                let block = region.append_block(Block::new(&[]));
                block.append_operation(scf::r#yield(
                    &[entry
                        .append_operation(arith::addi(inv, rhs, location))
                        .result(0)?
                        .into()],
                    location,
                ));
                region
            },
            {
                let region = Region::new();
                let block = region.append_block(Block::new(&[]));
                block.append_operation(scf::r#yield(
                    &[entry
                        .append_operation(arith::remui(inv, rhs, location))
                        .result(0)?
                        .into()],
                    location,
                ));
                region
            },
            location,
        ))
        .result(0)?
        .into();

    // Split the i256 inverse back into two i128 limbs.
    let inv_lo = entry
        .append_operation(arith::trunci(inv, i128_ty, location))
        .result(0)?
        .into();
    let inv_hi = entry
        .append_operation(arith::shrui(inv, k128, location))
        .result(0)?
        .into();
    let inv_hi = entry
        .append_operation(arith::trunci(inv_hi, i128_ty, location))
        .result(0)?
        .into();

    let return_ty = registry.build_type(context, helper, metadata, &info.output_types()[0][1])?;
    let result_inv = entry
        .append_operation(llvm::undef(return_ty, location))
        .result(0)?
        .into();
    let result_inv = entry
        .append_operation(llvm::insert_value(
            context,
            result_inv,
            DenseI64ArrayAttribute::new(context, &[0]),
            inv_lo,
            location,
        ))
        .result(0)?
        .into();
    let result_inv = entry
        .append_operation(llvm::insert_value(
            context,
            result_inv,
            DenseI64ArrayAttribute::new(context, &[1]),
            inv_hi,
            location,
        ))
        .result(0)?
        .into();

    // `lhs` is considered invertible when the loop's second result equals 1.
    let lhs_is_invertible = entry
        .append_operation(arith::cmpi(
            context,
            CmpiPredicate::Eq,
            result.result(1)?.into(),
            k1,
            location,
        ))
        .result(0)?
        .into();
    let inv_not_zero = entry
        .append_operation(arith::cmpi(context, CmpiPredicate::Ne, inv, k0, location))
        .result(0)?
        .into();
    let inverse_exists_and_is_not_zero = entry
        .append_operation(arith::andi(lhs_is_invertible, inv_not_zero, location))
        .result(0)?
        .into();

    let guarantee_type =
        registry.build_type(context, helper, metadata, &info.output_types()[0][2])?;
    let op = entry.append_operation(llvm::undef(guarantee_type, location));
    let guarantee = op.result(0)?.into();

    // The sierra-to-casm compiler uses the range check builtin a total of 9 times if the inverse is
    // not equal to 0 and lhs is invertible. Otherwise it will be used 7 times.
    // https://github.com/starkware-libs/cairo/blob/v2.12.0-dev.1/crates/cairo-lang-sierra-to-casm/src/invocations/int/unsigned256.rs#L21
    let range_check = increment_builtin_counter_conditionally_by(
        context,
        entry,
        location,
        entry.arg(0)?,
        9,
        7,
        inverse_exists_and_is_not_zero,
    )?;

    helper.cond_br(
        context,
        entry,
        inverse_exists_and_is_not_zero,
        [0, 1],
        [
            &[
                range_check,
                result_inv,
                guarantee,
                guarantee,
                guarantee,
                guarantee,
                guarantee,
                guarantee,
                guarantee,
                guarantee,
            ],
            &[range_check, guarantee, guarantee],
        ],
        location,
    )
}
#[cfg(test)]
mod test {
use crate::{
jit_enum, jit_panic, jit_struct, load_cairo, utils::testing::run_program_assert_output,
values::Value,
};
use cairo_lang_sierra::program::Program;
use lazy_static::lazy_static;
use num_bigint::BigUint;
use num_traits::One;
use starknet_types_core::felt::Felt;
use std::ops::Shl;
lazy_static! {
static ref U256_IS_ZERO: (String, Program) = load_cairo! {
use zeroable::IsZeroResult;
extern fn u256_is_zero(a: u256) -> IsZeroResult<u256> implicits() nopanic;
fn run_test(value: u256) -> bool {
match u256_is_zero(value) {
IsZeroResult::Zero(_) => true,
IsZeroResult::NonZero(_) => false,
}
}
};
static ref U256_SAFE_DIVMOD: (String, Program) = load_cairo! {
fn run_test(lhs: u256, rhs: u256) -> (u256, u256) {
let q = lhs / rhs;
let r = lhs % rhs;
(q, r)
}
};
static ref U256_SQRT: (String, Program) = load_cairo! {
use core::num::traits::Sqrt;
fn run_test(value: u256) -> u128 {
value.sqrt()
}
};
static ref U256_INV_MOD_N: (String, Program) = load_cairo! {
use core::math::u256_inv_mod;
fn run_test(a: u256, n: NonZero<u256>) -> Option<NonZero<u256>> {
u256_inv_mod(a, n)
}
};
}
fn u256(value: BigUint) -> Value {
assert!(value.bits() <= 256);
jit_struct!(
Value::Uint128((&value & &u128::MAX.into()).try_into().unwrap()),
Value::Uint128(((&value >> 128u32) & &u128::MAX.into()).try_into().unwrap()),
)
}
#[test]
fn u256_is_zero() {
run_program_assert_output(
&U256_IS_ZERO,
"run_test",
&[u256(0u32.into())],
jit_enum!(1, jit_struct!()),
);
run_program_assert_output(
&U256_IS_ZERO,
"run_test",
&[u256(1u32.into())],
jit_enum!(0, jit_struct!()),
);
run_program_assert_output(
&U256_IS_ZERO,
"run_test",
| rust | Apache-2.0 | f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce | 2026-01-04T20:20:54.031924Z | true |
lambdaclass/cairo_native | https://github.com/lambdaclass/cairo_native/blob/f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce/src/libfuncs/cast.rs | src/libfuncs/cast.rs | //! # Casting libfuncs
use super::LibfuncHelper;
use crate::{
error::Result,
libfuncs::increment_builtin_counter,
metadata::MetadataStorage,
native_assert, native_panic,
types::TypeBuilder,
utils::{RangeExt, HALF_PRIME, PRIME},
};
use cairo_lang_sierra::{
extensions::{
casts::{CastConcreteLibfunc, DowncastConcreteLibfunc},
core::{CoreLibfunc, CoreType},
lib_func::SignatureOnlyConcreteLibfunc,
utils::Range,
},
program_registry::ProgramRegistry,
};
use melior::{
dialect::arith::{self, CmpiPredicate},
helpers::{ArithBlockExt, BuiltinBlockExt},
ir::{r#type::IntegerType, Block, Location, Value, ValueLike},
Context,
};
use num_bigint::{BigInt, Sign};
use num_traits::One;
/// Select and call the correct libfunc builder function from the selector.
///
/// Dispatches `CastConcreteLibfunc::Downcast` to [`build_downcast`] and
/// `CastConcreteLibfunc::Upcast` to [`build_upcast`], forwarding all builder
/// state unchanged.
pub fn build<'ctx, 'this>(
    context: &'ctx Context,
    registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, 'this>,
    metadata: &mut MetadataStorage,
    selector: &CastConcreteLibfunc,
) -> Result<()> {
    match selector {
        CastConcreteLibfunc::Downcast(info) => {
            build_downcast(context, registry, entry, location, helper, metadata, info)
        }
        CastConcreteLibfunc::Upcast(info) => {
            build_upcast(context, registry, entry, location, helper, metadata, info)
        }
    }
}
/// Generate MLIR operations for the `downcast` libfunc which converts from a
/// source type `T` to a target type `U`, where `U` might not fully include `T`.
/// This means that the operation can fail.
///
/// Branch 0 receives `(range_check, value)` on success; branch 1 receives
/// only `range_check` on failure.
///
/// ## Signature
/// ```cairo
/// pub extern const fn downcast<FromType, ToType>(
///     x: FromType,
/// ) -> Option<ToType> implicits(RangeCheck) nopanic;
/// ```
pub fn build_downcast<'ctx, 'this>(
    context: &'ctx Context,
    registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, 'this>,
    _metadata: &mut MetadataStorage,
    info: &DowncastConcreteLibfunc,
) -> Result<()> {
    let range_check = entry.arg(0)?;
    let src_value: Value = entry.arg(1)?;

    let src_ty = registry.get_type(&info.signature.param_signatures[1].ty)?;
    let dst_ty = registry.get_type(&info.signature.branch_signatures[0].vars[1].ty)?;

    let dst_range = dst_ty.integer_range(registry)?;
    // For a felt252 source cast into a signed target, reinterpret the source
    // range as signed: either (-P, 1) when the target is entirely
    // non-positive, or the symmetric [-P/2, P/2] range otherwise.
    let src_range = if src_ty.is_felt252(registry)? && dst_range.lower.sign() == Sign::Minus {
        if dst_range.upper.sign() != Sign::Plus {
            Range {
                lower: BigInt::from_biguint(Sign::Minus, PRIME.clone()) + 1,
                upper: BigInt::one(),
            }
        } else {
            Range {
                lower: BigInt::from_biguint(Sign::Minus, HALF_PRIME.clone()),
                upper: BigInt::from_biguint(Sign::Plus, HALF_PRIME.clone()) + BigInt::one(),
            }
        }
    } else {
        src_ty.integer_range(registry)?
    };

    // When the source type is the same as the target type, we just return the
    // value as it cannot fail. However, for backwards compatibility, we need to
    // increment the range check as if we were checking the upper bound. See:
    // - https://github.com/starkware-libs/cairo/tree/v2.12.3/crates/cairo-lang-sierra/src/extensions/modules/casts.rs#L67.
    // - https://github.com/starkware-libs/cairo/tree/v2.12.3/crates/cairo-lang-sierra-to-casm/src/invocations/casts.rs#L56.
    if info.signature.param_signatures[1].ty == info.signature.branch_signatures[0].vars[1].ty {
        let range_check = if src_range.lower == 0.into() {
            increment_builtin_counter(context, entry, location, range_check)?
        } else {
            range_check
        };

        // Always-true condition: the success branch is taken unconditionally.
        let k1 = entry.const_int(context, location, 1, 1)?;
        return helper.cond_br(
            context,
            entry,
            k1,
            [0, 1],
            [&[range_check, src_value], &[range_check]],
            location,
        );
    }

    // Bounded ints are stored as the offset from their lower bound, so their
    // representation width comes from the offset range. Note that for felt252
    // sources this deliberately uses the type's raw range (not the adjusted
    // `src_range` above) for the representation width.
    let src_width = if src_ty.is_bounded_int(registry)? {
        src_range.offset_bit_width()
    } else {
        src_ty.integer_range(registry)?.zero_based_bit_width()
    };
    let dst_width = if dst_ty.is_bounded_int(registry)? {
        dst_range.offset_bit_width()
    } else {
        dst_range.zero_based_bit_width()
    };

    // Width wide enough to hold both ranges during the comparison arithmetic.
    let compute_width = src_range
        .zero_based_bit_width()
        .max(dst_range.zero_based_bit_width());

    let is_signed = src_range.lower.sign() == Sign::Minus;

    // If the target type is wider than the source type, extend the value representation width.
    let src_value = if compute_width > src_width {
        // Sign-extend only genuine signed integers; bounded ints (offset
        // encoding) and felts (field encoding) are extended as unsigned.
        if is_signed && !src_ty.is_bounded_int(registry)? && !src_ty.is_felt252(registry)? {
            entry.extsi(
                src_value,
                IntegerType::new(context, compute_width).into(),
                location,
            )?
        } else {
            entry.extui(
                src_value,
                IntegerType::new(context, compute_width).into(),
                location,
            )?
        }
    } else {
        src_value
    };

    // Correct the value representation accordingly.
    // 1. if it is a felt, then we need to convert the value from [0,P) to
    // [-P/2, P/2].
    // 2. if it is a bounded_int, we need to offset the value to get the
    // actual value.
    let src_value = if is_signed && src_ty.is_felt252(registry)? {
        if src_range.upper.is_one() {
            // Entirely non-positive target: map (0, P) down by subtracting P.
            let adj_offset =
                entry.const_int_from_type(context, location, PRIME.clone(), src_value.r#type())?;
            entry.append_op_result(arith::subi(src_value, adj_offset, location))?
        } else {
            // Values above P/2 represent negatives: subtract P from those.
            let adj_offset = entry.const_int_from_type(
                context,
                location,
                HALF_PRIME.clone(),
                src_value.r#type(),
            )?;
            let is_negative =
                entry.cmpi(context, CmpiPredicate::Ugt, src_value, adj_offset, location)?;

            let k_prime =
                entry.const_int_from_type(context, location, PRIME.clone(), src_value.r#type())?;
            let adj_value = entry.append_op_result(arith::subi(src_value, k_prime, location))?;

            entry.append_op_result(arith::select(is_negative, adj_value, src_value, location))?
        }
    } else if src_ty.is_bounded_int(registry)? && src_range.lower != BigInt::ZERO {
        // Undo the bounded-int offset encoding to recover the actual value.
        let dst_offset = entry.const_int_from_type(
            context,
            location,
            src_range.lower.clone(),
            src_value.r#type(),
        )?;
        entry.addi(src_value, dst_offset, location)?
    } else {
        src_value
    };

    // Check if the source type is included in the target type. If it is not
    // then check if the value is in bounds. If the value is also not in
    // bounds then return an error.
    if dst_range.lower <= src_range.lower && dst_range.upper >= src_range.upper {
        // Target fully contains the source: the cast cannot fail.
        // Re-apply the bounded-int offset encoding for the target if needed.
        let dst_value = if dst_ty.is_bounded_int(registry)? && dst_range.lower != BigInt::ZERO {
            let dst_offset = entry.const_int_from_type(
                context,
                location,
                dst_range.lower,
                src_value.r#type(),
            )?;
            entry.append_op_result(arith::subi(src_value, dst_offset, location))?
        } else {
            src_value
        };

        let dst_value = if dst_width < compute_width {
            entry.trunci(
                dst_value,
                IntegerType::new(context, dst_width).into(),
                location,
            )?
        } else {
            dst_value
        };

        // Constant true: the success branch is always taken here.
        let is_in_bounds = entry.const_int(context, location, 1, 1)?;
        helper.cond_br(
            context,
            entry,
            is_in_bounds,
            [0, 1],
            [&[range_check, dst_value], &[range_check]],
            location,
        )?;
    } else {
        // Check if the value is in bounds with respect to the lower bound.
        let lower_check = if dst_range.lower > src_range.lower {
            let dst_lower = entry.const_int_from_type(
                context,
                location,
                dst_range.lower.clone(),
                src_value.r#type(),
            )?;
            Some(entry.cmpi(
                context,
                if !is_signed {
                    CmpiPredicate::Uge
                } else {
                    CmpiPredicate::Sge
                },
                src_value,
                dst_lower,
                location,
            )?)
        } else {
            None
        };
        // Check if the value is in bounds with respect to the upper bound.
        let upper_check = if dst_range.upper < src_range.upper {
            let dst_upper = entry.const_int_from_type(
                context,
                location,
                dst_range.upper.clone(),
                src_value.r#type(),
            )?;
            Some(entry.cmpi(
                context,
                if !is_signed {
                    CmpiPredicate::Ult
                } else {
                    CmpiPredicate::Slt
                },
                src_value,
                dst_upper,
                location,
            )?)
        } else {
            None
        };

        let is_in_bounds = match (lower_check, upper_check) {
            (Some(lower_check), Some(upper_check)) => {
                entry.append_op_result(arith::andi(lower_check, upper_check, location))?
            }
            (Some(lower_check), None) => lower_check,
            (None, Some(upper_check)) => upper_check,
            // its always in bounds since dst is larger than src (i.e no bounds checks needed)
            (None, None) => {
                native_panic!("matched an unreachable: no bounds checks are being performed")
            }
        };

        // Incrementing the range check depends on whether the source range can hold a felt252 or not
        let range_check = if info.from_range.is_full_felt252_range() {
            let rc_size = BigInt::from(1) << 128;
            // If the range can contain a felt252, how the range check is increased depends on whether the value is in bounds or not:
            // * If it is in bounds, we check whether the destination range size is less than range check size. If it is, increment
            //   the range check builtin by 2. Otherwise, increment it by 1.
            //   https://github.com/starkware-libs/cairo/blob/v2.12.0-dev.1/crates/cairo-lang-sierra-to-casm/src/invocations/range_reduction.rs#L87
            // * If it is not in bounds, increment the range check builtin by 3.
            //   https://github.com/starkware-libs/cairo/blob/v2.12.0-dev.1/crates/cairo-lang-sierra-to-casm/src/invocations/range_reduction.rs#L79
            super::increment_builtin_counter_conditionally_by(
                context,
                entry,
                location,
                range_check,
                if dst_range.size() < rc_size { 2 } else { 1 },
                3,
                is_in_bounds,
            )?
        } else {
            match (lower_check, upper_check) {
                (Some(_), None) | (None, Some(_)) => {
                    // If either the lower or the upper bound was checked, increment the range check builtin by 1.
                    // * In case the lower bound was checked: https://github.com/starkware-libs/cairo/blob/v2.12.0-dev.1/crates/cairo-lang-sierra-to-casm/src/invocations/casts.rs#L135
                    // * In case the upper bound was checked: https://github.com/starkware-libs/cairo/blob/v2.12.0-dev.1/crates/cairo-lang-sierra-to-casm/src/invocations/casts.rs#L111
                    super::increment_builtin_counter_by(context, entry, location, range_check, 1)?
                }
                (Some(lower_check), Some(upper_check)) => {
                    let is_in_range =
                        entry.append_op_result(arith::andi(lower_check, upper_check, location))?;
                    // If the result is in range, increment the range check builtin by 2. Otherwise, increment it by 1.
                    // https://github.com/starkware-libs/cairo/blob/v2.12.0-dev.1/crates/cairo-lang-sierra-to-casm/src/invocations/casts.rs#L160
                    super::increment_builtin_counter_conditionally_by(
                        context,
                        entry,
                        location,
                        range_check,
                        2,
                        1,
                        is_in_range,
                    )?
                }
                (None, None) => range_check,
            }
        };

        // Re-apply the bounded-int offset encoding for the target if needed.
        let dst_value = if dst_ty.is_bounded_int(registry)? && dst_range.lower != BigInt::ZERO {
            let dst_offset = entry.const_int_from_type(
                context,
                location,
                dst_range.lower,
                src_value.r#type(),
            )?;
            entry.append_op_result(arith::subi(src_value, dst_offset, location))?
        } else {
            src_value
        };

        let dst_value = if dst_width < compute_width {
            entry.trunci(
                dst_value,
                IntegerType::new(context, dst_width).into(),
                location,
            )?
        } else {
            dst_value
        };

        helper.cond_br(
            context,
            entry,
            is_in_bounds,
            [0, 1],
            [&[range_check, dst_value], &[range_check]],
            location,
        )?;
    }

    Ok(())
}
/// Builds the `upcast` libfunc, which converts from a source type `T` to a
/// target type `U`, where `U` fully includes `T`. This means that the operation
/// cannot fail.
///
/// ## Signature
///
/// ```cairo
/// extern const fn upcast<FromType, ToType>(x: FromType) -> ToType nopanic;
/// ```
pub fn build_upcast<'ctx, 'this>(
    context: &'ctx Context,
    registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, 'this>,
    _metadata: &mut MetadataStorage,
    info: &SignatureOnlyConcreteLibfunc,
) -> Result<()> {
    let src_value = entry.arg(0)?;

    // Identical source and target types: forward the value unchanged.
    if info.signature.param_signatures[0].ty == info.signature.branch_signatures[0].vars[0].ty {
        return helper.br(entry, 0, &[src_value], location);
    }

    let src_ty = registry.get_type(&info.signature.param_signatures[0].ty)?;
    let dst_ty = registry.get_type(&info.signature.branch_signatures[0].vars[0].ty)?;

    let src_range = src_ty.integer_range(registry)?;
    let dst_range = dst_ty.integer_range(registry)?;

    // An upcast is infallible, so the target type should always contain the source type.
    {
        let dst_contains_src =
            dst_range.lower <= src_range.lower && dst_range.upper >= src_range.upper;

        // If the target type is a felt, then both [0; P) and [-P/2, P/2] ranges are valid.
        let dst_contains_src = if dst_ty.is_felt252(registry)? {
            let signed_dst_range = Range {
                lower: BigInt::from_biguint(Sign::Minus, HALF_PRIME.clone()),
                upper: BigInt::from_biguint(Sign::Plus, HALF_PRIME.clone()) + BigInt::one(),
            };
            let signed_dst_contains_src = signed_dst_range.lower <= src_range.lower
                && signed_dst_range.upper >= src_range.upper;

            dst_contains_src || signed_dst_contains_src
        } else {
            dst_contains_src
        };

        native_assert!(
            dst_contains_src,
            "cannot upcast `{:?}` into `{:?}`: target range doesn't contain source range",
            info.signature.param_signatures[0].ty,
            info.signature.branch_signatures[0].vars[0].ty
        );
    }

    // Bounded ints are stored as the offset from their lower bound, so their
    // representation width comes from the offset range.
    let src_width = if src_ty.is_bounded_int(registry)? {
        src_range.offset_bit_width()
    } else {
        src_range.zero_based_bit_width()
    };
    let dst_width = if dst_ty.is_bounded_int(registry)? {
        dst_range.offset_bit_width()
    } else {
        dst_range.zero_based_bit_width()
    };

    // Extend value to target bit width.
    let dst_value = if dst_width > src_width {
        if src_ty.is_bounded_int(registry)? {
            // A bounded int is always represented as a positive integer,
            // because we store the offset to the lower bound.
            entry.extui(
                src_value,
                IntegerType::new(context, dst_width).into(),
                location,
            )?
        } else if src_range.lower.sign() == Sign::Minus {
            entry.extsi(
                src_value,
                IntegerType::new(context, dst_width).into(),
                location,
            )?
        } else {
            entry.extui(
                src_value,
                IntegerType::new(context, dst_width).into(),
                location,
            )?
        }
    } else {
        src_value
    };

    // When converting to/from bounded ints, we need to take into account the offset.
    let offset = if src_ty.is_bounded_int(registry)? && dst_ty.is_bounded_int(registry)? {
        &src_range.lower - &dst_range.lower
    } else if src_ty.is_bounded_int(registry)? {
        src_range.lower.clone()
    } else if dst_ty.is_bounded_int(registry)? {
        -dst_range.lower
    } else {
        BigInt::ZERO
    };

    // The offset is zero for plain integer-to-integer casts, so this addi is
    // a no-op in that case.
    let offset_value = entry.const_int_from_type(context, location, offset, dst_value.r#type())?;
    let dst_value = entry.addi(dst_value, offset_value, location)?;

    // When converting to a felt from a signed integer, we need to convert
    // the canonical signed integer representation, to the signed felt
    // representation: `negative = P - absolute`.
    let dst_value = if dst_ty.is_felt252(registry)? && src_range.lower.sign() == Sign::Minus {
        let k0 = entry.const_int(context, location, 0, 252)?;
        let is_negative = entry.cmpi(context, CmpiPredicate::Slt, dst_value, k0, location)?;

        let k_prime = entry.const_int(context, location, PRIME.clone(), 252)?;
        let adj_value = entry.addi(dst_value, k_prime, location)?;

        entry.append_op_result(arith::select(is_negative, adj_value, dst_value, location))?
    } else {
        dst_value
    };

    helper.br(entry, 0, &[dst_value], location)
}
#[cfg(test)]
mod test {
use crate::{
jit_enum, jit_struct, load_cairo, utils::testing::run_program_assert_output, Value,
};
use cairo_lang_sierra::program::Program;
use lazy_static::lazy_static;
use starknet_types_core::felt::Felt;
use test_case::test_case;
lazy_static! {
static ref DOWNCAST: (String, Program) = load_cairo! {
extern const fn downcast<FromType, ToType>( x: FromType, ) -> Option<ToType> implicits(RangeCheck) nopanic;
fn run_test(
v8: u8, v16: u16, v32: u32, v64: u64, v128: u128
) -> (
(Option<u8>, Option<u8>, Option<u8>, Option<u8>, Option<u8>),
(Option<u16>, Option<u16>, Option<u16>, Option<u16>),
(Option<u32>, Option<u32>, Option<u32>),
(Option<u64>, Option<u64>),
(Option<u128>,),
) {
(
(downcast(v128), downcast(v64), downcast(v32), downcast(v16), downcast(v8)),
(downcast(v128), downcast(v64), downcast(v32), downcast(v16)),
(downcast(v128), downcast(v64), downcast(v32)),
(downcast(v128), downcast(v64)),
(downcast(v128),),
)
}
};
static ref DOWNCAST_BOUNDED_INT: (String, Program) = load_cairo! {
#[feature("bounded-int-utils")]
use core::internal::bounded_int::BoundedInt;
extern const fn downcast<FromType, ToType>( x: FromType, ) -> Option<ToType> implicits(RangeCheck) nopanic;
fn test_x_y<
X,
Y,
+TryInto<felt252, X>,
+Into<Y, felt252>
>(v: felt252) -> felt252 {
let v: X = v.try_into().unwrap();
let v: Y = downcast(v).unwrap();
v.into()
}
fn b0x30_b0x30(v: felt252) -> felt252 { test_x_y::<BoundedInt<0,30>, BoundedInt<0,30>>(v) }
fn bm31x30_b31x30(v: felt252) -> felt252 { test_x_y::<BoundedInt<-31,30>, BoundedInt<-31,30>>(v) }
fn bm31x30_bm5x30(v: felt252) -> felt252 { test_x_y::<BoundedInt<-31,30>, BoundedInt<-5,30>>(v) }
fn bm31x30_b5x30(v: felt252) -> felt252 { test_x_y::<BoundedInt<-31,30>, BoundedInt<5,30>>(v) }
fn b5x30_b31x31(v: felt252) -> felt252 { test_x_y::<BoundedInt<5,31>, BoundedInt<31,31>>(v) }
fn bm100x100_bm100xm1(v: felt252) -> felt252 { test_x_y::<BoundedInt<-100,100>, BoundedInt<-100,-1>>(v) }
fn bm31xm31_bm31xm31(v: felt252) -> felt252 { test_x_y::<BoundedInt<-31,-31>, BoundedInt<-31,-31>>(v) }
// Check if the target type is wider than the source type
fn b0x30_b5x40(v: felt252) -> felt252 { test_x_y::<BoundedInt<0,30>, BoundedInt<5,40>>(v) }
// Check if the source's lower and upper bound are included in the
// target type.
fn b0x30_bm40x40(v: felt252) -> felt252 { test_x_y::<BoundedInt<0,30>, BoundedInt<-40,40>>(v) }
};
static ref DOWNCAST_FELT: (String, Program) = load_cairo! {
extern const fn downcast<FromType, ToType>( x: FromType, ) -> Option<ToType> implicits(RangeCheck) nopanic;
fn test_x_y<
X,
Y,
+TryInto<felt252, X>,
+Into<Y, felt252>
>(v: felt252) -> felt252 {
let v: X = v.try_into().unwrap();
let v: Y = downcast(v).unwrap();
v.into()
}
fn felt252_i8(v: felt252) -> felt252 { test_x_y::<felt252, i8>(v) }
fn felt252_i16(v: felt252) -> felt252 { test_x_y::<felt252, i16>(v) }
fn felt252_i32(v: felt252) -> felt252 { test_x_y::<felt252, i32>(v) }
fn felt252_i64(v: felt252) -> felt252 { test_x_y::<felt252, i64>(v) }
};
}
#[test]
fn downcast() {
run_program_assert_output(
&DOWNCAST,
"run_test",
&[
u8::MAX.into(),
u16::MAX.into(),
u32::MAX.into(),
u64::MAX.into(),
u128::MAX.into(),
],
jit_struct!(
jit_struct!(
jit_enum!(1, jit_struct!()),
jit_enum!(1, jit_struct!()),
jit_enum!(1, jit_struct!()),
jit_enum!(1, jit_struct!()),
jit_enum!(0, u8::MAX.into()),
),
jit_struct!(
jit_enum!(1, jit_struct!()),
jit_enum!(1, jit_struct!()),
jit_enum!(1, jit_struct!()),
jit_enum!(0, u16::MAX.into()),
),
jit_struct!(
jit_enum!(1, jit_struct!()),
jit_enum!(1, jit_struct!()),
jit_enum!(0, u32::MAX.into()),
),
jit_struct!(jit_enum!(1, jit_struct!()), jit_enum!(0, u64::MAX.into())),
jit_struct!(jit_enum!(0, u128::MAX.into())),
),
);
}
#[test_case("b0x30_b0x30", 5.into())]
#[test_case("bm31x30_b31x30", 5.into())]
#[test_case("bm31x30_bm5x30", (-5).into())]
#[test_case("bm31x30_b5x30", 30.into())]
#[test_case("b5x30_b31x31", 31.into())]
#[test_case("bm100x100_bm100xm1", (-90).into())]
#[test_case("bm31xm31_bm31xm31", (-31).into())]
#[test_case("b0x30_b5x40", 10.into())]
#[test_case("b0x30_bm40x40", 10.into())]
fn downcast_bounded_int(entry_point: &str, value: Felt) {
run_program_assert_output(
&DOWNCAST_BOUNDED_INT,
entry_point,
&[Value::Felt252(value)],
jit_enum!(0, jit_struct!(Value::Felt252(value))),
);
}
#[test_case("felt252_i8", i8::MAX.into())]
#[test_case("felt252_i8", i8::MIN.into())]
#[test_case("felt252_i16", i16::MAX.into())]
#[test_case("felt252_i16", i16::MIN.into())]
#[test_case("felt252_i32", i32::MAX.into())]
#[test_case("felt252_i32", i32::MIN.into())]
#[test_case("felt252_i64", i64::MAX.into())]
#[test_case("felt252_i64", i64::MIN.into())]
fn downcast_felt(entry_point: &str, value: Felt) {
run_program_assert_output(
&DOWNCAST_FELT,
entry_point,
&[Value::Felt252(value)],
jit_enum!(0, jit_struct!(Value::Felt252(value))),
);
}
lazy_static! {
static ref TEST_UPCAST_PROGRAM: (String, Program) = load_cairo! {
#[feature("bounded-int-utils")]
use core::internal::bounded_int::{BoundedInt};
extern const fn upcast<FromType, ToType>(x: FromType) -> ToType nopanic;
fn test_x_y<
X,
Y,
+TryInto<felt252, X>,
+Into<Y, felt252>
>(v: felt252) -> felt252 {
let v: X = v.try_into().unwrap();
let v: Y = upcast(v);
v.into()
}
fn u8_u16(v: felt252) -> felt252 { test_x_y::<u8, u16>(v) }
fn u8_u32(v: felt252) -> felt252 { test_x_y::<u8, u32>(v) }
fn u8_u64(v: felt252) -> felt252 { test_x_y::<u8, u64>(v) }
fn u8_u128(v: felt252) -> felt252 { test_x_y::<u8, u128>(v) }
fn u8_felt252(v: felt252) -> felt252 { test_x_y::<u8, felt252>(v) }
fn u16_u32(v: felt252) -> felt252 { test_x_y::<u16, u32>(v) }
fn u16_u64(v: felt252) -> felt252 { test_x_y::<u16, u64>(v) }
fn u16_u128(v: felt252) -> felt252 { test_x_y::<u16, u128>(v) }
fn u16_felt252(v: felt252) -> felt252 { test_x_y::<u16, felt252>(v) }
fn u32_u64(v: felt252) -> felt252 { test_x_y::<u32, u64>(v) }
fn u32_u128(v: felt252) -> felt252 { test_x_y::<u32, u128>(v) }
fn u32_felt252(v: felt252) -> felt252 { test_x_y::<u32, felt252>(v) }
fn u64_u128(v: felt252) -> felt252 { test_x_y::<u64, u128>(v) }
fn u64_felt252(v: felt252) -> felt252 { test_x_y::<u64, felt252>(v) }
fn u128_felt252(v: felt252) -> felt252 { test_x_y::<u128, felt252>(v) }
fn i8_i16(v: felt252) -> felt252 { test_x_y::<i8, i16>(v) }
fn i8_i32(v: felt252) -> felt252 { test_x_y::<i8, i32>(v) }
fn i8_i64(v: felt252) -> felt252 { test_x_y::<i8, i64>(v) }
fn i8_i128(v: felt252) -> felt252 { test_x_y::<i8, i128>(v) }
fn i8_felt252(v: felt252) -> felt252 { test_x_y::<i8, felt252>(v) }
fn i16_i32(v: felt252) -> felt252 { test_x_y::<i16, i32>(v) }
fn i16_i64(v: felt252) -> felt252 { test_x_y::<i16, i64>(v) }
fn i16_i128(v: felt252) -> felt252 { test_x_y::<i16, i128>(v) }
fn i16_felt252(v: felt252) -> felt252 { test_x_y::<i16, felt252>(v) }
fn i32_i64(v: felt252) -> felt252 { test_x_y::<i32, i64>(v) }
fn i32_i128(v: felt252) -> felt252 { test_x_y::<i32, i128>(v) }
fn i32_felt252(v: felt252) -> felt252 { test_x_y::<i32, felt252>(v) }
fn i64_i128(v: felt252) -> felt252 { test_x_y::<i64, i128>(v) }
fn i64_felt252(v: felt252) -> felt252 { test_x_y::<i64, felt252>(v) }
fn i128_felt252(v: felt252) -> felt252 { test_x_y::<i128, felt252>(v) }
fn b0x5_b0x10(v: felt252) -> felt252 { test_x_y::<BoundedInt<0, 5>, BoundedInt<0, 10>>(v) }
fn b2x5_b2x10(v: felt252) -> felt252 { test_x_y::<BoundedInt<2, 5>, BoundedInt<2, 10>>(v) }
fn b2x5_b1x10(v: felt252) -> felt252 { test_x_y::<BoundedInt<2, 5>, BoundedInt<1, 10>>(v) }
fn b0x5_bm10x10(v: felt252) -> felt252 { test_x_y::<BoundedInt<0, 5>, BoundedInt<-10, 10>>(v) }
fn bm5x5_bm10x10(v: felt252) -> felt252 { test_x_y::<BoundedInt<-5, 5>, BoundedInt<-10, 10>>(v) }
fn i8_bm200x200(v: felt252) -> felt252 { test_x_y::<i8, BoundedInt<-200, 200>>(v) }
fn bm100x100_i8(v: felt252) -> felt252 { test_x_y::<BoundedInt<-100, 100>, i8>(v) }
};
}
// u8 upcast test
#[test_case("u8_u16", u8::MIN.into())]
#[test_case("u8_u16", u8::MAX.into())]
#[test_case("u8_u32", u8::MIN.into())]
#[test_case("u8_u32", u8::MAX.into())]
#[test_case("u8_u64", u8::MIN.into())]
#[test_case("u8_u64", u8::MAX.into())]
#[test_case("u8_u128", u8::MIN.into())]
#[test_case("u8_u128", u8::MAX.into())]
#[test_case("u8_felt252", u8::MIN.into())]
#[test_case("u8_felt252", u8::MAX.into())]
// u16 upcast test
#[test_case("u16_u32", u16::MIN.into())]
#[test_case("u16_u32", u16::MAX.into())]
#[test_case("u16_u64", u16::MIN.into())]
#[test_case("u16_u64", u16::MAX.into())]
#[test_case("u16_u128", u16::MIN.into())]
#[test_case("u16_u128", u16::MAX.into())]
#[test_case("u16_felt252", u16::MIN.into())]
#[test_case("u16_felt252", u16::MAX.into())]
// u32 upcast test
#[test_case("u32_u64", u32::MIN.into())]
#[test_case("u32_u64", u32::MAX.into())]
#[test_case("u32_u128", u32::MIN.into())]
#[test_case("u32_u128", u32::MAX.into())]
#[test_case("u32_felt252", u32::MIN.into())]
#[test_case("u32_felt252", u32::MAX.into())]
// u64 upcast test
#[test_case("u64_u128", u64::MIN.into())]
#[test_case("u64_u128", u64::MAX.into())]
#[test_case("u64_felt252", u64::MIN.into())]
#[test_case("u64_felt252", u64::MAX.into())]
// u128 upcast test
#[test_case("u128_felt252", u128::MIN.into())]
#[test_case("u128_felt252", u128::MAX.into())]
// i8 upcast test
#[test_case("i8_i16", i8::MIN.into())]
#[test_case("i8_i16", i8::MAX.into())]
#[test_case("i8_i32", i8::MIN.into())]
#[test_case("i8_i32", i8::MAX.into())]
#[test_case("i8_i64", i8::MIN.into())]
#[test_case("i8_i64", i8::MAX.into())]
#[test_case("i8_i128", i8::MIN.into())]
#[test_case("i8_i128", i8::MAX.into())]
#[test_case("i8_felt252", i8::MIN.into())]
#[test_case("i8_felt252", i8::MAX.into())]
// i16 upcast test
#[test_case("i16_i32", i16::MIN.into())]
#[test_case("i16_i32", i16::MAX.into())]
#[test_case("i16_i64", i16::MIN.into())]
#[test_case("i16_i64", i16::MAX.into())]
#[test_case("i16_i128", i16::MIN.into())]
#[test_case("i16_i128", i16::MAX.into())]
#[test_case("i16_felt252", i16::MIN.into())]
#[test_case("i16_felt252", i16::MAX.into())]
// i32 upcast test
#[test_case("i32_i64", i32::MIN.into())]
#[test_case("i32_i64", i32::MAX.into())]
#[test_case("i32_i128", i32::MIN.into())]
#[test_case("i32_i128", i32::MAX.into())]
#[test_case("i32_felt252", i32::MIN.into())]
#[test_case("i32_felt252", i32::MAX.into())]
// i64 upcast test
#[test_case("i64_i128", i64::MIN.into())]
#[test_case("i64_i128", i64::MAX.into())]
#[test_case("i64_felt252", i64::MIN.into())]
#[test_case("i64_felt252", i64::MAX.into())]
// i128 upcast test
#[test_case("i128_felt252", i128::MIN.into())]
#[test_case("i128_felt252", i128::MAX.into())]
// bounded int test
#[test_case("b0x5_b0x10", 0.into())]
#[test_case("b0x5_b0x10", 5.into())]
#[test_case("b2x5_b2x10", 2.into())]
#[test_case("b2x5_b2x10", 5.into())]
#[test_case("b2x5_b1x10", 2.into())]
#[test_case("b2x5_b1x10", 5.into())]
#[test_case("b0x5_bm10x10", 0.into())]
#[test_case("b0x5_bm10x10", 5.into())]
#[test_case("bm5x5_bm10x10", Felt::from(-5))]
#[test_case("bm5x5_bm10x10", 5.into())]
#[test_case("i8_bm200x200", Felt::from(-128))]
#[test_case("i8_bm200x200", 127.into())]
#[test_case("bm100x100_i8", Felt::from(-100))]
#[test_case("bm100x100_i8", 100.into())]
fn upcast(entry_point: &str, value: Felt) {
let arguments = &[value.into()];
let expected_result = jit_enum!(0, jit_struct!(value.into(),));
run_program_assert_output(
&TEST_UPCAST_PROGRAM,
entry_point,
arguments,
expected_result,
);
}
}
| rust | Apache-2.0 | f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce | 2026-01-04T20:20:54.031924Z | false |
lambdaclass/cairo_native | https://github.com/lambdaclass/cairo_native/blob/f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce/src/libfuncs/felt252.rs | src/libfuncs/felt252.rs | //! # `Felt`-related libfuncs
use super::LibfuncHelper;
use crate::{
error::Result,
metadata::MetadataStorage,
utils::{ProgramRegistryExt, PRIME},
};
use cairo_lang_sierra::{
extensions::{
core::{CoreLibfunc, CoreType},
felt252::{
Felt252BinaryOperationConcrete, Felt252BinaryOperator, Felt252Concrete,
Felt252ConstConcreteLibfunc,
},
lib_func::SignatureOnlyConcreteLibfunc,
ConcreteLibfunc,
},
program_registry::ProgramRegistry,
};
use melior::{
dialect::{
arith::{self, CmpiPredicate},
cf,
},
helpers::{ArithBlockExt, BuiltinBlockExt},
ir::{r#type::IntegerType, Block, BlockLike, Location, Value, ValueLike},
Context,
};
use num_bigint::{BigInt, Sign};
/// Select and call the correct libfunc builder function from the selector.
pub fn build<'ctx, 'this>(
context: &'ctx Context,
registry: &ProgramRegistry<CoreType, CoreLibfunc>,
entry: &'this Block<'ctx>,
location: Location<'ctx>,
helper: &LibfuncHelper<'ctx, 'this>,
metadata: &mut MetadataStorage,
selector: &Felt252Concrete,
) -> Result<()> {
match selector {
Felt252Concrete::BinaryOperation(info) => {
build_binary_operation(context, registry, entry, location, helper, metadata, info)
}
Felt252Concrete::Const(info) => {
build_const(context, registry, entry, location, helper, metadata, info)
}
Felt252Concrete::IsZero(info) => {
build_is_zero(context, registry, entry, location, helper, metadata, info)
}
}
}
/// Generate MLIR operations for the following libfuncs:
/// - `felt252_add` and `felt252_add_const`.
/// - `felt252_sub` and `felt252_sub_const`.
/// - `felt252_mul` and `felt252_mul_const`.
/// - `felt252_div` and `felt252_div_const`.
pub fn build_binary_operation<'ctx, 'this>(
context: &'ctx Context,
registry: &ProgramRegistry<CoreType, CoreLibfunc>,
entry: &'this Block<'ctx>,
location: Location<'ctx>,
helper: &LibfuncHelper<'ctx, 'this>,
metadata: &mut MetadataStorage,
info: &Felt252BinaryOperationConcrete,
) -> Result<()> {
let felt252_ty = registry.build_type(
context,
helper,
metadata,
&info.branch_signatures()[0].vars[0].ty,
)?;
let i256 = IntegerType::new(context, 256).into();
let i512 = IntegerType::new(context, 512).into();
let (op, lhs, rhs) = match info {
Felt252BinaryOperationConcrete::WithVar(operation) => {
(operation.operator, entry.arg(0)?, entry.arg(1)?)
}
Felt252BinaryOperationConcrete::WithConst(operation) => {
let value = match operation.c.sign() {
Sign::Minus => (BigInt::from_biguint(Sign::Plus, PRIME.clone()) + &operation.c)
.magnitude()
.clone(),
_ => operation.c.magnitude().clone(),
};
// TODO: Ensure that the constant is on the correct side of the operation.
let rhs = entry.const_int_from_type(context, location, value, felt252_ty)?;
(operation.operator, entry.arg(0)?, rhs)
}
};
let result = match op {
Felt252BinaryOperator::Add => {
let lhs = entry.extui(lhs, i256, location)?;
let rhs = entry.extui(rhs, i256, location)?;
let result = entry.addi(lhs, rhs, location)?;
let prime = entry.const_int_from_type(context, location, PRIME.clone(), i256)?;
let result_mod = entry.append_op_result(arith::subi(result, prime, location))?;
let is_out_of_range =
entry.cmpi(context, CmpiPredicate::Uge, result, prime, location)?;
let result = entry.append_op_result(arith::select(
is_out_of_range,
result_mod,
result,
location,
))?;
entry.trunci(result, felt252_ty, location)?
}
Felt252BinaryOperator::Sub => {
let lhs = entry.extui(lhs, i256, location)?;
let rhs = entry.extui(rhs, i256, location)?;
let result = entry.append_op_result(arith::subi(lhs, rhs, location))?;
let prime = entry.const_int_from_type(context, location, PRIME.clone(), i256)?;
let result_mod = entry.addi(result, prime, location)?;
let is_out_of_range = entry.cmpi(context, CmpiPredicate::Ult, lhs, rhs, location)?;
let result = entry.append_op_result(arith::select(
is_out_of_range,
result_mod,
result,
location,
))?;
entry.trunci(result, felt252_ty, location)?
}
Felt252BinaryOperator::Mul => {
let lhs = entry.extui(lhs, i512, location)?;
let rhs = entry.extui(rhs, i512, location)?;
let result = entry.muli(lhs, rhs, location)?;
let prime = entry.const_int_from_type(context, location, PRIME.clone(), i512)?;
let result_mod = entry.append_op_result(arith::remui(result, prime, location))?;
let is_out_of_range =
entry.cmpi(context, CmpiPredicate::Uge, result, prime, location)?;
let result = entry.append_op_result(arith::select(
is_out_of_range,
result_mod,
result,
location,
))?;
entry.trunci(result, felt252_ty, location)?
}
Felt252BinaryOperator::Div => {
// The extended euclidean algorithm calculates the greatest common divisor of two integers,
// as well as the bezout coefficients x and y such that for inputs a and b, ax+by=gcd(a,b)
// We use this in felt division to find the modular inverse of a given number
// If a is the number we're trying to find the inverse of, we can do
// ax+y*PRIME=gcd(a,PRIME)=1 => ax = 1 (mod PRIME)
// Hence for input a, we return x
// The input MUST be non-zero
// See https://en.wikipedia.org/wiki/Extended_Euclidean_algorithm
let start_block = helper.append_block(Block::new(&[(i512, location)]));
let loop_block = helper.append_block(Block::new(&[
(i512, location),
(i512, location),
(i512, location),
(i512, location),
]));
let negative_check_block = helper.append_block(Block::new(&[]));
// Block containing final result
let inverse_result_block = helper.append_block(Block::new(&[(i512, location)]));
// Egcd works by calculating a series of remainders, each the remainder of dividing the previous two
// For the initial setup, r0 = PRIME, r1 = a
// This order is chosen because if we reverse them, then the first iteration will just swap them
let prev_remainder =
start_block.const_int_from_type(context, location, PRIME.clone(), i512)?;
let remainder = start_block.arg(0)?;
// Similarly we'll calculate another series which starts 0,1,... and from which we will retrieve the modular inverse of a
let prev_inverse = start_block.const_int_from_type(context, location, 0, i512)?;
let inverse = start_block.const_int_from_type(context, location, 1, i512)?;
start_block.append_operation(cf::br(
loop_block,
&[prev_remainder, remainder, prev_inverse, inverse],
location,
));
//---Loop body---
// Arguments are rem_(i-1), rem, inv_(i-1), inv
let prev_remainder = loop_block.arg(0)?;
let remainder = loop_block.arg(1)?;
let prev_inverse = loop_block.arg(2)?;
let inverse = loop_block.arg(3)?;
// First calculate q = rem_(i-1)/rem_i, rounded down
let quotient =
loop_block.append_op_result(arith::divui(prev_remainder, remainder, location))?;
// Then r_(i+1) = r_(i-1) - q * r_i, and inv_(i+1) = inv_(i-1) - q * inv_i
let rem_times_quo = loop_block.muli(remainder, quotient, location)?;
let inv_times_quo = loop_block.muli(inverse, quotient, location)?;
let next_remainder = loop_block.append_op_result(arith::subi(
prev_remainder,
rem_times_quo,
location,
))?;
let next_inverse =
loop_block.append_op_result(arith::subi(prev_inverse, inv_times_quo, location))?;
// If r_(i+1) is 0, then inv_i is the inverse
let zero = loop_block.const_int_from_type(context, location, 0, i512)?;
let next_remainder_eq_zero =
loop_block.cmpi(context, CmpiPredicate::Eq, next_remainder, zero, location)?;
loop_block.append_operation(cf::cond_br(
context,
next_remainder_eq_zero,
negative_check_block,
loop_block,
&[],
&[remainder, next_remainder, inverse, next_inverse],
location,
));
// egcd sometimes returns a negative number for the inverse,
// in such cases we must simply wrap it around back into [0, PRIME)
// this suffices because |inv_i| <= divfloor(PRIME,2)
let zero = negative_check_block.const_int_from_type(context, location, 0, i512)?;
let is_negative = negative_check_block
.append_operation(arith::cmpi(
context,
CmpiPredicate::Slt,
inverse,
zero,
location,
))
.result(0)?
.into();
// if the inverse is < 0, add PRIME
let prime =
negative_check_block.const_int_from_type(context, location, PRIME.clone(), i512)?;
let wrapped_inverse = negative_check_block.addi(inverse, prime, location)?;
let inverse = negative_check_block.append_op_result(arith::select(
is_negative,
wrapped_inverse,
inverse,
location,
))?;
negative_check_block.append_operation(cf::br(
inverse_result_block,
&[inverse],
location,
));
// Div Logic Start
// Fetch operands
let lhs = entry.extui(lhs, i512, location)?;
let rhs = entry.extui(rhs, i512, location)?;
// Calculate inverse of rhs, callling the inverse implementation's starting block
entry.append_operation(cf::br(start_block, &[rhs], location));
// Fetch the inverse result from the result block
let inverse = inverse_result_block.arg(0)?;
// Peform lhs * (1/ rhs)
let result = inverse_result_block.muli(lhs, inverse, location)?;
// Apply modulo and convert result to felt252
let result_mod =
inverse_result_block.append_op_result(arith::remui(result, prime, location))?;
let is_out_of_range =
inverse_result_block.cmpi(context, CmpiPredicate::Uge, result, prime, location)?;
let result = inverse_result_block.append_op_result(arith::select(
is_out_of_range,
result_mod,
result,
location,
))?;
let result = inverse_result_block.trunci(result, felt252_ty, location)?;
return helper.br(inverse_result_block, 0, &[result], location);
}
};
helper.br(entry, 0, &[result], location)
}
/// Generate MLIR operations for the `felt252_const` libfunc.
pub fn build_const<'ctx, 'this>(
context: &'ctx Context,
registry: &ProgramRegistry<CoreType, CoreLibfunc>,
entry: &'this Block<'ctx>,
location: Location<'ctx>,
helper: &LibfuncHelper<'ctx, 'this>,
metadata: &mut MetadataStorage,
info: &Felt252ConstConcreteLibfunc,
) -> Result<()> {
let value = match info.c.sign() {
Sign::Minus => (&info.c + BigInt::from_biguint(Sign::Plus, PRIME.clone()))
.magnitude()
.clone(),
_ => info.c.magnitude().clone(),
};
let felt252_ty = registry.build_type(
context,
helper,
metadata,
&info.branch_signatures()[0].vars[0].ty,
)?;
let value = entry.const_int_from_type(context, location, value, felt252_ty)?;
helper.br(entry, 0, &[value], location)
}
/// Generate MLIR operations for the `felt252_is_zero` libfunc.
pub fn build_is_zero<'ctx, 'this>(
context: &'ctx Context,
_registry: &ProgramRegistry<CoreType, CoreLibfunc>,
entry: &'this Block<'ctx>,
location: Location<'ctx>,
helper: &LibfuncHelper<'ctx, 'this>,
_metadata: &mut MetadataStorage,
_info: &SignatureOnlyConcreteLibfunc,
) -> Result<()> {
let arg0: Value = entry.arg(0)?;
let k0 = entry.const_int_from_type(context, location, 0, arg0.r#type())?;
let condition = entry.cmpi(context, CmpiPredicate::Eq, arg0, k0, location)?;
helper.cond_br(context, entry, condition, [0, 1], [&[], &[arg0]], location)
}
#[cfg(test)]
pub mod test {
use crate::{jit_struct, load_cairo, utils::testing::run_program, values::Value};
use cairo_lang_sierra::program::Program;
use lazy_static::lazy_static;
use starknet_types_core::felt::Felt;
lazy_static! {
static ref FELT252_ADD: (String, Program) = load_cairo! {
fn run_test(lhs: felt252, rhs: felt252) -> felt252 {
lhs + rhs
}
};
static ref FELT252_SUB: (String, Program) = load_cairo! {
fn run_test(lhs: felt252, rhs: felt252) -> felt252 {
lhs - rhs
}
};
static ref FELT252_MUL: (String, Program) = load_cairo! {
fn run_test(lhs: felt252, rhs: felt252) -> felt252 {
lhs * rhs
}
};
static ref FELT252_DIV: (String, Program) = load_cairo! {
fn run_test(lhs: felt252, rhs: felt252) -> felt252 {
felt252_div(lhs, rhs.try_into().unwrap())
}
};
static ref FELT252_CONST: (String, Program) = load_cairo! {
extern fn felt252_const<const value: felt252>() -> felt252 nopanic;
fn run_test() -> (felt252, felt252, felt252, felt252) {
(
felt252_const::<0>(),
felt252_const::<1>(),
felt252_const::<-2>(),
felt252_const::<-1>()
)
}
};
static ref FELT252_ADD_CONST: (String, Program) = load_cairo! {
extern fn felt252_add_const<const rhs: felt252>(lhs: felt252) -> felt252 nopanic;
fn run_test() -> (felt252, felt252, felt252, felt252, felt252, felt252, felt252, felt252, felt252) {
(
felt252_add_const::<0>(0),
felt252_add_const::<0>(1),
felt252_add_const::<1>(0),
felt252_add_const::<1>(1),
felt252_add_const::<0>(-1),
felt252_add_const::<-1>(0),
felt252_add_const::<-1>(-1),
felt252_add_const::<-1>(1),
felt252_add_const::<1>(-1),
)
}
};
static ref FELT252_SUB_CONST: (String, Program) = load_cairo! {
extern fn felt252_sub_const<const rhs: felt252>(lhs: felt252) -> felt252 nopanic;
fn run_test() -> (felt252, felt252, felt252, felt252, felt252, felt252, felt252, felt252, felt252) {
(
felt252_sub_const::<0>(0),
felt252_sub_const::<0>(1),
felt252_sub_const::<1>(0),
felt252_sub_const::<1>(1),
felt252_sub_const::<0>(-1),
felt252_sub_const::<-1>(0),
felt252_sub_const::<-1>(-1),
felt252_sub_const::<-1>(1),
felt252_sub_const::<1>(-1),
)
}
};
static ref FELT252_MUL_CONST: (String, Program) = load_cairo! {
extern fn felt252_mul_const<const rhs: felt252>(lhs: felt252) -> felt252 nopanic;
fn run_test() -> (felt252, felt252, felt252, felt252, felt252, felt252, felt252, felt252, felt252) {
(
felt252_mul_const::<0>(0),
felt252_mul_const::<0>(1),
felt252_mul_const::<1>(0),
felt252_mul_const::<1>(1),
felt252_mul_const::<2>(-1),
felt252_mul_const::<-2>(2),
felt252_mul_const::<-1>(-1),
felt252_mul_const::<-1>(1),
felt252_mul_const::<1>(-1),
)
}
};
static ref FELT252_DIV_CONST: (String, Program) = load_cairo! {
extern fn felt252_div_const<const rhs: felt252>(lhs: felt252) -> felt252 nopanic;
fn run_test() -> (
felt252,
felt252,
felt252,
felt252,
felt252,
felt252,
felt252,
felt252,
felt252,
felt252,
felt252,
felt252,
felt252,
felt252,
felt252,
felt252
) {
(
felt252_div_const::<1>(0),
felt252_div_const::<1>(1),
felt252_div_const::<2>(-1),
felt252_div_const::<-2>(2),
felt252_div_const::<-1>(-1),
felt252_div_const::<-1>(1),
felt252_div_const::<1>(-1),
felt252_div_const::<500>(1000),
felt252_div_const::<256>(1024),
felt252_div_const::<-256>(1024),
felt252_div_const::<256>(-1024),
felt252_div_const::<-256>(-1024),
felt252_div_const::<8>(64),
felt252_div_const::<8>(-64),
felt252_div_const::<-8>(64),
felt252_div_const::<-8>(-64),
)
}
};
static ref FELT252_IS_ZERO: (String, Program) = load_cairo! {
fn run_test(x: felt252) -> bool {
match x {
0 => true,
_ => false,
}
}
};
}
fn f(val: &str) -> Felt {
Felt::from_dec_str(val).unwrap()
}
#[test]
fn felt252_add() {
fn r(lhs: Felt, rhs: Felt) -> Felt {
match run_program(
&FELT252_ADD,
"run_test",
&[Value::Felt252(lhs), Value::Felt252(rhs)],
)
.return_value
{
Value::Felt252(x) => x,
_ => panic!("invalid return type"),
}
}
assert_eq!(r(f("0"), f("0")), f("0"));
assert_eq!(r(f("1"), f("2")), f("3"));
assert_eq!(r(f("0"), f("1")), f("1"));
assert_eq!(r(f("0"), f("-2")), f("-2"));
assert_eq!(r(f("0"), f("-1")), f("-1"));
assert_eq!(r(f("1"), f("0")), f("1"));
assert_eq!(r(f("1"), f("1")), f("2"));
assert_eq!(r(f("1"), f("-2")), f("-1"));
assert_eq!(r(f("1"), f("-1")), f("0"));
assert_eq!(r(f("-2"), f("0")), f("-2"));
assert_eq!(r(f("-2"), f("1")), f("-1"));
assert_eq!(r(f("-2"), f("-2")), f("-4"));
assert_eq!(r(f("-2"), f("-1")), f("-3"));
assert_eq!(r(f("-1"), f("0")), f("-1"));
assert_eq!(r(f("-1"), f("1")), f("0"));
assert_eq!(r(f("-1"), f("-2")), f("-3"));
assert_eq!(r(f("-1"), f("-1")), f("-2"));
}
#[test]
fn felt252_sub() {
fn r(lhs: Felt, rhs: Felt) -> Felt {
match run_program(
&FELT252_SUB,
"run_test",
&[Value::Felt252(lhs), Value::Felt252(rhs)],
)
.return_value
{
Value::Felt252(x) => x,
_ => panic!("invalid return type"),
}
}
assert_eq!(r(f("0"), f("0")), f("0"));
assert_eq!(r(f("0"), f("1")), f("-1"));
assert_eq!(r(f("0"), f("-2")), f("2"));
assert_eq!(r(f("0"), f("-1")), f("1"));
assert_eq!(r(f("1"), f("0")), f("1"));
assert_eq!(r(f("1"), f("1")), f("0"));
assert_eq!(r(f("1"), f("-2")), f("3"));
assert_eq!(r(f("1"), f("-1")), f("2"));
assert_eq!(r(f("-2"), f("0")), f("-2"));
assert_eq!(r(f("-2"), f("1")), f("-3"));
assert_eq!(r(f("-2"), f("-2")), f("0"));
assert_eq!(r(f("-2"), f("-1")), f("-1"));
assert_eq!(r(f("-1"), f("0")), f("-1"));
assert_eq!(r(f("-1"), f("1")), f("-2"));
assert_eq!(r(f("-1"), f("-2")), f("1"));
assert_eq!(r(f("-1"), f("-1")), f("0"));
}
#[test]
fn felt252_mul() {
fn r(lhs: Felt, rhs: Felt) -> Felt {
match run_program(
&FELT252_MUL,
"run_test",
&[Value::Felt252(lhs), Value::Felt252(rhs)],
)
.return_value
{
Value::Felt252(x) => x,
_ => panic!("invalid return type"),
}
}
assert_eq!(r(f("0"), f("0")), f("0"));
assert_eq!(r(f("0"), f("1")), f("0"));
assert_eq!(r(f("0"), f("-2")), f("0"));
assert_eq!(r(f("0"), f("-1")), f("0"));
assert_eq!(r(f("1"), f("0")), f("0"));
assert_eq!(r(f("1"), f("1")), f("1"));
assert_eq!(r(f("1"), f("-2")), f("-2"));
assert_eq!(r(f("1"), f("-1")), f("-1"));
assert_eq!(r(f("-2"), f("0")), f("0"));
assert_eq!(r(f("-2"), f("1")), f("-2"));
assert_eq!(r(f("-2"), f("-2")), f("4"));
assert_eq!(r(f("-2"), f("-1")), f("2"));
assert_eq!(r(f("-1"), f("0")), f("0"));
assert_eq!(r(f("-1"), f("1")), f("-1"));
assert_eq!(r(f("-1"), f("-2")), f("2"));
assert_eq!(r(f("-1"), f("-1")), f("1"));
}
#[test]
fn felt252_div() {
// Helper function to run the test and extract the return value.
fn r(lhs: Felt, rhs: Felt) -> Option<Felt> {
match run_program(
&FELT252_DIV,
"run_test",
&[Value::Felt252(lhs), Value::Felt252(rhs)],
)
.return_value
{
Value::Enum { tag: 0, value, .. } => match *value {
Value::Struct { fields, .. } => {
assert_eq!(fields.len(), 1);
Some(match &fields[0] {
Value::Felt252(x) => *x,
_ => panic!("invalid return type payload"),
})
}
_ => panic!("invalid return type"),
},
Value::Enum { tag: 1, .. } => None,
_ => panic!("invalid return type"),
}
}
// Helper function to assert that a division panics.
let assert_panics =
|lhs, rhs| assert!(r(lhs, rhs).is_none(), "division by 0 is expected to panic",);
// Division by zero is expected to panic.
assert_panics(f("0"), f("0"));
assert_panics(f("1"), f("0"));
assert_panics(f("-2"), f("0"));
// Test cases for valid division results.
assert_eq!(r(f("0"), f("1")), Some(f("0")));
assert_eq!(r(f("0"), f("-2")), Some(f("0")));
assert_eq!(r(f("0"), f("-1")), Some(f("0")));
assert_eq!(r(f("1"), f("1")), Some(f("1")));
assert_eq!(
r(f("1"), f("-2")),
Some(f(
"1809251394333065606848661391547535052811553607665798349986546028067936010240"
))
);
assert_eq!(r(f("1"), f("-1")), Some(f("-1")));
assert_eq!(r(f("-2"), f("1")), Some(f("-2")));
assert_eq!(r(f("-2"), f("-2")), Some(f("1")));
assert_eq!(r(f("-2"), f("-1")), Some(f("2")));
assert_eq!(r(f("-1"), f("1")), Some(f("-1")));
assert_eq!(
r(f("-1"), f("-2")),
Some(f(
"1809251394333065606848661391547535052811553607665798349986546028067936010241"
))
);
assert_eq!(r(f("-1"), f("-1")), Some(f("1")));
assert_eq!(r(f("6"), f("2")), Some(f("3")));
assert_eq!(r(f("1000"), f("2")), Some(f("500")));
}
#[test]
fn felt252_const() {
assert_eq!(
run_program(&FELT252_CONST, "run_test", &[]).return_value,
Value::Struct {
fields: [f("0"), f("1"), f("-2"), f("-1")]
.map(Value::Felt252)
.to_vec(),
debug_name: None
}
);
}
#[test]
fn felt252_add_const() {
assert_eq!(
run_program(&FELT252_ADD_CONST, "run_test", &[]).return_value,
jit_struct!(
f("0").into(),
f("1").into(),
f("1").into(),
f("2").into(),
f("-1").into(),
f("-1").into(),
f("-2").into(),
f("0").into(),
f("0").into(),
)
);
}
#[test]
fn felt252_sub_const() {
assert_eq!(
run_program(&FELT252_SUB_CONST, "run_test", &[]).return_value,
jit_struct!(
f("0").into(),
f("1").into(),
f("-1").into(),
f("0").into(),
f("-1").into(),
f("1").into(),
f("0").into(),
f("2").into(),
f("-2").into(),
)
);
}
#[test]
fn felt252_mul_const() {
assert_eq!(
run_program(&FELT252_MUL_CONST, "run_test", &[]).return_value,
jit_struct!(
f("0").into(),
f("0").into(),
f("0").into(),
f("1").into(),
f("-2").into(),
f("-4").into(),
f("1").into(),
f("-1").into(),
f("-1").into(),
)
);
}
#[test]
fn felt252_div_const() {
assert_eq!(
run_program(&FELT252_DIV_CONST, "run_test", &[]).return_value,
jit_struct!(
f("0").into(),
f("1").into(),
f("1809251394333065606848661391547535052811553607665798349986546028067936010240")
.into(),
f("-1").into(),
f("1").into(),
f("-1").into(),
f("-1").into(),
f("2").into(),
f("4").into(),
f("-4").into(),
f("-4").into(),
f("4").into(),
f("8").into(),
f("-8").into(),
f("-8").into(),
f("8").into(),
)
);
}
#[test]
fn felt252_is_zero() {
fn r(x: Felt) -> bool {
match run_program(&FELT252_IS_ZERO, "run_test", &[Value::Felt252(x)]).return_value {
Value::Enum { tag, .. } => tag != 0,
_ => panic!("invalid return type"),
}
}
assert!(r(f("0")));
assert!(!r(f("1")));
assert!(!r(f("-2")));
assert!(!r(f("-1")));
}
}
| rust | Apache-2.0 | f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce | 2026-01-04T20:20:54.031924Z | false |
lambdaclass/cairo_native | https://github.com/lambdaclass/cairo_native/blob/f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce/src/libfuncs/const.rs | src/libfuncs/const.rs | //! # Const libfuncs
use super::LibfuncHelper;
use crate::{
error::{Error, Result},
libfuncs::{r#enum::build_enum_value, r#struct::build_struct_value},
metadata::{realloc_bindings::ReallocBindingsMeta, MetadataStorage},
native_panic,
types::TypeBuilder,
utils::{ProgramRegistryExt, RangeExt, PRIME},
};
use cairo_lang_sierra::{
extensions::{
bounded_int::BoundedIntConcreteType,
const_type::{
ConstAsBoxConcreteLibfunc, ConstAsImmediateConcreteLibfunc, ConstConcreteLibfunc,
ConstConcreteType,
},
core::{CoreLibfunc, CoreType, CoreTypeConcrete},
starknet::StarknetTypeConcrete,
},
program::GenericArg,
program_registry::ProgramRegistry,
};
use melior::{
dialect::llvm::{self, r#type::pointer},
helpers::{ArithBlockExt, BuiltinBlockExt, LlvmBlockExt},
ir::{Block, Location, Value},
Context,
};
use num_bigint::Sign;
/// Select and call the correct libfunc builder function from the selector.
pub fn build<'ctx, 'this>(
context: &'ctx Context,
registry: &ProgramRegistry<CoreType, CoreLibfunc>,
entry: &'this Block<'ctx>,
location: Location<'ctx>,
helper: &LibfuncHelper<'ctx, 'this>,
metadata: &mut MetadataStorage,
selector: &ConstConcreteLibfunc,
) -> Result<()> {
match selector {
ConstConcreteLibfunc::AsBox(info) => {
build_const_as_box(context, registry, entry, location, helper, metadata, info)
}
ConstConcreteLibfunc::AsImmediate(info) => {
build_const_as_immediate(context, registry, entry, location, helper, metadata, info)
}
}
}
/// Generate MLIR operations for the `const_as_box` libfunc.
///
/// Builds the constant value, heap-allocates space sized to the inner type's
/// padded layout, stores the value there, and forwards the pointer as the
/// boxed result.
pub fn build_const_as_box<'ctx, 'this>(
    context: &'ctx Context,
    registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, 'this>,
    metadata: &mut MetadataStorage,
    info: &ConstAsBoxConcreteLibfunc,
) -> Result<()> {
    // Make sure the realloc bindings are registered before emitting a call to
    // them below.
    if metadata.get::<ReallocBindingsMeta>().is_none() {
        metadata.insert(ReallocBindingsMeta::new(context, helper));
    }

    let const_type_outer = registry.get_type(&info.const_type)?;

    // Create constant. The declared type must be a `Const` wrapper; anything
    // else is a compiler bug.
    let const_type = match &const_type_outer {
        CoreTypeConcrete::Const(inner) => inner,
        _ => native_panic!("matched an unexpected CoreTypeConcrete that is not a Const"),
    };

    let value = build_const_type_value(
        context, registry, entry, location, helper, metadata, const_type,
    )?;
    let const_ty = registry.get_type(&const_type.inner_ty)?;
    let inner_layout = const_ty.layout(registry)?;

    // Create box: allocate the inner type's padded size on the heap. Realloc
    // on a null pointer behaves like malloc.
    let value_len = entry.const_int(context, location, inner_layout.pad_to_align().size(), 64)?;

    let ptr = entry.append_op_result(llvm::zero(pointer(context, 0), location))?;
    let ptr = entry.append_op_result(ReallocBindingsMeta::realloc(
        context, ptr, value_len, location,
    )?)?;

    // Store constant in box and forward the pointer to the only branch.
    entry.store(context, location, ptr, value)?;

    helper.br(entry, 0, &[ptr], location)
}
/// Generate MLIR operations for the `const_as_immediate` libfunc.
///
/// Materializes the constant directly as an SSA value and forwards it to the
/// only branch target.
pub fn build_const_as_immediate<'ctx, 'this>(
    context: &'ctx Context,
    registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, 'this>,
    metadata: &mut MetadataStorage,
    info: &ConstAsImmediateConcreteLibfunc,
) -> Result<()> {
    // The declared type must be a `Const` wrapper; anything else is a
    // compiler bug.
    let outer_type = registry.get_type(&info.const_type)?;
    let CoreTypeConcrete::Const(const_type) = &outer_type else {
        native_panic!("matched an unexpected CoreTypeConcrete that is not a Const")
    };

    // Build the constant itself, then jump to the success branch with it.
    let const_value = build_const_type_value(
        context, registry, entry, location, helper, metadata, const_type,
    )?;

    helper.br(entry, 0, &[const_value], location)
}
/// Builds the concrete value described by a `Const` type, recursing through
/// aggregate types (structs, enums, `NonZero`) down to the numeric leaves.
pub fn build_const_type_value<'ctx, 'this>(
    context: &'ctx Context,
    registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, 'this>,
    metadata: &mut MetadataStorage,
    info: &ConstConcreteType,
) -> Result<Value<'ctx, 'this>> {
    // const_type.inner_data Should be one of the following:
    // - A single value, if the inner type is a simple numeric type (e.g., `felt252`, `u32`,
    //   etc.).
    // - A list of const types, if the inner type is a struct. The type of each const type must be
    //   the same as the corresponding struct member type.
    // - A selector (a single value) followed by a const type, if the inner type is an enum. The
    //   type of the const type must be the same as the corresponding enum variant type.
    let inner_type = registry.get_type(&info.inner_ty)?;
    let inner_ty = registry.build_type(context, helper, metadata, &info.inner_ty)?;

    match inner_type {
        CoreTypeConcrete::Struct(_) => {
            // Each field is itself a `Const` type; build them recursively and
            // then assemble the struct value.
            let mut fields = Vec::new();

            for field in &info.inner_data {
                match field {
                    GenericArg::Type(const_field_ty) => {
                        let field_type = registry.get_type(const_field_ty)?;

                        let const_field_type = match &field_type {
                            CoreTypeConcrete::Const(inner) => inner,
                            _ => native_panic!(
                                "matched an unexpected CoreTypeConcrete that is not a Const"
                            ),
                        };

                        let field_value = build_const_type_value(
                            context,
                            registry,
                            entry,
                            location,
                            helper,
                            metadata,
                            const_field_type,
                        )?;
                        fields.push(field_value);
                    }
                    _ => return Err(Error::ConstDataMismatch),
                }
            }

            build_struct_value(
                context,
                registry,
                entry,
                location,
                helper,
                metadata,
                &info.inner_ty,
                &fields,
            )
        }
        CoreTypeConcrete::Enum(_enum_info) => match &info.inner_data[..] {
            // A value selecting the variant, followed by the payload's
            // `Const` type.
            [GenericArg::Value(variant_index), GenericArg::Type(payload_ty)] => {
                let payload_type = registry.get_type(payload_ty)?;
                let const_payload_type = match payload_type {
                    CoreTypeConcrete::Const(inner) => inner,
                    _ => {
                        native_panic!("matched an unexpected CoreTypeConcrete that is not a Const")
                    }
                };

                let payload_value = build_const_type_value(
                    context,
                    registry,
                    entry,
                    location,
                    helper,
                    metadata,
                    const_payload_type,
                )?;

                build_enum_value(
                    context,
                    registry,
                    entry,
                    location,
                    helper,
                    metadata,
                    payload_value,
                    &info.inner_ty,
                    payload_ty,
                    variant_index
                        .try_into()
                        .map_err(|_| Error::IntegerConversion)?,
                )
            }
            _ => Err(Error::ConstDataMismatch),
        },
        CoreTypeConcrete::NonZero(_) => match &info.inner_data[..] {
            // Copied from the sierra to casm lowering
            // NonZero is the same type as the inner type in native.
            [GenericArg::Type(inner)] => {
                let inner_type = registry.get_type(inner)?;
                let const_inner_type = match inner_type {
                    CoreTypeConcrete::Const(inner) => inner,
                    _ => native_panic!("unreachable: unexpected CoreTypeConcrete found"),
                };

                build_const_type_value(
                    context,
                    registry,
                    entry,
                    location,
                    helper,
                    metadata,
                    const_inner_type,
                )
            }
            _ => Err(Error::ConstDataMismatch),
        },
        CoreTypeConcrete::BoundedInt(BoundedIntConcreteType { range, .. }) => {
            let value = match &info.inner_data.as_slice() {
                [GenericArg::Value(value)] => value.clone(),
                _ => return Err(Error::ConstDataMismatch),
            };

            // Offset the value so that 0 matches with lower.
            let value = &value - &range.lower;

            Ok(entry.const_int(
                context,
                location,
                value,
                inner_type.integer_range(registry)?.offset_bit_width(),
            )?)
        }
        CoreTypeConcrete::Felt252(_) => {
            let value = match &info.inner_data.as_slice() {
                [GenericArg::Value(value)] => value.clone(),
                _ => return Err(Error::ConstDataMismatch),
            };

            // Canonicalize negatives into the field: -x becomes PRIME - x.
            let (sign, value) = value.into_parts();
            let value = match sign {
                Sign::Minus => PRIME.clone() - value,
                _ => value,
            };

            Ok(entry.const_int_from_type(context, location, value, inner_ty)?)
        }
        CoreTypeConcrete::Starknet(
            StarknetTypeConcrete::ClassHash(_) | StarknetTypeConcrete::ContractAddress(_),
        ) => {
            // Same canonicalization as `Felt252` above.
            let value = match &info.inner_data.as_slice() {
                [GenericArg::Value(value)] => value.clone(),
                _ => return Err(Error::ConstDataMismatch),
            };

            let (sign, value) = value.into_parts();
            let value = match sign {
                Sign::Minus => PRIME.clone() - value,
                _ => value,
            };

            Ok(entry.const_int_from_type(context, location, value, inner_ty)?)
        }
        CoreTypeConcrete::Uint8(_)
        | CoreTypeConcrete::Uint16(_)
        | CoreTypeConcrete::Uint32(_)
        | CoreTypeConcrete::Uint64(_)
        | CoreTypeConcrete::Uint128(_)
        | CoreTypeConcrete::Sint8(_)
        | CoreTypeConcrete::Sint16(_)
        | CoreTypeConcrete::Sint32(_)
        | CoreTypeConcrete::Sint64(_)
        | CoreTypeConcrete::Sint128(_)
        | CoreTypeConcrete::Bytes31(_) => match &info.inner_data.as_slice() {
            // Plain integers lower directly to an MLIR constant.
            [GenericArg::Value(value)] => {
                Ok(entry.const_int_from_type(context, location, value.clone(), inner_ty)?)
            }
            _ => Err(Error::ConstDataMismatch),
        },
        _ => native_panic!("const for type {} not implemented", info.inner_ty),
    }
}
#[cfg(test)]
pub mod test {
    use crate::{jit_struct, load_cairo, utils::testing::run_program, values::Value};

    /// `const_as_box` end-to-end: a boxed struct must round-trip through
    /// `Box::unbox`, including a negative `i32` field.
    #[test]
    fn run_const_as_box() {
        let program = load_cairo!(
            use core::box::BoxTrait;
            struct Hello {
                x: i32,
            }

            fn run_test() -> Hello {
                let x = BoxTrait::new(Hello {
                    x: -2
                });
                x.unbox()
            }
        );

        let result = run_program(&program, "run_test", &[]).return_value;
        assert_eq!(result, jit_struct!(Value::Sint32(-2)));
    }
}
| rust | Apache-2.0 | f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce | 2026-01-04T20:20:54.031924Z | false |
lambdaclass/cairo_native | https://github.com/lambdaclass/cairo_native/blob/f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce/src/libfuncs/ec.rs | src/libfuncs/ec.rs | //! # Elliptic curve libfuncs
use super::LibfuncHelper;
use crate::{
error::{Error, Result},
execution_result::EC_OP_BUILTIN_SIZE,
libfuncs::increment_builtin_counter_conditionally_by,
metadata::{runtime_bindings::RuntimeBindingsMeta, MetadataStorage},
native_panic,
utils::{get_integer_layout, ProgramRegistryExt, PRIME},
};
use cairo_lang_sierra::{
extensions::{
core::{CoreLibfunc, CoreType},
ec::EcConcreteLibfunc,
lib_func::SignatureOnlyConcreteLibfunc,
ConcreteLibfunc,
},
program_registry::ProgramRegistry,
};
use melior::{
dialect::{
arith::{self, CmpiPredicate},
llvm,
},
helpers::{ArithBlockExt, BuiltinBlockExt, LlvmBlockExt},
ir::{operation::OperationBuilder, r#type::IntegerType, Block, Location},
Context,
};
/// Select and call the correct libfunc builder function from the selector.
///
/// Every `ec_*` libfunc has a dedicated builder below; `NegNz` is not
/// implemented yet and aborts compilation.
pub fn build<'ctx, 'this>(
    context: &'ctx Context,
    registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, 'this>,
    metadata: &mut MetadataStorage,
    selector: &EcConcreteLibfunc,
) -> Result<()> {
    match selector {
        EcConcreteLibfunc::IsZero(info) => {
            build_is_zero(context, registry, entry, location, helper, metadata, info)
        }
        EcConcreteLibfunc::Neg(info) => {
            build_neg(context, registry, entry, location, helper, metadata, info)
        }
        EcConcreteLibfunc::PointFromX(info) => {
            build_point_from_x(context, registry, entry, location, helper, metadata, info)
        }
        EcConcreteLibfunc::StateAdd(info) => {
            build_state_add(context, registry, entry, location, helper, metadata, info)
        }
        EcConcreteLibfunc::StateAddMul(info) => {
            build_state_add_mul(context, registry, entry, location, helper, metadata, info)
        }
        EcConcreteLibfunc::StateFinalize(info) => {
            build_state_finalize(context, registry, entry, location, helper, metadata, info)
        }
        EcConcreteLibfunc::StateInit(info) => {
            build_state_init(context, registry, entry, location, helper, metadata, info)
        }
        EcConcreteLibfunc::TryNew(info) => {
            build_try_new(context, registry, entry, location, helper, metadata, info)
        }
        EcConcreteLibfunc::UnwrapPoint(info) => {
            build_unwrap_point(context, registry, entry, location, helper, metadata, info)
        }
        EcConcreteLibfunc::Zero(info) => {
            build_zero(context, registry, entry, location, helper, metadata, info)
        }
        EcConcreteLibfunc::NegNz(_) => native_panic!("implement ec_neg_nz"),
    }
}
/// Generate MLIR operations for the `ec_point_is_zero` libfunc.
///
/// A point is the zero point iff its `y` coordinate is zero, since no valid
/// curve point has `y = 0`.
pub fn build_is_zero<'ctx, 'this>(
    context: &'ctx Context,
    _registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, 'this>,
    _metadata: &mut MetadataStorage,
    _info: &SignatureOnlyConcreteLibfunc,
) -> Result<()> {
    let point = entry.arg(0)?;

    // Checking `y == 0` is sufficient: there is no point on the curve with
    // y = 0, so only the zero point has a zero `y` coordinate.
    let y_coord = entry.extract_value(
        context,
        location,
        point,
        IntegerType::new(context, 252).into(),
        1,
    )?;
    let zero = entry.const_int(context, location, 0, 252)?;
    let y_is_zero = entry.cmpi(context, CmpiPredicate::Eq, y_coord, zero, location)?;

    // Branch 0: the point is zero (no payload).
    // Branch 1: the point is non-zero (forward it unchanged).
    helper.cond_br(
        context,
        entry,
        y_is_zero,
        [0, 1],
        [&[], &[point]],
        location,
    )
}
/// Generate MLIR operations for the `ec_neg` libfunc.
///
/// Negates a point by mapping `y` to `PRIME - y` (i.e. `-y` modulo the field
/// prime), with `y = 0` kept at `0` so the zero point is its own negation.
pub fn build_neg<'ctx, 'this>(
    context: &'ctx Context,
    _registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, 'this>,
    _metadata: &mut MetadataStorage,
    _info: &SignatureOnlyConcreteLibfunc,
) -> Result<()> {
    // Extract the `y` coordinate (field 1) of the input point.
    let y = entry.extract_value(
        context,
        location,
        entry.arg(0)?,
        IntegerType::new(context, 252).into(),
        1,
    )?;

    let k_prime = entry.const_int(context, location, PRIME.clone(), 252)?;
    let k0 = entry.const_int(context, location, 0, 252)?;

    let y_is_zero = entry.cmpi(context, CmpiPredicate::Eq, y, k0, location)?;

    // `PRIME - y` would yield `PRIME` (not a canonical field element) for
    // `y = 0`, so select `0` in that case instead.
    let y_neg = entry.append_op_result(arith::subi(k_prime, y, location))?;
    let y_neg = entry.append_op_result(
        OperationBuilder::new("arith.select", location)
            .add_operands(&[y_is_zero, k0, y_neg])
            .add_results(&[IntegerType::new(context, 252).into()])
            .build()?,
    )?;

    // Write the negated `y` back into the point and forward it.
    let result = entry.insert_value(context, location, entry.arg(0)?, y_neg, 1)?;

    helper.br(entry, 0, &[result], location)
}
/// Generate MLIR operations for the `ec_point_from_x_nz` libfunc.
///
/// Asks the runtime to recover a curve point from its `x` coordinate. On
/// success the recovered point is forwarded (branch 0), otherwise the failure
/// branch (1) is taken. The range check counter mirrors the casm lowering.
pub fn build_point_from_x<'ctx, 'this>(
    context: &'ctx Context,
    _registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, 'this>,
    metadata: &mut MetadataStorage,
    _info: &SignatureOnlyConcreteLibfunc,
) -> Result<()> {
    // An EC point is a pair of felt252 coordinates.
    let ec_point_ty = llvm::r#type::r#struct(
        context,
        &[
            IntegerType::new(context, 252).into(),
            IntegerType::new(context, 252).into(),
        ],
        false,
    );

    let point_ptr = helper.init_block().alloca1(
        context,
        location,
        ec_point_ty,
        get_integer_layout(252).align(),
    )?;

    // Only `x` (arg 1) is initialized here; the runtime fills in `y` through
    // `point_ptr` on success.
    let point = entry.append_op_result(llvm::undef(ec_point_ty, location))?;
    let point = entry.insert_value(context, location, point, entry.arg(1)?, 0)?;
    entry.store(context, location, point_ptr, point)?;

    let is_on_curve = metadata
        .get_mut::<RuntimeBindingsMeta>()
        .ok_or(Error::MissingMetadata)?
        .libfunc_ec_point_from_x_nz(context, helper, entry, point_ptr, location)?
        .result(0)?
        .into();

    let point = entry.load(context, location, point_ptr, ec_point_ty)?;

    // The sierra-to-casm compiler uses the range check builtin a total of 3 times if the
    // point is on the curve. Otherwise it is not used.
    // https://github.com/starkware-libs/cairo/blob/v2.12.0-dev.1/crates/cairo-lang-sierra-to-casm/src/invocations/ec.rs#L167
    let range_check = increment_builtin_counter_conditionally_by(
        context,
        entry,
        location,
        entry.arg(0)?,
        3,
        0,
        is_on_curve,
    )?;

    helper.cond_br(
        context,
        entry,
        is_on_curve,
        [0, 1],
        [&[range_check, point], &[range_check]],
        location,
    )
}
/// Generate MLIR operations for the `ec_state_add` libfunc.
///
/// Adds a non-zero EC point to an EC state by calling into the runtime, which
/// mutates the state in place, and returns the updated state.
pub fn build_state_add<'ctx, 'this>(
    context: &'ctx Context,
    _registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, 'this>,
    metadata: &mut MetadataStorage,
    _info: &SignatureOnlyConcreteLibfunc,
) -> Result<()> {
    let felt252_ty = IntegerType::new(context, 252).into();
    // An EC state is four felts; an EC point is two (matching the layouts
    // used by `build_state_add_mul`).
    let ec_state_ty = llvm::r#type::r#struct(
        context,
        &[felt252_ty, felt252_ty, felt252_ty, felt252_ty],
        false,
    );
    let ec_point_ty = llvm::r#type::r#struct(context, &[felt252_ty, felt252_ty], false);

    let state_ptr = helper.init_block().alloca1(
        context,
        location,
        ec_state_ty,
        get_integer_layout(252).align(),
    )?;
    // Fix: allocate the point slot with the 2-felt point type. The previous
    // code used the 4-felt state type here, over-allocating stack space.
    let point_ptr = helper.init_block().alloca1(
        context,
        location,
        ec_point_ty,
        get_integer_layout(252).align(),
    )?;

    // Spill the state and point to memory for the runtime call; the state is
    // updated in place through `state_ptr`.
    entry.store(context, location, state_ptr, entry.arg(0)?)?;
    entry.store(context, location, point_ptr, entry.arg(1)?)?;

    metadata
        .get_mut::<RuntimeBindingsMeta>()
        .ok_or(Error::MissingMetadata)?
        .libfunc_ec_state_add(context, helper, entry, state_ptr, point_ptr, location)?;

    let state = entry.load(context, location, state_ptr, ec_state_ty)?;

    helper.br(entry, 0, &[state], location)
}
/// Generate MLIR operations for the `ec_state_add_mul` libfunc.
///
/// Computes `state += scalar * point` through the runtime and returns the
/// updated state along with the incremented ec-op builtin counter.
pub fn build_state_add_mul<'ctx, 'this>(
    context: &'ctx Context,
    _registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, 'this>,
    metadata: &mut MetadataStorage,
    _info: &SignatureOnlyConcreteLibfunc,
) -> Result<()> {
    // The sierra-to-casm compiler uses the ec op builtin 1 time.
    // https://github.com/starkware-libs/cairo/blob/v2.12.0-dev.1/crates/cairo-lang-sierra-to-casm/src/invocations/ec.rs#L439
    let ec_op = super::increment_builtin_counter_by(
        context,
        entry,
        location,
        entry.arg(0)?,
        EC_OP_BUILTIN_SIZE,
    )?;

    let felt252_ty = IntegerType::new(context, 252).into();
    // An EC state is four felts; an EC point is two.
    let ec_state_ty = llvm::r#type::r#struct(
        context,
        &[felt252_ty, felt252_ty, felt252_ty, felt252_ty],
        false,
    );
    let ec_point_ty = llvm::r#type::r#struct(context, &[felt252_ty, felt252_ty], false);

    let state_ptr = helper.init_block().alloca1(
        context,
        location,
        ec_state_ty,
        get_integer_layout(252).align(),
    )?;
    let scalar_ptr = helper.init_block().alloca1(
        context,
        location,
        felt252_ty,
        get_integer_layout(252).align(),
    )?;
    let point_ptr = helper.init_block().alloca1(
        context,
        location,
        ec_point_ty,
        get_integer_layout(252).align(),
    )?;

    // Spill all three operands for the runtime call; the state is updated in
    // place through `state_ptr`.
    entry.store(context, location, state_ptr, entry.arg(1)?)?;
    entry.store(context, location, scalar_ptr, entry.arg(2)?)?;
    entry.store(context, location, point_ptr, entry.arg(3)?)?;

    metadata
        .get_mut::<RuntimeBindingsMeta>()
        .ok_or(Error::MissingMetadata)?
        .libfunc_ec_state_add_mul(
            context, helper, entry, state_ptr, scalar_ptr, point_ptr, location,
        )?;

    let state = entry.load(context, location, state_ptr, ec_state_ty)?;

    helper.br(entry, 0, &[ec_op, state], location)
}
/// Generate MLIR operations for the `ec_state_try_finalize_nz` libfunc.
///
/// Asks the runtime to finalize the accumulated EC state into a point;
/// branch 0 receives the point, branch 1 is taken otherwise.
pub fn build_state_finalize<'ctx, 'this>(
    context: &'ctx Context,
    _registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, 'this>,
    metadata: &mut MetadataStorage,
    _info: &SignatureOnlyConcreteLibfunc,
) -> Result<()> {
    let felt252_ty = IntegerType::new(context, 252).into();
    // An EC state is four felts; the finalized point is two.
    let ec_state_ty = llvm::r#type::r#struct(
        context,
        &[felt252_ty, felt252_ty, felt252_ty, felt252_ty],
        false,
    );
    let ec_point_ty = llvm::r#type::r#struct(context, &[felt252_ty, felt252_ty], false);

    // The runtime writes the finalized point through `point_ptr`.
    let point_ptr = helper.init_block().alloca1(
        context,
        location,
        ec_point_ty,
        get_integer_layout(252).align(),
    )?;
    let state_ptr = helper.init_block().alloca1(
        context,
        location,
        ec_state_ty,
        get_integer_layout(252).align(),
    )?;

    entry.store(context, location, state_ptr, entry.arg(0)?)?;

    // NOTE(review): when this flag is set, branch 0 (the point-carrying
    // branch) is taken, which suggests the runtime returns "finalization
    // produced a point" here; the local name `is_zero` looks inverted
    // relative to that meaning — confirm against the runtime binding
    // `libfunc_ec_state_try_finalize_nz`.
    let is_zero = metadata
        .get_mut::<RuntimeBindingsMeta>()
        .ok_or(Error::MissingMetadata)?
        .libfunc_ec_state_try_finalize_nz(context, helper, entry, point_ptr, state_ptr, location)?
        .result(0)?
        .into();

    let point = entry.load(context, location, point_ptr, ec_point_ty)?;

    helper.cond_br(context, entry, is_zero, [0, 1], [&[point], &[]], location)
}
/// Generate MLIR operations for the `ec_state_init` libfunc.
///
/// Delegates to the runtime to initialize a fresh EC state and returns it by
/// value.
pub fn build_state_init<'ctx, 'this>(
    context: &'ctx Context,
    _registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, 'this>,
    metadata: &mut MetadataStorage,
    _info: &SignatureOnlyConcreteLibfunc,
) -> Result<()> {
    let felt252_ty = IntegerType::new(context, 252).into();
    // An EC state is a struct of four felt252 values.
    let ec_state_ty = llvm::r#type::r#struct(
        context,
        &[felt252_ty, felt252_ty, felt252_ty, felt252_ty],
        false,
    );

    // The runtime writes the initial state through this stack slot.
    let state_slot = helper.init_block().alloca1(
        context,
        location,
        ec_state_ty,
        get_integer_layout(252).align(),
    )?;

    metadata
        .get_mut::<RuntimeBindingsMeta>()
        .ok_or(Error::MissingMetadata)?
        .libfunc_ec_state_init(context, helper, entry, state_slot, location)?;

    let initial_state = entry.load(context, location, state_slot, ec_state_ty)?;
    helper.br(entry, 0, &[initial_state], location)
}
/// Generate MLIR operations for the `ec_point_try_new_nz` libfunc.
///
/// Asks the runtime whether `(x, y)` lies on the curve; on success the point
/// is forwarded on branch 0, otherwise branch 1 is taken.
pub fn build_try_new<'ctx, 'this>(
    context: &'ctx Context,
    _registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, 'this>,
    metadata: &mut MetadataStorage,
    _info: &SignatureOnlyConcreteLibfunc,
) -> Result<()> {
    let felt252_ty = IntegerType::new(context, 252).into();
    // An EC point is a pair of felt252 coordinates.
    let ec_point_ty = llvm::r#type::r#struct(context, &[felt252_ty, felt252_ty], false);

    let point_slot = helper.init_block().alloca1(
        context,
        location,
        ec_point_ty,
        get_integer_layout(252).align(),
    )?;

    // Assemble the candidate point from the (x, y) arguments and spill it for
    // the runtime call.
    let point = entry.append_op_result(llvm::undef(ec_point_ty, location))?;
    let point = entry.insert_value(context, location, point, entry.arg(0)?, 0)?;
    let point = entry.insert_value(context, location, point, entry.arg(1)?, 1)?;
    entry.store(context, location, point_slot, point)?;

    // The runtime reports whether the candidate is a valid curve point.
    let is_valid = metadata
        .get_mut::<RuntimeBindingsMeta>()
        .ok_or(Error::MissingMetadata)?
        .libfunc_ec_point_try_new_nz(context, helper, entry, point_slot, location)?
        .result(0)?
        .into();

    helper.cond_br(context, entry, is_valid, [0, 1], [&[point], &[]], location)
}
/// Generate MLIR operations for the `ec_point_unwrap` libfunc.
///
/// Splits a non-zero EC point into its `x` and `y` coordinates.
pub fn build_unwrap_point<'ctx, 'this>(
    context: &'ctx Context,
    registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, 'this>,
    metadata: &mut MetadataStorage,
    info: &SignatureOnlyConcreteLibfunc,
) -> Result<()> {
    let point = entry.arg(0)?;

    // Coordinate types come from the branch signature so they always match
    // the declared output types.
    let x_ty = registry.build_type(
        context,
        helper,
        metadata,
        &info.branch_signatures()[0].vars[0].ty,
    )?;
    let x = entry.extract_value(context, location, point, x_ty, 0)?;

    let y_ty = registry.build_type(
        context,
        helper,
        metadata,
        &info.branch_signatures()[0].vars[1].ty,
    )?;
    let y = entry.extract_value(context, location, point, y_ty, 1)?;

    helper.br(entry, 0, &[x, y], location)
}
/// Generate MLIR operations for the `ec_point_zero` libfunc.
///
/// Produces the zero point, represented as the pair `(0, 0)`.
pub fn build_zero<'ctx, 'this>(
    context: &'ctx Context,
    registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, 'this>,
    metadata: &mut MetadataStorage,
    info: &SignatureOnlyConcreteLibfunc,
) -> Result<()> {
    let point_ty = registry.build_type(
        context,
        helper,
        metadata,
        &info.branch_signatures()[0].vars[0].ty,
    )?;

    // Start from an undef struct and write zero into both coordinates.
    let zero = entry.const_int(context, location, 0, 252)?;
    let mut point = entry.append_op_result(llvm::undef(point_ty, location))?;
    for coordinate in 0..2 {
        point = entry.insert_value(context, location, point, zero, coordinate)?;
    }

    helper.br(entry, 0, &[point], location)
}
#[cfg(test)]
mod test {
    use crate::{
        jit_enum, jit_struct, load_cairo,
        utils::testing::{run_program, run_program_assert_output},
        values::Value,
    };
    use cairo_lang_sierra::program::Program;
    use lazy_static::lazy_static;
    use starknet_types_core::felt::Felt;
    use std::ops::Neg;

    // One compiled Cairo program per libfunc under test, compiled once and
    // shared by all tests below.
    lazy_static! {
        static ref EC_POINT_IS_ZERO: (String, Program) = load_cairo! {
            use core::{ec::{ec_point_is_zero, EcPoint}, zeroable::IsZeroResult};

            fn run_test(point: EcPoint) -> IsZeroResult<EcPoint> {
                ec_point_is_zero(point)
            }
        };
        static ref EC_NEG: (String, Program) = load_cairo! {
            use core::ec::{ec_neg, EcPoint};

            fn run_test(point: EcPoint) -> EcPoint {
                ec_neg(point)
            }
        };
        static ref EC_POINT_FROM_X_NZ: (String, Program) = load_cairo! {
            use core::ec::{ec_point_from_x_nz, EcPoint};
            use core::zeroable::NonZero;

            fn run_test(x: felt252) -> Option<NonZero<EcPoint>> {
                ec_point_from_x_nz(x)
            }
        };
        static ref EC_STATE_ADD: (String, Program) = load_cairo! {
            use core::ec::{ec_state_add, EcPoint, EcState};
            use core::zeroable::NonZero;

            fn run_test(mut state: EcState, point: NonZero<EcPoint>) -> EcState {
                ec_state_add(ref state, point);
                state
            }
        };
        static ref EC_STATE_ADD_MUL: (String, Program) = load_cairo! {
            use core::ec::{ec_state_add_mul, EcPoint, EcState};
            use core::zeroable::NonZero;

            fn run_test(mut state: EcState, scalar: felt252, point: NonZero<EcPoint>) -> EcState {
                ec_state_add_mul(ref state, scalar, point);
                state
            }
        };
        static ref EC_STATE_FINALIZE: (String, Program) = load_cairo! {
            use core::ec::{ec_state_try_finalize_nz, EcPoint, EcState};
            use core::zeroable::NonZero;

            fn run_test(state: EcState) -> Option<NonZero<EcPoint>> {
                ec_state_try_finalize_nz(state)
            }
        };
        static ref EC_STATE_INIT: (String, Program) = load_cairo! {
            use core::ec::{ec_state_init, EcState};

            fn run_test() -> EcState {
                ec_state_init()
            }
        };
        static ref EC_POINT_TRY_NEW_NZ: (String, Program) = load_cairo! {
            use core::ec::{ec_point_try_new_nz, EcPoint};
            use core::zeroable::NonZero;

            fn run_test(x: felt252, y: felt252) -> Option<NonZero<EcPoint>> {
                ec_point_try_new_nz(x, y)
            }
        };
        static ref EC_POINT_UNWRAP: (String, Program) = load_cairo! {
            use core::{ec::{ec_point_unwrap, EcPoint}, zeroable::NonZero};

            fn run_test(point: NonZero<EcPoint>) -> (felt252, felt252) {
                ec_point_unwrap(point)
            }
        };
        static ref EC_POINT_ZERO: (String, Program) = load_cairo! {
            use core::ec::{ec_point_zero, EcPoint};

            fn run_test() -> EcPoint {
                ec_point_zero()
            }
        };
    }

    // Zero-ness is decided purely from the `y` coordinate.
    #[test]
    fn ec_point_is_zero() {
        let r =
            |x, y| run_program(&EC_POINT_IS_ZERO, "run_test", &[Value::EcPoint(x, y)]).return_value;

        assert_eq!(r(0.into(), 0.into()), jit_enum!(0, jit_struct!()));
        assert_eq!(
            r(0.into(), 1.into()),
            jit_enum!(1, Value::EcPoint(0.into(), 1.into()))
        );
        assert_eq!(r(1.into(), 0.into()), jit_enum!(0, jit_struct!()));
        assert_eq!(
            r(1.into(), 1.into()),
            jit_enum!(1, Value::EcPoint(1.into(), 1.into()))
        );
    }

    // Negation flips `y` modulo the prime; `y = 0` stays `0`.
    #[test]
    fn ec_neg() {
        let r = |x, y| run_program(&EC_NEG, "run_test", &[Value::EcPoint(x, y)]).return_value;

        assert_eq!(r(0.into(), 0.into()), Value::EcPoint(0.into(), 0.into()));
        assert_eq!(
            r(0.into(), 1.into()),
            Value::EcPoint(0.into(), Felt::from(-1))
        );
        assert_eq!(r(1.into(), 0.into()), Value::EcPoint(1.into(), 0.into()));
        assert_eq!(
            r(1.into(), 1.into()),
            Value::EcPoint(1.into(), Felt::from(-1))
        );
    }

    #[test]
    fn ec_point_from_x() {
        let r = |x| run_program(&EC_POINT_FROM_X_NZ, "run_test", &[Value::Felt252(x)]).return_value;

        assert_eq!(r(0.into()), jit_enum!(1, jit_struct!()));
        assert_eq!(r(1234.into()), jit_enum!(0, Value::EcPoint(
            Felt::from(1234),
            Felt::from_dec_str("1301976514684871091717790968549291947487646995000837413367950573852273027507").unwrap()
        )));
    }

    #[test]
    fn ec_state_add() {
        run_program_assert_output(&EC_STATE_ADD, "run_test", &[
            Value::EcState(
                Felt::from_dec_str("3151312365169595090315724863753927489909436624354740709748557281394568342450").unwrap(),
                Felt::from_dec_str("2835232394579952276045648147338966184268723952674536708929458753792035266179").unwrap(),
                Felt::from_dec_str("3151312365169595090315724863753927489909436624354740709748557281394568342450").unwrap(),
                Felt::from_dec_str("2835232394579952276045648147338966184268723952674536708929458753792035266179").unwrap()
            ),
            Value::EcPoint(
                Felt::from_dec_str("1234").unwrap(),
                Felt::from_dec_str("1301976514684871091717790968549291947487646995000837413367950573852273027507").unwrap()
            )
        ],
        Value::EcState(
            Felt::from_dec_str("763975897824944497806946001227010133599886598340174017198031710397718335159").unwrap(),
            Felt::from_dec_str("2805180267536471620369715068237762638204710971142209985448115065526708105983").unwrap(),
            Felt::from_dec_str("3151312365169595090315724863753927489909436624354740709748557281394568342450").unwrap(),
            Felt::from_dec_str("2835232394579952276045648147338966184268723952674536708929458753792035266179").unwrap()
        ));
    }

    // Scalar 1 must match plain `ec_state_add`; scalar 2 exercises the
    // multiply path.
    #[test]
    fn ec_state_add_mul() {
        run_program_assert_output(&EC_STATE_ADD_MUL, "run_test", &[
            Value::EcState(
                Felt::from_dec_str("3151312365169595090315724863753927489909436624354740709748557281394568342450").unwrap(),
                Felt::from_dec_str("2835232394579952276045648147338966184268723952674536708929458753792035266179").unwrap(),
                Felt::from_dec_str("3151312365169595090315724863753927489909436624354740709748557281394568342450").unwrap(),
                Felt::from_dec_str("2835232394579952276045648147338966184268723952674536708929458753792035266179").unwrap()
            ),
            Felt::ONE.into(), // scalar
            Value::EcPoint(
                Felt::from_dec_str("1234").unwrap(),
                Felt::from_dec_str("1301976514684871091717790968549291947487646995000837413367950573852273027507").unwrap()
            )
        ],
        Value::EcState(
            Felt::from_dec_str("763975897824944497806946001227010133599886598340174017198031710397718335159").unwrap(),
            Felt::from_dec_str("2805180267536471620369715068237762638204710971142209985448115065526708105983").unwrap(),
            Felt::from_dec_str("3151312365169595090315724863753927489909436624354740709748557281394568342450").unwrap(),
            Felt::from_dec_str("2835232394579952276045648147338966184268723952674536708929458753792035266179").unwrap()
        )
        );

        run_program_assert_output(&EC_STATE_ADD_MUL, "run_test", &[
            Value::EcState(
                Felt::from_dec_str("3151312365169595090315724863753927489909436624354740709748557281394568342450").unwrap(),
                Felt::from_dec_str("2835232394579952276045648147338966184268723952674536708929458753792035266179").unwrap(),
                Felt::from_dec_str("3151312365169595090315724863753927489909436624354740709748557281394568342450").unwrap(),
                Felt::from_dec_str("2835232394579952276045648147338966184268723952674536708929458753792035266179").unwrap()
            ),
            Felt::from(2).into(), // scalar
            Value::EcPoint(
                Felt::from_dec_str("1234").unwrap(),
                Felt::from_dec_str("1301976514684871091717790968549291947487646995000837413367950573852273027507").unwrap()
            )
        ],
        Value::EcState(
            Felt::from_dec_str("3016674370847061744386893405108272070153695046160622325692702034987910716850").unwrap(),
            Felt::from_dec_str("898133181809473419542838028331350248951548889944002871647069130998202992502").unwrap(),
            Felt::from_dec_str("3151312365169595090315724863753927489909436624354740709748557281394568342450").unwrap(),
            Felt::from_dec_str("2835232394579952276045648147338966184268723952674536708929458753792035266179").unwrap()
        )
        );
    }

    // Finalizing the initial (unchanged) state yields None; a state with one
    // accumulated point yields that point.
    #[test]
    fn ec_state_finalize() {
        run_program_assert_output(
            &EC_STATE_FINALIZE,
            "run_test",
            &[Value::EcState(
                Felt::from_dec_str(
                    "3151312365169595090315724863753927489909436624354740709748557281394568342450",
                )
                .unwrap(),
                Felt::from_dec_str(
                    "2835232394579952276045648147338966184268723952674536708929458753792035266179",
                )
                .unwrap(),
                Felt::from_dec_str(
                    "3151312365169595090315724863753927489909436624354740709748557281394568342450",
                )
                .unwrap(),
                Felt::from_dec_str(
                    "2835232394579952276045648147338966184268723952674536708929458753792035266179",
                )
                .unwrap(),
            )],
            jit_enum!(1, jit_struct!()),
        );
        run_program_assert_output(&EC_STATE_FINALIZE, "run_test", &[
            Value::EcState(
                Felt::from_dec_str("763975897824944497806946001227010133599886598340174017198031710397718335159").unwrap(),
                Felt::from_dec_str("2805180267536471620369715068237762638204710971142209985448115065526708105983").unwrap(),
                Felt::from_dec_str("3151312365169595090315724863753927489909436624354740709748557281394568342450").unwrap(),
                Felt::from_dec_str("2835232394579952276045648147338966184268723952674536708929458753792035266179").unwrap()
            ),
        ],
        jit_enum!(0, Value::EcPoint(
            Felt::from(1234),
            Felt::from_dec_str("1301976514684871091717790968549291947487646995000837413367950573852273027507").unwrap()
        )
        )
        );
    }

    #[test]
    fn ec_state_init() {
        let result = run_program(&EC_STATE_INIT, "run_test", &[]);

        // cant match the values because the state init is a random point
        assert!(matches!(result.return_value, Value::EcState(_, _, _, _)));
    }

    // (0, 0) is rejected; a valid (x, y) pair and its negation are accepted.
    #[test]
    fn ec_point_try_new_nz() {
        run_program_assert_output(
            &EC_POINT_TRY_NEW_NZ,
            "run_test",
            &[
                Felt::from_dec_str("0").unwrap().into(),
                Felt::from_dec_str("0").unwrap().into(),
            ],
            jit_enum!(1, jit_struct!()),
        );

        run_program_assert_output(
            &EC_POINT_TRY_NEW_NZ,
            "run_test",
            &[
                Felt::from_dec_str("1234").unwrap().into(),
                Felt::from_dec_str("1301976514684871091717790968549291947487646995000837413367950573852273027507").unwrap().into()
            ],
            jit_enum!(0, Value::EcPoint(
                Felt::from_dec_str("1234").unwrap(),
                Felt::from_dec_str("1301976514684871091717790968549291947487646995000837413367950573852273027507").unwrap()
            ))
            ,
        );

        run_program_assert_output(
            &EC_POINT_TRY_NEW_NZ,
            "run_test",
            &[ Felt::from_dec_str("1234").unwrap().into(),
                Felt::from_dec_str("1301976514684871091717790968549291947487646995000837413367950573852273027507").unwrap().neg().into()
            ],
            jit_enum!(0, Value::EcPoint(
                Felt::from_dec_str("1234").unwrap(),
                Felt::from_dec_str("1301976514684871091717790968549291947487646995000837413367950573852273027507").unwrap().neg()
            ))
            ,
        );
    }

    // Unwrapping returns the coordinates untouched for all sign combinations.
    #[test]
    fn ec_point_unwrap() {
        fn parse(x: &str) -> Felt {
            if let Some(x) = x.strip_prefix('-') {
                Felt::from_dec_str(x).unwrap().neg()
            } else {
                Felt::from_dec_str(x).unwrap()
            }
        }

        #[track_caller]
        fn run(a: &str, b: &str, ea: &str, eb: &str) {
            run_program_assert_output(
                &EC_POINT_UNWRAP,
                "run_test",
                &[Value::EcPoint(parse(a), parse(b))],
                jit_struct!(parse(ea).into(), parse(eb).into()),
            );
        }

        run("0", "0", "0", "0");
        run("0", "1", "0", "1");
        run("0", "-1", "0", "-1");
        run("1", "0", "1", "0");
        run("1", "1", "1", "1");
        run("1", "-1", "1", "-1");
        run("-1", "0", "-1", "0");
        run("-1", "1", "-1", "1");
        run("-1", "-1", "-1", "-1");
    }

    #[test]
    fn ec_point_zero() {
        run_program_assert_output(
            &EC_POINT_ZERO,
            "run_test",
            &[],
            Value::EcPoint(
                Felt::from_dec_str("0").unwrap(),
                Felt::from_dec_str("0").unwrap().neg(),
            ),
        );
    }
}
| rust | Apache-2.0 | f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce | 2026-01-04T20:20:54.031924Z | false |
lambdaclass/cairo_native | https://github.com/lambdaclass/cairo_native/blob/f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce/src/libfuncs/bytes31.rs | src/libfuncs/bytes31.rs | //! # Bytes31-related libfuncs
use super::LibfuncHelper;
use crate::{
error::{Error, Result},
metadata::MetadataStorage,
utils::ProgramRegistryExt,
};
use cairo_lang_sierra::{
extensions::{
bytes31::Bytes31ConcreteLibfunc,
consts::SignatureAndConstConcreteLibfunc,
core::{CoreLibfunc, CoreType},
lib_func::SignatureOnlyConcreteLibfunc,
ConcreteLibfunc,
},
program_registry::ProgramRegistry,
};
use melior::{
dialect::{
arith::{self, CmpiPredicate},
cf,
},
helpers::{ArithBlockExt, BuiltinBlockExt},
ir::{Attribute, Block, BlockLike, Location, Value},
Context,
};
use num_bigint::BigUint;
/// Select and call the correct libfunc builder function from the selector.
///
/// Dispatches the three `bytes31` libfuncs (`const`, `to_felt252`,
/// `try_from_felt252`) to their dedicated builders.
pub fn build<'ctx, 'this>(
    context: &'ctx Context,
    registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, 'this>,
    metadata: &mut MetadataStorage,
    selector: &Bytes31ConcreteLibfunc,
) -> Result<()> {
    match selector {
        Bytes31ConcreteLibfunc::Const(info) => {
            build_const(context, registry, entry, location, helper, metadata, info)
        }
        Bytes31ConcreteLibfunc::ToFelt252(info) => {
            build_to_felt252(context, registry, entry, location, helper, metadata, info)
        }
        Bytes31ConcreteLibfunc::TryFromFelt252(info) => {
            build_from_felt252(context, registry, entry, location, helper, metadata, info)
        }
    }
}
/// Generate MLIR operations for the `bytes31_const` libfunc.
pub fn build_const<'ctx, 'this>(
context: &'ctx Context,
registry: &ProgramRegistry<CoreType, CoreLibfunc>,
entry: &'this Block<'ctx>,
location: Location<'ctx>,
helper: &LibfuncHelper<'ctx, 'this>,
metadata: &mut MetadataStorage,
info: &SignatureAndConstConcreteLibfunc,
) -> Result<()> {
let value = &info.c;
let value_ty = registry.build_type(
context,
helper,
metadata,
&info.signature.branch_signatures[0].vars[0].ty,
)?;
let op0 = entry.append_operation(arith::constant(
context,
Attribute::parse(context, &format!("{value} : {value_ty}"))
.ok_or(Error::ParseAttributeError)?,
location,
));
helper.br(entry, 0, &[op0.result(0)?.into()], location)
}
/// Generate MLIR operations for the `bytes31_to_felt252` libfunc.
pub fn build_to_felt252<'ctx, 'this>(
context: &'ctx Context,
registry: &ProgramRegistry<CoreType, CoreLibfunc>,
entry: &'this Block<'ctx>,
location: Location<'ctx>,
helper: &LibfuncHelper<'ctx, 'this>,
metadata: &mut MetadataStorage,
info: &SignatureOnlyConcreteLibfunc,
) -> Result<()> {
let felt252_ty = registry.build_type(
context,
helper,
metadata,
&info.branch_signatures()[0].vars[0].ty,
)?;
let value: Value = entry.arg(0)?;
let result = entry.extui(value, felt252_ty, location)?;
helper.br(entry, 0, &[result], location)
}
/// Generate MLIR operations for the `u8_from_felt252` libfunc.
pub fn build_from_felt252<'ctx, 'this>(
context: &'ctx Context,
registry: &ProgramRegistry<CoreType, CoreLibfunc>,
entry: &'this Block<'ctx>,
location: Location<'ctx>,
helper: &LibfuncHelper<'ctx, 'this>,
metadata: &mut MetadataStorage,
info: &SignatureOnlyConcreteLibfunc,
) -> Result<()> {
// The sierra-to-casm compiler uses the range check builtin a total of 3 times.
// https://github.com/starkware-libs/cairo/blob/v2.12.0-dev.1/crates/cairo-lang-sierra-to-casm/src/invocations/misc.rs?plain=1#L266
let range_check: Value =
super::increment_builtin_counter_by(context, entry, location, entry.arg(0)?, 3)?;
let value: Value = entry.arg(1)?;
let felt252_ty =
registry.build_type(context, helper, metadata, &info.param_signatures()[1].ty)?;
let result_ty = registry.build_type(
context,
helper,
metadata,
&info.branch_signatures()[0].vars[1].ty,
)?;
let max_value = BigUint::from(2u32).pow(248) - 1u32;
let const_max = entry.append_op_result(arith::constant(
context,
Attribute::parse(context, &format!("{} : {}", max_value, felt252_ty))
.ok_or(Error::ParseAttributeError)?,
location,
))?;
let is_ule = entry.cmpi(context, CmpiPredicate::Ule, value, const_max, location)?;
let block_success = helper.append_block(Block::new(&[]));
let block_failure = helper.append_block(Block::new(&[]));
entry.append_operation(cf::cond_br(
context,
is_ule,
block_success,
block_failure,
&[],
&[],
location,
));
let value = block_success.trunci(value, result_ty, location)?;
helper.br(block_success, 0, &[range_check, value], location)?;
helper.br(block_failure, 1, &[range_check], location)?;
Ok(())
}
#[cfg(test)]
mod test {
use crate::{
jit_enum, jit_panic, jit_struct, load_cairo, utils::testing::run_program_assert_output,
};
use cairo_lang_sierra::program::Program;
use lazy_static::lazy_static;
use starknet_types_core::felt::Felt;
lazy_static! {
// TODO: Test `bytes31_const` once the compiler supports it. See: https://github.com/lambdaclass/cairo_native/issues/1224
static ref BYTES31_ROUNDTRIP: (String, Program) = load_cairo! {
use core::bytes_31::{bytes31_try_from_felt252, bytes31_to_felt252};
fn run_test(value: felt252) -> felt252 {
let a: bytes31 = bytes31_try_from_felt252(value).unwrap();
bytes31_to_felt252(a)
}
};
}
#[test]
fn bytes31_roundtrip() {
run_program_assert_output(
&BYTES31_ROUNDTRIP,
"run_test",
&[Felt::from(2).into()],
jit_enum!(0, jit_struct!(Felt::from(2).into())),
);
run_program_assert_output(
&BYTES31_ROUNDTRIP,
"run_test",
&[Felt::MAX.into()],
jit_panic!(Felt::from_bytes_be_slice(b"Option::unwrap failed.")),
);
}
}
| rust | Apache-2.0 | f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce | 2026-01-04T20:20:54.031924Z | false |
lambdaclass/cairo_native | https://github.com/lambdaclass/cairo_native/blob/f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce/src/libfuncs/function_call.rs | src/libfuncs/function_call.rs | //! # Function call libfuncs
//!
//! Includes logic for handling direct tail recursive function calls. More information on this topic
//! at the [tail recursive metadata](crate::metadata::tail_recursion).
use super::LibfuncHelper;
use crate::{
error::{Error, Result},
metadata::{tail_recursion::TailRecursionMeta, MetadataStorage},
native_assert,
types::TypeBuilder,
utils::generate_function_name,
};
use cairo_lang_sierra::{
extensions::{
core::{CoreLibfunc, CoreType},
function_call::SignatureAndFunctionConcreteLibfunc,
},
program_registry::ProgramRegistry,
};
use melior::{
dialect::{cf, index, llvm, memref},
helpers::{ArithBlockExt, BuiltinBlockExt, LlvmBlockExt},
ir::{
attribute::{DenseI32ArrayAttribute, FlatSymbolRefAttribute},
operation::OperationBuilder,
r#type::IntegerType,
Attribute, Block, BlockLike, Identifier, Location, Type, Value,
},
Context,
};
use std::alloc::Layout;
/// Generate MLIR operations for the `function_call` libfunc.
pub fn build<'ctx, 'this>(
context: &'ctx Context,
registry: &ProgramRegistry<CoreType, CoreLibfunc>,
entry: &'this Block<'ctx>,
location: Location<'ctx>,
helper: &LibfuncHelper<'ctx, 'this>,
metadata: &mut MetadataStorage,
info: &SignatureAndFunctionConcreteLibfunc,
) -> Result<()> {
let mut tailrec_meta = metadata.remove::<TailRecursionMeta>();
let mut arguments = Vec::new();
for (idx, type_id) in info.function.signature.param_types.iter().enumerate() {
let type_info = registry.get_type(type_id)?;
if !(type_info.is_builtin() && type_info.is_zst(registry)?) {
arguments.push(
if tailrec_meta.is_none() && type_info.is_memory_allocated(registry)? {
let elem_ty = type_info.build(context, helper, registry, metadata, type_id)?;
let stack_ptr = helper.init_block().alloca1(
context,
location,
elem_ty,
type_info.layout(registry)?.align(),
)?;
entry.store(context, location, stack_ptr, entry.argument(idx)?.into())?;
stack_ptr
} else {
entry.argument(idx)?.into()
},
);
}
}
if let Some(tailrec_meta) = &mut tailrec_meta {
let depth_counter =
entry.append_op_result(memref::load(tailrec_meta.depth_counter(), &[], location))?;
let k1 = entry.const_int_from_type(context, location, 1, Type::index(context))?;
let new_depth_counter = entry.append_op_result(index::add(depth_counter, k1, location))?;
entry.append_operation(memref::store(
new_depth_counter,
tailrec_meta.depth_counter(),
&[],
location,
));
entry.append_operation(cf::br(
&tailrec_meta.recursion_target(),
&arguments,
location,
));
let cont_block = helper.append_block(Block::new(
&info
.function
.signature
.ret_types
.iter()
.map(|ty| {
Ok((
registry
.get_type(ty)?
.build(context, helper, registry, metadata, ty)?,
location,
))
})
.collect::<Result<Vec<_>>>()?,
));
tailrec_meta.set_return_target(cont_block);
let mut results = Vec::<Value>::new();
let mut count = 0;
for var_info in &info.signature.branch_signatures[0].vars {
let type_info = registry.get_type(&var_info.ty)?;
if type_info.is_builtin() && type_info.is_zst(registry)? {
results.push(cont_block.append_op_result(llvm::undef(
type_info.build(context, helper, registry, metadata, &var_info.ty)?,
location,
))?);
} else {
let val = cont_block.argument(count)?.into();
count += 1;
results.push(val);
}
}
helper.br(cont_block, 0, &results, location)?;
} else {
let mut result_types = Vec::new();
let return_types = info
.function
.signature
.ret_types
.iter()
.filter_map(|type_id| {
let type_info = match registry.get_type(type_id) {
Ok(x) => x,
Err(e) => return Some(Err(e.into())),
};
let is_zst = match type_info.is_zst(registry) {
Ok(x) => x,
Err(e) => return Some(Err(e)),
};
(!(type_info.is_builtin() && is_zst)).then_some(Ok((type_id, type_info)))
})
.collect::<Result<Vec<_>>>()?;
// A function has a return pointer if either:
// - There are multiple return values.
// - The return value is memory allocated.
let has_return_ptr = if return_types.len() > 1 {
result_types.extend(
return_types
.iter()
.map(|(type_id, type_info)| {
type_info.build(context, helper, registry, metadata, type_id)
})
.collect::<std::result::Result<Vec<_>, _>>()?,
);
Some(false)
} else if return_types
.first()
.map(|(_, type_info)| type_info.is_memory_allocated(registry))
.transpose()?
== Some(true)
{
let (type_id, type_info) = return_types[0];
let layout = type_info.layout(registry)?;
let stack_ptr = helper.init_block().alloca1(
context,
location,
type_info.build(context, helper, registry, metadata, type_id)?,
layout.align(),
)?;
arguments.insert(0, stack_ptr);
Some(true)
} else if !return_types.is_empty() {
let (type_id, type_info) = return_types[0];
result_types.push(type_info.build(context, helper, registry, metadata, type_id)?);
None
} else {
None
};
let function_call_result = entry.append_op_result(
OperationBuilder::new("llvm.call", location)
.add_attributes(&[
(
Identifier::new(context, "callee"),
FlatSymbolRefAttribute::new(
context,
&format!("impl${}", generate_function_name(&info.function.id, false)),
)
.into(),
),
(
Identifier::new(context, "CConv"),
Attribute::parse(context, "#llvm.cconv<fastcc>")
.ok_or(Error::ParseAttributeError)?,
),
])
.add_operands(&arguments)
.add_results(&[llvm::r#type::r#struct(context, &result_types, false)])
.build()?,
)?;
let mut results = Vec::new();
match has_return_ptr {
Some(true) => {
// Manual return type.
let mut layout = Layout::new::<()>();
for (idx, type_id) in info.function.signature.ret_types.iter().enumerate() {
let type_info = registry.get_type(type_id)?;
if type_info.is_builtin() && type_info.is_zst(registry)? {
results.push(entry.argument(idx)?.into());
} else {
let val = arguments[0];
let offset;
let ret_layout = type_info.layout(registry)?;
(layout, offset) = layout.extend(ret_layout)?;
let pointer_val = entry.append_op_result(llvm::get_element_ptr(
context,
val,
DenseI32ArrayAttribute::new(context, &[offset as i32]),
IntegerType::new(context, 8).into(),
llvm::r#type::pointer(context, 0),
location,
))?;
results.push(entry.load(
context,
location,
pointer_val,
type_info.build(context, helper, registry, metadata, type_id)?,
)?);
}
}
}
Some(false) => {
// Complex return type. Just extract the values from the struct, since LLVM will
// handle the rest.
for (idx, type_id) in info.function.signature.ret_types.iter().enumerate() {
let type_info = registry.get_type(type_id)?;
if type_info.is_builtin() && type_info.is_zst(registry)? {
results.push(entry.argument(idx)?.into());
} else {
let val = entry.extract_value(
context,
location,
function_call_result,
result_types[idx],
idx,
)?;
results.push(val);
}
}
}
None => {
// Returned data is simple.
let mut count = 0;
for (idx, type_id) in info.function.signature.ret_types.iter().enumerate() {
let type_info = registry.get_type(type_id)?;
native_assert!(
!type_info.is_memory_allocated(registry)?,
"if there is no return pointer, return data must not be memory allocated"
);
if type_info.is_builtin() && type_info.is_zst(registry)? {
results.push(entry.argument(idx)?.into());
} else {
let value = entry.extract_value(
context,
location,
function_call_result,
result_types[count],
count,
)?;
count += 1;
results.push(value);
}
}
}
}
helper.br(entry, 0, &results, location)?;
}
if let Some(tailrec_meta) = tailrec_meta {
metadata.insert(tailrec_meta);
}
Ok(())
}
| rust | Apache-2.0 | f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce | 2026-01-04T20:20:54.031924Z | false |
lambdaclass/cairo_native | https://github.com/lambdaclass/cairo_native/blob/f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce/src/libfuncs/int.rs | src/libfuncs/int.rs | use super::LibfuncHelper;
use crate::{
error::{panic::ToNativeAssertError, Result},
execution_result::BITWISE_BUILTIN_SIZE,
libfuncs::{increment_builtin_counter, increment_builtin_counter_by},
metadata::MetadataStorage,
native_panic,
types::TypeBuilder,
utils::{ProgramRegistryExt, PRIME},
};
use cairo_lang_sierra::{
extensions::{
bounded_int::BoundedIntDivRemAlgorithm,
core::{CoreLibfunc, CoreType, CoreTypeConcrete},
int::{
signed::{SintConcrete, SintTraits},
signed128::Sint128Concrete,
unsigned::{UintConcrete, UintTraits},
unsigned128::Uint128Concrete,
IntConstConcreteLibfunc, IntMulTraits, IntOperationConcreteLibfunc, IntOperator,
IntTraits,
},
is_zero::IsZeroTraits,
lib_func::SignatureOnlyConcreteLibfunc,
ConcreteLibfunc,
},
program_registry::ProgramRegistry,
};
use melior::{
dialect::{
arith::{self, CmpiPredicate},
cf, llvm,
ods::{self, math},
scf,
},
helpers::{ArithBlockExt, BuiltinBlockExt, LlvmBlockExt},
ir::{
attribute::IntegerAttribute, operation::OperationBuilder, r#type::IntegerType, Block,
BlockLike, Location, Region, ValueLike,
},
Context,
};
use num_bigint::{BigInt, Sign};
use num_traits::Zero;
pub fn build_unsigned<'ctx, 'this, T>(
context: &'ctx Context,
registry: &ProgramRegistry<CoreType, CoreLibfunc>,
entry: &'this Block<'ctx>,
location: Location<'ctx>,
helper: &LibfuncHelper<'ctx, 'this>,
metadata: &mut MetadataStorage,
selector: &UintConcrete<T>,
) -> Result<()>
where
T: IntMulTraits + IsZeroTraits + UintTraits,
{
match selector {
UintConcrete::Bitwise(info) => {
build_bitwise(context, registry, entry, location, helper, metadata, info)
}
UintConcrete::Const(info) => {
build_const(context, registry, entry, location, helper, metadata, info)
}
UintConcrete::Divmod(info) => {
build_divmod(context, registry, entry, location, helper, metadata, info)
}
UintConcrete::Equal(info) => {
build_equal(context, registry, entry, location, helper, metadata, info)
}
UintConcrete::FromFelt252(info) => {
build_from_felt252(context, registry, entry, location, helper, metadata, info)
}
UintConcrete::IsZero(info) => {
build_is_zero(context, registry, entry, location, helper, metadata, info)
}
UintConcrete::Operation(info) => {
build_operation(context, registry, entry, location, helper, metadata, info)
}
UintConcrete::SquareRoot(info) => {
build_square_root(context, registry, entry, location, helper, metadata, info)
}
UintConcrete::ToFelt252(info) => {
build_to_felt252(context, registry, entry, location, helper, metadata, info)
}
UintConcrete::WideMul(info) => {
build_wide_mul(context, registry, entry, location, helper, metadata, info)
}
}
}
pub fn build_signed<'ctx, 'this, T>(
context: &'ctx Context,
registry: &ProgramRegistry<CoreType, CoreLibfunc>,
entry: &'this Block<'ctx>,
location: Location<'ctx>,
helper: &LibfuncHelper<'ctx, 'this>,
metadata: &mut MetadataStorage,
selector: &SintConcrete<T>,
) -> Result<()>
where
T: IntMulTraits + SintTraits,
{
match selector {
SintConcrete::Const(info) => {
build_const(context, registry, entry, location, helper, metadata, info)
}
SintConcrete::Diff(info) => {
build_diff(context, registry, entry, location, helper, metadata, info)
}
SintConcrete::Equal(info) => {
build_equal(context, registry, entry, location, helper, metadata, info)
}
SintConcrete::FromFelt252(info) => {
build_from_felt252(context, registry, entry, location, helper, metadata, info)
}
SintConcrete::Operation(info) => {
build_operation(context, registry, entry, location, helper, metadata, info)
}
SintConcrete::ToFelt252(info) => {
build_to_felt252(context, registry, entry, location, helper, metadata, info)
}
SintConcrete::WideMul(info) => {
build_wide_mul(context, registry, entry, location, helper, metadata, info)
}
}
}
pub fn build_u128<'ctx, 'this>(
context: &'ctx Context,
registry: &ProgramRegistry<CoreType, CoreLibfunc>,
entry: &'this Block<'ctx>,
location: Location<'ctx>,
helper: &LibfuncHelper<'ctx, 'this>,
metadata: &mut MetadataStorage,
selector: &Uint128Concrete,
) -> Result<()> {
match selector {
Uint128Concrete::Bitwise(info) => {
build_bitwise(context, registry, entry, location, helper, metadata, info)
}
Uint128Concrete::ByteReverse(info) => {
build_byte_reverse(context, registry, entry, location, helper, metadata, info)
}
Uint128Concrete::Const(info) => {
build_const(context, registry, entry, location, helper, metadata, info)
}
Uint128Concrete::Divmod(info) => {
build_divmod(context, registry, entry, location, helper, metadata, info)
}
Uint128Concrete::Equal(info) => {
build_equal(context, registry, entry, location, helper, metadata, info)
}
Uint128Concrete::FromFelt252(info) => {
build_u128s_from_felt252(context, registry, entry, location, helper, metadata, info)
}
Uint128Concrete::GuaranteeMul(info) => {
build_guarantee_mul(context, registry, entry, location, helper, metadata, info)
}
Uint128Concrete::IsZero(info) => {
build_is_zero(context, registry, entry, location, helper, metadata, info)
}
Uint128Concrete::MulGuaranteeVerify(info) => {
build_mul_guarantee_verify(context, registry, entry, location, helper, metadata, info)
}
Uint128Concrete::Operation(info) => {
build_operation(context, registry, entry, location, helper, metadata, info)
}
Uint128Concrete::SquareRoot(info) => {
build_square_root(context, registry, entry, location, helper, metadata, info)
}
Uint128Concrete::ToFelt252(info) => {
build_to_felt252(context, registry, entry, location, helper, metadata, info)
}
}
}
pub fn build_i128<'ctx, 'this>(
context: &'ctx Context,
registry: &ProgramRegistry<CoreType, CoreLibfunc>,
entry: &'this Block<'ctx>,
location: Location<'ctx>,
helper: &LibfuncHelper<'ctx, 'this>,
metadata: &mut MetadataStorage,
selector: &Sint128Concrete,
) -> Result<()> {
match selector {
Sint128Concrete::Const(info) => {
build_const(context, registry, entry, location, helper, metadata, info)
}
Sint128Concrete::Diff(info) => {
build_diff(context, registry, entry, location, helper, metadata, info)
}
Sint128Concrete::Equal(info) => {
build_equal(context, registry, entry, location, helper, metadata, info)
}
Sint128Concrete::FromFelt252(info) => {
build_from_felt252(context, registry, entry, location, helper, metadata, info)
}
Sint128Concrete::Operation(info) => {
build_operation(context, registry, entry, location, helper, metadata, info)
}
Sint128Concrete::ToFelt252(info) => {
build_to_felt252(context, registry, entry, location, helper, metadata, info)
}
}
}
fn build_bitwise<'ctx, 'this>(
context: &'ctx Context,
_registry: &ProgramRegistry<CoreType, CoreLibfunc>,
entry: &'this Block<'ctx>,
location: Location<'ctx>,
helper: &LibfuncHelper<'ctx, 'this>,
_metadata: &mut MetadataStorage,
_info: &SignatureOnlyConcreteLibfunc,
) -> Result<()> {
let bitwise = super::increment_builtin_counter_by(
context,
entry,
location,
entry.arg(0)?,
BITWISE_BUILTIN_SIZE,
)?;
let lhs = entry.arg(1)?;
let rhs = entry.arg(2)?;
let logical_and = entry.append_op_result(arith::andi(lhs, rhs, location))?;
let logical_xor = entry.append_op_result(arith::xori(lhs, rhs, location))?;
let logical_or = entry.append_op_result(arith::ori(lhs, rhs, location))?;
helper.br(
entry,
0,
&[bitwise, logical_and, logical_xor, logical_or],
location,
)
}
fn build_byte_reverse<'ctx, 'this>(
context: &'ctx Context,
_registry: &ProgramRegistry<CoreType, CoreLibfunc>,
entry: &'this Block<'ctx>,
location: Location<'ctx>,
helper: &LibfuncHelper<'ctx, 'this>,
_metadata: &mut MetadataStorage,
_info: &SignatureOnlyConcreteLibfunc,
) -> Result<()> {
let bitwise = super::increment_builtin_counter_by(
context,
entry,
location,
entry.arg(0)?,
4 * BITWISE_BUILTIN_SIZE,
)?;
let value =
entry.append_op_result(ods::llvm::intr_bswap(context, entry.arg(1)?, location).into())?;
helper.br(entry, 0, &[bitwise, value], location)
}
fn build_const<'ctx, 'this, T>(
context: &'ctx Context,
registry: &ProgramRegistry<CoreType, CoreLibfunc>,
entry: &'this Block<'ctx>,
location: Location<'ctx>,
helper: &LibfuncHelper<'ctx, 'this>,
metadata: &mut MetadataStorage,
info: &IntConstConcreteLibfunc<T>,
) -> Result<()>
where
T: IntTraits,
{
let value_ty = registry.build_type(
context,
helper,
metadata,
&info.signature.branch_signatures[0].vars[0].ty,
)?;
let constant: BigInt = info.c.into();
let value = entry.const_int_from_type(context, location, constant, value_ty)?;
helper.br(entry, 0, &[value], location)
}
fn build_diff<'ctx, 'this>(
context: &'ctx Context,
_registry: &ProgramRegistry<CoreType, CoreLibfunc>,
entry: &'this Block<'ctx>,
location: Location<'ctx>,
helper: &LibfuncHelper<'ctx, 'this>,
_metadata: &mut MetadataStorage,
_info: &SignatureOnlyConcreteLibfunc,
) -> Result<()> {
let range_check = super::increment_builtin_counter(context, entry, location, entry.arg(0)?)?;
let lhs = entry.arg(1)?;
let rhs = entry.arg(2)?;
let is_greater_equal = entry.cmpi(context, CmpiPredicate::Sge, lhs, rhs, location)?;
let value_difference = entry.append_op_result(arith::subi(lhs, rhs, location))?;
helper.cond_br(
context,
entry,
is_greater_equal,
[0, 1],
[&[range_check, value_difference]; 2],
location,
)
}
fn build_divmod<'ctx, 'this>(
context: &'ctx Context,
registry: &ProgramRegistry<CoreType, CoreLibfunc>,
entry: &'this Block<'ctx>,
location: Location<'ctx>,
helper: &LibfuncHelper<'ctx, 'this>,
_metadata: &mut MetadataStorage,
info: &SignatureOnlyConcreteLibfunc,
) -> Result<()> {
let lhs = entry.arg(1)?;
let rhs = entry.arg(2)?;
// Extract the ranges for the calculation of the range_check builtin increment.
let lhs_ty = registry.get_type(&info.param_signatures()[1].ty)?;
let rhs_ty = registry.get_type(&info.param_signatures()[2].ty)?;
let lhs_range = lhs_ty.integer_range(registry)?;
let rhs_range = rhs_ty.integer_range(registry)?;
let div_rem_algorithm = BoundedIntDivRemAlgorithm::try_new(&lhs_range, &rhs_range)
.to_native_assert_error(&format!(
"div_rem of ranges: lhs = {:#?} and rhs= {:#?} is not supported yet",
&lhs_range, &rhs_range
))?;
// The sierra-to-casm compiler uses the range check builtin 3 times if the algorithm
// is KnownSmallRhs. Otherwise it is used 4 times.
// https://github.com/starkware-libs/cairo/blob/v2.12.0-dev.1/crates/cairo-lang-sierra-to-casm/src/invocations/int/unsigned.rs#L151C1-L155C11
let range_check = match div_rem_algorithm {
BoundedIntDivRemAlgorithm::KnownSmallRhs => {
super::increment_builtin_counter_by(context, entry, location, entry.arg(0)?, 3)?
}
BoundedIntDivRemAlgorithm::KnownSmallQuotient { .. }
| BoundedIntDivRemAlgorithm::KnownSmallLhs { .. } => {
super::increment_builtin_counter_by(context, entry, location, entry.arg(0)?, 4)?
}
};
let result_div = entry.append_op_result(arith::divui(lhs, rhs, location))?;
let result_rem = entry.append_op_result(arith::remui(lhs, rhs, location))?;
helper.br(entry, 0, &[range_check, result_div, result_rem], location)
}
fn build_equal<'ctx, 'this>(
context: &'ctx Context,
_registry: &ProgramRegistry<CoreType, CoreLibfunc>,
entry: &'this Block<'ctx>,
location: Location<'ctx>,
helper: &LibfuncHelper<'ctx, 'this>,
_metadata: &mut MetadataStorage,
_info: &SignatureOnlyConcreteLibfunc,
) -> Result<()> {
let are_equal = entry.cmpi(
context,
CmpiPredicate::Eq,
entry.arg(0)?,
entry.arg(1)?,
location,
)?;
helper.cond_br(context, entry, are_equal, [1, 0], [&[]; 2], location)
}
fn build_from_felt252<'ctx, 'this>(
context: &'ctx Context,
registry: &ProgramRegistry<CoreType, CoreLibfunc>,
entry: &'this Block<'ctx>,
location: Location<'ctx>,
helper: &LibfuncHelper<'ctx, 'this>,
metadata: &mut MetadataStorage,
info: &SignatureOnlyConcreteLibfunc,
) -> Result<()> {
let value_ty = registry.get_type(&info.signature.branch_signatures[0].vars[1].ty)?;
let threshold = value_ty.integer_range(registry)?;
let threshold_size = threshold.size();
let value_ty = value_ty.build(
context,
helper,
registry,
metadata,
&info.signature.branch_signatures[0].vars[1].ty,
)?;
let input = entry.arg(1)?;
// Handle signedness separately.
let (is_in_range, value) = if threshold.lower.is_zero() {
let upper_threshold =
entry.const_int_from_type(context, location, threshold.upper, input.r#type())?;
let is_in_range = entry.cmpi(
context,
CmpiPredicate::Ult,
input,
upper_threshold,
location,
)?;
(is_in_range, input)
} else {
let lower_threshold = entry.const_int_from_type(
context,
location,
if threshold.lower.sign() == Sign::Minus {
&*PRIME - threshold.lower.magnitude()
} else {
threshold.lower.magnitude().clone()
},
input.r#type(),
)?;
let upper_threshold = entry.const_int_from_type(
context,
location,
if threshold.upper.sign() == Sign::Minus {
&*PRIME - threshold.upper.magnitude()
} else {
threshold.upper.magnitude().clone()
},
input.r#type(),
)?;
let lower_check = entry.cmpi(
context,
CmpiPredicate::Sge,
input,
lower_threshold,
location,
)?;
let upper_check = entry.cmpi(
context,
CmpiPredicate::Slt,
input,
upper_threshold,
location,
)?;
let is_in_range =
entry.append_op_result(arith::andi(lower_check, upper_check, location))?;
let k0 = entry.const_int_from_type(context, location, 0, input.r#type())?;
let is_negative = entry.cmpi(context, CmpiPredicate::Slt, input, k0, location)?;
let value = entry.append_op_result(scf::r#if(
is_negative,
&[input.r#type()],
{
let region = Region::new();
let block = region.append_block(Block::new(&[]));
let prime = block.const_int_from_type(
context,
location,
BigInt::from_biguint(Sign::Plus, PRIME.clone()),
input.r#type(),
)?;
let value = block.append_op_result(arith::subi(input, prime, location))?;
block.append_operation(scf::r#yield(&[value], location));
region
},
{
let region = Region::new();
let block = region.append_block(Block::new(&[]));
block.append_operation(scf::r#yield(&[input], location));
region
},
location,
))?;
(is_in_range, value)
};
// The sierra-to-casm compiler uses the range check builtin a total of:
// - 3 times if the value is not within the range.
// - 2 times if the value is within the range and the size of
// the range is less than the size of the range check.
// - 1 time if the value is within the range and the size of
// the range is greater than or equal to the size of the range check.
// With the range check size being 2**128
// https://github.com/starkware-libs/cairo/blob/v2.12.0-dev.1/crates/cairo-lang-sierra-to-casm/src/invocations/range_reduction.rs#L26
let rc_size = BigInt::from(1) << 128;
let range_check = super::increment_builtin_counter_conditionally_by(
context,
entry,
location,
entry.arg(0)?,
if threshold_size < rc_size { 2 } else { 1 },
3,
is_in_range,
)?;
let value = entry.trunci(value, value_ty, location)?;
helper.cond_br(
context,
entry,
is_in_range,
[0, 1],
[&[range_check, value], &[range_check]],
location,
)
}
fn build_guarantee_mul<'ctx, 'this>(
context: &'ctx Context,
registry: &ProgramRegistry<CoreType, CoreLibfunc>,
entry: &'this Block<'ctx>,
location: Location<'ctx>,
helper: &LibfuncHelper<'ctx, 'this>,
metadata: &mut MetadataStorage,
info: &SignatureOnlyConcreteLibfunc,
) -> Result<()> {
let guarantee_ty = registry.build_type(
context,
helper,
metadata,
&info.signature.branch_signatures[0].vars[2].ty,
)?;
let mul_op = entry.append_operation(arith::mului_extended(
entry.arg(0)?,
entry.arg(1)?,
location,
));
let lo = mul_op.result(0)?.into();
let hi = mul_op.result(1)?.into();
let guarantee = entry.append_op_result(llvm::undef(guarantee_ty, location))?;
helper.br(entry, 0, &[hi, lo, guarantee], location)
}
fn build_is_zero<'ctx, 'this>(
context: &'ctx Context,
_registry: &ProgramRegistry<CoreType, CoreLibfunc>,
entry: &'this Block<'ctx>,
location: Location<'ctx>,
helper: &LibfuncHelper<'ctx, 'this>,
_metadata: &mut MetadataStorage,
_info: &SignatureOnlyConcreteLibfunc,
) -> Result<()> {
let input = entry.arg(0)?;
let k0 = entry.const_int_from_type(context, location, 0, input.r#type())?;
let is_zero = entry.cmpi(context, CmpiPredicate::Eq, input, k0, location)?;
helper.cond_br(context, entry, is_zero, [0, 1], [&[], &[input]], location)
}
fn build_mul_guarantee_verify<'ctx, 'this>(
context: &'ctx Context,
_registry: &ProgramRegistry<CoreType, CoreLibfunc>,
entry: &'this Block<'ctx>,
location: Location<'ctx>,
helper: &LibfuncHelper<'ctx, 'this>,
_metadata: &mut MetadataStorage,
_info: &SignatureOnlyConcreteLibfunc,
) -> Result<()> {
// The sierra-to-casm compiler uses the range check builtin a total of 9 times.
// https://github.com/starkware-libs/cairo/blob/v2.12.0-dev.1/crates/cairo-lang-sierra-to-casm/src/invocations/int/unsigned128.rs?plain=1#L112
let range_check =
super::increment_builtin_counter_by(context, entry, location, entry.arg(0)?, 9)?;
helper.br(entry, 0, &[range_check], location)
}
fn build_operation<'ctx, 'this>(
context: &'ctx Context,
registry: &ProgramRegistry<CoreType, CoreLibfunc>,
entry: &'this Block<'ctx>,
location: Location<'ctx>,
helper: &LibfuncHelper<'ctx, 'this>,
metadata: &mut MetadataStorage,
info: &IntOperationConcreteLibfunc,
) -> Result<()> {
// Regardless of the operation range, the range check builtin pointer is always increased at least once.
// * for signed ints: https://github.com/starkware-libs/cairo/blob/v2.12.0-dev.1/crates/cairo-lang-sierra-to-casm/src/invocations/int/signed.rs#L68
// * for signed128: behaves the same as the signed ints case.
// * for unsinged ints:
// * for overflowing add: https://github.com/starkware-libs/cairo/blob/v2.12.0-dev.1/crates/cairo-lang-sierra-to-casm/src/invocations/int/unsigned.rs#L19
// * for overflowing sub: https://github.com/starkware-libs/cairo/blob/v2.12.0-dev.1/crates/cairo-lang-sierra-to-casm/src/invocations/int/mod.rs#L67
// * for unsigned128:
// * for overflowing add: https://github.com/starkware-libs/cairo/blob/v2.12.0-dev.1/crates/cairo-lang-sierra-to-casm/src/invocations/int/unsigned128.rs#L45
// * for overflowing sub: https://github.com/starkware-libs/cairo/blob/v2.12.0-dev.1/crates/cairo-lang-sierra-to-casm/src/invocations/int/mod.rs#L104
let range_check = increment_builtin_counter(context, entry, location, entry.arg(0)?)?;
let value_ty = registry.get_type(&info.signature.param_signatures[1].ty)?;
let value_range = value_ty.integer_range(registry)?;
let is_signed = !value_range.lower.is_zero();
let value_ty = value_ty.build(
context,
helper,
registry,
metadata,
&info.signature.param_signatures[1].ty,
)?;
let op_name = match (is_signed, info.operator) {
(false, IntOperator::OverflowingAdd) => "llvm.intr.uadd.with.overflow",
(false, IntOperator::OverflowingSub) => "llvm.intr.usub.with.overflow",
(true, IntOperator::OverflowingAdd) => "llvm.intr.sadd.with.overflow",
(true, IntOperator::OverflowingSub) => "llvm.intr.ssub.with.overflow",
};
let result_with_overflow = entry.append_op_result(
OperationBuilder::new(op_name, location)
.add_operands(&[entry.arg(1)?, entry.arg(2)?])
.add_results(&[llvm::r#type::r#struct(
context,
&[value_ty, IntegerType::new(context, 1).into()],
false,
)])
.build()?,
)?;
let result = entry.extract_value(context, location, result_with_overflow, value_ty, 0)?;
let overflow = entry.extract_value(
context,
location,
result_with_overflow,
IntegerType::new(context, 1).into(),
1,
)?;
if is_signed {
let block_in_range = helper.append_block(Block::new(&[]));
let block_overflow = helper.append_block(Block::new(&[]));
entry.append_operation(cf::cond_br(
context,
overflow,
block_overflow,
block_in_range,
&[],
&[],
location,
));
{
let is_not_i128 = !(value_range.lower == i128::MIN.into()
&& value_range.upper - 1 == i128::MAX.into());
// if we are not handling an i128 and the in_range condition is met, increase the range check builtin by 1:
// https://github.com/starkware-libs/cairo/blob/v2.12.0-dev.1/crates/cairo-lang-sierra-to-casm/src/invocations/int/signed.rs#L105
let range_check = if is_not_i128 {
increment_builtin_counter_by(context, block_in_range, location, range_check, 1)?
} else {
range_check
};
helper.br(block_in_range, 0, &[range_check, result], location)?;
}
{
let k0 = block_overflow.const_int_from_type(context, location, 0, result.r#type())?;
let is_positive =
block_overflow.cmpi(context, CmpiPredicate::Sge, result, k0, location)?;
helper.cond_br(
context,
block_overflow,
is_positive,
[1, 2],
[&[range_check, result]; 2],
location,
)
}
} else {
helper.cond_br(
context,
entry,
overflow,
[1, 0],
[&[range_check, result]; 2],
location,
)
}
}
/// Generate MLIR operations for the unsigned integer square root libfuncs
/// (`u8_sqrt` through `u128_sqrt`).
///
/// Computes `floor(sqrt(input))` with a digit-by-digit binary square root
/// (two bits of the input are consumed per loop iteration), then truncates
/// the result to the narrower output type.
fn build_square_root<'ctx, 'this>(
    context: &'ctx Context,
    registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, 'this>,
    _metadata: &mut MetadataStorage,
    info: &SignatureOnlyConcreteLibfunc,
) -> Result<()> {
    // The sierra-to-casm compiler uses the range_check builtin 4 times.
    // https://github.com/starkware-libs/cairo/blob/v2.12.0-dev.1/crates/cairo-lang-sierra-to-casm/src/invocations/int/unsigned.rs#L73
    let range_check =
        super::increment_builtin_counter_by(context, entry, location, entry.arg(0)?, 4)?;

    let input = entry.arg(1)?;

    // Bit width of the input type and of the result type. The result is
    // half the input width, except for u8 whose sqrt is also returned as u8.
    let (input_bits, value_bits) =
        match registry.get_type(&info.signature.param_signatures[1].ty)? {
            CoreTypeConcrete::Uint8(_) => (8, 8),
            CoreTypeConcrete::Uint16(_) => (16, 8),
            CoreTypeConcrete::Uint32(_) => (32, 16),
            CoreTypeConcrete::Uint64(_) => (64, 32),
            CoreTypeConcrete::Uint128(_) => (128, 64),
            _ => native_panic!("invalid value type in int square root"),
        };

    let k1 = entry.const_int(context, location, 1, input_bits)?;

    // Fast path: for inputs 0 and 1 the square root is the input itself.
    let is_small = entry.cmpi(context, CmpiPredicate::Ule, input, k1, location)?;
    let value = entry.append_op_result(scf::r#if(
        is_small,
        &[IntegerType::new(context, value_bits).into()],
        {
            // Then-branch: just truncate the input to the result width.
            let region = Region::new();
            let block = region.append_block(Block::new(&[]));

            let value = block.trunci(
                input,
                IntegerType::new(context, value_bits).into(),
                location,
            )?;
            block.append_operation(scf::r#yield(&[value], location));

            region
        },
        {
            // Else-branch: iterative binary square root (input > 1 here).
            let region = Region::new();
            let block = region.append_block(Block::new(&[]));

            // Number of significant bits: `input_bits - ctlz(input)`.
            // NOTE(review): the trailing `1` attribute presumably sets
            // `is_zero_poison`; that is fine here since input > 1 on this
            // branch — confirm against the llvm.ctlz intrinsic definition.
            let leading_zeros = block.append_op_result(
                ods::llvm::intr_ctlz(
                    context,
                    IntegerType::new(context, input_bits).into(),
                    input,
                    IntegerAttribute::new(IntegerType::new(context, 1).into(), 1),
                    location,
                )
                .into(),
            )?;

            let k_bits = block.const_int(context, location, input_bits, input_bits)?;
            let num_bits = block.append_op_result(arith::subi(k_bits, leading_zeros, location))?;

            // Initial shift amount: `num_bits + 1` rounded down to an even
            // number (`& -2`), so the loop can consume two bits at a time.
            let shift_amount = block.addi(num_bits, k1, location)?;

            let parity_mask = block.const_int(context, location, -2, input_bits)?;
            let shift_amount =
                block.append_op_result(arith::andi(shift_amount, parity_mask, location))?;

            let k0 = block.const_int(context, location, 0, input_bits)?;
            // Loop state: (partial result, remaining shift amount).
            let value = block.append_op_result(scf::r#while(
                &[k0, shift_amount],
                &[
                    IntegerType::new(context, input_bits).into(),
                    IntegerType::new(context, input_bits).into(),
                ],
                {
                    let region = Region::new();
                    let block = region.append_block(Block::new(&[
                        (IntegerType::new(context, input_bits).into(), location),
                        (IntegerType::new(context, input_bits).into(), location),
                    ]));

                    // Shift the partial result left and form the candidate
                    // with its lowest bit set (`value ^ 1`).
                    let value = block.shli(block.arg(0)?, k1, location)?;
                    let large_candidate =
                        block.append_op_result(arith::xori(value, k1, location))?;

                    let large_candidate_squared =
                        block.muli(large_candidate, large_candidate, location)?;

                    // Compare against the input's current high bits.
                    let threshold = block.shrui(input, block.arg(1)?, location)?;
                    // Shifting by the full bit width would be poison, so
                    // substitute 0 for that case via a select.
                    let threshold_is_poison =
                        block.cmpi(context, CmpiPredicate::Eq, block.arg(1)?, k_bits, location)?;
                    let threshold = block.append_op_result(arith::select(
                        threshold_is_poison,
                        k0,
                        threshold,
                        location,
                    ))?;

                    // Keep the larger candidate when its square still fits
                    // under the threshold (branchless select).
                    let is_in_range = block.cmpi(
                        context,
                        CmpiPredicate::Ule,
                        large_candidate_squared,
                        threshold,
                        location,
                    )?;
                    let value = block.append_op_result(arith::select(
                        is_in_range,
                        large_candidate,
                        value,
                        location,
                    ))?;

                    // Two bits of the input are consumed per iteration.
                    let k2 = block.const_int(context, location, 2, input_bits)?;
                    let shift_amount =
                        block.append_op_result(arith::subi(block.arg(1)?, k2, location))?;

                    // Continue while the (signed) shift amount is >= 0.
                    let should_continue =
                        block.cmpi(context, CmpiPredicate::Sge, shift_amount, k0, location)?;
                    block.append_operation(scf::condition(
                        should_continue,
                        &[value, shift_amount],
                        location,
                    ));

                    region
                },
                {
                    // The "after" region just forwards the loop state.
                    let region = Region::new();
                    let block = region.append_block(Block::new(&[
                        (IntegerType::new(context, input_bits).into(), location),
                        (IntegerType::new(context, input_bits).into(), location),
                    ]));
                    block.append_operation(scf::r#yield(&[block.arg(0)?, block.arg(1)?], location));
                    region
                },
                location,
            ))?;

            // Truncate to the result width (no-op for u8, whose widths match).
            let value = if input_bits == value_bits {
                value
            } else {
                block.trunci(
                    value,
                    IntegerType::new(context, value_bits).into(),
                    location,
                )?
            };

            block.append_operation(scf::r#yield(&[value], location));

            region
        },
        location,
    ))?;

    helper.br(entry, 0, &[range_check, value], location)
}
/// Generate MLIR operations for the integer-to-felt252 conversion libfuncs.
///
/// Unsigned values are zero-extended to the felt252 width. For signed types,
/// negative values are mapped to their field representation `PRIME - |value|`.
fn build_to_felt252<'ctx, 'this>(
    context: &'ctx Context,
    registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, 'this>,
    metadata: &mut MetadataStorage,
    info: &SignatureOnlyConcreteLibfunc,
) -> Result<()> {
    let value_ty = registry.get_type(&info.signature.param_signatures[0].ty)?;
    // The type is treated as signed iff its value range extends below zero.
    let is_signed = !value_ty.integer_range(registry)?.lower.is_zero();

    let felt252_ty = registry.build_type(
        context,
        helper,
        metadata,
        &info.signature.branch_signatures[0].vars[0].ty,
    )?;

    let value = if is_signed {
        let prime = entry.const_int_from_type(
            context,
            location,
            BigInt::from_biguint(Sign::Plus, PRIME.clone()),
            felt252_ty,
        )?;
        let k0 = entry.const_int_from_type(
            context,
            location,
            0,
            value_ty.build(
                context,
                helper,
                registry,
                metadata,
                &info.signature.param_signatures[0].ty,
            )?,
        )?;
        let is_negative = entry.cmpi(context, CmpiPredicate::Slt, entry.arg(0)?, k0, location)?;

        // Candidate for the non-negative case: plain zero-extension.
        let value = entry.extui(entry.arg(0)?, felt252_ty, location)?;

        // Candidate for the negative case: `PRIME - |value|`.
        // NOTE(review): `math.absi` of the type's minimum value overflows its
        // width; verify that such inputs cannot occur, or that the wrapped
        // bit pattern still zero-extends to the correct magnitude.
        let neg_value =
            entry.append_op_result(math::absi(context, entry.arg(0)?, location).into())?;
        let neg_value = entry.extui(neg_value, felt252_ty, location)?;
        let neg_value = entry.append_op_result(arith::subi(prime, neg_value, location))?;

        entry.append_op_result(arith::select(is_negative, neg_value, value, location))?
    } else {
        // Unsigned: zero-extend directly.
        entry.extui(entry.arg(0)?, felt252_ty, location)?
    };

    helper.br(entry, 0, &[value], location)
}
fn build_u128s_from_felt252<'ctx, 'this>(
context: &'ctx Context,
_registry: &ProgramRegistry<CoreType, CoreLibfunc>,
entry: &'this Block<'ctx>,
location: Location<'ctx>,
| rust | Apache-2.0 | f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce | 2026-01-04T20:20:54.031924Z | true |
lambdaclass/cairo_native | https://github.com/lambdaclass/cairo_native/blob/f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce/src/libfuncs/felt252_dict.rs | src/libfuncs/felt252_dict.rs | //! # `Felt` dictionary libfuncs
use super::LibfuncHelper;
use crate::{
error::{panic::ToNativeAssertError, Result},
execution_result::SEGMENT_ARENA_BUILTIN_SIZE,
metadata::{
felt252_dict::Felt252DictOverrides, runtime_bindings::RuntimeBindingsMeta, MetadataStorage,
},
native_panic,
types::TypeBuilder,
};
use cairo_lang_sierra::{
extensions::{
core::{CoreLibfunc, CoreType, CoreTypeConcrete},
felt252_dict::Felt252DictConcreteLibfunc,
lib_func::SignatureOnlyConcreteLibfunc,
},
program_registry::ProgramRegistry,
};
use melior::{
dialect::{llvm, ods},
helpers::{BuiltinBlockExt, LlvmBlockExt},
ir::{r#type::IntegerType, Block, Location},
Context,
};
/// Select and call the correct libfunc builder function from the selector.
pub fn build<'ctx, 'this>(
    context: &'ctx Context,
    registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, 'this>,
    metadata: &mut MetadataStorage,
    selector: &Felt252DictConcreteLibfunc,
) -> Result<()> {
    // Local alias to keep the dispatch arms compact.
    type Libfunc = Felt252DictConcreteLibfunc;

    match selector {
        Libfunc::New(info) => build_new(context, registry, entry, location, helper, metadata, info),
        Libfunc::Squash(info) => {
            build_squash(context, registry, entry, location, helper, metadata, info)
        }
    }
}
/// Generate MLIR operations for the `felt252_dict_new` libfunc.
///
/// Allocates a fresh runtime dictionary, wiring in a drop function for the
/// value type when the overrides provide one.
pub fn build_new<'ctx, 'this>(
    context: &'ctx Context,
    registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, 'this>,
    metadata: &mut MetadataStorage,
    info: &SignatureOnlyConcreteLibfunc,
) -> Result<()> {
    // We increase the segment arena builtin by 1 usage.
    // https://github.com/starkware-libs/cairo/blob/v2.12.0-dev.1/crates/cairo-lang-sierra-to-casm/src/invocations/felt252_dict.rs?plain=1#L45-L49
    let segment_arena = super::increment_builtin_counter_by(
        context,
        entry,
        location,
        entry.arg(0)?,
        SEGMENT_ARENA_BUILTIN_SIZE,
    )?;

    // The concrete type of the values stored in the dictionary, taken from
    // the `Felt252Dict<T>` returned by this branch.
    let value_type_id = match registry.get_type(&info.signature.branch_signatures[0].vars[1].ty)? {
        CoreTypeConcrete::Felt252Dict(info) => &info.ty,
        _ => native_panic!("entered unreachable code"),
    };

    let drop_fn = {
        // Take the overrides out of the metadata so `metadata` can be passed
        // mutably to `build_drop_fn` below; they are re-inserted afterwards.
        let mut dict_overrides = metadata
            .remove::<Felt252DictOverrides>()
            .unwrap_or_default();

        // If the value type needs a drop implementation, take the address of
        // its generated drop function so the runtime can invoke it.
        let drop_fn = match dict_overrides.build_drop_fn(
            context,
            helper,
            registry,
            metadata,
            value_type_id,
        )? {
            Some(drop_fn_symbol) => Some(
                entry.append_op_result(
                    ods::llvm::mlir_addressof(
                        context,
                        llvm::r#type::pointer(context, 0),
                        drop_fn_symbol,
                        location,
                    )
                    .into(),
                )?,
            ),
            None => None,
        };
        // Restore the (possibly updated) overrides.
        metadata.insert(dict_overrides);

        drop_fn
    };

    let runtime_bindings = metadata
        .get_mut::<RuntimeBindingsMeta>()
        .to_native_assert_error("runtime library should be available")?;
    let dict_ptr = runtime_bindings.dict_new(
        context,
        helper,
        entry,
        location,
        drop_fn,
        registry.get_type(value_type_id)?.layout(registry)?,
    )?;

    helper.br(entry, 0, &[segment_arena, dict_ptr], location)
}
/// Generate MLIR operations for the `felt252_dict_squash` libfunc.
///
/// The range check and gas builtin counters are spilled to stack slots,
/// passed by pointer to the runtime squash call, and reloaded afterwards —
/// presumably so the runtime can update both in place (verify against the
/// runtime's `dict_squash` binding).
pub fn build_squash<'ctx, 'this>(
    context: &'ctx Context,
    _registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, 'this>,
    metadata: &mut MetadataStorage,
    _info: &SignatureOnlyConcreteLibfunc,
) -> Result<()> {
    let range_check = entry.arg(0)?;
    let gas = entry.arg(1)?;
    let segment_arena = entry.arg(2)?;
    let dict_ptr = entry.arg(3)?;

    // Increase the segment arena builtin by 1 usage.
    // https://github.com/starkware-libs/cairo/blob/v2.12.0-dev.1/crates/cairo-lang-sierra-to-casm/src/invocations/felt252_dict.rs?plain=1#L148-L151
    let segment_arena = super::increment_builtin_counter_by(
        context,
        entry,
        location,
        segment_arena,
        SEGMENT_ARENA_BUILTIN_SIZE,
    )?;

    let runtime_bindings = metadata
        .get_mut::<RuntimeBindingsMeta>()
        .to_native_assert_error("runtime library should be available")?;

    // Spill the two counters into i64 stack slots for the runtime call.
    let range_check_ptr =
        entry.alloca1(context, location, IntegerType::new(context, 64).into(), 0)?;
    entry.store(context, location, range_check_ptr, range_check)?;
    let gas_ptr = entry.alloca1(context, location, IntegerType::new(context, 64).into(), 0)?;
    entry.store(context, location, gas_ptr, gas)?;

    runtime_bindings.dict_squash(
        context,
        helper,
        entry,
        dict_ptr,
        range_check_ptr,
        gas_ptr,
        location,
    )?;

    // Reload the counters after the call.
    let range_check = entry.load(
        context,
        location,
        range_check_ptr,
        IntegerType::new(context, 64).into(),
    )?;
    let gas_builtin = entry.load(
        context,
        location,
        gas_ptr,
        IntegerType::new(context, 64).into(),
    )?;

    helper.br(
        entry,
        0,
        &[range_check, gas_builtin, segment_arena, entry.arg(3)?],
        location,
    )
}
#[cfg(test)]
mod test {
    use crate::{
        jit_dict, jit_enum, jit_struct, load_cairo,
        utils::testing::{run_program, run_program_assert_output},
        values::Value,
    };

    /// Creating an empty dictionary succeeds and returns unit.
    #[test]
    fn run_dict_new() {
        let program = load_cairo!(
            use traits::Default;
            use dict::Felt252DictTrait;

            fn run_test() {
                let mut _dict: Felt252Dict<u32> = Default::default();
            }
        );

        run_program_assert_output(&program, "run_test", &[], jit_struct!());
    }

    /// Inserting a key then reading it back returns the stored value.
    #[test]
    fn run_dict_insert() {
        let program = load_cairo!(
            use traits::Default;
            use dict::Felt252DictTrait;

            fn run_test() -> u32 {
                let mut dict: Felt252Dict<u32> = Default::default();
                dict.insert(2, 1_u32);
                dict.get(2)
            }
        );

        run_program_assert_output(&program, "run_test", &[], 1u32.into());
    }

    /// A populated dictionary can be returned to the caller.
    #[test]
    fn run_dict_insert_ret_dict() {
        let program = load_cairo!(
            use traits::Default;
            use dict::Felt252DictTrait;

            fn run_test() -> Felt252Dict<u32> {
                let mut dict: Felt252Dict<u32> = Default::default();
                dict.insert(1, 2_u32);
                dict.insert(2, 3_u32);
                dict.insert(3, 4_u32);
                dict.insert(4, 5_u32);
                dict.insert(5, 6_u32);
                dict
            }
        );

        run_program_assert_output(
            &program,
            "run_test",
            &[],
            jit_dict!(
                1 => 2u32,
                2 => 3u32,
                3 => 4u32,
                4 => 5u32,
                5 => 6u32,
            ),
        );
    }

    /// A dictionary passed as an argument survives a round trip unchanged.
    #[test]
    fn run_dict_deserialize() {
        let program = load_cairo!(
            use traits::Default;
            use dict::Felt252DictTrait;

            fn run_test(mut dict: Felt252Dict<u32>) -> Felt252Dict<u32> {
                dict
            }
        );

        run_program_assert_output(
            &program,
            "run_test",
            &[jit_dict!(
                1 => 2u32,
                2 => 3u32,
                3 => 4u32,
                4 => 5u32,
                5 => 6u32,
            )],
            jit_dict!(
                1 => 2u32,
                2 => 3u32,
                3 => 4u32,
                4 => 5u32,
                5 => 6u32,
            ),
        );
    }

    /// Same round trip, but the dictionary is returned inside a tuple.
    #[test]
    fn run_dict_deserialize2() {
        let program = load_cairo!(
            use traits::Default;
            use dict::Felt252DictTrait;

            fn run_test(mut dict: Felt252Dict<u32>) -> (felt252, Felt252Dict<u32>) {
                (0, dict)
            }
        );

        run_program_assert_output(
            &program,
            "run_test",
            &[jit_dict!(
                1 => 2u32,
                2 => 3u32,
                3 => 4u32,
                4 => 5u32,
                5 => 6u32,
            )],
            jit_struct!(
                Value::Felt252(0.into()),
                jit_dict!(
                    1 => 2u32,
                    2 => 3u32,
                    3 => 4u32,
                    4 => 5u32,
                    5 => 6u32,
                )
            ),
        );
    }

    /// Dictionary values may be nullable tuples.
    #[test]
    fn run_dict_deserialize_struct() {
        let program = load_cairo! {
            use core::{dict::Felt252DictTrait, nullable::Nullable};

            fn run_test() -> Felt252Dict<Nullable<(u32, u64, u128)>> {
                let mut x: Felt252Dict<Nullable<(u32, u64, u128)>> = Default::default();
                x.insert(0, NullableTrait::new((1_u32, 2_u64, 3_u128)));
                x.insert(1, NullableTrait::new((2_u32, 3_u64, 4_u128)));
                x.insert(2, NullableTrait::new((3_u32, 4_u64, 5_u128)));
                x
            }
        };

        run_program_assert_output(
            &program,
            "run_test",
            &[],
            jit_dict!(
                0 => jit_struct!(1u32.into(), 2u64.into(), 3u128.into()),
                1 => jit_struct!(2u32.into(), 3u64.into(), 4u128.into()),
                2 => jit_struct!(3u32.into(), 4u64.into(), 5u128.into()),
            ),
        );
    }

    /// Dictionary values may be nullable user-defined enums.
    #[test]
    fn run_dict_deserialize_enum() {
        let program = load_cairo! {
            use core::{dict::Felt252DictTrait, nullable::Nullable};

            #[derive(Drop)]
            enum MyEnum {
                A: u32,
                B: u64,
                C: u128,
            }

            fn run_test() -> Felt252Dict<Nullable<MyEnum>> {
                let mut x: Felt252Dict<Nullable<MyEnum>> = Default::default();
                x.insert(0, NullableTrait::new(MyEnum::A(1)));
                x.insert(1, NullableTrait::new(MyEnum::B(2)));
                x.insert(2, NullableTrait::new(MyEnum::C(3)));
                x
            }
        };

        run_program_assert_output(
            &program,
            "run_test",
            &[],
            jit_dict!(
                0 => jit_enum!(0, 1u32.into()),
                1 => jit_enum!(1, 2u64.into()),
                2 => jit_enum!(2, 3u128.into()),
            ),
        );
    }

    /// Verifies that squashing accounts for the range check builtin exactly
    /// like the sierra-to-casm compiler does. The per-insert expected costs
    /// are annotated inline and must add up to the final assertion (100).
    #[test]
    fn run_dict_squash() {
        let program = load_cairo! {
            use core::dict::{Felt252Dict, Felt252DictEntryTrait, SquashedFelt252DictImpl};

            pub fn main() {
                // The squash libfunc has a fixed range check cost of 2.

                // If no big keys, 3 per unique key access.
                let mut dict: Felt252Dict<felt252> = Default::default();
                dict.insert(1, 1); // 3
                dict.insert(2, 2); // 3
                dict.insert(3, 3); // 3
                dict.insert(4, 4); // 3
                dict.insert(5, 4); // 3
                dict.insert(6, 4); // 3
                let _ = dict.squash(); // 2
                // SUBTOTAL: 20

                // A dictionary has big keys if there is at least one key greater than
                // the range check bound (2**128 - 1).
                // If has big keys, 2 for first unique key access,
                // and 6 each of the remaining unique key accesses.
                let mut dict: Felt252Dict<felt252> = Default::default();
                dict.insert(1, 1); // 2
                dict.insert(0xF00000000000000000000000000000002, 1); // 6
                dict.insert(3, 1); // 6
                dict.insert(0xF00000000000000000000000000000004, 1); // 6
                dict.insert(5, 1); // 6
                dict.insert(0xF00000000000000000000000000000006, 1); // 6
                dict.insert(7, 1); // 6
                let _ = dict.squash(); // 2
                // SUBTOTAL: 40

                // If no big keys, 3 per unique key access.
                // Each repeated key adds an extra range check usage.
                let mut dict: Felt252Dict<felt252> = Default::default();
                dict.insert(1, 1); // 3
                dict.insert(2, 1); // 3
                dict.insert(3, 1); // 3
                dict.insert(4, 1); // 3
                dict.insert(1, 1); // 1
                dict.insert(2, 1); // 1
                dict.insert(1, 1); // 1
                dict.insert(2, 1); // 1
                dict.insert(1, 1); // 1
                dict.insert(2, 1); // 1
                let _ = dict.squash(); // 2
                // SUBTOTAL: 20

                // If has big keys, 2 for first unique key access,
                // and 6 each of the remaining unique key accesses.
                // Each repeated key access adds an extra range check usage.
                let mut dict: Felt252Dict<felt252> = Default::default();
                dict.insert(1, 1); // 2
                dict.insert(0xF00000000000000000000000000000002, 1); // 6
                dict.insert(1, 1); // 1
                dict.insert(0xF00000000000000000000000000000002, 1); // 1
                dict.insert(1, 1); // 1
                dict.insert(0xF00000000000000000000000000000002, 1); // 1
                dict.insert(1, 1); // 1
                dict.insert(0xF00000000000000000000000000000002, 1); // 1
                dict.insert(1, 1); // 1
                dict.insert(0xF00000000000000000000000000000002, 1); // 1
                dict.insert(1, 1); // 1
                dict.insert(0xF00000000000000000000000000000002, 1); // 1
                let _ = dict.squash(); // 2
                // SUBTOTAL: 20

                // TOTAL: 100
            }
        };

        let result = run_program(&program, "main", &[]);
        assert_eq!(result.builtin_stats.range_check, 100);
    }
}
| rust | Apache-2.0 | f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce | 2026-01-04T20:20:54.031924Z | false |
lambdaclass/cairo_native | https://github.com/lambdaclass/cairo_native/blob/f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce/src/libfuncs/poseidon.rs | src/libfuncs/poseidon.rs | //! # Poseidon hashing libfuncs
//!
use super::LibfuncHelper;
use crate::{
error::{panic::ToNativeAssertError, Result},
execution_result::POSEIDON_BUILTIN_SIZE,
metadata::{runtime_bindings::RuntimeBindingsMeta, MetadataStorage},
utils::{get_integer_layout, ProgramRegistryExt},
};
use cairo_lang_sierra::{
extensions::{
core::{CoreLibfunc, CoreType},
lib_func::SignatureOnlyConcreteLibfunc,
poseidon::PoseidonConcreteLibfunc,
ConcreteLibfunc,
},
program_registry::ProgramRegistry,
};
use melior::{
dialect::ods,
helpers::{ArithBlockExt, BuiltinBlockExt, LlvmBlockExt},
ir::{r#type::IntegerType, Block, Location},
Context,
};
/// Select and call the correct libfunc builder function from the selector.
pub fn build<'ctx, 'this>(
    context: &'ctx Context,
    registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, 'this>,
    metadata: &mut MetadataStorage,
    selector: &PoseidonConcreteLibfunc,
) -> Result<()> {
    // `PoseidonConcreteLibfunc` exposes a single variant, so the pattern
    // below is irrefutable.
    let PoseidonConcreteLibfunc::HadesPermutation(info) = selector;
    build_hades_permutation(context, registry, entry, location, helper, metadata, info)
}
/// Generate MLIR operations for the `hades_permutation` libfunc.
///
/// The three felt252 state values are zero-extended to 256 bits (the values
/// are byte-swapped by the runtime, and bswap requires a whole number of
/// bytes), spilled into stack slots, permuted in place by the runtime
/// library, then loaded back and truncated to the felt252 width.
///
/// Fix: the original performed the `RuntimeBindingsMeta` lookup twice — once
/// at the top with the result discarded, and once before the call. The
/// redundant first lookup has been removed; the remaining one still raises
/// the same "runtime library should be available" error when missing.
pub fn build_hades_permutation<'ctx>(
    context: &'ctx Context,
    registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'ctx Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, '_>,
    metadata: &mut MetadataStorage,
    info: &SignatureOnlyConcreteLibfunc,
) -> Result<()> {
    // The sierra-to-casm compiler uses the poseidon builtin a total of 1 time.
    // https://github.com/starkware-libs/cairo/blob/v2.12.0-dev.1/crates/cairo-lang-sierra-to-casm/src/invocations/poseidon.rs?plain=1#L19
    let poseidon_builtin = super::increment_builtin_counter_by(
        context,
        entry,
        location,
        entry.arg(0)?,
        POSEIDON_BUILTIN_SIZE,
    )?;

    let felt252_ty =
        registry.build_type(context, helper, metadata, &info.param_signatures()[1].ty)?;

    let i256_ty = IntegerType::new(context, 256).into();
    let layout_i256 = get_integer_layout(256);

    let op0 = entry.arg(1)?;
    let op1 = entry.arg(2)?;
    let op2 = entry.arg(3)?;

    // We must extend to i256 because bswap must be an even number of bytes.
    // The slots are allocated in the function's init block.
    let op0_ptr = helper
        .init_block()
        .alloca1(context, location, i256_ty, layout_i256.align())?;
    let op1_ptr = helper
        .init_block()
        .alloca1(context, location, i256_ty, layout_i256.align())?;
    let op2_ptr = helper
        .init_block()
        .alloca1(context, location, i256_ty, layout_i256.align())?;

    let op0_i256 =
        entry.append_op_result(ods::arith::extui(context, i256_ty, op0, location).into())?;
    let op1_i256 =
        entry.append_op_result(ods::arith::extui(context, i256_ty, op1, location).into())?;
    let op2_i256 =
        entry.append_op_result(ods::arith::extui(context, i256_ty, op2, location).into())?;

    entry.store(context, location, op0_ptr, op0_i256)?;
    entry.store(context, location, op1_ptr, op1_i256)?;
    entry.store(context, location, op2_ptr, op2_i256)?;

    let runtime_bindings = metadata
        .get_mut::<RuntimeBindingsMeta>()
        .to_native_assert_error("runtime library should be available")?;

    // The runtime permutes the three state slots in place.
    runtime_bindings
        .libfunc_hades_permutation(context, helper, entry, op0_ptr, op1_ptr, op2_ptr, location)?;

    let op0_i256 = entry.load(context, location, op0_ptr, i256_ty)?;
    let op1_i256 = entry.load(context, location, op1_ptr, i256_ty)?;
    let op2_i256 = entry.load(context, location, op2_ptr, i256_ty)?;

    let op0 = entry.trunci(op0_i256, felt252_ty, location)?;
    let op1 = entry.trunci(op1_i256, felt252_ty, location)?;
    let op2 = entry.trunci(op2_i256, felt252_ty, location)?;

    helper.br(entry, 0, &[poseidon_builtin, op0, op1, op2], location)
}
#[cfg(test)]
mod test {
    use crate::{jit_struct, load_cairo, utils::testing::run_program_assert_output};
    use starknet_types_core::felt::Felt;

    /// Known-answer test for `hades_permutation` on the state (2, 4, 4);
    /// the expected triplet is pinned as fixed decimal constants.
    #[test]
    fn run_hades_permutation() {
        let program = load_cairo!(
            use core::poseidon::hades_permutation;

            fn run_test(a: felt252, b: felt252, c: felt252) -> (felt252, felt252, felt252) {
                hades_permutation(a, b, c)
            }
        );

        run_program_assert_output(
            &program,
            "run_test",
            &[
                Felt::from(2).into(),
                Felt::from(4).into(),
                Felt::from(4).into(),
            ],
            jit_struct!(
                Felt::from_dec_str(
                    "1627044480024625333712068603977073585655327747658231320998869768849911913066"
                )
                .unwrap()
                .into(),
                Felt::from_dec_str(
                    "2368195581807763724810563135784547417602556730014791322540110420941926079965"
                )
                .unwrap()
                .into(),
                Felt::from_dec_str(
                    "2381325839211954898363395375151559373051496038592329842107874845056395867189"
                )
                .unwrap()
                .into(),
            ),
        );
    }
}
| rust | Apache-2.0 | f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce | 2026-01-04T20:20:54.031924Z | false |
lambdaclass/cairo_native | https://github.com/lambdaclass/cairo_native/blob/f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce/src/libfuncs/uint512.rs | src/libfuncs/uint512.rs | //! # `u512`-related libfuncs
use super::LibfuncHelper;
use crate::{error::Result, metadata::MetadataStorage, utils::ProgramRegistryExt};
use cairo_lang_sierra::{
extensions::{
core::{CoreLibfunc, CoreType},
int::unsigned512::Uint512Concrete,
lib_func::SignatureOnlyConcreteLibfunc,
ConcreteLibfunc,
},
program_registry::ProgramRegistry,
};
use melior::{
dialect::{arith, llvm},
helpers::{ArithBlockExt, BuiltinBlockExt, LlvmBlockExt},
ir::{r#type::IntegerType, Block, Location, Value},
Context,
};
/// Select and call the correct libfunc builder function from the selector.
pub fn build<'ctx, 'this>(
    context: &'ctx Context,
    registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, 'this>,
    metadata: &mut MetadataStorage,
    selector: &Uint512Concrete,
) -> Result<()> {
    // `Uint512Concrete` exposes a single variant, so the pattern below is
    // irrefutable.
    let Uint512Concrete::DivModU256(info) = selector;
    build_divmod_u256(context, registry, entry, location, helper, metadata, info)
}
/// Generate MLIR operations for the `u512_safe_divmod_by_u256` libfunc.
///
/// The u512 dividend (four u128 limbs, least significant first) and u256
/// divisor (two u128 limbs) are packed into native 512-bit integers, divided
/// there, and the quotient/remainder are split back into limbs. The five
/// guarantee outputs are emitted as `undef` values.
pub fn build_divmod_u256<'ctx, 'this>(
    context: &'ctx Context,
    registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, 'this>,
    metadata: &mut MetadataStorage,
    info: &SignatureOnlyConcreteLibfunc,
) -> Result<()> {
    // The sierra-to-casm compiler uses the range check builtin a total of 12 times.
    // https://github.com/starkware-libs/cairo/blob/v2.12.0-dev.1/crates/cairo-lang-sierra-to-casm/src/invocations/int/unsigned512.rs?plain=1#L23
    let range_check =
        super::increment_builtin_counter_by(context, entry, location, entry.arg(0)?, 12)?;

    let i128_ty = IntegerType::new(context, 128).into();
    let i512_ty = IntegerType::new(context, 512).into();

    // Type of the guarantee values returned alongside the results.
    let guarantee_type =
        registry.build_type(context, helper, metadata, &info.output_types()[0][3])?;

    let lhs_struct: Value = entry.arg(1)?;
    let rhs_struct: Value = entry.arg(2)?;

    // Unpack the u128 limbs from the input structs.
    let dividend = (
        entry.extract_value(context, location, lhs_struct, i128_ty, 0)?,
        entry.extract_value(context, location, lhs_struct, i128_ty, 1)?,
        entry.extract_value(context, location, lhs_struct, i128_ty, 2)?,
        entry.extract_value(context, location, lhs_struct, i128_ty, 3)?,
    );
    let divisor = (
        entry.extract_value(context, location, rhs_struct, i128_ty, 0)?,
        entry.extract_value(context, location, rhs_struct, i128_ty, 1)?,
    );

    // Widen every limb to 512 bits...
    let dividend = (
        entry.extui(dividend.0, i512_ty, location)?,
        entry.extui(dividend.1, i512_ty, location)?,
        entry.extui(dividend.2, i512_ty, location)?,
        entry.extui(dividend.3, i512_ty, location)?,
    );
    let divisor = (
        entry.extui(divisor.0, i512_ty, location)?,
        entry.extui(divisor.1, i512_ty, location)?,
    );

    let k128 = entry.const_int_from_type(context, location, 128, i512_ty)?;
    let k256 = entry.const_int_from_type(context, location, 256, i512_ty)?;
    let k384 = entry.const_int_from_type(context, location, 384, i512_ty)?;

    // ...shift each limb into its position...
    let dividend = (
        dividend.0,
        entry.shli(dividend.1, k128, location)?,
        entry.shli(dividend.2, k256, location)?,
        entry.shli(dividend.3, k384, location)?,
    );
    let divisor = (divisor.0, entry.shli(divisor.1, k128, location)?);

    // ...and OR them together into single 512-bit values.
    let dividend = {
        let lhs_01 = entry.append_op_result(arith::ori(dividend.0, dividend.1, location))?;
        let lhs_23 = entry.append_op_result(arith::ori(dividend.2, dividend.3, location))?;
        entry.append_op_result(arith::ori(lhs_01, lhs_23, location))?
    };
    let divisor = entry.append_op_result(arith::ori(divisor.0, divisor.1, location))?;

    // Native unsigned division and remainder at 512 bits.
    let result_div = entry.append_op_result(arith::divui(dividend, divisor, location))?;
    let result_rem = entry.append_op_result(arith::remui(dividend, divisor, location))?;

    // Split the quotient (four limbs) and remainder (two limbs) back out.
    let result_div = (
        entry.trunci(result_div, i128_ty, location)?,
        entry.trunci(entry.shrui(result_div, k128, location)?, i128_ty, location)?,
        entry.trunci(entry.shrui(result_div, k256, location)?, i128_ty, location)?,
        entry.trunci(entry.shrui(result_div, k384, location)?, i128_ty, location)?,
    );
    let result_rem = (
        entry.trunci(result_rem, i128_ty, location)?,
        entry.trunci(entry.shrui(result_rem, k128, location)?, i128_ty, location)?,
    );

    // Repack the limbs into the output structs.
    let result_div_val = entry.append_op_result(llvm::undef(
        llvm::r#type::r#struct(context, &[i128_ty, i128_ty, i128_ty, i128_ty], false),
        location,
    ))?;
    let result_div_val = entry.insert_values(
        context,
        location,
        result_div_val,
        &[result_div.0, result_div.1, result_div.2, result_div.3],
    )?;

    let result_rem_val = entry.append_op_result(llvm::undef(
        llvm::r#type::r#struct(context, &[i128_ty, i128_ty], false),
        location,
    ))?;
    let result_rem_val = entry.insert_values(
        context,
        location,
        result_rem_val,
        &[result_rem.0, result_rem.1],
    )?;

    // Guarantee outputs carry no computed data here; emit undef.
    let guarantee = entry.append_op_result(llvm::undef(guarantee_type, location))?;

    helper.br(
        entry,
        0,
        &[
            range_check,
            result_div_val,
            result_rem_val,
            guarantee,
            guarantee,
            guarantee,
            guarantee,
            guarantee,
        ],
        location,
    )
}
#[cfg(test)]
mod test {
    use crate::{jit_struct, load_cairo, utils::testing::run_program_assert_output, values::Value};
    use cairo_lang_sierra::program::Program;
    use lazy_static::lazy_static;
    use num_bigint::BigUint;
    use num_traits::One;

    lazy_static! {
        // Program returning only the quotient and remainder of
        // `u512_safe_divmod_by_u256` (guarantees discarded).
        static ref UINT512_DIVMOD_U256: (String, Program) = load_cairo! {
            use core::integer::{u512, u512_safe_divmod_by_u256};

            fn run_test(lhs: u512, rhs: NonZero<u256>) -> (u512, u256) {
                let (lhs, rhs, _, _, _, _, _) = u512_safe_divmod_by_u256(lhs, rhs);
                (lhs, rhs)
            }
        };
    }

    #[test]
    fn u512_safe_divmod_by_u256() {
        // Split a BigUint into the four u128 limbs of a u512 value.
        fn u512(value: BigUint) -> Value {
            assert!(value.bits() <= 512);
            jit_struct!(
                Value::Uint128((&value & &u128::MAX.into()).try_into().unwrap()),
                Value::Uint128(((&value >> 128u32) & &u128::MAX.into()).try_into().unwrap()),
                Value::Uint128(((&value >> 256u32) & &u128::MAX.into()).try_into().unwrap()),
                Value::Uint128(((&value >> 384u32) & &u128::MAX.into()).try_into().unwrap()),
            )
        }

        // Split a BigUint into the two u128 limbs of a u256 value.
        fn u256(value: BigUint) -> Value {
            assert!(value.bits() <= 256);
            jit_struct!(
                Value::Uint128((&value & &u128::MAX.into()).try_into().unwrap()),
                Value::Uint128(((&value >> 128u32) & &u128::MAX.into()).try_into().unwrap()),
            )
        }

        // Run `lhs / rhs` and assert the (quotient, remainder) pair.
        #[track_caller]
        fn r2(lhs: BigUint, rhs: BigUint, output_u512: BigUint, output_u256: BigUint) {
            let lhs = u512(lhs);
            let rhs = u256(rhs);
            let output_u512 = u512(output_u512);
            let output_u256 = u256(output_u256);

            run_program_assert_output(
                &UINT512_DIVMOD_U256,
                "run_test",
                &[lhs, rhs],
                jit_struct!(output_u512, output_u256),
            );
        }

        r2(0u32.into(), 1u32.into(), 0u32.into(), 0u32.into());
        r2(
            0u32.into(),
            (BigUint::one() << 256u32) - 2u32,
            0u32.into(),
            0u32.into(),
        );
        r2(
            0u32.into(),
            (BigUint::one() << 256u32) - 1u32,
            0u32.into(),
            0u32.into(),
        );

        r2(1u32.into(), 1u32.into(), 1u32.into(), 0u32.into());
        r2(
            1u32.into(),
            (BigUint::one() << 256u32) - 2u32,
            0u32.into(),
            1u32.into(),
        );
        r2(
            1u32.into(),
            (BigUint::one() << 256u32) - 1u32,
            0u32.into(),
            1u32.into(),
        );

        r2(
            (BigUint::one() << 512u32) - 2u32,
            (BigUint::one() << 256u32) - 2u32,
            (BigUint::one() << 256) + 2u32,
            2u32.into(),
        );
        r2(
            (BigUint::one() << 512u32) - 2u32,
            1u32.into(),
            (BigUint::one() << 512u32) - 2u32,
            0u32.into(),
        );
        // NOTE(review): this case duplicates the ((1<<512)-2, (1<<256)-2)
        // case above; it could be removed or replaced with a distinct input.
        r2(
            (BigUint::one() << 512u32) - 2u32,
            (BigUint::one() << 256u32) - 2u32,
            (BigUint::one() << 256) + 2u32,
            2u32.into(),
        );
        r2(
            (BigUint::one() << 512u32) - 2u32,
            (BigUint::one() << 256u32) - 1u32,
            BigUint::one() << 256u32,
            (BigUint::one() << 256u32) - 2u32,
        );

        r2(
            (BigUint::one() << 512u32) - 1u32,
            1u32.into(),
            (BigUint::one() << 512u32) - 1u32,
            0u32.into(),
        );
        r2(
            (BigUint::one() << 512u32) - 1u32,
            (BigUint::one() << 256u32) - 2u32,
            (BigUint::one() << 256) + 2u32,
            3u32.into(),
        );
        r2(
            (BigUint::one() << 512u32) - 1u32,
            (BigUint::one() << 256u32) - 1u32,
            (BigUint::one() << 256) + 1u32,
            0u32.into(),
        );
    }
}
| rust | Apache-2.0 | f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce | 2026-01-04T20:20:54.031924Z | false |
lambdaclass/cairo_native | https://github.com/lambdaclass/cairo_native/blob/f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce/src/libfuncs/enum.rs | src/libfuncs/enum.rs | //! # Enum-related libfuncs
//!
//! Check out [the enum type](crate::types::r#enum) for more information on enum layouts.
use super::LibfuncHelper;
use crate::{
error::{panic::ToNativeAssertError, Error, Result},
metadata::{enum_snapshot_variants::EnumSnapshotVariantsMeta, MetadataStorage},
native_assert, native_panic,
types::TypeBuilder,
};
use cairo_lang_sierra::{
extensions::{
core::{CoreLibfunc, CoreType},
enm::{EnumConcreteLibfunc, EnumFromBoundedIntConcreteLibfunc, EnumInitConcreteLibfunc},
lib_func::SignatureOnlyConcreteLibfunc,
ConcreteLibfunc,
},
ids::ConcreteTypeId,
program_registry::ProgramRegistry,
};
use melior::{
dialect::{arith, cf, llvm, ods},
helpers::{ArithBlockExt, BuiltinBlockExt, LlvmBlockExt},
ir::{
attribute::{DenseI64ArrayAttribute, IntegerAttribute},
r#type::IntegerType,
Block, BlockLike, Location, Value,
},
Context,
};
use std::num::TryFromIntError;
/// Select and call the correct libfunc builder function from the selector.
pub fn build<'ctx, 'this>(
    context: &'ctx Context,
    registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, 'this>,
    metadata: &mut MetadataStorage,
    selector: &EnumConcreteLibfunc,
) -> Result<()> {
    // Local alias to keep the dispatch arms compact.
    type Libfunc = EnumConcreteLibfunc;

    match selector {
        Libfunc::Init(info) => {
            build_init(context, registry, entry, location, helper, metadata, info)
        }
        Libfunc::Match(info) => {
            build_match(context, registry, entry, location, helper, metadata, info)
        }
        Libfunc::SnapshotMatch(info) => {
            build_snapshot_match(context, registry, entry, location, helper, metadata, info)
        }
        Libfunc::FromBoundedInt(info) => {
            build_from_bounded_int(context, registry, entry, location, helper, metadata, info)
        }
    }
}
/// Generate MLIR operations for the `enum_init` libfunc.
///
/// Builds the enum value for variant `info.index` from the payload argument
/// and forwards it to the single branch target.
pub fn build_init<'ctx, 'this>(
    context: &'ctx Context,
    registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, 'this>,
    metadata: &mut MetadataStorage,
    info: &EnumInitConcreteLibfunc,
) -> Result<()> {
    // With debug utils enabled, optionally emit a breakpoint when this
    // specific enum variant gets initialized.
    #[cfg(feature = "with-debug-utils")]
    if let Some(auto_breakpoint) =
        metadata.get::<crate::metadata::auto_breakpoint::AutoBreakpoint>()
    {
        auto_breakpoint.maybe_breakpoint(
            entry,
            location,
            metadata,
            &crate::metadata::auto_breakpoint::BreakpointEvent::EnumInit {
                type_id: info.signature.branch_signatures[0].vars[0].ty.clone(),
                variant_idx: info.index,
            },
        )?;
    }

    let val = build_enum_value(
        context,
        registry,
        entry,
        location,
        helper,
        metadata,
        entry.arg(0)?,
        &info.branch_signatures()[0].vars[0].ty,
        &info.signature.param_signatures[0].ty,
        info.index,
    )?;
    helper.br(entry, 0, &[val], location)
}
/// Build a concrete enum value from a payload and a variant index.
///
/// For single-variant enums the payload *is* the enum value. Otherwise a
/// `{ tag, payload }` struct is assembled (with a zero-length array in place
/// of a zero-sized payload); memory-allocated enums are round-tripped
/// through a stack slot to convert the concrete variant layout into the
/// enum's internal representation.
#[allow(clippy::too_many_arguments)]
pub fn build_enum_value<'ctx, 'this>(
    context: &'ctx Context,
    registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, 'this>,
    metadata: &mut MetadataStorage,
    payload_value: Value<'ctx, 'this>,
    enum_type: &ConcreteTypeId,
    variant_type: &ConcreteTypeId,
    variant_index: usize,
) -> Result<Value<'ctx, 'this>> {
    let type_info = registry.get_type(enum_type)?;
    let payload_type_info = registry.get_type(variant_type)?;

    // Layout of the whole enum, its tag type, and the per-variant types.
    let (layout, (tag_ty, _), variant_tys) = crate::types::r#enum::get_type_for_variants(
        context,
        helper,
        registry,
        metadata,
        type_info
            .variants()
            .to_native_assert_error("found non-enum type where an enum is required")?,
    )?;

    Ok(match variant_tys.len() {
        0 => native_panic!("attempt to initialize a zero-variant enum"),
        // Single-variant enums carry no tag; the payload is the value.
        1 => payload_value,
        _ => {
            // Concrete representation of this particular variant:
            // { tag, payload }, using a zero-length byte array when the
            // payload is zero-sized.
            let enum_ty = llvm::r#type::r#struct(
                context,
                &[
                    tag_ty,
                    if payload_type_info.is_zst(registry)? {
                        llvm::r#type::array(IntegerType::new(context, 8).into(), 0)
                    } else {
                        variant_tys[variant_index].0
                    },
                ],
                false,
            );

            let tag_val = entry
                .append_operation(arith::constant(
                    context,
                    IntegerAttribute::new(tag_ty, variant_index.try_into()?).into(),
                    location,
                ))
                .result(0)?
                .into();

            let val = entry.append_op_result(llvm::undef(enum_ty, location))?;
            let val = entry.insert_value(context, location, val, tag_val, 0)?;
            let mut val = if payload_type_info.is_zst(registry)? {
                val
            } else {
                entry.insert_value(context, location, val, payload_value, 1)?
            };

            if type_info.is_memory_allocated(registry)? {
                let stack_ptr = helper.init_block().alloca1(
                    context,
                    location,
                    type_info.build(context, helper, registry, metadata, enum_type)?,
                    layout.align(),
                )?;

                // Convert the enum from the concrete variant to the internal representation.
                entry.store(context, location, stack_ptr, val)?;
                val = entry.load(
                    context,
                    location,
                    stack_ptr,
                    type_info.build(context, helper, registry, metadata, enum_type)?,
                )?;
            };

            val
        }
    })
}
/// Generate MLIR operations for the `enum_from_bounded_int` libfunc.
///
/// # Constraints
///
/// - The target `Enum` must contain the same number of empty variants as the number
///   of possible values in the `BoundedInt` range.
/// - The range of the `BoundedInt` must start from **0**.
///
/// # Signature
///
/// ```cairo
/// fn enum_from_bounded_int<T, U>(index: U) -> T nopanic
/// ```
pub fn build_from_bounded_int<'ctx, 'this>(
    context: &'ctx Context,
    registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, 'this>,
    metadata: &mut MetadataStorage,
    info: &EnumFromBoundedIntConcreteLibfunc,
) -> Result<()> {
    // Integer type backing the `BoundedInt` input (the variant selector).
    let input_type_info = registry.get_type(&info.param_signatures()[0].ty)?;
    let selector_type: IntegerType = input_type_info
        .build(
            context,
            helper,
            registry,
            metadata,
            &info.param_signatures()[0].ty,
        )?
        .try_into()?;

    // MLIR type of the enum produced by this libfunc.
    let enum_type_info = registry.get_type(&info.branch_signatures()[0].vars[0].ty)?;
    let enum_ty = enum_type_info.build(
        context,
        helper,
        registry,
        metadata,
        &info.branch_signatures()[0].vars[0].ty,
    )?;

    // The tag must be wide enough to encode all `n_variants` values.
    let tag_bits = info.n_variants.next_power_of_two().trailing_zeros();
    let tag_type = IntegerType::new(context, tag_bits);

    // Adjust the selector's width to match the tag: truncate when the tag is
    // narrower, zero-extend when it is wider, and keep it as-is otherwise.
    let mut tag_value: Value = entry.arg(0)?;
    let tag_width = tag_type.width();
    let selector_width = selector_type.width();
    if tag_width < selector_width {
        tag_value = entry.append_op_result(
            ods::llvm::trunc(context, tag_type.into(), tag_value, location).into(),
        )?;
    } else if tag_width > selector_width {
        tag_value = entry.append_op_result(
            ods::llvm::zext(context, tag_type.into(), tag_value, location).into(),
        )?;
    }

    // Write the tag into an otherwise-undefined enum value and branch out.
    let enum_value = entry.append_op_result(llvm::undef(enum_ty, location))?;
    let enum_value = entry.insert_value(context, location, enum_value, tag_value, 0)?;
    helper.br(entry, 0, &[enum_value], location)
}
/// Generate MLIR operations for the `enum_match` libfunc.
///
/// Reads the enum's tag and switches on it, jumping to one libfunc branch per
/// variant with the corresponding payload as the branch argument.
///
/// - Zero-variant enums cannot exist at runtime, so the generated code is an
///   always-failing assertion followed by `unreachable`.
/// - Single-variant enums are passed through unchanged (their representation
///   carries no tag in this code path).
/// - Otherwise, the tag is read either through a stack slot (for
///   memory-allocated enums) or directly via `extract_value`, and a
///   `cf.switch` dispatches to per-variant blocks.
///
/// NOTE(review): this implementation mirrors `build_snapshot_match`; it now
/// uses the same `const_int`/`extract_value`/`append_op_result` block helpers
/// as its sibling for consistency.
pub fn build_match<'ctx, 'this>(
    context: &'ctx Context,
    registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, 'this>,
    metadata: &mut MetadataStorage,
    info: &SignatureOnlyConcreteLibfunc,
) -> Result<()> {
    let type_info = registry.get_type(&info.param_signatures()[0].ty)?;
    let variant_ids = type_info
        .variants()
        .to_native_assert_error("found non-enum type where an enum is required")?;
    match variant_ids.len() {
        0 => {
            // The Cairo compiler will generate an enum match for enums without variants, so this
            // case cannot be a compile-time error. We're assuming that even though it's been
            // generated, it's just dead code and can be made into an assertion that always fails.
            let k0 = entry.const_int(context, location, 0, 1)?;
            entry.append_operation(cf::assert(
                context,
                k0,
                "attempt to match a zero-variant enum",
                location,
            ));
            entry.append_operation(llvm::unreachable(location));
        }
        1 => {
            // Single-variant enums have no tag: forward the value as-is.
            helper.br(entry, 0, &[entry.arg(0)?], location)?;
        }
        _ => {
            let (layout, (tag_ty, _), variant_tys) = crate::types::r#enum::get_type_for_variants(
                context,
                helper,
                registry,
                metadata,
                variant_ids,
            )?;
            // Read the tag. Memory-allocated enums are spilled to a stack slot
            // so each variant block can reload the payload with its own type;
            // otherwise the tag is extracted directly from the SSA value.
            let (stack_ptr, tag_val) = if type_info.is_memory_allocated(registry)? {
                let stack_ptr = helper.init_block().alloca1(
                    context,
                    location,
                    type_info.build(
                        context,
                        helper,
                        registry,
                        metadata,
                        &info.param_signatures()[0].ty,
                    )?,
                    layout.align(),
                )?;
                entry.store(context, location, stack_ptr, entry.arg(0)?)?;
                let tag_val = entry.load(context, location, stack_ptr, tag_ty)?;
                (Some(stack_ptr), tag_val)
            } else {
                let tag_val = entry.extract_value(context, location, entry.arg(0)?, tag_ty, 0)?;
                (None, tag_val)
            };
            // One landing block per variant, plus an unreachable default.
            let default_block = helper.append_block(Block::new(&[]));
            let variant_blocks = variant_tys
                .iter()
                .map(|_| helper.append_block(Block::new(&[])))
                .collect::<Vec<_>>();
            let case_values = (0..variant_tys.len())
                .map(i64::try_from)
                .collect::<std::result::Result<Vec<_>, TryFromIntError>>()?;
            entry.append_operation(cf::switch(
                context,
                &case_values,
                tag_val,
                tag_ty,
                (default_block, &[]),
                &variant_blocks
                    .iter()
                    .copied()
                    .map(|block| (block, [].as_slice()))
                    .collect::<Vec<_>>(),
                location,
            )?);
            // Default block: an invalid tag is a broken invariant.
            {
                let val = default_block.const_int(context, location, 0, 1)?;
                default_block.append_operation(cf::assert(
                    context,
                    val,
                    "Invalid enum tag.",
                    location,
                ));
                default_block.append_operation(llvm::unreachable(location));
            }
            // Enum variants: extract the payload and jump to the matching
            // libfunc branch.
            for (i, (block, (payload_ty, _))) in
                variant_blocks.into_iter().zip(variant_tys).enumerate()
            {
                let enum_ty = llvm::r#type::r#struct(context, &[tag_ty, payload_ty], false);
                let payload_val = match stack_ptr {
                    Some(stack_ptr) => {
                        // Reload through the stack slot with this variant's
                        // concrete `{ tag, payload }` type.
                        let val = block.load(context, location, stack_ptr, enum_ty)?;
                        block.extract_value(context, location, val, payload_ty, 1)?
                    }
                    None => {
                        // If the enum is not memory-allocated it means that:
                        //   - Either it's a C-style enum and all payloads have the same type.
                        //   - Or the enum only has a single non-memory-allocated variant.
                        if variant_ids.len() == 1 {
                            entry.arg(0)?
                        } else {
                            native_assert!(
                                registry.get_type(&variant_ids[i])?.is_zst(registry)?,
                                "should be zero sized"
                            );
                            block.append_op_result(llvm::undef(payload_ty, location))?
                        }
                    }
                };
                helper.br(block, i, &[payload_val], location)?;
            }
        }
    }
    Ok(())
}
/// Generate MLIR operations for the `enum_snapshot_match` libfunc.
///
/// Identical in structure to `build_match` (tag read, `cf.switch`, one landing
/// block per variant), except that the snapshotted enum's variants are fetched
/// from the `EnumSnapshotVariantsMeta` metadata instead of the type registry.
pub fn build_snapshot_match<'ctx, 'this>(
    context: &'ctx Context,
    registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, 'this>,
    metadata: &mut MetadataStorage,
    info: &SignatureOnlyConcreteLibfunc,
) -> Result<()> {
    let type_info = registry.get_type(&info.param_signatures()[0].ty)?;
    // This libfunc's implementation is identical to `enum_match` aside from fetching the snapshotted enum's variants from the metadata:
    let variant_ids = metadata
        .get::<EnumSnapshotVariantsMeta>()
        .ok_or(Error::MissingMetadata)?
        .get_variants(&info.param_signatures()[0].ty)
        .to_native_assert_error("enum should always have variants")?
        .clone();
    match variant_ids.len() {
        0 => {
            // The Cairo compiler will generate an enum match for enums without variants, so this
            // case cannot be a compile-time error. We're assuming that even though it's been
            // generated, it's just dead code and can be made into an assertion that always fails.
            let k0 = entry.const_int(context, location, 0, 1)?;
            entry.append_operation(cf::assert(
                context,
                k0,
                "attempt to match a zero-variant enum",
                location,
            ));
            entry.append_operation(llvm::unreachable(location));
        }
        1 => {
            // Single-variant enums carry no tag: forward the value unchanged.
            helper.br(entry, 0, &[entry.arg(0)?], location)?;
        }
        _ => {
            let (layout, (tag_ty, _), variant_tys) = crate::types::r#enum::get_type_for_variants(
                context,
                helper,
                registry,
                metadata,
                &variant_ids,
            )?;
            // Read the tag, spilling memory-allocated enums to a stack slot so
            // each variant block can reload the payload with its own type.
            let (stack_ptr, tag_val) = if type_info.is_memory_allocated(registry)? {
                let stack_ptr = helper.init_block().alloca1(
                    context,
                    location,
                    type_info.build(
                        context,
                        helper,
                        registry,
                        metadata,
                        &info.param_signatures()[0].ty,
                    )?,
                    layout.align(),
                )?;
                entry.store(context, location, stack_ptr, entry.arg(0)?)?;
                let tag_val = entry.load(context, location, stack_ptr, tag_ty)?;
                (Some(stack_ptr), tag_val)
            } else {
                let tag_val = entry.extract_value(context, location, entry.arg(0)?, tag_ty, 0)?;
                (None, tag_val)
            };
            // One landing block per variant, plus an unreachable default.
            let default_block = helper.append_block(Block::new(&[]));
            let variant_blocks = variant_tys
                .iter()
                .map(|_| helper.append_block(Block::new(&[])))
                .collect::<Vec<_>>();
            let case_values = (0..variant_tys.len())
                .map(i64::try_from)
                .collect::<std::result::Result<Vec<_>, TryFromIntError>>()?;
            entry.append_operation(cf::switch(
                context,
                &case_values,
                tag_val,
                tag_ty,
                (default_block, &[]),
                &variant_blocks
                    .iter()
                    .copied()
                    .map(|block| (block, [].as_slice()))
                    .collect::<Vec<_>>(),
                location,
            )?);
            // Default block: an invalid tag is a broken invariant.
            {
                let val = default_block.const_int(context, location, 0, 1)?;
                default_block.append_operation(cf::assert(
                    context,
                    val,
                    "Invalid enum tag.",
                    location,
                ));
                default_block.append_operation(llvm::unreachable(location));
            }
            // Enum variants: extract the payload and jump to the matching
            // libfunc branch.
            for (i, (block, (payload_ty, _))) in
                variant_blocks.into_iter().zip(variant_tys).enumerate()
            {
                let enum_ty = llvm::r#type::r#struct(context, &[tag_ty, payload_ty], false);
                let payload_val = match stack_ptr {
                    Some(stack_ptr) => {
                        // Reload through the stack slot with this variant's
                        // concrete `{ tag, payload }` type.
                        let val = block.load(context, location, stack_ptr, enum_ty)?;
                        block.extract_value(context, location, val, payload_ty, 1)?
                    }
                    None => {
                        // If the enum is not memory-allocated it means that:
                        // - Either it's a C-style enum and all payloads have the same type.
                        // - Or the enum only has a single non-memory-allocated variant.
                        if variant_ids.len() == 1 {
                            entry.arg(0)?
                        } else {
                            native_assert!(
                                registry.get_type(&variant_ids[i])?.is_zst(registry)?,
                                "should be zero sized"
                            );
                            block.append_op_result(llvm::undef(payload_ty, location))?
                        }
                    }
                };
                helper.br(block, i, &[payload_val], location)?;
            }
        }
    }
    Ok(())
}
#[cfg(test)]
mod test {
    use crate::{
        context::NativeContext, jit_enum, jit_struct, load_cairo,
        utils::testing::run_program_assert_output, Value,
    };
    use cairo_lang_sierra::program::Program;
    use lazy_static::lazy_static;
    use starknet_types_core::felt::Felt;

    // Cairo fixture programs, compiled once and shared across tests.
    lazy_static! {
        // Initializes every variant of a single- and a multi-variant enum.
        static ref ENUM_INIT: (String, Program) = load_cairo! {
            enum MySmallEnum {
                A: felt252,
            }
            enum MyEnum {
                A: felt252,
                B: u8,
                C: u16,
                D: u32,
                E: u64,
            }
            fn run_test() -> (MySmallEnum, MyEnum, MyEnum, MyEnum, MyEnum, MyEnum) {
                (
                    MySmallEnum::A(-1),
                    MyEnum::A(5678),
                    MyEnum::B(90),
                    MyEnum::C(9012),
                    MyEnum::D(34567890),
                    MyEnum::E(1234567890123456),
                )
            }
        };
        // Matches on enums whose variants have differently-typed payloads.
        static ref ENUM_MATCH: (String, Program) = load_cairo! {
            enum MyEnum {
                A: felt252,
                B: u8,
                C: u16,
                D: u32,
                E: u64,
            }
            fn match_a() -> felt252 {
                let x = MyEnum::A(5);
                match x {
                    MyEnum::A(x) => x,
                    MyEnum::B(_) => 0,
                    MyEnum::C(_) => 1,
                    MyEnum::D(_) => 2,
                    MyEnum::E(_) => 3,
                }
            }
            fn match_b() -> u8 {
                let x = MyEnum::B(5_u8);
                match x {
                    MyEnum::A(_) => 0_u8,
                    MyEnum::B(x) => x,
                    MyEnum::C(_) => 1_u8,
                    MyEnum::D(_) => 2_u8,
                    MyEnum::E(_) => 3_u8,
                }
            }
        };
    }

    // Each variant should come back with its own tag and payload intact.
    #[test]
    fn enum_init() {
        run_program_assert_output(
            &ENUM_INIT,
            "run_test",
            &[],
            jit_struct!(
                jit_enum!(0, Felt::from(-1).into()),
                jit_enum!(0, Felt::from(5678).into()),
                jit_enum!(1, 90u8.into()),
                jit_enum!(2, 9012u16.into()),
                jit_enum!(3, 34567890u32.into()),
                jit_enum!(4, 1234567890123456u64.into()),
            ),
        );
    }

    // Matching must select the arm of the initialized variant and return its
    // payload.
    #[test]
    fn enum_match() {
        run_program_assert_output(&ENUM_MATCH, "match_a", &[], Felt::from(5).into());
        run_program_assert_output(&ENUM_MATCH, "match_b", &[], 5u8.into());
    }

    // A zero-variant enum match must at least compile (the generated code is
    // dead; see the 0-variant arm in `build_match`).
    #[test]
    fn compile_enum_match_without_variants() {
        let (_, program) = load_cairo! {
            enum MyEnum {}
            fn main(value: MyEnum) {
                match value {}
            }
        };
        let native_context = NativeContext::new();
        native_context
            .compile(&program, false, Some(Default::default()), None)
            .unwrap();
    }

    // Exercises `enum_from_bounded_int` for both the 1-variant and 5-variant
    // cases, including the highest in-range selector value.
    #[test]
    fn create_enum_from_bounded_int() {
        let program = load_cairo! {
            #[feature("bounded-int-utils")]
            use core::internal::bounded_int::BoundedInt;
            mod b0x4 {
                #[feature("bounded-int-utils")]
                use core::internal::bounded_int::BoundedInt;
                pub extern fn enum_from_bounded_int<T>(index: BoundedInt<0, 4>) -> T nopanic;
                // This wrapper is required so that the compiler won't assume extern `enum_from_bounded_int` is a
                // branch function. Without it, the program does not compile.
                fn wrapper<T>(index: BoundedInt<0, 4>) -> T {
                    enum_from_bounded_int(index)
                }
            }
            mod b0x0 {
                #[feature("bounded-int-utils")]
                use core::internal::bounded_int::BoundedInt;
                pub extern fn enum_from_bounded_int<T>(index: BoundedInt<0, 0>) -> T nopanic;
                // This wrapper is required so that the compiler won't assume extern `enum_from_bounded_int` is a
                // branch function. Without it, the program does not compile.
                fn wrapper<T>(index: BoundedInt<0, 0>) -> T {
                    enum_from_bounded_int(index)
                }
            }
            enum Enum1 {
                Zero
            }
            enum Enum5 {
                Zero,
                One,
                Two,
                Three,
                Four
            }
            fn test_1_variants(input: felt252) -> Enum1 {
                let bi: BoundedInt<0, 0> = input.try_into().unwrap();
                b0x0::wrapper(bi)
            }
            fn test_5_variants(input: felt252) -> Enum5 {
                let bi: BoundedInt<0, 4> = input.try_into().unwrap();
                b0x4::wrapper(bi)
            }
        };
        run_program_assert_output(
            &program,
            "test_1_variants",
            &[Value::Felt252(0.into())],
            jit_enum!(0, jit_struct!(jit_enum!(0, jit_struct!()))),
        );
        run_program_assert_output(
            &program,
            "test_5_variants",
            &[Value::Felt252(0.into())],
            jit_enum!(0, jit_struct!(jit_enum!(0, jit_struct!()))),
        );
        run_program_assert_output(
            &program,
            "test_5_variants",
            &[Value::Felt252(4.into())],
            jit_enum!(0, jit_struct!(jit_enum!(4, jit_struct!()))),
        );
    }
}
| rust | Apache-2.0 | f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce | 2026-01-04T20:20:54.031924Z | false |
lambdaclass/cairo_native | https://github.com/lambdaclass/cairo_native/blob/f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce/src/libfuncs/starknet/secp256.rs | src/libfuncs/starknet/secp256.rs | use crate::{
error::Result,
libfuncs::LibfuncHelper,
metadata::MetadataStorage,
starknet::handler::StarknetSyscallHandlerCallbacks,
utils::{get_integer_layout, ProgramRegistryExt},
};
use cairo_lang_sierra::{
extensions::{
core::{CoreLibfunc, CoreType},
lib_func::SignatureOnlyConcreteLibfunc,
starknet::secp256::{Secp256ConcreteLibfunc, Secp256OpConcreteLibfunc},
ConcreteLibfunc,
},
program_registry::ProgramRegistry,
};
use melior::{
dialect::llvm::{self, LoadStoreOptions},
helpers::{GepIndex, LlvmBlockExt},
ir::{
attribute::DenseI32ArrayAttribute, operation::OperationBuilder, r#type::IntegerType, Block,
BlockLike, Location,
},
Context,
};
use std::alloc::Layout;
/// Select and call the correct libfunc builder function from the selector.
pub fn build<'ctx, 'this>(
context: &'ctx Context,
registry: &ProgramRegistry<CoreType, CoreLibfunc>,
entry: &'this Block<'ctx>,
location: Location<'ctx>,
helper: &LibfuncHelper<'ctx, 'this>,
metadata: &mut MetadataStorage,
selector: &Secp256ConcreteLibfunc,
) -> Result<()> {
match selector {
Secp256ConcreteLibfunc::K1(selector) => match selector {
Secp256OpConcreteLibfunc::New(info) => {
build_k1_new(context, registry, entry, location, helper, metadata, info)
}
Secp256OpConcreteLibfunc::Add(info) => {
build_k1_add(context, registry, entry, location, helper, metadata, info)
}
Secp256OpConcreteLibfunc::Mul(info) => {
build_k1_mul(context, registry, entry, location, helper, metadata, info)
}
Secp256OpConcreteLibfunc::GetPointFromX(info) => build_k1_get_point_from_x(
context, registry, entry, location, helper, metadata, info,
),
Secp256OpConcreteLibfunc::GetXy(info) => {
build_k1_get_xy(context, registry, entry, location, helper, metadata, info)
}
},
Secp256ConcreteLibfunc::R1(selector) => match selector {
Secp256OpConcreteLibfunc::New(info) => {
build_r1_new(context, registry, entry, location, helper, metadata, info)
}
Secp256OpConcreteLibfunc::Add(info) => {
build_r1_add(context, registry, entry, location, helper, metadata, info)
}
Secp256OpConcreteLibfunc::Mul(info) => {
build_r1_mul(context, registry, entry, location, helper, metadata, info)
}
Secp256OpConcreteLibfunc::GetPointFromX(info) => build_r1_get_point_from_x(
context, registry, entry, location, helper, metadata, info,
),
Secp256OpConcreteLibfunc::GetXy(info) => {
build_r1_get_xy(context, registry, entry, location, helper, metadata, info)
}
},
}
}
/// Generate MLIR operations for the `secp256k1_new` syscall.
///
/// Invokes the syscall handler through its vtable (block argument 1, indexed
/// by `SECP256K1_NEW`), passing the gas builtin (argument 0) and the point
/// coordinates `x`/`y` (arguments 2 and 3) through stack allocas, then loads
/// back a two-variant result enum and branches to the success or error target
/// according to its tag.
pub fn build_k1_new<'ctx, 'this>(
    context: &'ctx Context,
    registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, 'this>,
    metadata: &mut MetadataStorage,
    info: &SignatureOnlyConcreteLibfunc,
) -> Result<()> {
    // Extract self pointer.
    let ptr = entry
        .append_operation(llvm::load(
            context,
            entry.argument(1)?.into(),
            llvm::r#type::pointer(context, 0),
            location,
            LoadStoreOptions::default(),
        ))
        .result(0)?
        .into();
    // Allocate space for the return value.
    let (result_layout, (result_tag_ty, result_tag_layout), variant_tys) =
        crate::types::r#enum::get_type_for_variants(
            context,
            helper,
            registry,
            metadata,
            &[
                info.branch_signatures()[0].vars[2].ty.clone(),
                info.branch_signatures()[1].vars[2].ty.clone(),
            ],
        )?;
    // The result slot is `{ tag, [size - 1 x i8] }`: a tag followed by enough
    // bytes for the largest variant payload.
    let result_ptr = helper.init_block().alloca1(
        context,
        location,
        llvm::r#type::r#struct(
            context,
            &[
                result_tag_ty,
                llvm::r#type::array(
                    IntegerType::new(context, 8).into(),
                    (result_layout.size() - 1).try_into()?,
                ),
            ],
            false,
        ),
        result_layout.align(),
    )?;
    // Allocate space and write the current gas.
    let (gas_ty, gas_layout) = registry.build_type_with_layout(
        context,
        helper,
        metadata,
        &info.param_signatures()[0].ty,
    )?;
    let gas_builtin_ptr =
        helper
            .init_block()
            .alloca1(context, location, gas_ty, gas_layout.align())?;
    entry.append_operation(llvm::store(
        context,
        entry.argument(0)?.into(),
        gas_builtin_ptr,
        location,
        LoadStoreOptions::default(),
    ));
    let (x_ty, x_layout) = registry.build_type_with_layout(
        context,
        helper,
        metadata,
        &info.signature.param_signatures[2].ty,
    )?;
    let (y_ty, y_layout) = registry.build_type_with_layout(
        context,
        helper,
        metadata,
        &info.signature.param_signatures[3].ty,
    )?;
    // Allocate `x` argument and write the value.
    let x_arg_ptr = helper
        .init_block()
        .alloca1(context, location, x_ty, x_layout.align())?;
    entry.append_operation(llvm::store(
        context,
        entry.argument(2)?.into(),
        x_arg_ptr,
        location,
        LoadStoreOptions::default(),
    ));
    // Allocate `y` argument and write the value.
    let y_arg_ptr = helper
        .init_block()
        .alloca1(context, location, y_ty, y_layout.align())?;
    entry.append_operation(llvm::store(
        context,
        entry.argument(3)?.into(),
        y_arg_ptr,
        location,
        LoadStoreOptions::default(),
    ));
    // Fetch the callback's function pointer from the handler vtable.
    let fn_ptr = entry
        .append_operation(llvm::get_element_ptr(
            context,
            entry.argument(1)?.into(),
            DenseI32ArrayAttribute::new(
                context,
                &[StarknetSyscallHandlerCallbacks::<()>::SECP256K1_NEW.try_into()?],
            ),
            llvm::r#type::pointer(context, 0),
            llvm::r#type::pointer(context, 0),
            location,
        ))
        .result(0)?
        .into();
    let fn_ptr = entry
        .append_operation(llvm::load(
            context,
            fn_ptr,
            llvm::r#type::pointer(context, 0),
            location,
            LoadStoreOptions::default(),
        ))
        .result(0)?
        .into();
    // Indirect call: (result_ptr, self_ptr, gas, x, y) — all by pointer.
    entry.append_operation(
        OperationBuilder::new("llvm.call", location)
            .add_operands(&[
                fn_ptr,
                result_ptr,
                ptr,
                gas_builtin_ptr,
                x_arg_ptr,
                y_arg_ptr,
            ])
            .build()?,
    );
    // Read the result enum back from the out-pointer.
    let result = entry.load(
        context,
        location,
        result_ptr,
        llvm::r#type::r#struct(
            context,
            &[
                result_tag_ty,
                llvm::r#type::array(
                    IntegerType::new(context, 8).into(),
                    (result_layout.size() - 1).try_into()?,
                ),
            ],
            false,
        ),
    )?;
    let result_tag = entry.extract_value(
        context,
        location,
        result,
        IntegerType::new(context, 1).into(),
        0,
    )?;
    // Load both possible payloads from their offsets past the tag.
    let payload_ok = {
        let ptr = entry.gep(
            context,
            location,
            result_ptr,
            &[GepIndex::Const(
                result_tag_layout.extend(variant_tys[0].1)?.1.try_into()?,
            )],
            IntegerType::new(context, 8).into(),
        )?;
        entry.load(context, location, ptr, variant_tys[0].0)?
    };
    let payload_err = {
        let ptr = entry.gep(
            context,
            location,
            result_ptr,
            &[GepIndex::Const(
                result_tag_layout.extend(variant_tys[1].1)?.1.try_into()?,
            )],
            IntegerType::new(context, 8).into(),
        )?;
        entry.load(context, location, ptr, variant_tys[1].0)?
    };
    // The syscall may have consumed gas; reload the builtin.
    let remaining_gas = entry.load(context, location, gas_builtin_ptr, gas_ty)?;
    // Branch on the tag; the pairing below routes tag 1 to the error payload
    // and tag 0 to the success payload.
    helper.cond_br(
        context,
        entry,
        result_tag,
        [1, 0],
        [
            &[remaining_gas, entry.argument(1)?.into(), payload_err],
            &[remaining_gas, entry.argument(1)?.into(), payload_ok],
        ],
        location,
    )
}
/// Generate MLIR operations for the `secp256k1_add` syscall.
///
/// Same calling pattern as `build_k1_new`: the handler vtable is indexed by
/// `SECP256K1_ADD`, the gas builtin and the two points `p0`/`p1` (arguments
/// 2 and 3) are passed via stack allocas, and the returned result enum's tag
/// selects the success or error branch.
pub fn build_k1_add<'ctx, 'this>(
    context: &'ctx Context,
    registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, 'this>,
    metadata: &mut MetadataStorage,
    info: &SignatureOnlyConcreteLibfunc,
) -> Result<()> {
    // Extract self pointer.
    let ptr = entry
        .append_operation(llvm::load(
            context,
            entry.argument(1)?.into(),
            llvm::r#type::pointer(context, 0),
            location,
            LoadStoreOptions::default(),
        ))
        .result(0)?
        .into();
    // Allocate space for the return value.
    let (result_layout, (result_tag_ty, result_tag_layout), variant_tys) =
        crate::types::r#enum::get_type_for_variants(
            context,
            helper,
            registry,
            metadata,
            &[
                info.branch_signatures()[0].vars[2].ty.clone(),
                info.branch_signatures()[1].vars[2].ty.clone(),
            ],
        )?;
    // Result slot layout: `{ tag, [size - 1 x i8] }`.
    let result_ptr = helper.init_block().alloca1(
        context,
        location,
        llvm::r#type::r#struct(
            context,
            &[
                result_tag_ty,
                llvm::r#type::array(
                    IntegerType::new(context, 8).into(),
                    (result_layout.size() - 1).try_into()?,
                ),
            ],
            false,
        ),
        result_layout.align(),
    )?;
    // Allocate space and write the current gas.
    let (gas_ty, gas_layout) = registry.build_type_with_layout(
        context,
        helper,
        metadata,
        &info.param_signatures()[0].ty,
    )?;
    let gas_builtin_ptr =
        helper
            .init_block()
            .alloca1(context, location, gas_ty, gas_layout.align())?;
    entry.append_operation(llvm::store(
        context,
        entry.argument(0)?.into(),
        gas_builtin_ptr,
        location,
        LoadStoreOptions::default(),
    ));
    let (p0_ty, p0_layout) = registry.build_type_with_layout(
        context,
        helper,
        metadata,
        &info.signature.param_signatures[2].ty,
    )?;
    let (p1_ty, p1_layout) = registry.build_type_with_layout(
        context,
        helper,
        metadata,
        &info.signature.param_signatures[3].ty,
    )?;
    // Allocate `p0` argument and write the value.
    let p0_arg_ptr = helper
        .init_block()
        .alloca1(context, location, p0_ty, p0_layout.align())?;
    entry.append_operation(llvm::store(
        context,
        entry.argument(2)?.into(),
        p0_arg_ptr,
        location,
        LoadStoreOptions::default(),
    ));
    // Allocate `p1` argument and write the value.
    let p1_arg_ptr = helper
        .init_block()
        .alloca1(context, location, p1_ty, p1_layout.align())?;
    entry.append_operation(llvm::store(
        context,
        entry.argument(3)?.into(),
        p1_arg_ptr,
        location,
        LoadStoreOptions::default(),
    ));
    // Fetch the callback's function pointer from the handler vtable.
    let fn_ptr = entry
        .append_operation(llvm::get_element_ptr(
            context,
            entry.argument(1)?.into(),
            DenseI32ArrayAttribute::new(
                context,
                &[StarknetSyscallHandlerCallbacks::<()>::SECP256K1_ADD.try_into()?],
            ),
            llvm::r#type::pointer(context, 0),
            llvm::r#type::pointer(context, 0),
            location,
        ))
        .result(0)?
        .into();
    let fn_ptr = entry
        .append_operation(llvm::load(
            context,
            fn_ptr,
            llvm::r#type::pointer(context, 0),
            location,
            LoadStoreOptions::default(),
        ))
        .result(0)?
        .into();
    // Indirect call: (result_ptr, self_ptr, gas, p0, p1) — all by pointer.
    entry.append_operation(
        OperationBuilder::new("llvm.call", location)
            .add_operands(&[
                fn_ptr,
                result_ptr,
                ptr,
                gas_builtin_ptr,
                p0_arg_ptr,
                p1_arg_ptr,
            ])
            .build()?,
    );
    // Read the result enum back from the out-pointer.
    let result = entry.load(
        context,
        location,
        result_ptr,
        llvm::r#type::r#struct(
            context,
            &[
                result_tag_ty,
                llvm::r#type::array(
                    IntegerType::new(context, 8).into(),
                    (result_layout.size() - 1).try_into()?,
                ),
            ],
            false,
        ),
    )?;
    let result_tag = entry.extract_value(
        context,
        location,
        result,
        IntegerType::new(context, 1).into(),
        0,
    )?;
    // Load both possible payloads from their offsets past the tag.
    let payload_ok = {
        let ptr = entry.gep(
            context,
            location,
            result_ptr,
            &[GepIndex::Const(
                result_tag_layout.extend(variant_tys[0].1)?.1.try_into()?,
            )],
            IntegerType::new(context, 8).into(),
        )?;
        entry.load(context, location, ptr, variant_tys[0].0)?
    };
    let payload_err = {
        let ptr = entry.gep(
            context,
            location,
            result_ptr,
            &[GepIndex::Const(
                result_tag_layout.extend(variant_tys[1].1)?.1.try_into()?,
            )],
            IntegerType::new(context, 8).into(),
        )?;
        entry.load(context, location, ptr, variant_tys[1].0)?
    };
    // The syscall may have consumed gas; reload the builtin.
    let remaining_gas = entry.load(context, location, gas_builtin_ptr, gas_ty)?;
    // Branch on the tag; pairing maps tag 1 → error, tag 0 → success.
    helper.cond_br(
        context,
        entry,
        result_tag,
        [1, 0],
        [
            &[remaining_gas, entry.argument(1)?.into(), payload_err],
            &[remaining_gas, entry.argument(1)?.into(), payload_ok],
        ],
        location,
    )
}
/// Generate MLIR operations for the `secp256k1_mul` syscall.
///
/// Same calling pattern as `build_k1_new`: the handler vtable is indexed by
/// `SECP256K1_MUL`, the gas builtin, the point `p` (argument 2) and the
/// `scalar` (argument 3) are passed via stack allocas, and the returned
/// result enum's tag selects the success or error branch.
pub fn build_k1_mul<'ctx, 'this>(
    context: &'ctx Context,
    registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, 'this>,
    metadata: &mut MetadataStorage,
    info: &SignatureOnlyConcreteLibfunc,
) -> Result<()> {
    // Extract self pointer.
    let ptr = entry
        .append_operation(llvm::load(
            context,
            entry.argument(1)?.into(),
            llvm::r#type::pointer(context, 0),
            location,
            LoadStoreOptions::default(),
        ))
        .result(0)?
        .into();
    // Allocate space for the return value.
    let (result_layout, (result_tag_ty, result_tag_layout), variant_tys) =
        crate::types::r#enum::get_type_for_variants(
            context,
            helper,
            registry,
            metadata,
            &[
                info.branch_signatures()[0].vars[2].ty.clone(),
                info.branch_signatures()[1].vars[2].ty.clone(),
            ],
        )?;
    // Result slot layout: `{ tag, [size - 1 x i8] }`.
    let result_ptr = helper.init_block().alloca1(
        context,
        location,
        llvm::r#type::r#struct(
            context,
            &[
                result_tag_ty,
                llvm::r#type::array(
                    IntegerType::new(context, 8).into(),
                    (result_layout.size() - 1).try_into()?,
                ),
            ],
            false,
        ),
        result_layout.align(),
    )?;
    // Allocate space and write the current gas.
    let (gas_ty, gas_layout) = registry.build_type_with_layout(
        context,
        helper,
        metadata,
        &info.param_signatures()[0].ty,
    )?;
    let gas_builtin_ptr =
        helper
            .init_block()
            .alloca1(context, location, gas_ty, gas_layout.align())?;
    entry.append_operation(llvm::store(
        context,
        entry.argument(0)?.into(),
        gas_builtin_ptr,
        location,
        LoadStoreOptions::default(),
    ));
    let (p_ty, p_layout) = registry.build_type_with_layout(
        context,
        helper,
        metadata,
        &info.signature.param_signatures[2].ty,
    )?;
    let (scalar_ty, scalar_layout) = registry.build_type_with_layout(
        context,
        helper,
        metadata,
        &info.signature.param_signatures[3].ty,
    )?;
    // Allocate `p` argument and write the value.
    let p_arg_ptr = helper
        .init_block()
        .alloca1(context, location, p_ty, p_layout.align())?;
    entry.append_operation(llvm::store(
        context,
        entry.argument(2)?.into(),
        p_arg_ptr,
        location,
        LoadStoreOptions::default(),
    ));
    // Allocate `scalar` argument and write the value.
    let scalar_arg_ptr =
        helper
            .init_block()
            .alloca1(context, location, scalar_ty, scalar_layout.align())?;
    entry.append_operation(llvm::store(
        context,
        entry.argument(3)?.into(),
        scalar_arg_ptr,
        location,
        LoadStoreOptions::default(),
    ));
    // Fetch the callback's function pointer from the handler vtable.
    let fn_ptr = entry
        .append_operation(llvm::get_element_ptr(
            context,
            entry.argument(1)?.into(),
            DenseI32ArrayAttribute::new(
                context,
                &[StarknetSyscallHandlerCallbacks::<()>::SECP256K1_MUL.try_into()?],
            ),
            llvm::r#type::pointer(context, 0),
            llvm::r#type::pointer(context, 0),
            location,
        ))
        .result(0)?
        .into();
    let fn_ptr = entry
        .append_operation(llvm::load(
            context,
            fn_ptr,
            llvm::r#type::pointer(context, 0),
            location,
            LoadStoreOptions::default(),
        ))
        .result(0)?
        .into();
    // Indirect call: (result_ptr, self_ptr, gas, p, scalar) — all by pointer.
    entry.append_operation(
        OperationBuilder::new("llvm.call", location)
            .add_operands(&[
                fn_ptr,
                result_ptr,
                ptr,
                gas_builtin_ptr,
                p_arg_ptr,
                scalar_arg_ptr,
            ])
            .build()?,
    );
    // Read the result enum back from the out-pointer.
    let result = entry.load(
        context,
        location,
        result_ptr,
        llvm::r#type::r#struct(
            context,
            &[
                result_tag_ty,
                llvm::r#type::array(
                    IntegerType::new(context, 8).into(),
                    (result_layout.size() - 1).try_into()?,
                ),
            ],
            false,
        ),
    )?;
    let result_tag = entry.extract_value(
        context,
        location,
        result,
        IntegerType::new(context, 1).into(),
        0,
    )?;
    // Load both possible payloads from their offsets past the tag.
    let payload_ok = {
        let ptr = entry.gep(
            context,
            location,
            result_ptr,
            &[GepIndex::Const(
                result_tag_layout.extend(variant_tys[0].1)?.1.try_into()?,
            )],
            IntegerType::new(context, 8).into(),
        )?;
        entry.load(context, location, ptr, variant_tys[0].0)?
    };
    let payload_err = {
        let ptr = entry.gep(
            context,
            location,
            result_ptr,
            &[GepIndex::Const(
                result_tag_layout.extend(variant_tys[1].1)?.1.try_into()?,
            )],
            IntegerType::new(context, 8).into(),
        )?;
        entry.load(context, location, ptr, variant_tys[1].0)?
    };
    // The syscall may have consumed gas; reload the builtin.
    let remaining_gas = entry.load(context, location, gas_builtin_ptr, gas_ty)?;
    // Branch on the tag; pairing maps tag 1 → error, tag 0 → success.
    helper.cond_br(
        context,
        entry,
        result_tag,
        [1, 0],
        [
            &[remaining_gas, entry.argument(1)?.into(), payload_err],
            &[remaining_gas, entry.argument(1)?.into(), payload_ok],
        ],
        location,
    )
}
/// Generate MLIR operations for the `secp256k1_get_point_from_x` syscall.
///
/// Same calling pattern as `build_k1_new`: the handler vtable is indexed by
/// `SECP256K1_GET_POINT_FROM_X`, the gas builtin, the `x` coordinate
/// (argument 2) and the `y_parity` flag (argument 3, stored as an i1 slot)
/// are passed via stack allocas, and the returned result enum's tag selects
/// the success or error branch.
pub fn build_k1_get_point_from_x<'ctx, 'this>(
    context: &'ctx Context,
    registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, 'this>,
    metadata: &mut MetadataStorage,
    info: &SignatureOnlyConcreteLibfunc,
) -> Result<()> {
    // Extract self pointer.
    let ptr = entry
        .append_operation(llvm::load(
            context,
            entry.argument(1)?.into(),
            llvm::r#type::pointer(context, 0),
            location,
            LoadStoreOptions::default(),
        ))
        .result(0)?
        .into();
    // Allocate space for the return value.
    let (result_layout, (result_tag_ty, result_tag_layout), variant_tys) =
        crate::types::r#enum::get_type_for_variants(
            context,
            helper,
            registry,
            metadata,
            &[
                info.branch_signatures()[0].vars[2].ty.clone(),
                info.branch_signatures()[1].vars[2].ty.clone(),
            ],
        )?;
    // Result slot layout: `{ tag, [size - 1 x i8] }`.
    let result_ptr = helper.init_block().alloca1(
        context,
        location,
        llvm::r#type::r#struct(
            context,
            &[
                result_tag_ty,
                llvm::r#type::array(
                    IntegerType::new(context, 8).into(),
                    (result_layout.size() - 1).try_into()?,
                ),
            ],
            false,
        ),
        result_layout.align(),
    )?;
    // Allocate space and write the current gas.
    let (gas_ty, gas_layout) = registry.build_type_with_layout(
        context,
        helper,
        metadata,
        &info.param_signatures()[0].ty,
    )?;
    let gas_builtin_ptr =
        helper
            .init_block()
            .alloca1(context, location, gas_ty, gas_layout.align())?;
    entry.append_operation(llvm::store(
        context,
        entry.argument(0)?.into(),
        gas_builtin_ptr,
        location,
        LoadStoreOptions::default(),
    ));
    let (x_ty, x_layout) = registry.build_type_with_layout(
        context,
        helper,
        metadata,
        &info.signature.param_signatures[2].ty,
    )?;
    // Allocate `x` argument and write the value.
    let x_arg_ptr = helper
        .init_block()
        .alloca1(context, location, x_ty, x_layout.align())?;
    entry.append_operation(llvm::store(
        context,
        entry.argument(2)?.into(),
        x_arg_ptr,
        location,
        LoadStoreOptions::default(),
    ));
    // Allocate `y_parity` argument and write the value.
    let y_parity_arg_ptr =
        helper
            .init_block()
            .alloca_int(context, location, 1, get_integer_layout(1).align())?;
    entry.append_operation(llvm::store(
        context,
        entry.argument(3)?.into(),
        y_parity_arg_ptr,
        location,
        LoadStoreOptions::default(),
    ));
    // Fetch the callback's function pointer from the handler vtable.
    let fn_ptr = entry
        .append_operation(llvm::get_element_ptr(
            context,
            entry.argument(1)?.into(),
            DenseI32ArrayAttribute::new(
                context,
                &[StarknetSyscallHandlerCallbacks::<()>::SECP256K1_GET_POINT_FROM_X.try_into()?],
            ),
            llvm::r#type::pointer(context, 0),
            llvm::r#type::pointer(context, 0),
            location,
        ))
        .result(0)?
        .into();
    let fn_ptr = entry
        .append_operation(llvm::load(
            context,
            fn_ptr,
            llvm::r#type::pointer(context, 0),
            location,
            LoadStoreOptions::default(),
        ))
        .result(0)?
        .into();
    // Indirect call: (result_ptr, self_ptr, gas, x, y_parity) — by pointer.
    entry.append_operation(
        OperationBuilder::new("llvm.call", location)
            .add_operands(&[
                fn_ptr,
                result_ptr,
                ptr,
                gas_builtin_ptr,
                x_arg_ptr,
                y_parity_arg_ptr,
            ])
            .build()?,
    );
    // Read the result enum back from the out-pointer.
    let result = entry.load(
        context,
        location,
        result_ptr,
        llvm::r#type::r#struct(
            context,
            &[
                result_tag_ty,
                llvm::r#type::array(
                    IntegerType::new(context, 8).into(),
                    (result_layout.size() - 1).try_into()?,
                ),
            ],
            false,
        ),
    )?;
    let result_tag = entry.extract_value(
        context,
        location,
        result,
        IntegerType::new(context, 1).into(),
        0,
    )?;
    // Load the two variants of the result returned by the syscall handler.
    let payload_ok = {
        let ptr = entry.gep(
            context,
            location,
            result_ptr,
            &[GepIndex::Const(
                result_tag_layout.extend(variant_tys[0].1)?.1.try_into()?,
            )],
            IntegerType::new(context, 8).into(),
        )?;
        entry.load(context, location, ptr, variant_tys[0].0)?
    };
    let payload_err = {
        let ptr = entry.gep(
            context,
            location,
            result_ptr,
            &[GepIndex::Const(
                result_tag_layout.extend(variant_tys[1].1)?.1.try_into()?,
            )],
            IntegerType::new(context, 8).into(),
        )?;
        entry.load(context, location, ptr, variant_tys[1].0)?
    };
    // The syscall may have consumed gas; reload the builtin.
    let remaining_gas = entry.load(context, location, gas_builtin_ptr, gas_ty)?;
    // Branch on the tag; pairing maps tag 1 → error, tag 0 → success.
    helper.cond_br(
        context,
        entry,
        result_tag,
        [1, 0],
        [
            &[remaining_gas, entry.argument(1)?.into(), payload_err],
            &[remaining_gas, entry.argument(1)?.into(), payload_ok],
        ],
        location,
    )
}
pub fn build_k1_get_xy<'ctx, 'this>(
context: &'ctx Context,
registry: &ProgramRegistry<CoreType, CoreLibfunc>,
entry: &'this Block<'ctx>,
location: Location<'ctx>,
helper: &LibfuncHelper<'ctx, 'this>,
metadata: &mut MetadataStorage,
info: &SignatureOnlyConcreteLibfunc,
) -> Result<()> {
// Extract self pointer.
let ptr = entry
.append_operation(llvm::load(
context,
entry.argument(1)?.into(),
llvm::r#type::pointer(context, 0),
location,
LoadStoreOptions::default(),
))
.result(0)?
.into();
// Allocate space for the return value.
let (result_layout, (result_tag_ty, result_tag_layout), variant_tys) = {
// Note: This libfunc has multiple return values when successful, therefore the method used
// for the other libfuncs cannot be reused here.
let u128_layout = get_integer_layout(128);
let u256_layout = u128_layout.extend(u128_layout)?.0;
let u256_ty = llvm::r#type::r#struct(
context,
&[
IntegerType::new(context, 128).into(),
IntegerType::new(context, 128).into(),
],
false,
);
let (ok_ty, ok_layout) = (
llvm::r#type::r#struct(context, &[u256_ty, u256_ty], false),
u256_layout.extend(u256_layout)?.0,
);
let (err_ty, err_layout) = registry.build_type_with_layout(
context,
helper,
metadata,
&info.branch_signatures()[1].vars[2].ty,
)?;
let (tag_ty, tag_layout) = (IntegerType::new(context, 1).into(), get_integer_layout(1));
(
tag_layout
.extend(Layout::from_size_align(
ok_layout.size().max(err_layout.size()),
ok_layout.align().max(err_layout.align()),
)?)?
.0,
(tag_ty, tag_layout),
[(ok_ty, ok_layout), (err_ty, err_layout)],
)
};
let result_ptr = helper.init_block().alloca1(
context,
location,
llvm::r#type::r#struct(
context,
&[
result_tag_ty,
llvm::r#type::array(
IntegerType::new(context, 8).into(),
(result_layout.size() - 1).try_into()?,
),
],
false,
),
result_layout.align(),
)?;
// Allocate space and write the current gas.
let (gas_ty, gas_layout) = registry.build_type_with_layout(
context,
helper,
metadata,
&info.param_signatures()[0].ty,
)?;
let gas_builtin_ptr =
helper
.init_block()
.alloca1(context, location, gas_ty, gas_layout.align())?;
entry.append_operation(llvm::store(
context,
entry.argument(0)?.into(),
gas_builtin_ptr,
location,
LoadStoreOptions::default(),
));
let (p_ty, p_layout) = registry.build_type_with_layout(
context,
helper,
metadata,
&info.signature.param_signatures[2].ty,
)?;
// Allocate `p` argument and write the value.
let p_arg_ptr = helper
.init_block()
.alloca1(context, location, p_ty, p_layout.align())?;
entry.append_operation(llvm::store(
context,
entry.argument(2)?.into(),
p_arg_ptr,
location,
LoadStoreOptions::default(),
));
let fn_ptr = entry
.append_operation(llvm::get_element_ptr(
context,
entry.argument(1)?.into(),
DenseI32ArrayAttribute::new(
context,
&[StarknetSyscallHandlerCallbacks::<()>::SECP256K1_GET_XY.try_into()?],
),
llvm::r#type::pointer(context, 0),
llvm::r#type::pointer(context, 0),
location,
))
.result(0)?
.into();
let fn_ptr = entry
.append_operation(llvm::load(
context,
fn_ptr,
llvm::r#type::pointer(context, 0),
location,
LoadStoreOptions::default(),
))
.result(0)?
.into();
entry.append_operation(
OperationBuilder::new("llvm.call", location)
.add_operands(&[fn_ptr, result_ptr, ptr, gas_builtin_ptr, p_arg_ptr])
.build()?,
);
let result = entry.load(
context,
location,
result_ptr,
llvm::r#type::r#struct(
context,
&[
result_tag_ty,
llvm::r#type::array(
IntegerType::new(context, 8).into(),
(result_layout.size() - 1).try_into()?,
),
],
false,
),
)?;
let result_tag = entry.extract_value(
context,
location,
result,
IntegerType::new(context, 1).into(),
0,
)?;
let payload_ok = {
let ptr = entry.gep(
context,
location,
result_ptr,
&[GepIndex::Const(
result_tag_layout.extend(variant_tys[0].1)?.1.try_into()?,
)],
IntegerType::new(context, 8).into(),
)?;
let value = entry.load(context, location, ptr, variant_tys[0].0)?;
let x_value = entry.extract_value(
context,
location,
value,
llvm::r#type::r#struct(
context,
&[
IntegerType::new(context, 128).into(),
IntegerType::new(context, 128).into(),
],
false,
),
0,
)?;
let y_value = entry.extract_value(
context,
location,
value,
llvm::r#type::r#struct(
context,
&[
IntegerType::new(context, 128).into(),
IntegerType::new(context, 128).into(),
],
false,
),
1,
)?;
(x_value, y_value)
};
let payload_err = {
let ptr = entry.gep(
context,
location,
result_ptr,
&[GepIndex::Const(
result_tag_layout.extend(variant_tys[1].1)?.1.try_into()?,
)],
IntegerType::new(context, 8).into(),
)?;
entry.load(context, location, ptr, variant_tys[1].0)?
};
let remaining_gas = entry.load(context, location, gas_builtin_ptr, gas_ty)?;
helper.cond_br(
context,
entry,
result_tag,
[1, 0],
[
| rust | Apache-2.0 | f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce | 2026-01-04T20:20:54.031924Z | true |
lambdaclass/cairo_native | https://github.com/lambdaclass/cairo_native/blob/f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce/src/libfuncs/starknet/testing.rs | src/libfuncs/starknet/testing.rs | #![cfg(feature = "with-cheatcode")]
use crate::{
error::{panic::ToNativeAssertError, Result},
libfuncs::LibfuncHelper,
metadata::{runtime_bindings::RuntimeBindingsMeta, MetadataStorage},
utils::{get_integer_layout, ProgramRegistryExt},
};
use cairo_lang_sierra::{
extensions::{
core::{CoreLibfunc, CoreType},
starknet::testing::CheatcodeConcreteLibfunc,
ConcreteLibfunc,
},
program_registry::ProgramRegistry,
};
use melior::{
dialect::llvm::{self, alloca, AllocaOptions, LoadStoreOptions},
helpers::{ArithBlockExt, BuiltinBlockExt, LlvmBlockExt},
ir::{
attribute::{IntegerAttribute, TypeAttribute},
r#type::IntegerType,
Block, BlockLike, Location,
},
Context,
};
/// Generates MLIR for the `cheatcode` libfunc (only compiled with the
/// `with-cheatcode` feature).
///
/// Allocates stack slots for the selector (a 256-bit integer, read as a
/// `[u8; 32]` by the runtime) and for the `Span<felt252>` argument, invokes
/// the runtime's cheatcode handler through the registered runtime bindings,
/// then loads whatever the handler wrote into the result pointer and branches
/// to the libfunc's single target with it.
pub fn build<'ctx, 'this>(
    context: &'ctx Context,
    registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    entry: &'this Block<'ctx>,
    location: Location<'ctx>,
    helper: &LibfuncHelper<'ctx, 'this>,
    metadata: &mut MetadataStorage,
    info: &CheatcodeConcreteLibfunc,
) -> Result<()> {
    // Calculate the result layout and type, based on the branch signature
    let (result_type, result_layout) = registry.build_type_with_layout(
        context,
        helper,
        metadata,
        &info.branch_signatures()[0].vars[0].ty,
    )?;
    // Allocate the result pointer with calculated layout and type. The alloca
    // is emitted in the function's init block, not in `entry`.
    let result_ptr = helper
        .init_block()
        .append_operation(alloca(
            context,
            helper.init_block().const_int(context, location, 1, 64)?,
            llvm::r#type::pointer(context, 0),
            location,
            AllocaOptions::new()
                .align(Some(IntegerAttribute::new(
                    IntegerType::new(context, 64).into(),
                    result_layout.align().try_into()?,
                )))
                .elem_type(Some(TypeAttribute::new(result_type))),
        ))
        .result(0)?
        .into();
    // Allocate and store the selector. The type contains 256 bits as it's
    // interpreted as a [u8; 32] by the runtime.
    let selector = helper
        .init_block()
        .const_int(context, location, info.selector.clone(), 256)?;
    let selector_ptr =
        helper
            .init_block()
            .alloca_int(context, location, 256, get_integer_layout(256).align())?;
    helper
        .init_block()
        .store(context, location, selector_ptr, selector)?;
    // Allocate and store arguments. The cairo type is a Span<Felt252> (the outer struct),
    // which contains an Array<Felt252> (the inner struct).
    let span_felt252_type = llvm::r#type::r#struct(
        context,
        &[llvm::r#type::r#struct(
            context,
            &[
                llvm::r#type::pointer(context, 0),
                IntegerType::new(context, 32).into(),
                IntegerType::new(context, 32).into(),
                IntegerType::new(context, 32).into(),
            ],
            false,
        )],
        false,
    );
    let args_ptr = helper.init_block().alloca1(
        context,
        location,
        span_felt252_type,
        get_integer_layout(64).align(),
    )?;
    entry.store(context, location, args_ptr, entry.argument(0)?.into())?;
    // Call runtime cheatcode syscall wrapper.
    metadata
        .get_mut::<RuntimeBindingsMeta>()
        .to_native_assert_error("runtime bindings should be available")?
        .vtable_cheatcode(
            context,
            helper,
            entry,
            location,
            result_ptr,
            selector_ptr,
            args_ptr,
        )?;
    // Load result from result ptr and branch to the single target.
    let result = entry.append_op_result(llvm::load(
        context,
        result_ptr,
        result_type,
        location,
        LoadStoreOptions::new(),
    ))?;
    helper.br(entry, 0, &[result], location)
}
| rust | Apache-2.0 | f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce | 2026-01-04T20:20:54.031924Z | false |
lambdaclass/cairo_native | https://github.com/lambdaclass/cairo_native/blob/f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce/src/metadata/enum_snapshot_variants.rs | src/metadata/enum_snapshot_variants.rs | use cairo_lang_sierra::ids::ConcreteTypeId;
use std::collections::HashMap;
/// Registry that associates each `Snapshot<Enum>` type with the variant types
/// of the enum it wraps.
#[derive(Default)]
pub struct EnumSnapshotVariantsMeta {
    variants_by_snapshot: HashMap<ConcreteTypeId, Vec<ConcreteTypeId>>,
}

impl EnumSnapshotVariantsMeta {
    /// Record the variant types for the given snapshot type, replacing any
    /// previous mapping.
    pub fn set_mapping(&mut self, snapshot_id: &ConcreteTypeId, variants: &[ConcreteTypeId]) {
        let owned_variants = variants.to_vec();
        self.variants_by_snapshot
            .insert(snapshot_id.clone(), owned_variants);
    }

    /// Look up the variant types previously registered for the given snapshot
    /// type, if any.
    pub fn get_variants(&self, snapshot_id: &ConcreteTypeId) -> Option<&Vec<ConcreteTypeId>> {
        self.variants_by_snapshot.get(snapshot_id)
    }
}
| rust | Apache-2.0 | f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce | 2026-01-04T20:20:54.031924Z | false |
lambdaclass/cairo_native | https://github.com/lambdaclass/cairo_native/blob/f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce/src/metadata/realloc_bindings.rs | src/metadata/realloc_bindings.rs | //! # Memory allocation external bindings
//!
//! This metadata ensures that the bindings to the C function `realloc` exist in the current
//! compilation context.
use melior::{
dialect::llvm,
ir::{
attribute::{FlatSymbolRefAttribute, StringAttribute, TypeAttribute},
operation::OperationBuilder,
r#type::IntegerType,
BlockLike, Identifier, Location, Module, Operation, Region, Value,
},
Context, Error,
};
/// Memory allocation `realloc` metadata.
#[derive(Debug)]
pub struct ReallocBindingsMeta;
impl ReallocBindingsMeta {
/// Register the bindings to the `realloc` C function and return the metadata.
pub fn new(context: &Context, module: &Module) -> Self {
module.body().append_operation(llvm::func(
context,
StringAttribute::new(context, "realloc"),
TypeAttribute::new(llvm::r#type::function(
llvm::r#type::pointer(context, 0),
&[
llvm::r#type::pointer(context, 0),
IntegerType::new(context, 64).into(),
],
false,
)),
Region::new(),
&[(
Identifier::new(context, "sym_visibility"),
StringAttribute::new(context, "private").into(),
)],
Location::unknown(context),
));
module.body().append_operation(llvm::func(
context,
StringAttribute::new(context, "free"),
TypeAttribute::new(llvm::r#type::function(
llvm::r#type::void(context),
&[llvm::r#type::pointer(context, 0)],
false,
)),
Region::new(),
&[(
Identifier::new(context, "sym_visibility"),
StringAttribute::new(context, "private").into(),
)],
Location::unknown(context),
));
Self
}
/// Calls the `realloc` function, returns a op with 1 result: an opaque pointer.
pub fn realloc<'c, 'a>(
context: &'c Context,
ptr: Value<'c, 'a>,
len: Value<'c, 'a>,
location: Location<'c>,
) -> Result<Operation<'c>, Error> {
OperationBuilder::new("llvm.call", location)
.add_attributes(&[(
Identifier::new(context, "callee"),
FlatSymbolRefAttribute::new(context, "realloc").into(),
)])
.add_operands(&[ptr, len])
.add_results(&[llvm::r#type::pointer(context, 0)])
.build()
}
/// Calls the `free` function.
pub fn free<'c>(
context: &'c Context,
ptr: Value<'c, '_>,
location: Location<'c>,
) -> Result<Operation<'c>, Error> {
OperationBuilder::new("llvm.call", location)
.add_attributes(&[(
Identifier::new(context, "callee"),
FlatSymbolRefAttribute::new(context, "free").into(),
)])
.add_operands(&[ptr])
.build()
}
}
| rust | Apache-2.0 | f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce | 2026-01-04T20:20:54.031924Z | false |
lambdaclass/cairo_native | https://github.com/lambdaclass/cairo_native/blob/f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce/src/metadata/profiler.rs | src/metadata/profiler.rs | #![cfg(feature = "with-libfunc-profiling")]
//! The libfunc profiling feature is used to generate information about every libfunc executed in a sierra program.
//!
//! When this feature is used, the compiler will call the important methods:
//!
//! 1. `measure_timestamp`: called before every libfunc execution.
//!
//! 2. `push_frame`: called before every branching operation. This method will also call `measure_timestamp`. This,
//! with the timestamp calculated before the execution, will allow to measure each statement's execution time.
//! If for some reason, the statement delta time could not be gathered, we just record an unit value, recording that
//! we executed the given statement.
//!
//! Once the program execution finished and the information was gathered, the `get_profile` method can be called.
//! It groups the samples by libfunc, and returns all data related to each libfunc.
//!
//! As well as with the trace-dump feature, in the context of Starknet contracts, we need to add support for building
//! profiles for multiple executions. To do so, we need two important elements, which must be set before every contract
//! execution:
//!
//! 1. A global static hashmap to map every profile ID to its respective profiler. See `LIBFUNC_PROFILE`.
//!
//! 2. A counter to track the ID of the current profiler, which gets updated every time we switch to another
//! contract. Since a contract can call other contracts, we need a way of restoring the counter after every execution.
//!
//! See `cairo-native-run` for an example on how to do it.
use crate::error::{Error, Result};
use cairo_lang_sierra::{
ids::ConcreteLibfuncId,
program::{Program, Statement, StatementIdx},
};
use melior::{
dialect::{
arith::{self, CmpiPredicate},
llvm::{self},
memref, ods,
},
helpers::{ArithBlockExt, BuiltinBlockExt, LlvmBlockExt},
ir::{
attribute::{FlatSymbolRefAttribute, StringAttribute, TypeAttribute},
operation::OperationBuilder,
r#type::{IntegerType, MemRefType},
Attribute, Block, BlockLike, Identifier, Location, Module, Region, Value,
},
Context,
};
use std::{
cell::RefCell,
collections::{HashMap, HashSet},
ffi::c_void,
ptr,
sync::{LazyLock, Mutex},
};
/// Runtime bindings used by the libfunc-profiling instrumentation.
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
pub enum ProfilerBinding {
    /// Callback that records a (statement index, tick delta) sample.
    PushStmt,
    /// Global holding the ID of the currently active profile.
    ProfileId,
}

impl ProfilerBinding {
    /// Symbol name under which the binding is exported to compiled modules.
    pub const fn symbol(self) -> &'static str {
        match self {
            Self::PushStmt => "cairo_native__profiler__push_stmt",
            Self::ProfileId => "cairo_native__profiler__profile_id",
        }
    }

    /// Pointer to the runtime function backing this binding.
    const fn function_ptr(self) -> *const () {
        match self {
            Self::PushStmt => ProfilerImpl::push_stmt as *const (),
            // `ProfileId` is a global constant, not a function, so there is no
            // function pointer to patch in.
            Self::ProfileId => ptr::null(),
        }
    }
}
/// Metadata responsible for emitting the libfunc-profiling instrumentation.
///
/// Tracks which runtime bindings have already had their globals declared in
/// the module, so each global is declared at most once.
#[derive(Clone, Default)]
pub struct ProfilerMeta {
    /// Bindings whose MLIR globals have already been declared.
    active_map: RefCell<HashSet<ProfilerBinding>>,
}

impl ProfilerMeta {
    pub fn new() -> Self {
        Self {
            active_map: RefCell::new(HashSet::new()),
        }
    }

    /// Register the global for the given binding, if not yet registered, and return
    /// a pointer to the stored value.
    ///
    /// For the function to be available, `setup_runtime` must be called before running the module
    fn build_function<'c, 'a>(
        &self,
        context: &'c Context,
        module: &Module,
        block: &'a Block<'c>,
        location: Location<'c>,
        binding: ProfilerBinding,
    ) -> Result<Value<'c, 'a>> {
        // `insert` returns true only the first time, so the weak global is
        // declared at most once per module.
        if self.active_map.borrow_mut().insert(binding) {
            module.body().append_operation(
                ods::llvm::mlir_global(
                    context,
                    Region::new(),
                    TypeAttribute::new(llvm::r#type::pointer(context, 0)),
                    StringAttribute::new(context, binding.symbol()),
                    Attribute::parse(context, "#llvm.linkage<weak>")
                        .ok_or(Error::ParseAttributeError)?,
                    location,
                )
                .into(),
            );
        }
        let global_address = block.append_op_result(
            ods::llvm::mlir_addressof(
                context,
                llvm::r#type::pointer(context, 0),
                FlatSymbolRefAttribute::new(context, binding.symbol()),
                location,
            )
            .into(),
        )?;
        // The global stores the function pointer itself; load it.
        Ok(block.load(
            context,
            location,
            global_address,
            llvm::r#type::pointer(context, 0),
        )?)
    }

    /// Load the current profile ID from the `ProfileId` memref global,
    /// declaring the global on first use.
    pub fn build_profile_id<'c, 'a>(
        &self,
        context: &'c Context,
        module: &Module,
        block: &'a Block<'c>,
        location: Location<'c>,
    ) -> Result<Value<'c, 'a>> {
        if self
            .active_map
            .borrow_mut()
            .insert(ProfilerBinding::ProfileId)
        {
            module.body().append_operation(memref::global(
                context,
                ProfilerBinding::ProfileId.symbol(),
                None,
                MemRefType::new(IntegerType::new(context, 64).into(), &[], None, None),
                None,
                false,
                None,
                location,
            ));
        }
        // Propagate the error with `?` instead of unwrapping, consistently
        // with the rest of this impl (this method already returns `Result`).
        let trace_profile_ptr = block.append_op_result(memref::get_global(
            context,
            ProfilerBinding::ProfileId.symbol(),
            MemRefType::new(IntegerType::new(context, 64).into(), &[], None, None),
            location,
        ))?;
        Ok(block.append_op_result(memref::load(trace_profile_ptr, &[], location))?)
    }

    /// Gets the current timestamp.
    ///
    /// The values returned are:
    /// 1. Timestamp: CPU cycles since its reset.
    /// 2. CPU's id core in which the execution is running (only for x86 arch).
    ///    In case of arm, 0 is always returned as there's no way to know in which
    ///    CPU core the execution was run.
    ///
    /// We use the last value to ensure that both the initial and the end timestamp of
    /// a libfunc's execution were calculated by the same core. This is to avoid gathering
    /// invalid data
    #[cfg(target_arch = "x86_64")]
    pub fn measure_timestamp<'c, 'a>(
        &self,
        context: &'c Context,
        block: &'a Block<'c>,
        location: Location<'c>,
    ) -> Result<(Value<'c, 'a>, Value<'c, 'a>)> {
        let i32_ty = IntegerType::new(context, 32).into();
        let i64_ty = IntegerType::new(context, 64).into();
        let k32 = block.const_int_from_type(context, location, 32, i64_ty)?;
        // edx:eax := TimeStampCounter (clock value)
        // ecx := IA32_TSC_AUX[31:0] (core ID)
        let value = block.append_op_result(
            OperationBuilder::new("llvm.inline_asm", location)
                .add_attributes(&[
                    (
                        Identifier::new(context, "asm_string"),
                        StringAttribute::new(context, "mfence\nrdtscp\nlfence").into(),
                    ),
                    (
                        Identifier::new(context, "has_side_effects"),
                        Attribute::unit(context),
                    ),
                    (
                        Identifier::new(context, "constraints"),
                        StringAttribute::new(context, "={edx},={eax},={ecx}").into(),
                    ),
                ])
                .add_results(&[llvm::r#type::r#struct(
                    context,
                    &[i32_ty, i32_ty, i32_ty],
                    false,
                )])
                .build()?,
        )?;
        let value_hi = block.extract_value(context, location, value, i32_ty, 0)?;
        let value_lo = block.extract_value(context, location, value, i32_ty, 1)?;
        let core_idx = block.extract_value(context, location, value, i32_ty, 2)?;
        // Widen both halves and recombine: value = (hi << 32) | lo.
        let value_hi = block.extui(value_hi, i64_ty, location)?;
        let value_lo = block.extui(value_lo, i64_ty, location)?;
        let value = block.shli(value_hi, k32, location)?;
        let value = block.append_op_result(arith::ori(value, value_lo, location))?;
        Ok((value, core_idx))
    }

    /// Gets the current timestamp.
    ///
    /// The values returned are:
    /// 1. Timestamp: CPU cycles since its reset.
    /// 2. CPU's id core in which the program is running (only for x86 arch).
    ///    In case of arm, 0 is always returned as there's no way to know in which
    ///    CPU core the execution was run.
    ///
    /// We use the last value to ensure that both the initial and the end timestamp of
    /// a libfunc's execution were calculated by the same core. This is to avoid gathering
    /// invalid data
    #[cfg(target_arch = "aarch64")]
    pub fn measure_timestamp<'c, 'a>(
        &self,
        context: &'c Context,
        block: &'a Block<'c>,
        location: Location<'c>,
    ) -> Result<(Value<'c, 'a>, Value<'c, 'a>)> {
        let i64_ty = IntegerType::new(context, 64).into();
        // Read the virtual counter register, fenced with `isb` on both sides.
        let value = block.append_op_result(
            OperationBuilder::new("llvm.inline_asm", location)
                .add_attributes(&[
                    (
                        Identifier::new(context, "asm_string"),
                        StringAttribute::new(context, "isb\nmrs $0, CNTVCT_EL0\nisb").into(),
                    ),
                    (
                        Identifier::new(context, "has_side_effects"),
                        Attribute::unit(context),
                    ),
                    (
                        Identifier::new(context, "constraints"),
                        StringAttribute::new(context, "=r").into(),
                    ),
                ])
                .add_results(&[i64_ty])
                .build()?,
        )?;
        // No portable way to query the current core on aarch64; report core 0.
        let core_idx = block.const_int(context, location, 0, 64)?;
        Ok((value, core_idx))
    }

    #[allow(clippy::too_many_arguments)]
    /// Receives two timestamps; if they originated in the same CPU core,
    /// the delta time between the two is calculated. If not, the delta time is
    /// assigned to -1 (`u64::MAX`). Then it pushes the frame, which is made of
    /// the statement index and the delta time.
    pub fn push_frame<'c>(
        &self,
        context: &'c Context,
        module: &Module,
        block: &Block<'c>,
        statement_idx: usize,
        // (timestamp, core_idx)
        t0: (Value<'c, '_>, Value<'c, '_>),
        t1: (Value<'c, '_>, Value<'c, '_>),
        location: Location<'c>,
    ) -> Result<()> {
        // If core idx matches:
        //     Calculate time delta.
        //     Write statement idx and time delta.
        // If core idx does not match:
        //     Write statement idx and -1.
        let trace_id = self.build_profile_id(context, module, block, location)?;
        let i64_ty = IntegerType::new(context, 64).into();
        let statement_idx = block.const_int_from_type(context, location, statement_idx, i64_ty)?;
        let is_same_core = block.cmpi(context, CmpiPredicate::Eq, t0.1, t1.1, location)?;
        let delta_value = block.append_op_result(arith::subi(t1.0, t0.0, location))?;
        // `u64::MAX` marks a sample whose delta could not be measured.
        let invalid_value = block.const_int_from_type(context, location, u64::MAX, i64_ty)?;
        let delta_value = block.append_op_result(arith::select(
            is_same_core,
            delta_value,
            invalid_value,
            location,
        ))?;
        let callback_ptr =
            self.build_function(context, module, block, location, ProfilerBinding::PushStmt)?;
        block.append_operation(
            ods::llvm::call(
                context,
                &[callback_ptr, trace_id, statement_idx, delta_value],
                location,
            )
            .into(),
        );
        Ok(())
    }
}
/// Represents the entire profile of the execution.
///
/// It maps the libfunc ID to a libfunc profile.
type Profile = HashMap<ConcreteLibfuncId, LibfuncProfileData>;
/// Represents the profile data for a particular libfunc.
#[derive(Default)]
pub struct LibfuncProfileData {
    /// A vector of execution times, for each time the libfunc was executed.
    /// It expresses the number of CPU cycles completed during the execution.
    pub deltas: Vec<u64>,
    /// If the time delta for a particular execution could not be gathered,
    /// we just increase `extra_counts` by 1.
    pub extra_counts: u64,
}
/// Global registry mapping a profile ID to its profiler, allowing multiple
/// executions (e.g. nested contract calls) to record samples independently.
pub static LIBFUNC_PROFILE: LazyLock<Mutex<HashMap<u64, ProfilerImpl>>> =
    LazyLock::new(|| Mutex::new(HashMap::new()));
/// Collector for the statement samples recorded during a single execution.
#[derive(Default)]
pub struct ProfilerImpl {
    /// The samples recorded by the profiler. A value of `u64::MAX` implies
    /// that the delta time for a statement could not be gathered.
    pub samples: Vec<(StatementIdx, u64)>,
}

impl ProfilerImpl {
    pub fn new() -> Self {
        Self {
            samples: Vec::new(),
        }
    }

    /// Push a profiler frame.
    ///
    /// Called from compiled code through the `cairo_native__profiler__push_stmt`
    /// binding, so it must not unwind: panicking across an `extern "C"`
    /// boundary aborts the process.
    pub extern "C" fn push_stmt(profile_id: u64, statement_idx: u64, tick_delta: u64) {
        // Recover the map even if another thread panicked while holding the
        // lock (a poisoned mutex); the stored samples are still usable and
        // this avoids a panic (and therefore an abort) inside an FFI callback.
        let mut profiler = LIBFUNC_PROFILE
            .lock()
            .unwrap_or_else(|poisoned| poisoned.into_inner());
        let Some(profiler) = profiler.get_mut(&profile_id) else {
            eprintln!("Could not find libfunc profiler!");
            return;
        };
        profiler
            .samples
            .push((StatementIdx(statement_idx as usize), tick_delta));
    }

    /// Returns the execution profile, grouped by libfunc
    pub fn get_profile(&self, sierra_program: &Program) -> Profile {
        let mut profile = HashMap::<ConcreteLibfuncId, LibfuncProfileData>::new();
        for (statement_idx, tick_delta) in self.samples.iter() {
            // Skip samples whose statement index is out of range instead of
            // panicking — the samples may have been recorded against a
            // different program.
            let Some(statement) = sierra_program.statements.get(statement_idx.0) else {
                continue;
            };
            if let Statement::Invocation(invocation) = statement {
                let LibfuncProfileData {
                    deltas,
                    extra_counts,
                } = profile.entry(invocation.libfunc_id.clone()).or_default();
                // A tick_delta equal to u64::MAX implies it is invalid, so we don't take it
                // into account
                if *tick_delta != u64::MAX {
                    deltas.push(*tick_delta);
                } else {
                    *extra_counts += 1;
                }
            }
        }
        profile
    }
}
/// Patch the profiler's runtime bindings in a loaded module.
///
/// For every known binding, looks up its exported symbol via
/// `find_symbol_ptr` and, when found, writes the corresponding runtime
/// function pointer into the global slot.
pub fn setup_runtime(find_symbol_ptr: impl Fn(&str) -> Option<*mut c_void>) {
    for binding in [ProfilerBinding::PushStmt] {
        let Some(symbol) = find_symbol_ptr(binding.symbol()) else {
            continue;
        };
        let slot = symbol.cast::<*const ()>();
        unsafe { *slot = binding.function_ptr() };
    }
}
| rust | Apache-2.0 | f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce | 2026-01-04T20:20:54.031924Z | false |
lambdaclass/cairo_native | https://github.com/lambdaclass/cairo_native/blob/f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce/src/metadata/drop_overrides.rs | src/metadata/drop_overrides.rs | //! # Dropping logic overrides
//!
//! By default, values are discarded, but some cases (like arrays, boxes, nullables, dictionaries
//! and some structs and enums) need a drop implementation instead. This metadata is a register of
//! types that require a drop implementation as well as the logic to register and invoke those
//! implementations.
//!
//! ## Drop implementations
//!
//! The drop logic is implemented as a function for each type that requires it. It has to be a
//! function to allow self-referencing types. If we inlined the drop implementations,
//! self-referencing types would generate infinite code thus overflowing the stack when generating
//! code.
//!
//! The generated functions are not public (they are internal) and follow this naming convention:
//!
//! ```text
//! drop${type id}
//! ```
//!
//! where `{type id}` is the numeric value of the `ConcreteTypeId`.
use super::MetadataStorage;
use crate::{
error::{Error, Result},
types::TypeBuilder,
utils::ProgramRegistryExt,
};
use cairo_lang_sierra::{
extensions::core::{CoreLibfunc, CoreType},
ids::ConcreteTypeId,
program_registry::ProgramRegistry,
};
use melior::{
dialect::{cf, func, llvm},
helpers::{BuiltinBlockExt, LlvmBlockExt},
ir::{
attribute::{FlatSymbolRefAttribute, StringAttribute, TypeAttribute},
r#type::FunctionType,
Attribute, Block, BlockLike, Identifier, Location, Module, Region, Value,
},
Context,
};
use std::collections::HashSet;
/// Registry of the types that override the default drop logic, together with
/// the helpers to declare and invoke the generated `drop$<id>` functions.
#[derive(Debug, Default)]
pub struct DropOverridesMeta {
    // Types that have (or are in the process of registering) a custom drop
    // implementation.
    overriden_types: HashSet<ConcreteTypeId>,
}
impl DropOverridesMeta {
    /// Register a drop override using a closure.
    ///
    /// This function does several things:
    /// - Registers `DropOverrideMeta` if it wasn't already present.
    /// - If the type id was already registered it returns and does nothing.
    /// - Registers the type (without it being actually registered yet).
    /// - Calls the closure, which returns an `Option<Region>`.
    /// - If the closure returns a region, generates the function implementation.
    /// - If the closure returns `None`, it removes the registry entry for the type.
    ///
    /// The type needs to be registered before calling the closure, otherwise self-referencing
    /// types would cause stack overflow when registering themselves.
    ///
    /// The callback serves two purposes:
    /// - To generate the drop implementation, if necessary.
    /// - To check if we need to generate the implementation (for example, in structs and enums).
    pub(crate) fn register_with<'ctx>(
        context: &'ctx Context,
        module: &Module<'ctx>,
        registry: &ProgramRegistry<CoreType, CoreLibfunc>,
        metadata: &mut MetadataStorage,
        id: &ConcreteTypeId,
        f: impl FnOnce(&mut MetadataStorage) -> Result<Option<Region<'ctx>>>,
    ) -> Result<()> {
        {
            let drop_override_meta = metadata.get_or_insert_with(Self::default);
            if drop_override_meta.overriden_types.contains(id) {
                return Ok(());
            }
            // Mark the type as registered *before* invoking the closure so
            // that self-referencing types terminate.
            drop_override_meta.overriden_types.insert(id.clone());
        }
        match f(metadata)? {
            Some(region) => {
                let location = Location::unknown(context);
                let ty = registry.build_type(context, module, metadata, id)?;
                let ptr_ty = llvm::r#type::pointer(context, 0);
                let sierra_ty = registry.get_type(id)?;
                let is_memory_allocated = sierra_ty.is_memory_allocated(registry)?;
                // Memory-allocated types are passed by pointer.
                let signature_ty = if is_memory_allocated { ptr_ty } else { ty };
                // For memory allocated types, the generated function receives
                // a pointer as argument. However, the user provided callback
                // generates a region that receives a concrete value as
                // argument. To workaround this, we insert a block at the start
                // of the region that dereferences the pointer, and jumps to the
                // user provided implementation.
                if is_memory_allocated {
                    // NOTE(review): panics if the callback returned a region
                    // with no blocks — presumably callers always provide an
                    // entry block; confirm before relying on it.
                    let entry_block = region.first_block().unwrap();
                    let pre_entry_block =
                        region.insert_block_before(entry_block, Block::new(&[(ptr_ty, location)]));
                    pre_entry_block.append_operation(cf::br(
                        &entry_block,
                        &[pre_entry_block.load(context, location, pre_entry_block.arg(0)?, ty)?],
                        location,
                    ));
                }
                // Declare the `drop$<id>` function with fastcc and private
                // linkage.
                module.body().append_operation(func::func(
                    context,
                    StringAttribute::new(context, &format!("drop${}", id.id)),
                    TypeAttribute::new(FunctionType::new(context, &[signature_ty], &[]).into()),
                    region,
                    &[
                        (
                            Identifier::new(context, "sym_visibility"),
                            StringAttribute::new(context, "public").into(),
                        ),
                        (
                            Identifier::new(context, "llvm.CConv"),
                            Attribute::parse(context, "#llvm.cconv<fastcc>")
                                .ok_or(Error::ParseAttributeError)?,
                        ),
                        (
                            Identifier::new(context, "llvm.linkage"),
                            Attribute::parse(context, "#llvm.linkage<private>")
                                .ok_or(Error::ParseAttributeError)?,
                        ),
                    ],
                    Location::unknown(context),
                ));
            }
            None => {
                // The following getter should always return a value, but the if statement is kept
                // just in case the meta has been removed (which it shouldn't).
                if let Some(drop_override_meta) = metadata.get_mut::<Self>() {
                    drop_override_meta.overriden_types.remove(id);
                }
            }
        }
        Ok(())
    }
    /// Returns whether a type has a registered drop implementation.
    pub(crate) fn is_overriden(metadata: &mut MetadataStorage, id: &ConcreteTypeId) -> bool {
        metadata
            .get_or_insert_with(Self::default)
            .overriden_types
            .contains(id)
    }
    /// Generates code to invoke a drop implementation for a type, or does nothing if no
    /// implementation was registered.
    #[allow(clippy::too_many_arguments)]
    pub(crate) fn invoke_override<'ctx, 'this>(
        context: &'ctx Context,
        registry: &ProgramRegistry<CoreType, CoreLibfunc>,
        module: &Module<'ctx>,
        init_block: &'this Block<'ctx>,
        block: &'this Block<'ctx>,
        location: Location<'ctx>,
        metadata: &mut MetadataStorage,
        id: &ConcreteTypeId,
        value: Value<'ctx, 'this>,
    ) -> Result<()> {
        if Self::is_overriden(metadata, id) {
            let ty = registry.build_type(context, module, metadata, id)?;
            let sierra_ty = registry.get_type(id)?;
            let is_memory_allocated = sierra_ty.is_memory_allocated(registry)?;
            // For memory allocated types, the drop function receives a pointer
            // as argument, so we need to alloc the given value onto the stack
            // and pass a pointer to it instead.
            let value = if is_memory_allocated {
                // The init_block is guaranteed to not be executed multiple
                // times on tail-recursive functions.
                let value_ptr = init_block.alloca1(
                    context,
                    location,
                    ty,
                    sierra_ty.layout(registry)?.align(),
                )?;
                block.store(context, location, value_ptr, value)?;
                value_ptr
            } else {
                value
            };
            // Call the generated `drop$<id>` function.
            block.append_operation(func::call(
                context,
                FlatSymbolRefAttribute::new(context, &format!("drop${}", id.id)),
                &[value],
                &[],
                location,
            ));
        }
        Ok(())
    }
}
| rust | Apache-2.0 | f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce | 2026-01-04T20:20:54.031924Z | false |
lambdaclass/cairo_native | https://github.com/lambdaclass/cairo_native/blob/f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce/src/metadata/trace_dump.rs | src/metadata/trace_dump.rs | #![cfg(feature = "with-trace-dump")]
use crate::error::{Error, Result};
use cairo_lang_sierra::{
ids::{ConcreteTypeId, VarId},
program::StatementIdx,
};
use melior::{
dialect::{llvm, memref, ods},
helpers::{ArithBlockExt, BuiltinBlockExt, LlvmBlockExt},
ir::{
attribute::{FlatSymbolRefAttribute, StringAttribute, TypeAttribute},
operation::OperationBuilder,
r#type::{IntegerType, MemRefType},
Attribute, Block, BlockLike, Location, Module, Region, Value,
},
Context,
};
use std::{collections::HashSet, ffi::c_void, ptr};
/// Runtime bindings used by the trace-dump instrumentation.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub enum TraceBinding {
    /// Callback that records the value of a variable into the current state.
    State,
    /// Callback that pushes the current state onto the trace dump.
    Push,
    /// Global holding the ID of the currently active trace.
    TraceId,
}

impl TraceBinding {
    /// Symbol name under which the binding is exported to compiled modules.
    pub const fn symbol(self) -> &'static str {
        match self {
            Self::State => "cairo_native__trace_dump__add_variable_to_state",
            Self::Push => "cairo_native__trace_dump__push_state_to_trace_dump",
            Self::TraceId => "cairo_native__trace_dump__trace_id",
        }
    }

    /// Pointer to the runtime function backing this binding.
    const fn function_ptr(self) -> *const () {
        match self {
            Self::State => trace_dump_runtime::add_variable_to_state as *const (),
            Self::Push => trace_dump_runtime::push_state_to_trace_dump as *const (),
            // `TraceId` is a global constant rather than a function, so there
            // is no function pointer to patch in.
            Self::TraceId => ptr::null(),
        }
    }
}
/// Metadata responsible for emitting the trace-dump instrumentation.
#[derive(Clone, Debug, Default, Eq, PartialEq)]
pub struct TraceDumpMeta {
    // Bindings whose MLIR globals have already been declared in the module.
    active_map: HashSet<TraceBinding>,
}
impl TraceDumpMeta {
    /// Register the global for the given binding, if not yet registered, and return
    /// a pointer to the stored value.
    ///
    /// The global is declared with weak linkage and holds a pointer to the
    /// runtime function. For the function to be available at run time,
    /// `setup_runtime` must be called before running the module.
    fn build_function<'c, 'a>(
        &mut self,
        context: &'c Context,
        module: &Module,
        block: &'a Block<'c>,
        location: Location<'c>,
        binding: TraceBinding,
    ) -> Result<Value<'c, 'a>> {
        // `insert` returns true only the first time, so the global is
        // declared at most once per module.
        if self.active_map.insert(binding) {
            module.body().append_operation(
                ods::llvm::mlir_global(
                    context,
                    Region::new(),
                    TypeAttribute::new(llvm::r#type::pointer(context, 0)),
                    StringAttribute::new(context, binding.symbol()),
                    Attribute::parse(context, "#llvm.linkage<weak>")
                        .ok_or(Error::ParseAttributeError)?,
                    location,
                )
                .into(),
            );
        }
        // Take the address of the global, then load the function pointer it
        // stores.
        let global_address = block.append_op_result(
            ods::llvm::mlir_addressof(
                context,
                llvm::r#type::pointer(context, 0),
                FlatSymbolRefAttribute::new(context, binding.symbol()),
                location,
            )
            .into(),
        )?;
        Ok(block.load(
            context,
            location,
            global_address,
            llvm::r#type::pointer(context, 0),
        )?)
    }
#[allow(clippy::too_many_arguments)]
pub fn build_state(
&mut self,
context: &Context,
module: &Module,
block: &Block,
var_id: &VarId,
value_ty: &ConcreteTypeId,
value_ptr: Value,
location: Location,
) -> Result<()> {
let trace_id = self.build_trace_id(context, module, block, location)?;
let var_id = block.const_int(context, location, var_id.id, 64)?;
let value_ty = block.const_int(context, location, value_ty.id, 64)?;
let function =
self.build_function(context, module, block, location, TraceBinding::State)?;
block.append_operation(
OperationBuilder::new("llvm.call", location)
.add_operands(&[function])
.add_operands(&[trace_id, var_id, value_ty, value_ptr])
.build()?,
);
Ok(())
}
pub fn build_push(
&mut self,
context: &Context,
module: &Module,
block: &Block,
statement_idx: StatementIdx,
location: Location,
) -> Result<()> {
let trace_id = self.build_trace_id(context, module, block, location)?;
let statement_idx = block.const_int(context, location, statement_idx.0, 64)?;
let function = self.build_function(context, module, block, location, TraceBinding::Push)?;
block.append_operation(
OperationBuilder::new("llvm.call", location)
.add_operands(&[function])
.add_operands(&[trace_id, statement_idx])
.build()?,
);
Ok(())
}
pub fn build_trace_id<'c, 'a>(
&mut self,
context: &'c Context,
module: &Module,
block: &'a Block<'c>,
location: Location<'c>,
) -> Result<Value<'c, 'a>> {
if self.active_map.insert(TraceBinding::TraceId) {
module.body().append_operation(memref::global(
context,
TraceBinding::TraceId.symbol(),
None,
MemRefType::new(IntegerType::new(context, 64).into(), &[], None, None),
None,
false,
None,
location,
));
}
let trace_id_ptr = block
.append_op_result(memref::get_global(
context,
TraceBinding::TraceId.symbol(),
MemRefType::new(IntegerType::new(context, 64).into(), &[], None, None),
location,
))
.unwrap();
Ok(block.append_op_result(memref::load(trace_id_ptr, &[], location))?)
}
}
pub fn setup_runtime(find_symbol_ptr: impl Fn(&str) -> Option<*mut c_void>) {
let bindings = &[TraceBinding::State, TraceBinding::Push];
for binding in bindings {
if let Some(global) = find_symbol_ptr(binding.symbol()) {
let global = global.cast::<*const ()>();
unsafe { *global = binding.function_ptr() };
}
}
}
pub mod trace_dump_runtime {
#![allow(non_snake_case)]
use cairo_lang_sierra::{
extensions::{
bounded_int::BoundedIntConcreteType,
circuit::CircuitTypeConcrete,
core::{CoreLibfunc, CoreType, CoreTypeConcrete},
starknet::{secp256::Secp256PointTypeConcrete, StarknetTypeConcrete},
},
ids::{ConcreteTypeId, VarId},
program::{GenericArg, StatementIdx},
program_registry::ProgramRegistry,
};
use cairo_lang_utils::ordered_hash_map::OrderedHashMap;
use itertools::Itertools;
use num_bigint::{BigInt, BigUint, Sign};
use num_traits::One;
use sierra_emu::{
starknet::{
Secp256k1Point as EmuSecp256k1Point, Secp256r1Point as EmuSecp256r1Point,
U256 as EmuU256,
},
ProgramTrace, StateDump, Value,
};
use starknet_types_core::felt::Felt;
use std::{
alloc::Layout,
collections::HashMap,
mem::swap,
ops::Range,
ptr::NonNull,
sync::{LazyLock, Mutex},
};
use crate::{
starknet::ArrayAbi,
types::TypeBuilder,
utils::{get_integer_layout, layout_repeat},
};
use crate::runtime::FeltDict;
pub static TRACE_DUMP: LazyLock<Mutex<HashMap<u64, TraceDump>>> =
LazyLock::new(|| Mutex::new(HashMap::new()));
/// An in-progress trace dump for a particular execution
pub struct TraceDump {
pub trace: ProgramTrace,
/// Represents the latest state. All values are added to
/// this state until pushed to the trace.
state: OrderedHashMap<VarId, Value>,
registry: ProgramRegistry<CoreType, CoreLibfunc>,
}
impl TraceDump {
pub fn new(registry: ProgramRegistry<CoreType, CoreLibfunc>) -> Self {
Self {
trace: ProgramTrace::default(),
state: OrderedHashMap::default(),
registry,
}
}
}
/// Adds a new variable to the current state of the trace dump with the
/// given identifier.
///
/// Receives a pointer to the value, even if the value is a pointer itself.
pub unsafe extern "C" fn add_variable_to_state(
trace_id: u64,
var_id: u64,
type_id: u64,
value_ptr: NonNull<()>,
) {
let mut trace_dump = TRACE_DUMP.lock().unwrap();
let Some(trace_dump) = trace_dump.get_mut(&trace_id) else {
eprintln!("Could not find trace dump!");
return;
};
let type_id = ConcreteTypeId::new(type_id);
let value = value_from_ptr(&trace_dump.registry, &type_id, value_ptr);
trace_dump.state.insert(VarId::new(var_id), value);
}
/// Pushes the latest state to the trace dump with the given identifier.
///
/// It is called after all variables have been added with `add_variable_to_state`.
pub unsafe extern "C" fn push_state_to_trace_dump(trace_id: u64, statement_idx: u64) {
let mut trace_dump = TRACE_DUMP.lock().unwrap();
let Some(trace_dump) = trace_dump.get_mut(&trace_id) else {
eprintln!("Could not find trace dump!");
return;
};
let mut items = OrderedHashMap::default();
swap(&mut items, &mut trace_dump.state);
trace_dump
.trace
.push(StateDump::new(StatementIdx(statement_idx as usize), items));
}
/// TODO: Can we reuse `cairo_native::Value::from_ptr`?
unsafe fn value_from_ptr(
registry: &ProgramRegistry<CoreType, CoreLibfunc>,
type_id: &ConcreteTypeId,
value_ptr: NonNull<()>,
) -> Value {
let type_info = registry.get_type(type_id).unwrap();
match type_info {
CoreTypeConcrete::Felt252(_)
| CoreTypeConcrete::Starknet(StarknetTypeConcrete::ContractAddress(_))
| CoreTypeConcrete::Starknet(StarknetTypeConcrete::ClassHash(_))
| CoreTypeConcrete::Starknet(StarknetTypeConcrete::StorageAddress(_))
| CoreTypeConcrete::Starknet(StarknetTypeConcrete::StorageBaseAddress(_)) => {
Value::Felt(Felt::from_bytes_le(value_ptr.cast().as_ref()))
}
CoreTypeConcrete::Uint8(_) => Value::U8(value_ptr.cast().read()),
CoreTypeConcrete::Uint16(_) => Value::U16(value_ptr.cast().read()),
CoreTypeConcrete::Uint32(_) => Value::U32(value_ptr.cast().read()),
CoreTypeConcrete::Uint64(_) | CoreTypeConcrete::GasBuiltin(_) => {
Value::U64(value_ptr.cast().read())
}
CoreTypeConcrete::Uint128(_) => Value::U128(value_ptr.cast().read()),
CoreTypeConcrete::BoundedInt(BoundedIntConcreteType { range, .. }) => {
let n_bits = ((range.size() - BigInt::one()).bits() as u32).max(1);
let n_bytes = n_bits.next_multiple_of(8) >> 3;
let data = NonNull::slice_from_raw_parts(value_ptr.cast::<u8>(), n_bytes as usize);
let value = BigInt::from_bytes_le(num_bigint::Sign::Plus, data.as_ref());
Value::BoundedInt {
range: Range {
start: range.lower.clone(),
end: range.upper.clone(),
},
value: value + &range.lower,
}
}
CoreTypeConcrete::EcPoint(_) => {
let layout = Layout::new::<()>();
let (x, layout) = {
let (layout, offset) = layout.extend(Layout::new::<[u128; 2]>()).unwrap();
(
Felt::from_bytes_le(value_ptr.byte_add(offset).cast().as_ref()),
layout,
)
};
let (y, _) = {
let (layout, offset) = layout.extend(Layout::new::<[u128; 2]>()).unwrap();
(
Felt::from_bytes_le(value_ptr.byte_add(offset).cast().as_ref()),
layout,
)
};
Value::EcPoint { x, y }
}
CoreTypeConcrete::EcState(_) => {
let layout = Layout::new::<()>();
let (x0, layout) = {
let (layout, offset) = layout.extend(Layout::new::<[u128; 2]>()).unwrap();
(
Felt::from_bytes_le(value_ptr.byte_add(offset).cast().as_ref()),
layout,
)
};
let (y0, layout) = {
let (layout, offset) = layout.extend(Layout::new::<[u128; 2]>()).unwrap();
(
Felt::from_bytes_le(value_ptr.byte_add(offset).cast().as_ref()),
layout,
)
};
let (x1, layout) = {
let (layout, offset) = layout.extend(Layout::new::<[u128; 2]>()).unwrap();
(
Felt::from_bytes_le(value_ptr.byte_add(offset).cast().as_ref()),
layout,
)
};
let (y1, _) = {
let (layout, offset) = layout.extend(Layout::new::<[u128; 2]>()).unwrap();
(
Felt::from_bytes_le(value_ptr.byte_add(offset).cast().as_ref()),
layout,
)
};
Value::EcState { x0, y0, x1, y1 }
}
CoreTypeConcrete::Uninitialized(info) => Value::Uninitialized {
ty: info.ty.clone(),
},
CoreTypeConcrete::Box(info) => {
value_from_ptr(registry, &info.ty, value_ptr.cast::<NonNull<()>>().read())
}
CoreTypeConcrete::Array(info) => {
let array = value_ptr.cast::<ArrayAbi<()>>().read();
let layout = registry
.get_type(&info.ty)
.unwrap()
.layout(registry)
.unwrap()
.pad_to_align();
let mut data = Vec::with_capacity((array.until - array.since) as usize);
if !array.ptr.is_null() {
let data_ptr = array.ptr.read();
for index in (array.since)..array.until {
let index = index as usize;
data.push(value_from_ptr(
registry,
&info.ty,
NonNull::new(data_ptr.byte_add(layout.size() * index)).unwrap(),
));
}
}
Value::Array {
ty: info.ty.clone(),
data,
}
}
CoreTypeConcrete::Struct(info) => {
let mut layout = Layout::new::<()>();
let mut members = Vec::with_capacity(info.members.len());
for member_ty in &info.members {
let type_info = registry.get_type(member_ty).unwrap();
let member_layout = type_info.layout(registry).unwrap();
let offset;
(layout, offset) = layout.extend(member_layout).unwrap();
let current_ptr = value_ptr.byte_add(offset);
members.push(value_from_ptr(registry, member_ty, current_ptr));
}
Value::Struct(members)
}
CoreTypeConcrete::Enum(info) => {
let tag_bits = info.variants.len().next_power_of_two().trailing_zeros();
let (tag_value, layout) = match tag_bits {
0 => (0, Layout::new::<()>()),
width if width <= 8 => {
(value_ptr.cast::<u8>().read() as usize, Layout::new::<u8>())
}
width if width <= 16 => (
value_ptr.cast::<u16>().read() as usize,
Layout::new::<u16>(),
),
width if width <= 32 => (
value_ptr.cast::<u32>().read() as usize,
Layout::new::<u32>(),
),
width if width <= 64 => (
value_ptr.cast::<u64>().read() as usize,
Layout::new::<u64>(),
),
width if width <= 128 => (
value_ptr.cast::<u128>().read() as usize,
Layout::new::<u128>(),
),
_ => todo!(),
};
let payload = {
let (_, offset) = layout
.extend(
registry
.get_type(&info.variants[tag_value])
.unwrap()
.layout(registry)
.unwrap(),
)
.unwrap();
value_from_ptr(
registry,
&info.variants[tag_value],
value_ptr.byte_add(offset),
)
};
Value::Enum {
self_ty: type_id.clone(),
index: tag_value,
payload: Box::new(payload),
}
}
CoreTypeConcrete::NonZero(info) | CoreTypeConcrete::Snapshot(info) => {
value_from_ptr(registry, &info.ty, value_ptr)
}
// Builtins and other unit types:
CoreTypeConcrete::Bitwise(_)
| CoreTypeConcrete::EcOp(_)
| CoreTypeConcrete::Pedersen(_)
| CoreTypeConcrete::Poseidon(_)
| CoreTypeConcrete::RangeCheck96(_)
| CoreTypeConcrete::RangeCheck(_)
| CoreTypeConcrete::SegmentArena(_)
| CoreTypeConcrete::Starknet(StarknetTypeConcrete::System(_))
| CoreTypeConcrete::Uint128MulGuarantee(_) => Value::Unit,
CoreTypeConcrete::BuiltinCosts(_) => {
let builtin_costs = value_ptr.cast::<&[u64; 7]>().read();
Value::BuiltinCosts(sierra_emu::BuiltinCosts {
r#const: builtin_costs[0],
pedersen: builtin_costs[1],
bitwise: builtin_costs[2],
ecop: builtin_costs[3],
poseidon: builtin_costs[4],
add_mod: builtin_costs[5],
mul_mod: builtin_costs[6],
})
}
// TODO:
CoreTypeConcrete::Coupon(_) => todo!("CoreTypeConcrete::Coupon"),
CoreTypeConcrete::Circuit(circuit) => match circuit {
CircuitTypeConcrete::AddMod(_) => Value::Unit,
CircuitTypeConcrete::MulMod(_) => Value::Unit,
CircuitTypeConcrete::AddModGate(_) => Value::Unit,
CircuitTypeConcrete::Circuit(_) => Value::Unit,
CircuitTypeConcrete::CircuitData(info) => {
let Some(GenericArg::Type(circuit_type_id)) =
info.info.long_id.generic_args.first()
else {
panic!("generic arg should be a type");
};
let CoreTypeConcrete::Circuit(CircuitTypeConcrete::Circuit(circuit)) =
registry.get_type(circuit_type_id).unwrap()
else {
panic!("generic arg should be a Circuit");
};
let u384_layout = Layout::from_size_align(48, 16).unwrap();
let n_inputs = circuit.circuit_info.n_inputs;
let mut values = Vec::with_capacity(n_inputs);
let value_ptr = value_ptr.cast::<[u8; 48]>();
for i in 0..n_inputs {
let size = u384_layout.pad_to_align().size();
let current_ptr = value_ptr.byte_add(size * i);
let current_value = current_ptr.as_ref();
values.push(BigUint::from_bytes_le(current_value));
}
Value::Circuit(values)
}
CircuitTypeConcrete::CircuitOutputs(info) => {
let Some(GenericArg::Type(circuit_type_id)) =
info.info.long_id.generic_args.first()
else {
panic!("generic arg should be a type");
};
let CoreTypeConcrete::Circuit(CircuitTypeConcrete::Circuit(circuit)) =
registry.get_type(circuit_type_id).unwrap()
else {
panic!("generic arg should be a Circuit");
};
let u96_layout = get_integer_layout(96);
let n_outputs = circuit.circuit_info.values.len();
let mut values = Vec::with_capacity(n_outputs);
let (u384_struct_layout, _) = layout_repeat(&u96_layout, 4).unwrap();
let (gates_array_layout, gate_stride) =
layout_repeat(&u384_struct_layout, n_outputs).unwrap();
let (_, modulus_offset) =
gates_array_layout.extend(u384_struct_layout).unwrap();
let value_ptr = value_ptr.cast::<[u8; 12]>();
// get gate values
for i in 0..n_outputs {
let gate_ptr = value_ptr.byte_add(gate_stride * i);
values.push(u384_struct_to_bigint(gate_ptr, 4));
}
// get modulus value
let modulus_ptr = value_ptr.byte_add(modulus_offset);
let modulus = u384_struct_to_bigint(modulus_ptr, 4);
Value::CircuitOutputs {
circuits: values,
modulus,
}
}
CircuitTypeConcrete::CircuitPartialOutputs(_) => {
todo!("CircuitTypeConcrete::CircuitPartialOutputs")
}
CircuitTypeConcrete::CircuitDescriptor(_) => Value::Unit,
CircuitTypeConcrete::CircuitFailureGuarantee(_) => {
todo!("CircuitTypeConcrete::CircuitFailureGuarantee")
}
CircuitTypeConcrete::CircuitInput(_) => {
todo!("CircuitTypeConcrete::CircuitInput")
}
CircuitTypeConcrete::CircuitInputAccumulator(info) => {
let Some(GenericArg::Type(circuit_type_id)) =
info.info.long_id.generic_args.first()
else {
panic!("generic arg should be a type");
};
let CoreTypeConcrete::Circuit(CircuitTypeConcrete::Circuit(_)) =
registry.get_type(circuit_type_id).unwrap()
else {
panic!("generic arg should be a Circuit");
};
let u64_layout = Layout::new::<u64>();
let u384_layout = Layout::from_size_align(48, 16).unwrap();
let length = unsafe { *value_ptr.cast::<u64>().as_ptr() };
let (_, input_start_offset) = u64_layout.extend(u384_layout).unwrap();
let start_ptr = value_ptr.byte_add(input_start_offset).cast::<[u8; 48]>();
let mut values = Vec::with_capacity(length as usize);
for i in 0..length {
let size = u384_layout.pad_to_align().size();
let current_ptr = start_ptr.byte_add(size * i as usize);
let current_value = current_ptr.as_ref();
values.push(BigUint::from_bytes_le(current_value));
}
Value::Circuit(values)
}
CircuitTypeConcrete::CircuitModulus(_) => {
let value_ptr = value_ptr.cast::<[u8; 48]>();
let value = unsafe { value_ptr.as_ref() };
Value::CircuitModulus(BigUint::from_bytes_le(value))
}
CircuitTypeConcrete::InverseGate(_) => Value::Unit,
CircuitTypeConcrete::MulModGate(_) => Value::Unit,
CircuitTypeConcrete::SubModGate(_) => Value::Unit,
CircuitTypeConcrete::U96Guarantee(_) => {
let value_ptr = value_ptr.cast::<[u8; 12]>();
let value = unsafe { value_ptr.as_ref() };
let mut array_value = [0u8; 16];
array_value[..12].clone_from_slice(value);
Value::U128(u128::from_le_bytes(array_value))
}
CircuitTypeConcrete::U96LimbsLessThanGuarantee(info) => {
let value_ptr = value_ptr.cast::<[u8; 12]>();
let u96_layout = get_integer_layout(96);
let (u384_struct_layout, struct_stride) =
layout_repeat(&u96_layout, info.limb_count).unwrap();
let output_limbs = (0..info.limb_count)
.map(|i| {
let current_ptr = value_ptr.byte_add(struct_stride * i);
Value::BoundedInt {
range: 0.into()..BigInt::one() << 96,
value: BigInt::from_bytes_le(Sign::Plus, current_ptr.as_ref()),
}
})
.collect::<Vec<_>>();
let modulus_ptr = value_ptr.byte_add(u384_struct_layout.size());
let modulus_limbs = (0..info.limb_count)
.map(|i| {
let current_ptr = modulus_ptr.byte_add(struct_stride * i);
Value::BoundedInt {
range: 0.into()..BigInt::one() << 96,
value: BigInt::from_bytes_le(Sign::Plus, current_ptr.as_ref()),
}
})
.collect::<Vec<_>>();
Value::Struct(vec![
Value::Struct(output_limbs),
Value::Struct(modulus_limbs),
])
}
},
CoreTypeConcrete::Const(_) => todo!("CoreTypeConcrete::Const"),
CoreTypeConcrete::Sint8(_) => Value::I8(value_ptr.cast().read()),
CoreTypeConcrete::Sint16(_) => todo!("CoreTypeConcrete::Sint16"),
CoreTypeConcrete::Sint32(_) => Value::I32(value_ptr.cast().read()),
CoreTypeConcrete::Sint64(_) => todo!("CoreTypeConcrete::Sint64"),
CoreTypeConcrete::Sint128(_) => Value::I128(value_ptr.cast().read()),
CoreTypeConcrete::Nullable(info) => {
let inner_ptr = value_ptr.cast::<*mut ()>().read();
match NonNull::new(inner_ptr) {
Some(inner_ptr) => value_from_ptr(registry, &info.ty, inner_ptr),
None => Value::Uninitialized {
ty: info.ty.clone(),
},
}
}
CoreTypeConcrete::SquashedFelt252Dict(info) | CoreTypeConcrete::Felt252Dict(info) => {
let value = value_ptr.cast::<&FeltDict>().read();
let data = value
.mappings
.iter()
.map(|(k, &i)| {
let p = value
.elements
.byte_offset((value.layout.size() * i) as isize);
let v = match NonNull::new(p) {
Some(value_ptr) => value_from_ptr(registry, &info.ty, value_ptr.cast()),
None => Value::Uninitialized {
ty: info.ty.clone(),
},
};
let k = Felt::from_bytes_le(k);
(k, v)
})
.collect::<HashMap<Felt, Value>>();
Value::FeltDict {
ty: info.ty.clone(),
count: value.count,
data,
}
}
CoreTypeConcrete::Felt252DictEntry(info) => {
let value = value_ptr.cast::<FeltDictEntry>().read();
let data = value
.dict
.mappings
.iter()
.map(|(k, &i)| {
let p = value
.dict
.elements
.byte_offset((value.dict.layout.size() * i) as isize);
let v = match NonNull::new(p) {
Some(value_ptr) => value_from_ptr(registry, &info.ty, value_ptr.cast()),
None => Value::Uninitialized {
ty: info.ty.clone(),
},
};
let k = Felt::from_bytes_le(k);
(k, v)
})
.collect::<HashMap<Felt, Value>>();
let key = Felt::from_bytes_le(value.key);
Value::FeltDictEntry {
ty: info.ty.clone(),
data,
count: value.dict.count,
key,
}
}
CoreTypeConcrete::Span(_) => todo!("CoreTypeConcrete::Span"),
CoreTypeConcrete::Starknet(selector) => match selector {
StarknetTypeConcrete::Secp256Point(selector) => match selector {
Secp256PointTypeConcrete::K1(_) => {
let point: Secp256Point = value_ptr.cast().read();
let emu_point = EmuSecp256k1Point {
x: EmuU256 {
lo: point.x.lo,
hi: point.x.hi,
},
y: EmuU256 {
lo: point.y.lo,
hi: point.y.hi,
},
};
emu_point.into_value()
}
Secp256PointTypeConcrete::R1(_) => {
let point: Secp256Point = value_ptr.cast().read();
let emu_point = EmuSecp256r1Point {
x: EmuU256 {
lo: point.x.lo,
hi: point.x.hi,
},
y: EmuU256 {
lo: point.y.lo,
hi: point.y.hi,
},
};
emu_point.into_value()
}
},
StarknetTypeConcrete::Sha256StateHandle(_) => {
let raw_data = value_ptr.cast::<NonNull<[u32; 8]>>().read().read();
let data = raw_data.into_iter().map(Value::U32).collect_vec();
Value::Struct(data)
}
_ => unreachable!(),
},
CoreTypeConcrete::Bytes31(_) => {
let original_data: [u8; 31] = value_ptr.cast().read();
let mut data = [0u8; 32];
for (i, v) in original_data.into_iter().enumerate() {
data[i] = v
}
Value::Bytes31(Felt::from_bytes_le(&data))
}
CoreTypeConcrete::IntRange(info) => {
let type_info = registry.get_type(&info.ty).unwrap();
match type_info {
CoreTypeConcrete::Sint8(_) => {
let value = value_ptr.cast::<IntRange<i8>>().read();
Value::IntRange {
x: Box::new(value.x.into()),
| rust | Apache-2.0 | f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce | 2026-01-04T20:20:54.031924Z | true |
lambdaclass/cairo_native | https://github.com/lambdaclass/cairo_native/blob/f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce/src/metadata/debug_utils.rs | src/metadata/debug_utils.rs | //! # Debug utilities
//!
//! A collection of utilities to debug values in MLIR in execution.
//!
//! ## Example
//!
//! ```rust,ignore
//! # use cairo_lang_sierra::{
//! # extensions::{
//! # core::{CoreLibfunc, CoreType},
//! # lib_func::SignatureAndTypeConcreteLibfunc,
//! # GenericType,
//! # GenericLibfunc,
//! # },
//! # program_registry::ProgramRegistry,
//! # };
//! # use cairo_native::{
//! # error::{
//! # Error, Result,
//! # },
//! # libfuncs::{LibfuncBuilder, LibfuncHelper},
//! # metadata::{debug_utils::DebugUtils, MetadataStorage},
//! # types::TypeBuilder,
//! # utils::ProgramRegistryExt,
//! # };
//! # use melior::{
//! # dialect::llvm,
//! # ir::{
//! # attribute::DenseI64ArrayAttribute,
//! # r#type::IntegerType,
//! # Block,
//! # Location,
//! # },
//! # Context,
//! # };
//!
//! pub fn build_array_len<'ctx, 'this>(
//! context: &'ctx Context,
//! registry: &ProgramRegistry<CoreType, CoreLibfunc>,
//! entry: &'this Block<'ctx>,
//! location: Location<'ctx>,
//! helper: &LibfuncHelper<'ctx, 'this>,
//! metadata: &mut MetadataStorage,
//! info: &SignatureAndTypeConcreteLibfunc,
//! ) -> Result<()>
//! {
//! let array_val = entry.arg(0)?;
//! let elem_ty = registry.build_type(context, helper, registry, metadata, &info.ty)?;
//!
//! #[cfg(feature = "with-debug-utils")]
//! {
//! let array_ptr = entry
//! .append_operation(llvm::extract_value(
//! context,
//! array_val,
//! DenseI64ArrayAttribute::new(context, &[0]),
//! elem_ty,
//! location,
//! ))
//! .result(0)?
//! .into();
//!
//! metadata.get_mut::<DebugUtils>()
//! .ok_or(Error::MissingMetadata)?
//! .print_pointer(context, helper, entry, array_ptr, location)?;
//! }
//!
//! let array_len = entry
//! .append_operation(llvm::extract_value(
//! context,
//! array_val,
//! DenseI64ArrayAttribute::new(context, &[1]),
//! IntegerType::new(context, 32).into(),
//! location,
//! ))
//! .result(0)?
//! .into();
//!
//! entry.append_operation(helper.br(0, &[array_len], location));
//! Ok(())
//! }
//! ```
#![cfg(feature = "with-debug-utils")]
use crate::{
error::{Error, Result},
utils::get_integer_layout,
};
use melior::{
dialect::{
arith,
llvm::{self},
ods,
},
helpers::{ArithBlockExt, BuiltinBlockExt, LlvmBlockExt},
ir::{
attribute::{FlatSymbolRefAttribute, IntegerAttribute, StringAttribute, TypeAttribute},
operation::OperationBuilder,
r#type::IntegerType,
Attribute, Block, BlockLike, Location, Module, Region, Value,
},
Context,
};
use num_bigint::BigUint;
use std::{collections::HashSet, ffi::c_void};
#[derive(Clone, Copy, Debug, Hash, Eq, PartialEq)]
enum DebugBinding {
BreakpointMarker,
PrintStr,
PrintI1,
PrintI8,
PrintI32,
PrintI64,
PrintI128,
PrintPointer,
PrintFelt252,
DumpMemRegion,
}
impl DebugBinding {
const fn symbol(self) -> &'static str {
match self {
DebugBinding::BreakpointMarker => "cairo_native__debug__breakpoint_marker_impl",
DebugBinding::PrintStr => "cairo_native__debug__print_str_impl",
DebugBinding::PrintI1 => "cairo_native__debug__print_i1_impl",
DebugBinding::PrintI8 => "cairo_native__debug__print_i8_impl",
DebugBinding::PrintI32 => "cairo_native__debug__print_i32_impl",
DebugBinding::PrintI64 => "cairo_native__debug__print_i64_impl",
DebugBinding::PrintI128 => "cairo_native__debug__print_i128_impl",
DebugBinding::PrintPointer => "cairo_native__debug__print_pointer_impl",
DebugBinding::PrintFelt252 => "cairo_native__debug__print_felt252_impl",
DebugBinding::DumpMemRegion => "cairo_native__debug__dump_mem_region_impl",
}
}
const fn function_ptr(self) -> *const () {
match self {
DebugBinding::BreakpointMarker => breakpoint_marker_impl as *const (),
DebugBinding::PrintStr => print_str_impl as *const (),
DebugBinding::PrintI1 => print_i1_impl as *const (),
DebugBinding::PrintI8 => print_i8_impl as *const (),
DebugBinding::PrintI32 => print_i32_impl as *const (),
DebugBinding::PrintI64 => print_i64_impl as *const (),
DebugBinding::PrintI128 => print_i128_impl as *const (),
DebugBinding::PrintPointer => print_pointer_impl as *const (),
DebugBinding::PrintFelt252 => print_felt252_impl as *const (),
DebugBinding::DumpMemRegion => dump_mem_region_impl as *const (),
}
}
}
#[derive(Debug, Default)]
pub struct DebugUtils {
active_map: HashSet<DebugBinding>,
}
impl DebugUtils {
/// Register the global for the given binding, if not yet registered, and return
/// a pointer to the stored function.
///
/// For the function to be available, `setup_runtime` must be called before running the module
fn build_function<'c, 'a>(
&mut self,
context: &'c Context,
module: &Module,
block: &'a Block<'c>,
location: Location<'c>,
binding: DebugBinding,
) -> Result<Value<'c, 'a>> {
if self.active_map.insert(binding) {
module.body().append_operation(
ods::llvm::mlir_global(
context,
Region::new(),
TypeAttribute::new(llvm::r#type::pointer(context, 0)),
StringAttribute::new(context, binding.symbol()),
Attribute::parse(context, "#llvm.linkage<weak>")
.ok_or(Error::ParseAttributeError)?,
location,
)
.into(),
);
}
let global_address = block.append_op_result(
ods::llvm::mlir_addressof(
context,
llvm::r#type::pointer(context, 0),
FlatSymbolRefAttribute::new(context, binding.symbol()),
location,
)
.into(),
)?;
Ok(block.load(
context,
location,
global_address,
llvm::r#type::pointer(context, 0),
)?)
}
pub fn breakpoint_marker(
&mut self,
context: &Context,
module: &Module,
block: &Block,
location: Location,
) -> Result<()> {
let function = self.build_function(
context,
module,
block,
location,
DebugBinding::BreakpointMarker,
)?;
block.append_operation(
OperationBuilder::new("llvm.call", location)
.add_operands(&[function])
.build()?,
);
Ok(())
}
pub fn debug_breakpoint_trap(&self, block: &Block, location: Location) -> Result<()> {
block.append_operation(OperationBuilder::new("llvm.intr.debugtrap", location).build()?);
Ok(())
}
/// Prints the given &str.
pub fn debug_print(
&mut self,
context: &Context,
module: &Module,
block: &Block,
message: &str,
location: Location,
) -> Result<()> {
let function =
self.build_function(context, module, block, location, DebugBinding::PrintStr)?;
let ty = llvm::r#type::array(
IntegerType::new(context, 8).into(),
message
.len()
.try_into()
.map_err(|_| Error::IntegerConversion)?,
);
let ptr = block.alloca1(context, location, ty, get_integer_layout(8).align())?;
let msg = block
.append_operation(
ods::llvm::mlir_constant(
context,
llvm::r#type::array(
IntegerType::new(context, 8).into(),
message
.len()
.try_into()
.map_err(|_| Error::IntegerConversion)?,
),
StringAttribute::new(context, message).into(),
location,
)
.into(),
)
.result(0)?
.into();
block.append_operation(ods::llvm::store(context, msg, ptr, location).into());
let len = block
.append_operation(arith::constant(
context,
IntegerAttribute::new(
IntegerType::new(context, 64).into(),
message
.len()
.try_into()
.map_err(|_| Error::IntegerConversion)?,
)
.into(),
location,
))
.result(0)?
.into();
block.append_operation(
OperationBuilder::new("llvm.call", location)
.add_operands(&[function])
.add_operands(&[ptr, len])
.build()?,
);
Ok(())
}
pub fn print_pointer(
&mut self,
context: &Context,
module: &Module,
block: &Block,
value: Value,
location: Location,
) -> Result<()> {
let function =
self.build_function(context, module, block, location, DebugBinding::PrintPointer)?;
block.append_operation(
OperationBuilder::new("llvm.call", location)
.add_operands(&[function])
.add_operands(&[value])
.build()?,
);
Ok(())
}
pub fn print_i1(
&mut self,
context: &Context,
module: &Module,
block: &Block,
value: Value,
location: Location,
) -> Result<()> {
let function =
self.build_function(context, module, block, location, DebugBinding::PrintI1)?;
block.append_operation(
OperationBuilder::new("llvm.call", location)
.add_operands(&[function])
.add_operands(&[value])
.build()?,
);
Ok(())
}
pub fn print_felt252(
&mut self,
context: &Context,
module: &Module,
block: &Block,
value: Value,
location: Location,
) -> Result<()> {
let function =
self.build_function(context, module, block, location, DebugBinding::PrintFelt252)?;
let k64 = block
.append_operation(arith::constant(
context,
IntegerAttribute::new(IntegerType::new(context, 252).into(), 64).into(),
location,
))
.result(0)?
.into();
let l0 = block
.append_operation(arith::trunci(
value,
IntegerType::new(context, 64).into(),
location,
))
.result(0)?
.into();
let value = block
.append_operation(arith::shrui(value, k64, location))
.result(0)?
.into();
let l1 = block
.append_operation(arith::trunci(
value,
IntegerType::new(context, 64).into(),
location,
))
.result(0)?
.into();
let value = block
.append_operation(arith::shrui(value, k64, location))
.result(0)?
.into();
let l2 = block
.append_operation(arith::trunci(
value,
IntegerType::new(context, 64).into(),
location,
))
.result(0)?
.into();
let value = block
.append_operation(arith::shrui(value, k64, location))
.result(0)?
.into();
let l3 = block
.append_operation(arith::trunci(
value,
IntegerType::new(context, 64).into(),
location,
))
.result(0)?
.into();
block.append_operation(
OperationBuilder::new("llvm.call", location)
.add_operands(&[function])
.add_operands(&[l0, l1, l2, l3])
.build()?,
);
Ok(())
}
pub fn print_i8(
&mut self,
context: &Context,
module: &Module,
block: &Block,
value: Value,
location: Location,
) -> Result<()> {
let function =
self.build_function(context, module, block, location, DebugBinding::PrintI8)?;
block.append_operation(
OperationBuilder::new("llvm.call", location)
.add_operands(&[function])
.add_operands(&[value])
.build()?,
);
Ok(())
}
pub fn print_i32(
&mut self,
context: &Context,
module: &Module,
block: &Block,
value: Value,
location: Location,
) -> Result<()> {
let function =
self.build_function(context, module, block, location, DebugBinding::PrintI32)?;
block.append_operation(
OperationBuilder::new("llvm.call", location)
.add_operands(&[function])
.add_operands(&[value])
.build()?,
);
Ok(())
}
pub fn print_i64(
&mut self,
context: &Context,
module: &Module,
block: &Block,
value: Value,
location: Location,
) -> Result<()> {
let function =
self.build_function(context, module, block, location, DebugBinding::PrintI64)?;
block.append_operation(
OperationBuilder::new("llvm.call", location)
.add_operands(&[function])
.add_operands(&[value])
.build()?,
);
Ok(())
}
pub fn print_i128(
&mut self,
context: &Context,
module: &Module,
block: &Block,
value: Value,
location: Location,
) -> Result<()> {
let function =
self.build_function(context, module, block, location, DebugBinding::PrintI128)?;
let i64_ty = IntegerType::new(context, 64).into();
let k64 = block
.append_operation(arith::constant(
context,
IntegerAttribute::new(IntegerType::new(context, 128).into(), 64).into(),
location,
))
.result(0)?
.into();
let value_lo = block
.append_operation(arith::trunci(value, i64_ty, location))
.result(0)?
.into();
let value_hi = block
.append_operation(arith::shrui(value, k64, location))
.result(0)?
.into();
let value_hi = block
.append_operation(arith::trunci(value_hi, i64_ty, location))
.result(0)?
.into();
block.append_operation(
OperationBuilder::new("llvm.call", location)
.add_operands(&[function])
.add_operands(&[value_lo, value_hi])
.build()?,
);
Ok(())
}
/// Dump a memory region at runtime.
///
/// Requires the pointer (at runtime) and its length in bytes (at compile-time).
pub fn dump_mem(
&mut self,
context: &Context,
module: &Module,
block: &Block,
ptr: Value,
len: usize,
location: Location,
) -> Result<()> {
let function = self.build_function(
context,
module,
block,
location,
DebugBinding::DumpMemRegion,
)?;
let len = block.const_int(context, location, len, 64)?;
block.append_operation(
OperationBuilder::new("llvm.call", location)
.add_operands(&[function])
.add_operands(&[ptr, len])
.build()?,
);
Ok(())
}
}
pub fn setup_runtime(find_symbol_ptr: impl Fn(&str) -> Option<*mut c_void>) {
for binding in [
DebugBinding::BreakpointMarker,
DebugBinding::PrintStr,
DebugBinding::PrintI1,
DebugBinding::PrintI8,
DebugBinding::PrintI32,
DebugBinding::PrintI64,
DebugBinding::PrintI128,
DebugBinding::PrintPointer,
DebugBinding::PrintFelt252,
DebugBinding::DumpMemRegion,
] {
if let Some(global) = find_symbol_ptr(binding.symbol()) {
let global = global.cast::<*const ()>();
unsafe { *global = binding.function_ptr() };
}
}
}
extern "C" fn breakpoint_marker_impl() {
println!("[DEBUG] Breakpoint marker.");
}
extern "C" fn print_str_impl(message: *const std::ffi::c_char, len: u64) {
// llvm constant strings are not zero terminated
let slice = unsafe { std::slice::from_raw_parts(message as *const u8, len as usize) };
let message = std::str::from_utf8(slice);
if let Ok(message) = message {
println!("[DEBUG] {}", message);
} else {
println!("[DEBUG] {:?}", message);
}
}
extern "C" fn print_i1_impl(value: bool) {
println!("[DEBUG] {value}");
}
extern "C" fn print_i8_impl(value: u8) {
println!("[DEBUG] {value}");
}
extern "C" fn print_i32_impl(value: u32) {
println!("[DEBUG] {value}");
}
extern "C" fn print_i64_impl(value: u64) {
println!("[DEBUG] {value}");
}
extern "C" fn print_i128_impl(value_lo: u64, value_hi: u64) {
let value = ((value_hi as u128) << 64) | value_lo as u128;
println!("[DEBUG] {value}");
}
extern "C" fn print_pointer_impl(value: *const ()) {
println!("[DEBUG] {value:018x?}");
}
unsafe extern "C" fn dump_mem_region_impl(ptr: *const (), len: u64) {
println!("[DEBUG] Memory dump at {ptr:?}:");
for chunk in (0..len).step_by(8) {
print!(" {:?}:", ptr.byte_add(chunk as usize));
for offset in chunk..chunk + 8 {
print!(" {:02x}", ptr.byte_add(offset as usize).cast::<u8>().read());
}
println!();
}
}
extern "C" fn print_felt252_impl(l0: u64, l1: u64, l2: u64, l3: u64) {
println!(
"[DEBUG] {}",
BigUint::from_bytes_le(
&l0.to_le_bytes()
.into_iter()
.chain(l1.to_le_bytes())
.chain(l2.to_le_bytes())
.chain(l3.to_le_bytes())
.collect::<Vec<_>>(),
),
);
}
| rust | Apache-2.0 | f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce | 2026-01-04T20:20:54.031924Z | false |
lambdaclass/cairo_native | https://github.com/lambdaclass/cairo_native/blob/f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce/src/metadata/auto_breakpoint.rs | src/metadata/auto_breakpoint.rs | #![cfg(feature = "with-debug-utils")]
use super::{debug_utils::DebugUtils, MetadataStorage};
use crate::error::Error;
use cairo_lang_sierra::ids::ConcreteTypeId;
use melior::ir::{Block, Location};
use std::collections::HashSet;
#[derive(Clone, Debug, Eq, Hash, PartialEq)]
pub enum BreakpointEvent {
EnumInit {
type_id: ConcreteTypeId,
variant_idx: usize,
},
}
#[derive(Clone, Debug, Default)]
pub struct AutoBreakpoint {
events: HashSet<BreakpointEvent>,
}
impl AutoBreakpoint {
pub fn add_event(&mut self, event: BreakpointEvent) {
self.events.insert(event);
}
pub fn has_event(&self, event: &BreakpointEvent) -> bool {
self.events.contains(event)
}
pub fn maybe_breakpoint(
&self,
block: &Block,
location: Location,
metadata: &MetadataStorage,
event: &BreakpointEvent,
) -> Result<(), Error> {
if self.has_event(event) {
metadata
.get::<DebugUtils>()
.ok_or(Error::MissingMetadata)?
.debug_breakpoint_trap(block, location)?;
}
Ok(())
}
}
| rust | Apache-2.0 | f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce | 2026-01-04T20:20:54.031924Z | false |
lambdaclass/cairo_native | https://github.com/lambdaclass/cairo_native/blob/f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce/src/metadata/runtime_bindings.rs | src/metadata/runtime_bindings.rs | //! # Runtime library bindings
//!
//! This metadata ensures that the bindings to the runtime functions exist in the current
//! compilation context.
use crate::{
error::{Error, Result},
libfuncs::LibfuncHelper,
};
use itertools::Itertools;
use melior::{
dialect::{
arith::{self, CmpiPredicate},
cf, llvm, ods,
},
helpers::{ArithBlockExt, BuiltinBlockExt, LlvmBlockExt},
ir::{
attribute::{FlatSymbolRefAttribute, StringAttribute, TypeAttribute},
operation::OperationBuilder,
r#type::IntegerType,
Attribute, Block, BlockLike, Identifier, Location, Module, OperationRef, Region, Type,
Value,
},
Context,
};
use std::{
alloc::Layout,
collections::HashSet,
ffi::{c_int, c_void},
};
#[derive(Clone, Copy, Debug, Hash, Eq, PartialEq)]
enum RuntimeBinding {
Pedersen,
HadesPermutation,
EcStateTryFinalizeNz,
EcStateAddMul,
EcStateInit,
EcStateAdd,
EcPointTryNewNz,
EcPointFromXNz,
DictNew,
DictGet,
DictSquash,
DictDrop,
DictDup,
GetCostsBuiltin,
DebugPrint,
ExtendedEuclideanAlgorithm,
CircuitArithOperation,
#[cfg(feature = "with-cheatcode")]
VtableCheatcode,
}
impl RuntimeBinding {
const fn symbol(self) -> &'static str {
match self {
RuntimeBinding::DebugPrint => "cairo_native__libfunc__debug__print",
RuntimeBinding::Pedersen => "cairo_native__libfunc__pedersen",
RuntimeBinding::HadesPermutation => "cairo_native__libfunc__hades_permutation",
RuntimeBinding::EcStateTryFinalizeNz => {
"cairo_native__libfunc__ec__ec_state_try_finalize_nz"
}
RuntimeBinding::EcStateAddMul => "cairo_native__libfunc__ec__ec_state_add_mul",
RuntimeBinding::EcStateInit => "cairo_native__libfunc__ec__ec_state_init",
RuntimeBinding::EcStateAdd => "cairo_native__libfunc__ec__ec_state_add",
RuntimeBinding::EcPointTryNewNz => "cairo_native__libfunc__ec__ec_point_try_new_nz",
RuntimeBinding::EcPointFromXNz => "cairo_native__libfunc__ec__ec_point_from_x_nz",
RuntimeBinding::DictNew => "cairo_native__dict_new",
RuntimeBinding::DictGet => "cairo_native__dict_get",
RuntimeBinding::DictSquash => "cairo_native__dict_squash",
RuntimeBinding::DictDrop => "cairo_native__dict_drop",
RuntimeBinding::DictDup => "cairo_native__dict_dup",
RuntimeBinding::GetCostsBuiltin => "cairo_native__get_costs_builtin",
RuntimeBinding::ExtendedEuclideanAlgorithm => {
"cairo_native__extended_euclidean_algorithm"
}
RuntimeBinding::CircuitArithOperation => "cairo_native__circuit_arith_operation",
#[cfg(feature = "with-cheatcode")]
RuntimeBinding::VtableCheatcode => "cairo_native__vtable_cheatcode",
}
}
/// Returns an `Option` with a function pointer depending on how the binding is implemented.
///
/// - For external bindings (implemented in Rust), it returns `Some`, containing
/// a pointer to the corresponding Rust function
/// - For internal bindings (implemented in MLIR), it returns `None`, since those
/// functions are defined within MLIR and invoked by name
const fn function_ptr(self) -> Option<*const ()> {
let function_ptr = match self {
RuntimeBinding::DebugPrint => {
crate::runtime::cairo_native__libfunc__debug__print as *const ()
}
RuntimeBinding::Pedersen => {
crate::runtime::cairo_native__libfunc__pedersen as *const ()
}
RuntimeBinding::HadesPermutation => {
crate::runtime::cairo_native__libfunc__hades_permutation as *const ()
}
RuntimeBinding::EcStateTryFinalizeNz => {
crate::runtime::cairo_native__libfunc__ec__ec_state_try_finalize_nz as *const ()
}
RuntimeBinding::EcStateAddMul => {
crate::runtime::cairo_native__libfunc__ec__ec_state_add_mul as *const ()
}
RuntimeBinding::EcStateInit => {
crate::runtime::cairo_native__libfunc__ec__ec_state_init as *const ()
}
RuntimeBinding::EcStateAdd => {
crate::runtime::cairo_native__libfunc__ec__ec_state_add as *const ()
}
RuntimeBinding::EcPointTryNewNz => {
crate::runtime::cairo_native__libfunc__ec__ec_point_try_new_nz as *const ()
}
RuntimeBinding::EcPointFromXNz => {
crate::runtime::cairo_native__libfunc__ec__ec_point_from_x_nz as *const ()
}
RuntimeBinding::DictNew => crate::runtime::cairo_native__dict_new as *const (),
RuntimeBinding::DictGet => crate::runtime::cairo_native__dict_get as *const (),
RuntimeBinding::DictSquash => crate::runtime::cairo_native__dict_squash as *const (),
RuntimeBinding::DictDrop => crate::runtime::cairo_native__dict_drop as *const (),
RuntimeBinding::DictDup => crate::runtime::cairo_native__dict_dup as *const (),
RuntimeBinding::GetCostsBuiltin => {
crate::runtime::cairo_native__get_costs_builtin as *const ()
}
RuntimeBinding::ExtendedEuclideanAlgorithm => return None,
RuntimeBinding::CircuitArithOperation => return None,
#[cfg(feature = "with-cheatcode")]
RuntimeBinding::VtableCheatcode => {
crate::starknet::cairo_native__vtable_cheatcode as *const ()
}
};
Some(function_ptr)
}
}
// This enum is used when performing circuit arithmetic operations.
// Inversion is not included because it is handled separately.
#[repr(u8)]
#[derive(Clone, Copy)]
pub enum CircuitArithOperationType {
Add,
Sub,
Mul,
}
/// Runtime library bindings metadata.
#[derive(Debug, Default)]
pub struct RuntimeBindingsMeta {
active_map: HashSet<RuntimeBinding>,
}
impl RuntimeBindingsMeta {
/// Register the global for the given binding, if not yet registered, and return
/// a pointer to the stored function.
///
/// For the function to be available, `setup_runtime` must be called before running the module
fn build_function<'c, 'a>(
&mut self,
context: &'c Context,
module: &Module,
block: &'a Block<'c>,
location: Location<'c>,
binding: RuntimeBinding,
) -> Result<Value<'c, 'a>> {
if self.active_map.insert(binding) {
module.body().append_operation(
ods::llvm::mlir_global(
context,
Region::new(),
TypeAttribute::new(llvm::r#type::pointer(context, 0)),
StringAttribute::new(context, binding.symbol()),
Attribute::parse(context, "#llvm.linkage<weak>")
.ok_or(Error::ParseAttributeError)?,
location,
)
.into(),
);
}
let global_address = block.append_op_result(
ods::llvm::mlir_addressof(
context,
llvm::r#type::pointer(context, 0),
FlatSymbolRefAttribute::new(context, binding.symbol()),
location,
)
.into(),
)?;
Ok(block.load(
context,
location,
global_address,
llvm::r#type::pointer(context, 0),
)?)
}
/// Build if necessary the extended euclidean algorithm used in circuit inverse gates.
///
/// After checking, calls the MLIR function with arguments `a` and `b` which are the initial remainders
/// used in the algorithm and returns a `Value` containing a struct where the first element is the
/// greatest common divisor of `a` and `b` and the second element is the bezout coefficient x.
pub fn extended_euclidean_algorithm<'c, 'a>(
&mut self,
context: &'c Context,
module: &Module,
block: &'a Block<'c>,
location: Location<'c>,
a: Value<'c, '_>,
b: Value<'c, '_>,
) -> Result<Value<'c, 'a>>
where
'c: 'a,
{
let func_symbol = RuntimeBinding::ExtendedEuclideanAlgorithm.symbol();
if self
.active_map
.insert(RuntimeBinding::ExtendedEuclideanAlgorithm)
{
build_egcd_function(module, context, location, func_symbol)?;
}
let integer_type: Type = IntegerType::new(context, 384).into();
// The struct returned by the function that contains both of the results
let return_type = llvm::r#type::r#struct(context, &[integer_type, integer_type], false);
Ok(block
.append_operation(
OperationBuilder::new("llvm.call", location)
.add_attributes(&[(
Identifier::new(context, "callee"),
FlatSymbolRefAttribute::new(context, func_symbol).into(),
)])
.add_operands(&[a, b])
.add_results(&[return_type])
.build()?,
)
.result(0)?
.into())
}
/// Builds, if necessary, the circuit operation function, used to perform
/// circuit arithmetic operations.
///
/// ## Operands
/// - `op`: an enum telling which arithmetic operation to perform.
/// - `lhs_value`: u384 operand.
/// - `rhs_value`: u384 operand.
/// - `circuit_modulus`: u384 circuit modulus.
///
/// This function only handles addition, substraction and multiplication
/// operations. The inversion operation was excluded as it is already handled
/// by the [`extended_euclidean_algorithm`]
#[allow(clippy::too_many_arguments)]
pub fn circuit_arith_operation<'c, 'a>(
&mut self,
context: &'c Context,
module: &Module,
block: &'a Block<'c>,
location: Location<'c>,
op_type: CircuitArithOperationType,
lhs_value: Value<'c, '_>,
rhs_value: Value<'c, '_>,
circuit_modulus: Value<'c, '_>,
) -> Result<Value<'c, 'a>>
where
'c: 'a,
{
let func_symbol = RuntimeBinding::CircuitArithOperation.symbol();
if self
.active_map
.insert(RuntimeBinding::CircuitArithOperation)
{
build_circuit_arith_operation(context, module, location, func_symbol)?;
}
let op_tag = block.const_int(context, location, op_type as u8, 2)?;
let return_type = IntegerType::new(context, 384).into();
Ok(block.append_op_result(
OperationBuilder::new("llvm.call", location)
.add_attributes(&[(
Identifier::new(context, "callee"),
FlatSymbolRefAttribute::new(context, func_symbol).into(),
)])
.add_operands(&[op_tag, lhs_value, rhs_value, circuit_modulus])
.add_results(&[return_type])
.build()?,
)?)
}
/// Register if necessary, then invoke the `debug::print()` function.
#[allow(clippy::too_many_arguments)]
pub fn libfunc_debug_print<'c, 'a>(
&mut self,
context: &'c Context,
module: &Module,
block: &'a Block<'c>,
target_fd: Value<'c, '_>,
values_ptr: Value<'c, '_>,
values_len: Value<'c, '_>,
location: Location<'c>,
) -> Result<Value<'c, 'a>>
where
'c: 'a,
{
let function =
self.build_function(context, module, block, location, RuntimeBinding::DebugPrint)?;
Ok(block
.append_operation(
OperationBuilder::new("llvm.call", location)
.add_operands(&[function])
.add_operands(&[target_fd, values_ptr, values_len])
.add_results(&[IntegerType::new(context, 32).into()])
.build()?,
)
.result(0)?
.into())
}
/// Register if necessary, then invoke the `pedersen()` function.
#[allow(clippy::too_many_arguments)]
pub fn libfunc_pedersen<'c, 'a>(
&mut self,
context: &'c Context,
module: &Module,
block: &'a Block<'c>,
dst_ptr: Value<'c, '_>,
lhs_ptr: Value<'c, '_>,
rhs_ptr: Value<'c, '_>,
location: Location<'c>,
) -> Result<OperationRef<'c, 'a>>
where
'c: 'a,
{
let function =
self.build_function(context, module, block, location, RuntimeBinding::Pedersen)?;
Ok(block.append_operation(
OperationBuilder::new("llvm.call", location)
.add_operands(&[function])
.add_operands(&[dst_ptr, lhs_ptr, rhs_ptr])
.build()?,
))
}
/// Register if necessary, then invoke the `poseidon()` function.
/// The passed pointers serve both as in/out pointers. I.E results are stored in the given pointers.
#[allow(clippy::too_many_arguments)]
pub fn libfunc_hades_permutation<'c, 'a>(
&mut self,
context: &'c Context,
module: &Module,
block: &'a Block<'c>,
op0_ptr: Value<'c, '_>,
op1_ptr: Value<'c, '_>,
op2_ptr: Value<'c, '_>,
location: Location<'c>,
) -> Result<OperationRef<'c, 'a>>
where
'c: 'a,
{
let function = self.build_function(
context,
module,
block,
location,
RuntimeBinding::HadesPermutation,
)?;
Ok(block.append_operation(
OperationBuilder::new("llvm.call", location)
.add_operands(&[function])
.add_operands(&[op0_ptr, op1_ptr, op2_ptr])
.build()?,
))
}
/// Register if necessary, then invoke the `ec_point_from_x_nz()` function.
pub fn libfunc_ec_point_from_x_nz<'c, 'a>(
&mut self,
context: &'c Context,
module: &Module,
block: &'a Block<'c>,
point_ptr: Value<'c, '_>,
location: Location<'c>,
) -> Result<OperationRef<'c, 'a>>
where
'c: 'a,
{
let function = self.build_function(
context,
module,
block,
location,
RuntimeBinding::EcPointFromXNz,
)?;
Ok(block.append_operation(
OperationBuilder::new("llvm.call", location)
.add_operands(&[function])
.add_operands(&[point_ptr])
.add_results(&[IntegerType::new(context, 1).into()])
.build()?,
))
}
/// Register if necessary, then invoke the `ec_point_try_new_nz()` function.
pub fn libfunc_ec_point_try_new_nz<'c, 'a>(
&mut self,
context: &'c Context,
module: &Module,
block: &'a Block<'c>,
point_ptr: Value<'c, '_>,
location: Location<'c>,
) -> Result<OperationRef<'c, 'a>>
where
'c: 'a,
{
let function = self.build_function(
context,
module,
block,
location,
RuntimeBinding::EcPointTryNewNz,
)?;
Ok(block.append_operation(
OperationBuilder::new("llvm.call", location)
.add_operands(&[function])
.add_operands(&[point_ptr])
.add_results(&[IntegerType::new(context, 1).into()])
.build()?,
))
}
/// Register if necessary, then invoke the `ec_state_init()` function.
pub fn libfunc_ec_state_init<'c, 'a>(
&mut self,
context: &'c Context,
module: &Module,
block: &'a Block<'c>,
state_ptr: Value<'c, '_>,
location: Location<'c>,
) -> Result<OperationRef<'c, 'a>>
where
'c: 'a,
{
let function = self.build_function(
context,
module,
block,
location,
RuntimeBinding::EcStateInit,
)?;
Ok(block.append_operation(
OperationBuilder::new("llvm.call", location)
.add_operands(&[function])
.add_operands(&[state_ptr])
.build()?,
))
}
/// Register if necessary, then invoke the `ec_state_add()` function.
pub fn libfunc_ec_state_add<'c, 'a>(
&mut self,
context: &'c Context,
module: &Module,
block: &'a Block<'c>,
state_ptr: Value<'c, '_>,
point_ptr: Value<'c, '_>,
location: Location<'c>,
) -> Result<OperationRef<'c, 'a>>
where
'c: 'a,
{
let function =
self.build_function(context, module, block, location, RuntimeBinding::EcStateAdd)?;
Ok(block.append_operation(
OperationBuilder::new("llvm.call", location)
.add_operands(&[function])
.add_operands(&[state_ptr, point_ptr])
.build()?,
))
}
/// Register if necessary, then invoke the `ec_state_add_mul()` function.
#[allow(clippy::too_many_arguments)]
pub fn libfunc_ec_state_add_mul<'c, 'a>(
&mut self,
context: &'c Context,
module: &Module,
block: &'a Block<'c>,
state_ptr: Value<'c, '_>,
scalar_ptr: Value<'c, '_>,
point_ptr: Value<'c, '_>,
location: Location<'c>,
) -> Result<OperationRef<'c, 'a>>
where
'c: 'a,
{
let function = self.build_function(
context,
module,
block,
location,
RuntimeBinding::EcStateAddMul,
)?;
Ok(block.append_operation(
OperationBuilder::new("llvm.call", location)
.add_operands(&[function])
.add_operands(&[state_ptr, scalar_ptr, point_ptr])
.build()?,
))
}
pub fn libfunc_ec_state_try_finalize_nz<'c, 'a>(
&mut self,
context: &'c Context,
module: &Module,
block: &'a Block<'c>,
point_ptr: Value<'c, '_>,
state_ptr: Value<'c, '_>,
location: Location<'c>,
) -> Result<OperationRef<'c, 'a>>
where
'c: 'a,
{
let function = self.build_function(
context,
module,
block,
location,
RuntimeBinding::EcStateTryFinalizeNz,
)?;
Ok(block.append_operation(
OperationBuilder::new("llvm.call", location)
.add_operands(&[function])
.add_operands(&[point_ptr, state_ptr])
.add_results(&[IntegerType::new(context, 1).into()])
.build()?,
))
}
/// Register if necessary, then invoke the `dict_alloc_new()` function.
///
/// Returns a opaque pointer as the result.
#[allow(clippy::too_many_arguments)]
pub fn dict_new<'c, 'a>(
&mut self,
context: &'c Context,
module: &Module,
block: &'a Block<'c>,
location: Location<'c>,
drop_fn: Option<Value<'c, 'a>>,
layout: Layout,
) -> Result<Value<'c, 'a>>
where
'c: 'a,
{
let function =
self.build_function(context, module, block, location, RuntimeBinding::DictNew)?;
let i64_ty = IntegerType::new(context, 64).into();
let size = block.const_int_from_type(context, location, layout.size(), i64_ty)?;
let align = block.const_int_from_type(context, location, layout.align(), i64_ty)?;
let drop_fn = match drop_fn {
Some(x) => x,
None => {
block.append_op_result(llvm::zero(llvm::r#type::pointer(context, 0), location))?
}
};
Ok(block.append_op_result(
OperationBuilder::new("llvm.call", location)
.add_operands(&[function])
.add_operands(&[size, align, drop_fn])
.add_results(&[llvm::r#type::pointer(context, 0)])
.build()?,
)?)
}
/// Register if necessary, then invoke the `dict_alloc_new()` function.
///
/// Returns a opaque pointer as the result.
#[allow(clippy::too_many_arguments)]
pub fn dict_drop<'c, 'a>(
&mut self,
context: &'c Context,
module: &Module,
block: &'a Block<'c>,
ptr: Value<'c, 'a>,
location: Location<'c>,
) -> Result<OperationRef<'c, 'a>>
where
'c: 'a,
{
let function =
self.build_function(context, module, block, location, RuntimeBinding::DictDrop)?;
Ok(block.append_operation(
OperationBuilder::new("llvm.call", location)
.add_operands(&[function])
.add_operands(&[ptr])
.build()?,
))
}
/// Register if necessary, then invoke the `dict_alloc_new()` function.
///
/// Returns a opaque pointer as the result.
#[allow(clippy::too_many_arguments)]
pub fn dict_dup<'c, 'a>(
&mut self,
context: &'c Context,
module: &Module,
block: &'a Block<'c>,
ptr: Value<'c, 'a>,
location: Location<'c>,
) -> Result<Value<'c, 'a>>
where
'c: 'a,
{
let function =
self.build_function(context, module, block, location, RuntimeBinding::DictDup)?;
Ok(block.append_op_result(
OperationBuilder::new("llvm.call", location)
.add_operands(&[function])
.add_operands(&[ptr])
.add_results(&[llvm::r#type::pointer(context, 0)])
.build()?,
)?)
}
/// Register if necessary, then invoke the `dict_get()` function.
///
/// Gets the value for a given key, the returned pointer is null if not found.
///
/// Returns a opaque pointer as the result.
#[allow(clippy::too_many_arguments)]
pub fn dict_get<'c, 'a>(
&mut self,
context: &'c Context,
helper: &LibfuncHelper<'c, 'a>,
block: &'a Block<'c>,
dict_ptr: Value<'c, 'a>, // ptr to the dict
key_ptr: Value<'c, 'a>, // key must be a ptr to Felt
location: Location<'c>,
) -> Result<(Value<'c, 'a>, Value<'c, 'a>)>
where
'c: 'a,
{
let function =
self.build_function(context, helper, block, location, RuntimeBinding::DictGet)?;
let value_ptr = helper.init_block().alloca1(
context,
location,
llvm::r#type::pointer(context, 0),
align_of::<*mut ()>(),
)?;
let is_present = block.append_op_result(
OperationBuilder::new("llvm.call", location)
.add_operands(&[function])
.add_operands(&[dict_ptr, key_ptr, value_ptr])
.add_results(&[IntegerType::new(context, c_int::BITS).into()])
.build()?,
)?;
let value_ptr = block.load(
context,
location,
value_ptr,
llvm::r#type::pointer(context, 0),
)?;
Ok((is_present, value_ptr))
}
/// Register if necessary, then invoke the `dict_gas_refund()` function.
///
/// Compute the total gas refund for the dictionary.
///
/// Returns a u64 of the result.
#[allow(clippy::too_many_arguments)]
pub fn dict_squash<'c, 'a>(
&mut self,
context: &'c Context,
module: &Module,
block: &'a Block<'c>,
dict_ptr: Value<'c, 'a>, // ptr to the dict
range_check_ptr: Value<'c, 'a>, // ptr to range check
gas_ptr: Value<'c, 'a>, // ptr to gas
location: Location<'c>,
) -> Result<OperationRef<'c, 'a>>
where
'c: 'a,
{
let function =
self.build_function(context, module, block, location, RuntimeBinding::DictSquash)?;
Ok(block.append_operation(
OperationBuilder::new("llvm.call", location)
.add_operands(&[function])
.add_operands(&[dict_ptr, range_check_ptr, gas_ptr])
.add_results(&[IntegerType::new(context, 64).into()])
.build()?,
))
}
// Register if necessary, then invoke the `get_costs_builtin()` function.
#[allow(clippy::too_many_arguments)]
pub fn get_costs_builtin<'c, 'a>(
&mut self,
context: &'c Context,
module: &Module,
block: &'a Block<'c>,
location: Location<'c>,
) -> Result<OperationRef<'c, 'a>>
where
'c: 'a,
{
let function = self.build_function(
context,
module,
block,
location,
RuntimeBinding::GetCostsBuiltin,
)?;
Ok(block.append_operation(
OperationBuilder::new("llvm.call", location)
.add_operands(&[function])
.add_results(&[llvm::r#type::pointer(context, 0)])
.build()?,
))
}
/// Register if necessary, then invoke the `vtable_cheatcode()` runtime function.
///
/// Calls the cheatcode syscall with the given arguments.
///
/// The result is stored in `result_ptr`.
#[allow(clippy::too_many_arguments)]
#[cfg(feature = "with-cheatcode")]
pub fn vtable_cheatcode<'c, 'a>(
&mut self,
context: &'c Context,
module: &Module,
block: &'a Block<'c>,
location: Location<'c>,
result_ptr: Value<'c, 'a>,
selector_ptr: Value<'c, 'a>,
args: Value<'c, 'a>,
) -> Result<OperationRef<'c, 'a>>
where
'c: 'a,
{
let function = self.build_function(
context,
module,
block,
location,
RuntimeBinding::VtableCheatcode,
)?;
Ok(block.append_operation(
OperationBuilder::new("llvm.call", location)
.add_operands(&[function])
.add_operands(&[result_ptr, selector_ptr, args])
.build()?,
))
}
}
pub fn setup_runtime(find_symbol_ptr: impl Fn(&str) -> Option<*mut c_void>) {
for binding in [
RuntimeBinding::DebugPrint,
RuntimeBinding::Pedersen,
RuntimeBinding::HadesPermutation,
RuntimeBinding::EcStateTryFinalizeNz,
RuntimeBinding::EcStateAddMul,
RuntimeBinding::EcStateInit,
RuntimeBinding::EcStateAdd,
RuntimeBinding::EcPointTryNewNz,
RuntimeBinding::EcPointFromXNz,
RuntimeBinding::DictNew,
RuntimeBinding::DictGet,
RuntimeBinding::DictSquash,
RuntimeBinding::DictDrop,
RuntimeBinding::DictDup,
RuntimeBinding::GetCostsBuiltin,
RuntimeBinding::DebugPrint,
#[cfg(feature = "with-cheatcode")]
RuntimeBinding::VtableCheatcode,
] {
if let Some(global) = find_symbol_ptr(binding.symbol()) {
let global = global.cast::<*const ()>();
unsafe {
if let Some(function_ptr) = binding.function_ptr() {
*global = function_ptr;
};
}
}
}
}
/// Build the extended euclidean algorithm MLIR function.
///
/// The extended euclidean algorithm calculates the greatest common divisor
/// (gcd) of two integers `a` and `b`, as well as the Bézout coefficients `x`
/// and `y` such that `ax + by = gcd(a,b)`. If `gcd(a,b) = 1`, then `x` is the
/// modular multiplicative inverse of `a` modulo `b`.
///
/// This function declares a MLIR function that given two 384 bit integers `a`
/// and `b`, returns a MLIR struct with `gcd(a,b)` and the Bézout coefficient
/// `x`. The declaration is done in the body of the module.
fn build_egcd_function<'ctx>(
module: &Module,
context: &'ctx Context,
location: Location<'ctx>,
func_symbol: &str,
) -> Result<()> {
let integer_width = 384;
let integer_type = IntegerType::new(context, integer_width).into();
// Pseudocode for calculating the EGCD of two integers `a` and `b`.
// https://en.wikipedia.org/wiki/Extended_Euclidean_algorithm#Pseudocode.
//
// ```
// (old_r, new_r) := (a, b)
// (old_s, new_s) := (1, 0)
//
// while new_r != 0 do
// quotient := old_r / new_r
// (old_r, new_r) := (new_r, old_r − quotient * new_r)
// (old_s, new_s) := (new_s, old_s − quotient * new_s)
//
// old_s is equal to Bézout coefficient X
// old_r is equal to GCD
// ```
//
// Note that when `b > a`, the first iteration inverts the values. Our
// implementation does it manually as we already know that `b > a`.
//
// The core idea of the method is that `gcd(a,b) = gcd(a,b-a)`, and that
// `gcd(a,b) = gcd(b,a)`. As an optimization, we can actually substract `a`
// from `b` as many times as possible, so `gcd(a,b) = gcd(b%a,a)`.
//
// Take, for example, `a=21` and `b=54`:
//
// gcd(21, 54)
// = gcd(12, 21)
// = gcd(9, 12)
// = gcd(3, 9)
// = gcd(0, 3)
// = 3
//
// Thus, the algorithm works by calculating a series of remainders `r` which
// starts with b,a,... being `r[i]` the remainder of dividing `r[i-2]` by
// `r[i-1]`. At each step, `r[i]` can be calculated as:
//
// r[i] = r[i-2] - r[i-1] * quotient
//
// The GCD will be the last non-zero remainder.
//
// [54; 21; 12; 9; 3; 0]
// ^
//
// See Dr. Katherine Stange's Youtube video for a better explanation on how
// this works: https://www.youtube.com/watch?v=Jwf6ncRmhPg.
//
// The extended algorithm also obtains the Bézout coefficients
// by calculating a series of coefficients `s`. See Dr. Katherine
// Stange's Youtube video for a better explanation on how this works:
// https://www.youtube.com/watch?v=IwRtISxAHY4.
// Define entry block for function. Receives arguments `a` and `b`.
let region = Region::new();
let entry_block = region.append_block(Block::new(&[
(integer_type, location), // a
(integer_type, location), // b
]));
// Define loop block for function. Each iteration last two values from each series.
let loop_block = region.append_block(Block::new(&[
(integer_type, location), // old_r
(integer_type, location), // new_r
(integer_type, location), // old_s
(integer_type, location), // new_s
]));
// Define end block for function.
let end_block = region.append_block(Block::new(&[
(integer_type, location), // old_r
(integer_type, location), // old_s
]));
// Jump to loop block from entry block, with initial values.
// - old_r = b
// - new_r = a
// - old_s = 0
// - new_s = 1
entry_block.append_operation(cf::br(
&loop_block,
&[
entry_block.arg(1)?,
entry_block.arg(0)?,
entry_block.const_int_from_type(context, location, 0, integer_type)?,
entry_block.const_int_from_type(context, location, 1, integer_type)?,
],
location,
));
// LOOP BLOCK
{
let old_r = loop_block.arg(0)?;
let new_r = loop_block.arg(1)?;
let old_s = loop_block.arg(2)?;
let new_s = loop_block.arg(3)?;
// First calculate quotient of old_r/new_r.
let quotient = loop_block.append_op_result(arith::divui(old_r, new_r, location))?;
// Multiply quotient by new_r and new_s.
let quotient_by_new_r = loop_block.muli(quotient, new_r, location)?;
let quotient_by_new_s = loop_block.muli(quotient, new_s, location)?;
// Calculate new values for next iteration.
// - next_new_r := old_r − quotient * new_r
// - next_new_s := old_s − quotient * new_s
let next_new_r =
loop_block.append_op_result(arith::subi(old_r, quotient_by_new_r, location))?;
let next_new_s =
loop_block.append_op_result(arith::subi(old_s, quotient_by_new_s, location))?;
// Jump to end block if next_new_r is zero.
let zero = loop_block.const_int_from_type(context, location, 0, integer_type)?;
let next_new_r_is_zero =
loop_block.cmpi(context, CmpiPredicate::Eq, next_new_r, zero, location)?;
loop_block.append_operation(cf::cond_br(
context,
next_new_r_is_zero,
&end_block,
&loop_block,
&[new_r, new_s],
| rust | Apache-2.0 | f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce | 2026-01-04T20:20:54.031924Z | true |
lambdaclass/cairo_native | https://github.com/lambdaclass/cairo_native/blob/f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce/src/metadata/gas.rs | src/metadata/gas.rs | //! This file contains the gas calculation metadata.
//!
//! Each statement has an associated `GasCost`, which represents the cost of
//! executing that statement, in terms of tokens.
//!
//! To calculate the actual cost, the amount of tokens is multiplied by the cost
//! of the given token type. The cost of each token type is specified on runtime,
//! with the `BuiltinCosts` structure.
//!
//! When implementing libfuncs, the `GasCost` metadata entry already contains
//! the `GasCost` for the current sierra statement
use cairo_lang_runner::token_gas_cost;
use cairo_lang_sierra::{
extensions::gas::CostTokenType,
ids::FunctionId,
program::{Program, StatementIdx},
};
use cairo_lang_sierra_ap_change::{ap_change_info::ApChangeInfo, ApChangeError};
use cairo_lang_sierra_gas::{gas_info::GasInfo, CostError};
use cairo_lang_sierra_to_casm::metadata::{
calc_metadata, calc_metadata_ap_change_only, Metadata as CairoGasMetadata,
MetadataComputationConfig, MetadataError as CairoGasMetadataError,
};
use cairo_lang_sierra_type_size::ProgramRegistryInfo;
use crate::{
error::{Error, Result as NativeResult},
native_panic,
};
use std::{collections::BTreeMap, fmt, ops::Deref};
/// Holds global gas info.
#[derive(Default)]
pub struct GasMetadata(pub CairoGasMetadata);
/// The gas cost associated to a determined sierra statement.
///
/// It contains the amount of tokens for each token type,
/// that a given sierra statement costs.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct GasCost(pub Vec<(u64, CostTokenType)>);
/// Error for metadata calculations.
#[derive(Debug, thiserror::Error, Eq, PartialEq)]
pub enum GasMetadataError {
#[error(transparent)]
ApChangeError(#[from] ApChangeError),
#[error(transparent)]
CostError(#[from] CostError),
#[error("Not enough gas to run the operation. Required: {:?}, Available: {:?}.", gas.0, gas.1)]
NotEnoughGas { gas: Box<(u64, u64)> },
}
impl GasMetadata {
pub fn new(
sierra_program: &Program,
sierra_program_info: &ProgramRegistryInfo,
config: Option<MetadataComputationConfig>,
) -> Result<GasMetadata, GasMetadataError> {
let cairo_gas_metadata = if let Some(metadata_config) = config {
calc_metadata(sierra_program, sierra_program_info, metadata_config)?
} else {
calc_metadata_ap_change_only(sierra_program, sierra_program_info)?
};
Ok(GasMetadata::from(cairo_gas_metadata))
}
/// Returns the initial value for the gas counter.
/// If `available_gas` is None returns 0.
pub fn get_initial_available_gas(
&self,
func: &FunctionId,
available_gas: Option<u64>,
) -> Result<u64, Error> {
let Some(available_gas) = available_gas else {
return Ok(0);
};
// In case we don't have any costs - it means no gas equations were solved (and we are in
// the case of no gas checking enabled) - so the gas builtin is irrelevant, and we
// can return any value.
let Some(required_gas) = self.initial_required_gas(func)? else {
return Ok(0);
};
available_gas
.checked_sub(required_gas)
.ok_or(Error::GasMetadataError(GasMetadataError::NotEnoughGas {
gas: Box::new((required_gas, available_gas)),
}))
}
pub fn initial_required_gas(&self, func: &FunctionId) -> Result<Option<u64>, Error> {
if self.gas_info.function_costs.is_empty() {
return Ok(None);
}
Ok(Some(
self.gas_info.function_costs[func]
.iter()
.map(|(token_type, val)| {
let Ok(val) = TryInto::<usize>::try_into(*val) else {
native_panic!("could not cast gas cost from i64 to usize");
};
Ok(val * token_gas_cost(*token_type))
})
.collect::<Result<Vec<_>, _>>()?
.iter()
.sum::<usize>() as u64,
))
}
pub fn initial_required_gas_for_entry_points(
&self,
) -> NativeResult<BTreeMap<u64, BTreeMap<u64, u64>>> {
self.gas_info
.function_costs
.iter()
.map(|func| {
Ok((func.0.id, {
let mut costs = BTreeMap::new();
for (token, val) in func.1.iter() {
let offset: u64 = match token {
CostTokenType::Const => 0,
CostTokenType::Pedersen => 1,
CostTokenType::Bitwise => 2,
CostTokenType::EcOp => 3,
CostTokenType::Poseidon => 4,
CostTokenType::AddMod => 5,
CostTokenType::MulMod => 6,
_ => native_panic!("matched an unexpected CostTokenType"),
};
costs.insert(offset, *val as u64);
}
costs
}))
})
.collect()
}
pub fn get_gas_costs_for_statement(&self, idx: StatementIdx) -> Vec<(u64, CostTokenType)> {
let mut costs = Vec::new();
for cost_type in CostTokenType::iter_casm_tokens() {
if let Some(cost_count) =
self.get_gas_cost_for_statement_and_cost_token_type(idx, *cost_type)
{
if cost_count > 0 {
costs.push((cost_count, *cost_type));
}
}
}
costs
}
pub fn get_gas_cost_for_statement_and_cost_token_type(
&self,
idx: StatementIdx,
cost_type: CostTokenType,
) -> Option<u64> {
self.gas_info
.variable_values
.get(&(idx, cost_type))
.copied()
.map(|x| x.try_into().expect("gas cost couldn't be converted to u64"))
}
}
// Wrap the compiler's gas metadata in our local newtype.
impl From<CairoGasMetadata> for GasMetadata {
    fn from(value: CairoGasMetadata) -> Self {
        Self(value)
    }
}
// Transparent access to the wrapped `CairoGasMetadata`'s fields and methods.
impl Deref for GasMetadata {
    type Target = CairoGasMetadata;
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
// Manual `Debug` that prints the inner metadata's fields directly.
// NOTE(review): presumably needed because `CairoGasMetadata` itself does not
// derive `Debug` — confirm against the cairo compiler crates.
impl fmt::Debug for GasMetadata {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("GasMetadata")
            .field("ap_change_info", &self.ap_change_info)
            .field("gas_info", &self.gas_info)
            .finish()
    }
}
// Manual `Clone` that rebuilds the inner structs field by field.
// NOTE(review): presumably `CairoGasMetadata`/`ApChangeInfo`/`GasInfo` do not
// derive `Clone` upstream, forcing this by-hand copy — confirm.
impl Clone for GasMetadata {
    fn clone(&self) -> Self {
        Self(CairoGasMetadata {
            ap_change_info: ApChangeInfo {
                variable_values: self.ap_change_info.variable_values.clone(),
                function_ap_change: self.ap_change_info.function_ap_change.clone(),
            },
            gas_info: GasInfo {
                variable_values: self.gas_info.variable_values.clone(),
                function_costs: self.gas_info.function_costs.clone(),
            },
        })
    }
}
// Map the compiler's error variants onto the local error type one-to-one.
impl From<CairoGasMetadataError> for GasMetadataError {
    fn from(value: CairoGasMetadataError) -> Self {
        match value {
            CairoGasMetadataError::ApChangeError(x) => GasMetadataError::ApChangeError(x),
            CairoGasMetadataError::CostError(x) => GasMetadataError::CostError(x),
        }
    }
}
| rust | Apache-2.0 | f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce | 2026-01-04T20:20:54.031924Z | false |
lambdaclass/cairo_native | https://github.com/lambdaclass/cairo_native/blob/f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce/src/metadata/tail_recursion.rs | src/metadata/tail_recursion.rs | //! # Tail recursion information
//!
//! Whenever the compiler detects a direct tail-recursive function call it'll insert this metadata.
//! If the libfunc handler decides to use it by setting a return target, the compiler will insert
//! the required instructions to make it really tail-recursive.
//!
//! Directly recursive functions are detected by checking if the current statement is a function
//! call. Indirect recursion is not handled and generates normal recursive code.
//!
//! Next, the compiler checks whether those direct recursive calls are in fact tail recursive.
//! Recursive function calls are tail recursive if nothing declared before the function call is used
//! after it. Due to the way Sierra works, this is as simple as checking whether the state is empty
//! after taking the variables sent to the function call from itself.
//!
//! When tail recursion is detected, a counter is added which counts the current recursion depth.
//! The counter being zero means that no recursion has taken place (either because the program
//! hasn't reached the recursion point yet, or because it has completely unwound already).
//!
//! Every time the recursive function call is invoked, the libfunc builder should increment this
//! counter by one and jump to the recursion target block provided by the meta. This recursion
//! target is provided by the compiler and should point to the beginning of the function and have
//! the same arguments. The function call libfunc builder should then set the return target to a
//! block which jumps to the next libfunc (a standard libfunc builder terminator as in every other
//! libfunc).
//!
//! When the compiler generates the code for a return statement of a tail recursive function, it'll
//! first check whether the depth counter is zero or not. If zero, a normal return statement will be
//! generated since it'd mean our parent frame is a different function. However if the counter is
//! not zero, the counter is decremented by one and a jump to the return target is generated, which
//! will return control to the function call libfunc builder. The builder should then jump to the
//! next libfunc statement as if it were a normal function call.
//!
//! After calling the libfunc builder, the metadata is removed from storage to avoid collisions
//! later on.
//!
//! The same algorithm can be applied multiple times if there are multiple tail-recursive calls
//! within a function. The compiler should create a different depth counter for each recursive call
//! in the function.
use melior::ir::{Block, BlockRef, Value, ValueLike};
use mlir_sys::{MlirBlock, MlirValue};
/// The tail recursion metadata.
///
/// Check out [the module](self) for more information about how tail recursion works.
// TODO: Find a way to pass stuff with lifetimes while keeping the compiler happy.
#[derive(Debug)]
pub struct TailRecursionMeta {
    // Raw MLIR value holding the current recursion depth counter.
    depth_counter: MlirValue,
    // Raw MLIR block to jump to when performing the recursive call.
    recursion_target: MlirBlock,
    // Raw MLIR block to jump back to on return while the depth counter is
    // non-zero; set by the libfunc builder via `set_return_target`.
    return_target: Option<MlirBlock>,
}
impl TailRecursionMeta {
/// Create the tail recursion meta.
pub fn new(depth_counter: Value, recursion_target: &Block) -> Self {
Self {
depth_counter: depth_counter.to_raw(),
recursion_target: recursion_target.to_raw(),
return_target: None,
}
}
/// Return the current depth counter value.
pub fn depth_counter<'ctx, 'this>(&self) -> Value<'ctx, 'this> {
unsafe { Value::from_raw(self.depth_counter) }
}
/// Return the recursion target block.
pub fn recursion_target<'ctx, 'this>(&self) -> BlockRef<'ctx, 'this> {
unsafe { BlockRef::from_raw(self.recursion_target) }
}
/// Return the return target block, if set.
pub fn return_target<'ctx, 'this>(&self) -> Option<BlockRef<'ctx, 'this>> {
self.return_target
.map(|ptr| unsafe { BlockRef::from_raw(ptr) })
}
/// Set the return target block.
pub fn set_return_target(&mut self, block: &Block) {
self.return_target = Some(block.to_raw());
}
}
| rust | Apache-2.0 | f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce | 2026-01-04T20:20:54.031924Z | false |
lambdaclass/cairo_native | https://github.com/lambdaclass/cairo_native/blob/f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce/src/metadata/felt252_dict.rs | src/metadata/felt252_dict.rs | use super::{drop_overrides::DropOverridesMeta, MetadataStorage};
use crate::{
error::{Error, Result},
utils::ProgramRegistryExt,
};
use cairo_lang_sierra::{
extensions::core::{CoreLibfunc, CoreType},
ids::ConcreteTypeId,
program_registry::ProgramRegistry,
};
use melior::{
dialect::llvm,
helpers::{BuiltinBlockExt, LlvmBlockExt},
ir::{
attribute::{FlatSymbolRefAttribute, StringAttribute, TypeAttribute},
Attribute, Block, BlockLike, Identifier, Location, Module, Region,
},
Context,
};
use std::collections::{hash_map::Entry, HashMap};
#[derive(Clone, Debug, Default)]
pub struct Felt252DictOverrides {
    // Maps a dict value's type id to the symbol name of its generated
    // `drop$<id>$item` function (see `build_drop_fn`).
    drop_overrides: HashMap<ConcreteTypeId, String>,
}
impl Felt252DictOverrides {
    /// Returns the registered drop function symbol for `type_id`, if any.
    pub fn get_drop_fn(&self, type_id: &ConcreteTypeId) -> Option<&str> {
        self.drop_overrides.get(type_id).map(String::as_str)
    }
    /// Builds (at most once per type) a `drop$<id>$item` function that loads a
    /// dict value from a pointer and invokes its registered drop override.
    ///
    /// Returns `None` when the value type has no drop override registered, in
    /// which case nothing is emitted.
    pub fn build_drop_fn<'ctx>(
        &mut self,
        context: &'ctx Context,
        module: &Module<'ctx>,
        registry: &ProgramRegistry<CoreType, CoreLibfunc>,
        metadata: &mut MetadataStorage,
        type_id: &ConcreteTypeId,
    ) -> Result<Option<FlatSymbolRefAttribute<'ctx>>> {
        let location = Location::unknown(context);
        let inner_ty = registry.build_type(context, module, metadata, type_id)?;
        Ok(if DropOverridesMeta::is_overriden(metadata, type_id) {
            let drop_fn_symbol = format!("drop${}$item", type_id.id);
            let flat_symbol_ref = FlatSymbolRefAttribute::new(context, &drop_fn_symbol);
            // Only generate the function body the first time this type is seen.
            if let Entry::Vacant(entry) = self.drop_overrides.entry(type_id.clone()) {
                let drop_fn_symbol = entry.insert(drop_fn_symbol);
                let region = Region::new();
                // NOTE: `entry` is re-bound here, from the map entry above to
                // the function's single entry block (one pointer argument).
                let entry = region
                    .append_block(Block::new(&[(llvm::r#type::pointer(context, 0), location)]));
                let value = entry.load(context, location, entry.arg(0)?, inner_ty)?;
                DropOverridesMeta::invoke_override(
                    context, registry, module, &entry, &entry, location, metadata, type_id, value,
                )?;
                entry.append_operation(llvm::r#return(None, location));
                // Emit the drop function itself: `void drop$<id>$item(ptr)`.
                module.body().append_operation(llvm::func(
                    context,
                    StringAttribute::new(context, drop_fn_symbol),
                    TypeAttribute::new(llvm::r#type::function(
                        llvm::r#type::void(context),
                        &[llvm::r#type::pointer(context, 0)],
                        false,
                    )),
                    region,
                    &[
                        (
                            Identifier::new(context, "sym_visibility"),
                            StringAttribute::new(context, "public").into(),
                        ),
                        (
                            Identifier::new(context, "llvm.linkage"),
                            Attribute::parse(context, "#llvm.linkage<private>")
                                .ok_or(Error::ParseAttributeError)?,
                        ),
                    ],
                    location,
                ));
            }
            Some(flat_symbol_ref)
        } else {
            None
        })
    }
}
| rust | Apache-2.0 | f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce | 2026-01-04T20:20:54.031924Z | false |
lambdaclass/cairo_native | https://github.com/lambdaclass/cairo_native/blob/f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce/src/metadata/dup_overrides.rs | src/metadata/dup_overrides.rs | //! # Duplication logic overrides
//!
//! By default, values are copied (aka. `memcpy`'d), but some cases (like arrays, boxes, nullables,
//! dictionaries and some structs and enums) need a clone implementation instead. This metadata is
//! a register of types that require a clone implementation as well as the logic to register and
//! invoke those implementations.
//!
//! ## Clone implementations
//!
//! The clone logic is implemented as a function for each type that requires it. It has to be a
//! function to allow self-referencing types. If we inlined the clone implementations,
//! self-referencing types would generate infinite code thus overflowing the stack when generating
//! code.
//!
//! The generated functions are not public (they are internal) and follow this naming convention:
//!
//! ```text
//! dup${type id}
//! ```
//!
//! where `{type id}` is the numeric value of the `ConcreteTypeId`.
use super::MetadataStorage;
use crate::{
error::{Error, Result},
types::TypeBuilder,
utils::ProgramRegistryExt,
};
use cairo_lang_sierra::{
extensions::core::{CoreLibfunc, CoreType},
ids::ConcreteTypeId,
program_registry::ProgramRegistry,
};
use melior::{
dialect::{cf, func, llvm},
helpers::{BuiltinBlockExt, LlvmBlockExt},
ir::{
attribute::{FlatSymbolRefAttribute, StringAttribute, TypeAttribute},
r#type::FunctionType,
Attribute, Block, BlockLike, Identifier, Location, Module, Region, Value,
},
Context,
};
use std::collections::HashSet;
#[derive(Debug, Default)]
pub struct DupOverridesMeta {
    // Type ids that have (or are in the process of being given) a generated
    // `dup$<id>` clone function.
    overriden_types: HashSet<ConcreteTypeId>,
}
impl DupOverridesMeta {
    /// Register a dup override using a closure.
    ///
    /// This function does several things:
    /// - Registers `DupOverrideMeta` if it wasn't already present.
    /// - If the type id was already registered it returns and does nothing.
    /// - Registers the type (without it being actually registered yet).
    /// - Calls the closure, which returns an `Option<Region>`.
    /// - If the closure returns a region, generates the function implementation.
    /// - If the closure returns `None`, it removes the registry entry for the type.
    ///
    /// The type needs to be registered before calling the closure, otherwise self-referencing
    /// types would cause stack overflow when registering themselves.
    ///
    /// The callback serves two purposes:
    /// - To generate the dup implementation, if necessary.
    /// - To check if we need to generate the implementation (for example, in structs and enums).
    pub(crate) fn register_with<'ctx>(
        context: &'ctx Context,
        module: &Module<'ctx>,
        registry: &ProgramRegistry<CoreType, CoreLibfunc>,
        metadata: &mut MetadataStorage,
        id: &ConcreteTypeId,
        f: impl FnOnce(&mut MetadataStorage) -> Result<Option<Region<'ctx>>>,
    ) -> Result<()> {
        {
            // Mark the type as registered *before* running the callback so
            // that self-referencing types terminate instead of recursing.
            let dup_override_meta = metadata.get_or_insert_with(Self::default);
            if dup_override_meta.overriden_types.contains(id) {
                return Ok(());
            }
            dup_override_meta.overriden_types.insert(id.clone());
        }
        match f(metadata)? {
            Some(region) => {
                let location = Location::unknown(context);
                let ty = registry.build_type(context, module, metadata, id)?;
                let ptr_ty = llvm::r#type::pointer(context, 0);
                let sierra_ty = registry.get_type(id)?;
                let is_memory_allocated = sierra_ty.is_memory_allocated(registry)?;
                let signature_ty = if is_memory_allocated { ptr_ty } else { ty };
                // For memory allocated types, the generated function receives
                // a pointer as argument. However, the user provided callback
                // generates a region that receives a concrete value as
                // argument. To workaround this, we insert a block at the start
                // of the region that dereferences the pointer, and jumps to the
                // user provided implementation.
                if is_memory_allocated {
                    let entry_block = region.first_block().unwrap();
                    let pre_entry_block =
                        region.insert_block_before(entry_block, Block::new(&[(ptr_ty, location)]));
                    pre_entry_block.append_operation(cf::br(
                        &entry_block,
                        &[pre_entry_block.load(context, location, pre_entry_block.arg(0)?, ty)?],
                        location,
                    ));
                }
                // Emit `dup$<id>`: takes the value (or pointer), returns the
                // original and the copy.
                module.body().append_operation(func::func(
                    context,
                    StringAttribute::new(context, &format!("dup${}", id.id)),
                    TypeAttribute::new(
                        FunctionType::new(context, &[signature_ty], &[ty, ty]).into(),
                    ),
                    region,
                    &[
                        (
                            Identifier::new(context, "sym_visibility"),
                            StringAttribute::new(context, "public").into(),
                        ),
                        (
                            Identifier::new(context, "llvm.CConv"),
                            Attribute::parse(context, "#llvm.cconv<fastcc>")
                                .ok_or(Error::ParseAttributeError)?,
                        ),
                        (
                            Identifier::new(context, "llvm.linkage"),
                            Attribute::parse(context, "#llvm.linkage<private>")
                                .ok_or(Error::ParseAttributeError)?,
                        ),
                    ],
                    Location::unknown(context),
                ));
            }
            None => {
                // The following getter should always return a value, but the if statement is kept
                // just in case the meta has been removed (which it shouldn't).
                if let Some(dup_override_meta) = metadata.get_mut::<Self>() {
                    dup_override_meta.overriden_types.remove(id);
                }
            }
        }
        Ok(())
    }
    /// Returns whether a type has a registered dup implementation.
    pub(crate) fn is_overriden(metadata: &mut MetadataStorage, id: &ConcreteTypeId) -> bool {
        metadata
            .get_or_insert_with(Self::default)
            .overriden_types
            .contains(id)
    }
    /// Generates code to invoke a dup implementation for a type, or just returns the same value
    /// twice if no implementation was registered.
    #[allow(clippy::too_many_arguments)]
    pub(crate) fn invoke_override<'ctx, 'this>(
        context: &'ctx Context,
        registry: &ProgramRegistry<CoreType, CoreLibfunc>,
        module: &Module<'ctx>,
        init_block: &'this Block<'ctx>,
        block: &'this Block<'ctx>,
        location: Location<'ctx>,
        metadata: &mut MetadataStorage,
        id: &ConcreteTypeId,
        value: Value<'ctx, 'this>,
    ) -> Result<(Value<'ctx, 'this>, Value<'ctx, 'this>)> {
        Ok(if Self::is_overriden(metadata, id) {
            let ty = registry.build_type(context, module, metadata, id)?;
            let sierra_ty = registry.get_type(id)?;
            let is_memory_allocated = sierra_ty.is_memory_allocated(registry)?;
            // For memory allocated types, the dup function receives a pointer
            // as argument, so we need to alloc the given value onto the stack
            // and pass a pointer to it instead.
            let value = if is_memory_allocated {
                // The init_block is guaranteed to not be executed multiple
                // times on tail-recursive functions.
                let value_ptr = init_block.alloca1(
                    context,
                    location,
                    ty,
                    sierra_ty.layout(registry)?.align(),
                )?;
                block.store(context, location, value_ptr, value)?;
                value_ptr
            } else {
                value
            };
            // Call `dup$<id>` and unpack its two results.
            let result = block.append_operation(func::call(
                context,
                FlatSymbolRefAttribute::new(context, &format!("dup${}", id.id)),
                &[value],
                &[ty, ty],
                location,
            ));
            let original = result.result(0)?.into();
            let copy = result.result(1)?.into();
            (original, copy)
        } else {
            (value, value)
        })
    }
}
| rust | Apache-2.0 | f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce | 2026-01-04T20:20:54.031924Z | false |
lambdaclass/cairo_native | https://github.com/lambdaclass/cairo_native/blob/f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce/src/cache/aot.rs | src/cache/aot.rs | use crate::error::{Error, Result};
use crate::{
context::NativeContext, executor::AotNativeExecutor, module::NativeModule,
utils::SHARED_LIBRARY_EXT, OptLevel,
};
use cairo_lang_sierra::program::Program;
use libloading::Library;
use std::{
collections::HashMap,
fmt::{self, Debug},
hash::Hash,
sync::Arc,
};
/// A cache of AOT-compiled executors, keyed by `K`, sharing one context.
pub struct AotProgramCache<'a, K>
where
    K: PartialEq + Eq + Hash,
{
    // Compilation context shared by every cached program.
    context: &'a NativeContext,
    // Compiled executors, shared out as `Arc` clones on lookup.
    cache: HashMap<K, Arc<AotNativeExecutor>>,
}
impl<'a, K> AotProgramCache<'a, K>
where
K: PartialEq + Eq + Hash,
{
pub fn new(context: &'a NativeContext) -> Self {
Self {
context,
cache: Default::default(),
}
}
pub fn get(&self, key: &K) -> Option<Arc<AotNativeExecutor>> {
self.cache.get(key).cloned()
}
pub fn compile_and_insert(
&mut self,
key: K,
program: &Program,
opt_level: OptLevel,
) -> Result<Arc<AotNativeExecutor>> {
let NativeModule {
module,
registry,
mut metadata,
} = self
.context
.compile(program, false, Some(Default::default()), None)?;
// Compile module into an object.
let object_data = crate::ffi::module_to_object(&module, opt_level, None)?;
// Compile object into a shared library.
let shared_library_path = tempfile::Builder::new()
.prefix("lib")
.suffix(SHARED_LIBRARY_EXT)
.tempfile()?
.into_temp_path();
crate::ffi::object_to_shared_lib(&object_data, &shared_library_path, None)?;
let shared_library = unsafe { Library::new(shared_library_path)? };
let executor = AotNativeExecutor::new(
shared_library,
registry,
metadata.remove().ok_or(Error::MissingMetadata)?,
metadata.remove().unwrap_or_default(),
);
let executor = Arc::new(executor);
self.cache.insert(key, executor.clone());
Ok(executor)
}
}
// Opaque `Debug`: only the type name is printed, the contents are omitted.
// NOTE(review): likely because the inner context/executors don't implement
// `Debug` (or would be too noisy) — confirm.
impl<K> Debug for AotProgramCache<'_, K>
where
    K: PartialEq + Eq + Hash,
{
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.write_str("AotProgramCache")
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::{load_cairo, values::Value};
    use starknet_types_core::felt::Felt;
    // End-to-end check: compiling through the cache yields a runnable executor.
    #[test]
    fn test_aot_compile_and_insert() {
        let native_context = NativeContext::new();
        let mut cache = AotProgramCache::new(&native_context);
        // Trivial program that just returns the constant 42.
        let (_, program) = load_cairo! {
            fn run_test() -> felt252 {
                42
            }
        };
        let function_id = &program.funcs.first().expect("should have a function").id;
        let executor = cache.compile_and_insert((), &program, OptLevel::default());
        let res = executor
            .unwrap()
            .invoke_dynamic(function_id, &[], Some(u64::MAX))
            .expect("should run");
        // After compiling and inserting the program, we should be able to run it.
        assert_eq!(res.return_value, Value::Felt252(Felt::from(42)));
    }
}
| rust | Apache-2.0 | f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce | 2026-01-04T20:20:54.031924Z | false |
lambdaclass/cairo_native | https://github.com/lambdaclass/cairo_native/blob/f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce/src/cache/jit.rs | src/cache/jit.rs | use crate::error::Result;
use crate::{context::NativeContext, executor::JitNativeExecutor, OptLevel};
use cairo_lang_sierra::program::Program;
use std::{
collections::HashMap,
fmt::{self, Debug},
hash::Hash,
sync::Arc,
};
/// A Cache for programs with the same context.
pub struct JitProgramCache<'a, K>
where
    K: Eq + Hash + PartialEq,
{
    // Compilation context shared by every cached program.
    context: &'a NativeContext,
    // Since we already hold a reference to the Context, it doesn't make sense to use thread-safe
    // reference counting. Using a Arc<RwLock<T>> here is useless because NativeExecutor is neither
    // Send nor Sync.
    cache: HashMap<K, Arc<JitNativeExecutor<'a>>>,
}
impl<'a, K> JitProgramCache<'a, K>
where
K: Eq + Hash + PartialEq,
{
pub fn new(context: &'a NativeContext) -> Self {
Self {
context,
cache: Default::default(),
}
}
// Return the native context.
pub const fn context(&self) -> &'a NativeContext {
self.context
}
pub fn get(&self, key: &K) -> Option<Arc<JitNativeExecutor<'a>>> {
self.cache.get(key).cloned()
}
pub fn compile_and_insert(
&mut self,
key: K,
program: &Program,
opt_level: OptLevel,
) -> Result<Arc<JitNativeExecutor<'a>>> {
let module = self
.context
.compile(program, false, Some(Default::default()), None)?;
let executor = JitNativeExecutor::from_native_module(module, opt_level)?;
let executor = Arc::new(executor);
self.cache.insert(key, executor.clone());
Ok(executor)
}
}
// Opaque `Debug`: only the type name is printed, the contents are omitted.
impl<K> Debug for JitProgramCache<'_, K>
where
    K: Eq + Hash + PartialEq,
{
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.write_str("JitProgramCache")
    }
}
#[cfg(test)]
mod test {
    use super::*;
    use crate::load_cairo;
    use std::time::Instant;
    // Checks that cache hits are served faster than a fresh compilation by
    // comparing the duration of `compile_and_insert` against `get`.
    #[test]
    fn test_cache() {
        let (_, program1) = load_cairo!(
            fn main(lhs: felt252, rhs: felt252) -> felt252 {
                lhs + rhs
            }
        );
        let (_, program2) = load_cairo!(
            fn main(lhs: felt252, rhs: felt252) -> felt252 {
                lhs - rhs
            }
        );
        let context = NativeContext::new();
        let mut cache: JitProgramCache<&'static str> = JitProgramCache::new(&context);
        let start = Instant::now();
        cache
            .compile_and_insert("program1", &program1, Default::default())
            .unwrap();
        let diff_1 = Instant::now().duration_since(start);
        let start = Instant::now();
        cache.get(&"program1").expect("exists");
        let diff_2 = Instant::now().duration_since(start);
        // NOTE(review): comparing wall-clock durations makes this assertion
        // timing-sensitive; it could flake on a heavily loaded machine.
        assert!(diff_2 < diff_1);
        let start = Instant::now();
        cache
            .compile_and_insert("program2", &program2, Default::default())
            .unwrap();
        let diff_1 = Instant::now().duration_since(start);
        let start = Instant::now();
        cache.get(&"program2").expect("exists");
        let diff_2 = Instant::now().duration_since(start);
        assert!(diff_2 < diff_1);
    }
}
| rust | Apache-2.0 | f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce | 2026-01-04T20:20:54.031924Z | false |
lambdaclass/cairo_native | https://github.com/lambdaclass/cairo_native/blob/f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce/src/utils/mem_tracing.rs | src/utils/mem_tracing.rs | #![cfg(feature = "with-mem-tracing")]
use libc::{c_void, size_t};
use melior::ExecutionEngine;
use std::cell::UnsafeCell;
// Per-thread allocation trace. `UnsafeCell` is used because the wrapped
// allocator hooks below access it from extern "C" callbacks.
thread_local! {
    static MEM_TRACING: UnsafeCell<MemTracing> = const { UnsafeCell::new(MemTracing::new()) };
}
struct MemTracing {
    // Allocations that have already been freed.
    finished: Vec<AllocTrace>,
    // Live allocations, kept sorted by pointer for binary search.
    pending: Vec<AllocTrace>,
}
// A single tracked allocation: its address and size in bytes.
struct AllocTrace {
    ptr: *mut c_void,
    len: size_t,
}
impl MemTracing {
    pub const fn new() -> Self {
        Self {
            finished: Vec::new(),
            pending: Vec::new(),
        }
    }
    /// Start tracking a new allocation, keeping `pending` sorted by pointer.
    ///
    /// Panics (`unreachable!`) if the pointer is already tracked.
    pub fn push(&mut self, trace: AllocTrace) {
        match self.pending.binary_search_by_key(&trace.ptr, |x| x.ptr) {
            Ok(_) => unreachable!(),
            Err(pos) => self.pending.insert(pos, trace),
        }
    }
    /// Handle a `realloc`: retire the allocation at `ptr` and track `trace`.
    ///
    /// A reallocation to zero bytes behaves like a free: the old allocation is
    /// moved to `finished` and the result pointer is not tracked.
    pub fn update(&mut self, ptr: *mut c_void, trace: AllocTrace) {
        if let Ok(pos) = self.pending.binary_search_by_key(&ptr, |x| x.ptr) {
            let old_trace = self.pending.remove(pos);
            // Check the *new* length: `realloc(ptr, 0)` frees the block.
            //
            // NOTE(review): the previous code shadowed the `trace` parameter
            // with the removed entry, so this check inspected the *old*
            // allocation's length. That left the new allocation untracked
            // whenever the old size was 0, later tripping `finish`'s
            // unreachable!() when it got freed.
            if trace.len == 0 {
                self.finished.push(old_trace);
                return;
            }
        }
        self.push(trace);
    }
    /// Retire a tracked allocation on `free`. Freeing null is a no-op.
    ///
    /// Panics (`unreachable!`) if the pointer was not being tracked.
    pub fn finish(&mut self, ptr: *mut c_void) {
        if ptr.is_null() {
            return;
        }
        match self.pending.binary_search_by_key(&ptr, |x| x.ptr) {
            Ok(pos) => {
                let trace = self.pending.remove(pos);
                self.finished.push(trace);
            }
            Err(_) => unreachable!(),
        }
    }
}
impl AllocTrace {
pub fn new(ptr: *mut c_void, len: size_t) -> Self {
Self { ptr, len }
}
}
/// Redirect the JIT module's `malloc`/`realloc`/`free` symbols to the tracing
/// wrappers below so every allocation goes through `MEM_TRACING`.
pub(crate) fn register_bindings(engine: &ExecutionEngine) {
    unsafe {
        engine.register_symbol(
            "malloc",
            _wrapped_malloc as *const fn(size_t) -> *mut c_void as *mut (),
        );
        engine.register_symbol(
            "realloc",
            _wrapped_realloc as *const fn(*mut c_void, size_t) -> *mut c_void as *mut (),
        );
        engine.register_symbol("free", _wrapped_free as *const fn(*mut c_void) as *mut ());
    }
}
/// Print the current thread's allocation statistics, assert that nothing is
/// still pending (i.e. no leaks), and reset the tracing state.
pub fn report_stats() {
    MEM_TRACING.with(|cell| unsafe {
        let tracing = &mut *cell.get();
        println!();
        println!("[MemTracing] Stats:");
        println!(
            "[MemTracing] Freed allocations: {}",
            tracing.finished.len()
        );
        println!("[MemTracing] Pending allocations:");
        for AllocTrace { ptr, len } in &tracing.pending {
            println!("[MemTracing] - {ptr:?} ({len} bytes)");
        }
        assert!(tracing.pending.is_empty());
        *tracing = MemTracing::new();
    });
}
/// `malloc` replacement that records the new allocation in `MEM_TRACING`.
///
/// # Safety
/// Same contract as `libc::malloc`.
pub(crate) unsafe extern "C" fn _wrapped_malloc(len: size_t) -> *mut c_void {
    let ptr = libc::malloc(len);
    println!("[MemTracing] Allocating ptr {ptr:?} with {len} bytes.");
    MEM_TRACING.with(|x| (*x.get()).push(AllocTrace::new(ptr, len)));
    ptr
}
/// `realloc` replacement that retires the old pointer and tracks the new one.
///
/// # Safety
/// Same contract as `libc::realloc`.
pub(crate) unsafe extern "C" fn _wrapped_realloc(ptr: *mut c_void, len: size_t) -> *mut c_void {
    let new_ptr = libc::realloc(ptr, len);
    println!("[MemTracing] Reallocating {ptr:?} into {new_ptr:?} with {len} bytes.");
    MEM_TRACING.with(|x| (*x.get()).update(ptr, AllocTrace::new(new_ptr, len)));
    new_ptr
}
/// `free` replacement that marks the allocation as finished in `MEM_TRACING`.
///
/// # Safety
/// Same contract as `libc::free`.
pub(crate) unsafe extern "C" fn _wrapped_free(ptr: *mut c_void) {
    if !ptr.is_null() {
        // This print is placed before the actual call to log pointers before double free
        // situations.
        println!("[MemTracing] Freeing {ptr:?}.");
        libc::free(ptr);
        MEM_TRACING.with(|x| (*x.get()).finish(ptr));
    }
}
| rust | Apache-2.0 | f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce | 2026-01-04T20:20:54.031924Z | false |
lambdaclass/cairo_native | https://github.com/lambdaclass/cairo_native/blob/f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce/src/utils/safe_runner.rs | src/utils/safe_runner.rs | //! # Safe runner
//!
//! The safe runner provides a way to run contracts without any risk of crashing due to invalid
//! memory accesses, including stack overflows. The same mechanism can also be used to abort the
//! current Cairo program execution, although it will probably leak some memory.
//!
//! It works by setting a signal handler for the `SIGSEGV` signal, which is generated by the
//! operating system on invalid memory accesses. This signal handler is global, therefore it must be
//! set by the application, rather than the library, by invoking `setup_safe_runner`.
//!
//! Since returning from a `SIGSEGV` signal handler would crash the program, the safe runner will
//! perform a non-local goto using the `setjmp` and `longjmp` functions. The first one configures
//! the jump target while the second performs the goto.
//!
//! Note: The protection is not 100% guaranteed. Since all memory addresses are within the same
//! memory space, some combinations of base addresses plus offsets may end up in a different page
//! allocation.
use libc::{
c_int, sigaction, sigaltstack, siginfo_t, sigset_t, stack_t, ucontext_t, SA_ONSTACK,
SA_SIGINFO, SIGSEGV, SIGSTKSZ,
};
use std::{
cell::UnsafeCell,
mem::MaybeUninit,
ptr::{self, null_mut},
};
use thiserror::Error;
extern "C" {
    // Non-local goto primitives from libc: `setjmp` returns 0 when saving the
    // context and the `val` passed to `longjmp` when jumped back to.
    fn setjmp(env: *mut ()) -> c_int;
    fn longjmp(env: *mut (), val: c_int) -> !;
}
thread_local! {
    // Current runner state: inactive, or active with a saved jump buffer.
    static STATE: UnsafeCell<SafeRunnerState> = const { UnsafeCell::new(SafeRunnerState::Inactive) };
    // Backing storage for the alternate signal stack (see `setup_safe_runner`).
    static STACK: UnsafeCell<SignalStack> = const { UnsafeCell::new(SignalStack(MaybeUninit::uninit())) };
}
// Opaque buffer used to hold the platform's `jmp_buf`.
// NOTE(review): assumes 1024 bytes is large enough for every supported
// target's `jmp_buf` — confirm per platform.
type JmpBuf = MaybeUninit<[u8; 1024]>;
// Raw bytes for the alternate signal stack, over-aligned for the ABI.
#[repr(align(16))]
#[allow(dead_code)]
struct SignalStack(MaybeUninit<[u8; SIGSTKSZ]>);
enum SafeRunnerState {
    Inactive,
    // Active run holding the boxed jump buffer to `longjmp` into on failure.
    Active(Box<JmpBuf>),
}
/// Error returned by `run_safely` when the guarded closure did not finish.
#[derive(Debug, Error)]
pub enum SafeRunnerError {
    #[error("program execution aborted")]
    Aborted,
    #[error("program execution segfaulted")]
    Segfault,
}
/// Configure the current **process** for the [`SafeRunner`].
///
/// Note: It will override the previous signal handler for SIGSEGV.
pub fn setup_safe_runner() {
    unsafe {
        // Install `segfault_handler` for SIGSEGV. SA_SIGINFO selects the
        // three-argument handler form; SA_ONSTACK makes the handler run on the
        // alternate stack registered below, which is what allows handling
        // stack overflows.
        assert_eq!(
            sigaction(
                SIGSEGV,
                &sigaction {
                    sa_sigaction: segfault_handler
                        as *const extern "C" fn(c_int, &siginfo_t, &mut ucontext_t)
                        as usize,
                    sa_mask: MaybeUninit::<sigset_t>::zeroed().assume_init(),
                    sa_flags: SA_ONSTACK | SA_SIGINFO,
                    #[cfg(target_os = "linux")]
                    sa_restorer: None,
                },
                null_mut(),
            ),
            0,
        );
        // Register the thread-local buffer as the alternate signal stack.
        // NOTE(review): `sigaltstack` only affects the calling thread — other
        // threads presumably need their own setup call; confirm intended use.
        assert_eq!(
            sigaltstack(
                &stack_t {
                    ss_sp: STACK.with(|x| x.get()).cast(),
                    ss_flags: 0,
                    ss_size: SIGSTKSZ,
                },
                null_mut(),
            ),
            0,
        );
    }
}
/// Manually trigger the segfault handler, thus aborting the current program.
pub fn abort_safe_runner() -> ! {
    unsafe {
        match STATE.with(|x| &mut *x.get()) {
            // Aborting makes no sense without an active runner to jump to.
            SafeRunnerState::Inactive => {
                panic!("manual abort triggered from outside a safe runner");
            }
            // `2` is decoded by `run_safely` as `SafeRunnerError::Aborted`.
            SafeRunnerState::Active(jmp_buf) => longjmp(jmp_buf.as_mut_ptr().cast(), 2),
        }
    }
}
/// Run a closure within a safe runner.
pub fn run_safely<T>(f: impl FnOnce() -> T) -> Result<T, SafeRunnerError> {
    // Swap in a fresh jump buffer, remembering the previous state so that
    // nested `run_safely` calls restore it on exit.
    let (jmp_buf, prev_state) = STATE.with(|x| unsafe {
        let jmp_buf;
        let prev_state = ptr::replace(
            x.get(),
            SafeRunnerState::Active({
                let mut tmp = Box::new(JmpBuf::uninit());
                jmp_buf = tmp.as_mut_ptr();
                tmp
            }),
        );
        (jmp_buf, prev_state)
    });
    // `setjmp` returns 0 on the initial call; non-zero values come from the
    // `longjmp`s in `segfault_handler` (1) and `abort_safe_runner` (2).
    // NOTE(review): `longjmp` skips Rust destructors on the bypassed frames,
    // so failed runs may leak (as the module docs already state).
    let jmp_ret = unsafe { setjmp(jmp_buf.cast()) };
    let result = match jmp_ret {
        0 => Ok(f()),
        1 => Err(SafeRunnerError::Segfault),
        2 => Err(SafeRunnerError::Aborted),
        _ => unreachable!(),
    };
    // Restore whatever runner state was active before this call.
    STATE.with(|x| unsafe { ptr::write(x.get(), prev_state) });
    result
}
// SIGSEGV handler: jump back into the active runner, or abort the process if
// the fault happened outside of one.
unsafe extern "C" fn segfault_handler(_sig: c_int, _info: &siginfo_t, _context: &mut ucontext_t) {
    match STATE.with(|x| &mut *x.get()) {
        // No runner to recover into: a segfault here is unrecoverable.
        SafeRunnerState::Inactive => libc::abort(),
        // `1` is decoded by `run_safely` as `SafeRunnerError::Segfault`.
        SafeRunnerState::Active(jmp_buf) => longjmp(jmp_buf.as_mut_ptr().cast(), 1),
    }
}
| rust | Apache-2.0 | f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce | 2026-01-04T20:20:54.031924Z | false |
lambdaclass/cairo_native | https://github.com/lambdaclass/cairo_native/blob/f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce/src/utils/program_registry_ext.rs | src/utils/program_registry_ext.rs | use crate::{error::Result, metadata::MetadataStorage, types::TypeBuilder};
use cairo_lang_sierra::{
extensions::core::{CoreLibfunc, CoreType},
ids::ConcreteTypeId,
program_registry::ProgramRegistry,
};
use melior::{
ir::{Module, Type},
Context,
};
use std::alloc::Layout;
/// Convenience helpers over a Sierra program registry for building MLIR types
/// from concrete type ids.
pub trait ProgramRegistryExt {
    /// Build the MLIR type corresponding to `id`.
    fn build_type<'ctx>(
        &self,
        context: &'ctx Context,
        module: &Module<'ctx>,
        metadata: &mut MetadataStorage,
        id: &ConcreteTypeId,
    ) -> Result<Type<'ctx>>;
    /// Build the MLIR type corresponding to `id` together with its memory
    /// layout.
    fn build_type_with_layout<'ctx>(
        &self,
        context: &'ctx Context,
        module: &Module<'ctx>,
        metadata: &mut MetadataStorage,
        id: &ConcreteTypeId,
    ) -> Result<(Type<'ctx>, Layout)>;
}
impl ProgramRegistryExt for ProgramRegistry<CoreType, CoreLibfunc> {
    /// Build the MLIR type for `id` by delegating to the concrete type's
    /// builder.
    fn build_type<'ctx>(
        &self,
        context: &'ctx Context,
        module: &Module<'ctx>,
        metadata: &mut MetadataStorage,
        id: &ConcreteTypeId,
    ) -> Result<Type<'ctx>> {
        let concrete_type = self.get_type(id)?;
        concrete_type.build(context, module, self, metadata, id)
    }
    /// Build the MLIR type for `id` and compute its layout in one lookup.
    fn build_type_with_layout<'ctx>(
        &self,
        context: &'ctx Context,
        module: &Module<'ctx>,
        metadata: &mut MetadataStorage,
        id: &ConcreteTypeId,
    ) -> Result<(Type<'ctx>, Layout)> {
        let concrete_type = self.get_type(id)?;
        let ty = concrete_type.build(context, module, self, metadata, id)?;
        let layout = concrete_type.layout(self)?;
        Ok((ty, layout))
    }
}
| rust | Apache-2.0 | f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce | 2026-01-04T20:20:54.031924Z | false |
lambdaclass/cairo_native | https://github.com/lambdaclass/cairo_native/blob/f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce/src/utils/trace_dump.rs | src/utils/trace_dump.rs | #![cfg(feature = "with-trace-dump")]
//! The trace dump feature is used to generate the execution trace of a sierra program.
//!
//! Take, for example, the following sierra code:
//!
//! ```sierra
//! const_as_immediate<Const<felt252, 10>>() -> ([0]);
//! const_as_immediate<Const<felt252, 20>>() -> ([1]);
//! store_temp<felt252>([0]) -> ([0]);
//! felt252_add([0], [1]) -> ([2]);
//! store_temp<felt252>([2]) -> ([2]);
//! return([2]);
//! ```
//!
//! The compiler will call `build_state_snapshot` right before each statement.
//! Iterating every variable on the current scope and saving its value on a global
//! static variable.
//!
//! At the end of the execution, the full trace dump can be retrieved, which
//! looks something like this:
//!
//! ```json
//! {
//! "states": [
//! {
//! "statementIdx": 0,
//! "preStateDump": {}
//! },
//! {
//! "statementIdx": 1,
//! "preStateDump": {
//! "0": { "Felt": "0xa" }
//! }
//! },
//! {
//! "statementIdx": 2,
//! "preStateDump": {
//! "0": { "Felt": "0xa" },
//! "1": { "Felt": "0x14" }
//! }
//! },
//! ...
//! ]
//! }
//! ```
//!
//! To support this feature even on the context of starknet contracts, then we
//! must support building a trace dump for multiple programs at the same time, as
//! starknet contracts can themselves call another contracts. To achieve this, we
//! need two important elements.
//!
//! 1. The global static variable must be able to store multiple trace dumps at the
//! same time. We have a global static hashmap from trace id to trace dump content.
//! See `TRACE_DUMP`.
//!
//! 2. We must store somewhere the ID of the current trace dump, and update it
//!    accordingly when switching between contract executors.
//!
//! Both these elements must be properly setup before running the executor. See
//! `cairo-native-run` for an example on how to do it. You can also check on
//! this file's integration tests.
//!
//! When executing starknet contracts, the trace id must be set right
//! before each execution, restoring the previous trace id after the execution.
use std::collections::HashMap;
use cairo_lang_sierra::{
extensions::core::{CoreLibfunc, CoreType},
ids::{ConcreteTypeId, VarId},
program::StatementIdx,
program_registry::ProgramRegistry,
};
use cairo_lang_utils::ordered_hash_map::OrderedHashMap;
use melior::{
helpers::LlvmBlockExt,
ir::{BlockRef, Location, Module, Value, ValueLike},
Context,
};
use crate::{
metadata::{trace_dump::TraceDumpMeta, MetadataStorage},
types::TypeBuilder,
};
/// Emits MLIR that records a snapshot of all live Sierra variables right
/// before the statement at `statement_idx` executes.
///
/// Every variable in `state` is spilled to a stack slot and handed to the
/// trace-dump runtime via `TraceDumpMeta::build_state`; finally,
/// `TraceDumpMeta::build_push` appends the collected state (tagged with
/// `statement_idx`) to the active trace dump.
///
/// # Panics
///
/// Panics if a variable's type is missing from `var_types` or `registry`, or
/// if emitting any of the MLIR operations fails.
#[allow(clippy::too_many_arguments)]
pub fn build_state_snapshot(
    context: &Context,
    registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    module: &Module,
    block: &BlockRef,
    location: Location,
    metadata: &mut MetadataStorage,
    statement_idx: StatementIdx,
    state: &OrderedHashMap<VarId, Value>,
    var_types: &HashMap<VarId, ConcreteTypeId>,
) {
    let trace_dump = metadata.get_or_insert_with(TraceDumpMeta::default);
    for (var_id, value) in state.iter() {
        let value_type_id = var_types.get(var_id).unwrap();
        let value_type = registry.get_type(value_type_id).unwrap();
        let layout = value_type.layout(registry).unwrap();
        // Spill the SSA value to the stack so the runtime can read it through
        // a pointer.
        let value_ptr = block
            .alloca1(context, location, value.r#type(), layout.align())
            .unwrap();
        block.store(context, location, value_ptr, *value).unwrap();
        trace_dump
            .build_state(
                context,
                module,
                block,
                var_id,
                value_type_id,
                value_ptr,
                location,
            )
            .unwrap();
    }
    // Commit the captured variables as the pre-state of `statement_idx`.
    trace_dump
        .build_push(context, module, block, statement_idx, location)
        .unwrap();
}
#[cfg(test)]
mod tests {
    use std::sync::Arc;
    use cairo_lang_sierra::{program::Program, program_registry::ProgramRegistry};
    use pretty_assertions_sorted::assert_eq_sorted;
    use rstest::{fixture, rstest};
    use sierra_emu::{starknet::StubSyscallHandler, VirtualMachine};
    use crate::{
        context::NativeContext,
        executor::AotNativeExecutor,
        load_cairo,
        metadata::trace_dump::{
            trace_dump_runtime::{TraceDump, TRACE_DUMP},
            TraceBinding,
        },
        OptLevel,
    };
    /// Fibonacci program used as the trace-dump test subject.
    #[fixture]
    fn program() -> Program {
        let (_, program) = load_cairo! {
            use core::felt252;
            fn main() -> felt252 {
                let n = 10;
                let result = fib(1, 1, n);
                result
            }
            fn fib(a: felt252, b: felt252, n: felt252) -> felt252 {
                match n {
                    0 => a,
                    _ => fib(b, a + b, n - 1),
                }
            }
        };
        program
    }
    /// Compiles and runs the program natively with trace dumping enabled, then
    /// compares the recorded trace against the reference trace produced by the
    /// sierra emulator for the same program.
    #[rstest]
    fn test_program(program: Program) {
        let entrypoint_function = &program
            .funcs
            .iter()
            .find(|x| {
                x.id.debug_name
                    .as_ref()
                    .map(|x| x.contains("main"))
                    .unwrap_or_default()
            })
            .unwrap()
            .clone();
        let native_context = NativeContext::new();
        let module = native_context
            .compile(&program, false, Some(Default::default()), None)
            .expect("failed to compile context");
        let executor = AotNativeExecutor::from_native_module(module, OptLevel::default()).unwrap();
        // Point the compiled code's trace-id global at trace dump 0 (see the
        // module docs: the id must be set before executing).
        if let Some(trace_id) = executor.find_symbol_ptr(TraceBinding::TraceId.symbol()) {
            let trace_id = trace_id.cast::<u64>();
            unsafe { *trace_id = 0 };
        }
        TRACE_DUMP
            .lock()
            .unwrap()
            .insert(0, TraceDump::new(ProgramRegistry::new(&program).unwrap()));
        executor
            .invoke_dynamic(&entrypoint_function.id, &[], Some(u64::MAX))
            .unwrap();
        let native_trace = TRACE_DUMP
            .lock()
            .unwrap()
            .values()
            .next()
            .unwrap()
            .trace
            .clone();
        // Run the same program on the sierra emulator to obtain the expected trace.
        let mut vm = VirtualMachine::new(Arc::new(program));
        let initial_gas = u64::MAX;
        let args = [];
        vm.call_program(entrypoint_function, initial_gas, args.into_iter());
        let syscall_handler = &mut StubSyscallHandler::default();
        let emu_trace = vm.run_with_trace(syscall_handler);
        assert_eq_sorted!(emu_trace, native_trace);
    }
}
| rust | Apache-2.0 | f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce | 2026-01-04T20:20:54.031924Z | false |
lambdaclass/cairo_native | https://github.com/lambdaclass/cairo_native/blob/f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce/src/utils/walk_ir.rs | src/utils/walk_ir.rs | use llvm_sys::{
core::{
LLVMGetFirstBasicBlock, LLVMGetFirstFunction, LLVMGetFirstInstruction,
LLVMGetNextBasicBlock, LLVMGetNextFunction, LLVMGetNextInstruction,
},
prelude::{LLVMModuleRef, LLVMValueRef},
LLVMBasicBlock, LLVMValue,
};
use melior::ir::{BlockLike, BlockRef, OperationRef};
/// Traverses the given operation tree in preorder.
///
/// The root operation is visited first, then every operation contained in any
/// of its regions. Calls `f` once per operation encountered.
pub fn walk_mlir_operations(top_op: OperationRef, f: &mut impl FnMut(OperationRef)) {
    f(top_op);
    for region in top_op.regions() {
        // Blocks form an intrusive list; walk it manually.
        let mut cursor = region.first_block();
        while let Some(block) = cursor {
            if let Some(first_op) = block.first_operation() {
                walk_mlir_block_operations(first_op, f);
            }
            cursor = block.next_in_region();
        }
    }
}
/// Traverses `operation` and all operations that follow it in its block.
///
/// Calls `f` on each operation encountered, recursing into nested regions
/// through [`walk_mlir_operations`].
///
/// NOTE: The lifetime of each operation is bound to the previous operation,
/// so the only way found to comply with the borrow checker was to keep this
/// function recursive, which convinces the compiler that the full operation
/// chain is in scope. This has been fixed in the latest melior release, but
/// updating the dependency requires us to update to LLVM 20.
pub fn walk_mlir_block_operations(operation: OperationRef, f: &mut impl FnMut(OperationRef)) {
    // Visit this operation (and its nested regions) first...
    walk_mlir_operations(operation, f);
    // ...then recurse on the rest of the block, if any.
    match operation.next_in_block() {
        Some(next_operation) => walk_mlir_block_operations(next_operation, f),
        None => {}
    }
}
/// Traverses from `start_block` to `end_block` (inclusive) in preorder.
///
/// Calls `f` on each operation encountered. Iteration follows the region's
/// block list, so `end_block` is expected to come after `start_block`.
pub fn walk_mlir_block(
    start_block: BlockRef,
    end_block: BlockRef,
    f: &mut impl FnMut(OperationRef),
) {
    let mut cursor = Some(start_block);
    while let Some(block) = cursor {
        if let Some(first_op) = block.first_operation() {
            walk_mlir_block_operations(first_op, f);
        }
        // Stop once `end_block` has been processed (the range is inclusive).
        cursor = if block == end_block {
            None
        } else {
            block.next_in_region()
        };
    }
}
/// Traverses the whole LLVM Module, calling `f` on each instruction.
///
/// As this function receives a closure rather than a function, there is no need
/// to receive initial data, and can instead modify the captured environment.
///
/// # Safety
///
/// `llvm_module` must be a valid, live `LLVMModuleRef`; the pointers returned
/// by the LLVM C API iteration functions are passed straight back into LLVM.
pub unsafe fn walk_llvm_instructions(llvm_module: LLVMModuleRef, mut f: impl FnMut(LLVMValueRef)) {
    // The LLVM C API signals the end of every list with a null pointer; map
    // that convention to `Option` once, instead of duplicating per-type
    // closures for values and basic blocks.
    fn non_null<T>(ptr: *mut T) -> Option<*mut T> {
        if ptr.is_null() {
            None
        } else {
            Some(ptr)
        }
    }
    // Walk functions -> basic blocks -> instructions, in module order.
    let mut current_function = non_null(LLVMGetFirstFunction(llvm_module));
    while let Some(function) = current_function {
        let mut current_block = non_null(LLVMGetFirstBasicBlock(function));
        while let Some(block) = current_block {
            let mut current_instruction = non_null(LLVMGetFirstInstruction(block));
            while let Some(instruction) = current_instruction {
                f(instruction);
                current_instruction = non_null(LLVMGetNextInstruction(instruction));
            }
            current_block = non_null(LLVMGetNextBasicBlock(block));
        }
        current_function = non_null(LLVMGetNextFunction(function));
    }
}
| rust | Apache-2.0 | f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce | 2026-01-04T20:20:54.031924Z | false |
lambdaclass/cairo_native | https://github.com/lambdaclass/cairo_native/blob/f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce/src/utils/testing.rs | src/utils/testing.rs | #![cfg(any(test, feature = "testing"))]
use cairo_lang_compiler::CompilerConfig;
use cairo_lang_filesystem::{db::init_dev_corelib, ids::CrateInput};
use cairo_lang_lowering::utils::InliningStrategy;
use cairo_lang_sierra::{program::Program, ProgramParser};
use cairo_lang_starknet::{compile::compile_contract_in_prepared_db, starknet_plugin_suite};
use itertools::Itertools;
use starknet_types_core::felt::Felt;
use std::{fs, path::Path, sync::Arc};
use crate::{
context::NativeContext, execution_result::ExecutionResult, executor::JitNativeExecutor,
starknet_stub::StubSyscallHandler, utils::*, values::Value,
};
use cairo_lang_compiler::{
compile_prepared_db, db::RootDatabase, diagnostics::DiagnosticsReporter, project::setup_project,
};
use cairo_lang_starknet_classes::contract_class::ContractClass;
use std::env::var;
/// Compiles the given Cairo tokens via `crate::utils::testing::load_cairo_str`,
/// returning the module name and the compiled sierra program.
#[macro_export]
macro_rules! load_cairo {
    ( $( $program:tt )+ ) => {
        $crate::utils::testing::load_cairo_str(stringify!($($program)+))
    };
}
/// Like `load_cairo!`, but compiles with the Starknet plugin suite enabled.
#[macro_export]
macro_rules! load_starknet {
    ( $( $program:tt )+ ) => {
        $crate::utils::testing::load_starknet_str(stringify!($($program)+))
    };
}
/// Compiles the given tokens as a Starknet contract, returning the module
/// name and the compiled contract class.
#[macro_export]
macro_rules! load_starknet_contract {
    ( $( $program:tt )+ ) => {
        $crate::utils::testing::load_starknet_contract_str(stringify!($($program)+))
    };
}
// Helper macros for faster testing.
/// Builds a `Value::Struct` from the given field values.
#[macro_export]
macro_rules! jit_struct {
    ($($y:expr),* $(,)? ) => {
        $crate::values::Value::Struct {
            fields: vec![$($y), *],
            debug_name: None
        }
    };
}
/// Builds a `Value::Enum` with the given tag and payload.
#[macro_export]
macro_rules! jit_enum {
    ( $tag:expr, $value:expr ) => {
        $crate::values::Value::Enum {
            tag: $tag,
            value: Box::new($value),
            debug_name: None,
        }
    };
}
/// Builds a `Value::Felt252Dict` from `key => value` pairs.
#[macro_export]
macro_rules! jit_dict {
    ( $($key:expr $(=>)+ $value:expr),* $(,)? ) => {
        $crate::values::Value::Felt252Dict {
            value: {
                let mut map = std::collections::HashMap::new();
                $(map.insert($key.into(), $value.into());)*
                map
            },
            debug_name: None,
        }
    };
}
/// Builds the enum value produced by a Cairo panic (tag 1), optionally
/// wrapping a payload such as the serialized error data.
#[macro_export]
macro_rules! jit_panic {
    ( $($value:expr)? ) => {
        $crate::jit_enum!(1, $crate::jit_struct!(
            $crate::jit_struct!(),
            [$($value), *].into()
        ))
    };
}
/// Builds the panic value for a string message, serialized like a Cairo
/// `ByteArray` (see `panic_byte_array`).
#[macro_export]
macro_rules! jit_panic_byte_array {
    ( $value:expr ) => {
        $crate::jit_enum!(
            1,
            $crate::jit_struct!(
                $crate::jit_struct!(),
                $crate::utils::testing::panic_byte_array($value).into()
            )
        )
    };
}
/// Compile a cairo program found at the given path to sierra.
///
/// Files with a `.cairo` extension (case-insensitive) are compiled with the
/// Cairo compiler; any other file is parsed directly as textual sierra.
///
/// # Errors
///
/// Returns a `ProgramParser` error if compilation or parsing fails, or an I/O
/// error if a sierra file cannot be read.
pub fn cairo_to_sierra(program: &Path) -> crate::error::Result<Arc<Program>> {
    // `OsStr::eq_ignore_ascii_case` already performs a case-insensitive
    // comparison, so no lowercased copy of the extension is needed.
    let is_cairo_source = program
        .extension()
        .map(|ext| ext.eq_ignore_ascii_case("cairo"))
        .unwrap_or(false);
    if is_cairo_source {
        cairo_lang_compiler::compile_cairo_project_at_path(
            program,
            CompilerConfig {
                replace_ids: true,
                ..Default::default()
            },
            InliningStrategy::Default,
        )
        .map_err(|err| crate::error::Error::ProgramParser(err.to_string()))
    } else {
        let source = std::fs::read_to_string(program)?;
        ProgramParser::new()
            .parse(&source)
            .map_err(|err| crate::error::Error::ProgramParser(err.to_string()))
    }
    .map(Arc::new)
}
/// Compiles Cairo source text, returning the generated module name and the
/// compiled sierra `Program`.
pub fn load_cairo_str(program_str: &str) -> (String, Program) {
    compile_program(program_str, RootDatabase::default())
}
/// Same as [`load_cairo_str`], but with the Starknet plugin suite enabled so
/// Starknet-specific syntax is available.
pub fn load_starknet_str(program_str: &str) -> (String, Program) {
    compile_program(
        program_str,
        RootDatabase::builder()
            .with_default_plugin_suite(starknet_plugin_suite())
            .build()
            .unwrap(),
    )
}
/// Compiles Starknet contract source text into a `ContractClass`, returning
/// the generated module name alongside it.
pub fn load_starknet_contract_str(program_str: &str) -> (String, ContractClass) {
    compile_contract(
        program_str,
        RootDatabase::builder()
            .with_default_plugin_suite(starknet_plugin_suite())
            .build()
            .unwrap(),
    )
}
/// Compiles Starknet contract source into a `ContractClass` using the given
/// (plugin-prepared) database.
///
/// The source is written to a temporary `.cairo` file, the dev corelib is
/// loaded from `corelib/src` under `CARGO_MANIFEST_DIR`, and the stem of the
/// temporary file is returned as the module name.
///
/// # Panics
///
/// Panics on any I/O, project-setup, or compilation failure (test-only code).
pub(crate) fn compile_contract(program_str: &str, mut db: RootDatabase) -> (String, ContractClass) {
    let mut program_file = tempfile::Builder::new()
        .prefix("test_")
        .suffix(".cairo")
        .tempfile()
        .unwrap();
    fs::write(&mut program_file, program_str).unwrap();
    init_dev_corelib(
        &mut db,
        Path::new(&var("CARGO_MANIFEST_DIR").unwrap()).join("corelib/src"),
    );
    let main_crate_ids = {
        let main_crate_inputs = setup_project(&mut db, program_file.path()).unwrap();
        CrateInput::into_crate_ids(&db, main_crate_inputs)
    };
    let contract = compile_contract_in_prepared_db(
        &db,
        None,
        main_crate_ids,
        CompilerConfig {
            diagnostics_reporter: DiagnosticsReporter::stderr(),
            replace_ids: true,
            ..Default::default()
        },
    )
    .unwrap();
    // The module name is the temp file's stem (e.g. "test_abc123").
    let module_name = program_file.path().with_extension("");
    let module_name = module_name.file_name().unwrap().to_str().unwrap();
    (module_name.to_string(), contract)
}
/// Compiles plain Cairo source into a sierra `Program` using the given
/// database, mirroring [`compile_contract`] for non-contract code.
///
/// Returns the generated module name (the temp file's stem) and the program.
///
/// # Panics
///
/// Panics on any I/O, project-setup, or compilation failure (test-only code).
pub(crate) fn compile_program(program_str: &str, mut db: RootDatabase) -> (String, Program) {
    let mut program_file = tempfile::Builder::new()
        .prefix("test_")
        .suffix(".cairo")
        .tempfile()
        .unwrap();
    fs::write(&mut program_file, program_str).unwrap();
    init_dev_corelib(
        &mut db,
        Path::new(&var("CARGO_MANIFEST_DIR").unwrap()).join("corelib/src"),
    );
    let main_crate_ids = {
        let main_crate_inputs = setup_project(&mut db, program_file.path()).unwrap();
        CrateInput::into_crate_ids(&db, main_crate_inputs)
    };
    let sierra_program_with_dbg = compile_prepared_db(
        &db,
        main_crate_ids,
        CompilerConfig {
            diagnostics_reporter: DiagnosticsReporter::stderr(),
            replace_ids: true,
            ..Default::default()
        },
    )
    .unwrap();
    let module_name = program_file.path().with_extension("");
    let module_name = module_name.file_name().unwrap().to_str().unwrap();
    (module_name.to_string(), sierra_program_with_dbg.program)
}
/// JIT-compiles `program` and invokes `entry_point` with `args`, returning the
/// raw `ExecutionResult`.
///
/// `entry_point` is the bare function name; it is expanded to the
/// `module::module::name` path produced by the test loaders. A stub syscall
/// handler is used, and gas is effectively unlimited (`u64::MAX`).
///
/// # Panics
///
/// Panics if the entry point is missing or compilation/invocation fails.
pub fn run_program(
    program: &(String, Program),
    entry_point: &str,
    args: &[Value],
) -> ExecutionResult {
    // `{0}::{0}::{1}` matches the path layout emitted by the loader macros.
    let entry_point = format!("{0}::{0}::{1}", program.0, entry_point);
    let program = &program.1;
    let entry_point_id = &program
        .funcs
        .iter()
        .find(|x| x.id.debug_name.as_deref() == Some(&entry_point))
        .expect("Test program entry point not found.")
        .id;
    let context = NativeContext::new();
    let module = context
        .compile(program, false, Some(Default::default()), None)
        .expect("Could not compile test program to MLIR.")
    let executor = JitNativeExecutor::from_native_module(module, OptLevel::Less).unwrap();
    executor
        .invoke_dynamic_with_syscall_handler(
            entry_point_id,
            args,
            Some(u64::MAX),
            &mut StubSyscallHandler::default(),
        )
        .unwrap()
}
/// Runs `entry_point` through [`run_program`] and asserts that its return
/// value equals `output`.
#[track_caller]
pub fn run_program_assert_output(
    program: &(String, Program),
    entry_point: &str,
    args: &[Value],
    output: Value,
) {
    assert_eq!(
        run_program(program, entry_point, args).return_value,
        output
    );
}
/// Serializes a message into a vector of felts, the same way that Cairo
/// serializes byte arrays. Used for asserting panic message on tests.
///
/// The layout is: magic, number of full 31-byte words, the full words, the
/// pending (partial) word, and the pending word's length in bytes.
///
/// https://github.com/starkware-libs/cairo/tree/v2.12.3/corelib/src/debug.cairo#L142
pub fn panic_byte_array(message: &str) -> Vec<Felt> {
    // Prepend byte array magic, used to identify serialized `ByteArray` variables.
    // https://github.com/starkware-libs/cairo/tree/v2.12.3/corelib/src/byte_array.cairo#L64
    let mut array = vec![Felt::from_hex_unchecked(
        "0x46a6158a16a947e5916b2a2ca68501a45e93d7110e81aa2d6438b1c57c879a3",
    )];
    // Split the message into 31-byte words (the most that fit in a `bytes31`).
    let chunk_iter = message.bytes().chunks(31);
    let mut chunks = chunk_iter
        .into_iter()
        .map(|chunk| chunk.collect_vec())
        .collect_vec();
    // The trailing chunk is the pending word only when it is SHORTER than 31
    // bytes: Cairo's `ByteArray` invariant requires `pending_word_len < 31`,
    // so a full 31-byte trailing chunk must stay in the data array. (Always
    // popping the last chunk produced an invalid serialization for messages
    // whose length is a non-zero multiple of 31.)
    let pending = match chunks.last() {
        Some(last) if last.len() < 31 => chunks.pop().unwrap_or_default(),
        _ => Vec::new(),
    };
    // Serialize length of the byte array.
    array.push(chunks.len().into());
    // Serialize each full word.
    for chunk in &chunks {
        array.push(Felt::from_bytes_be_slice(chunk));
    }
    // Serialize last word with its length.
    array.extend_from_slice(&[Felt::from_bytes_be_slice(&pending), pending.len().into()]);
    array
}
| rust | Apache-2.0 | f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce | 2026-01-04T20:20:54.031924Z | false |
lambdaclass/cairo_native | https://github.com/lambdaclass/cairo_native/blob/f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce/src/utils/range_ext.rs | src/utils/range_ext.rs | use cairo_lang_sierra::extensions::utils::Range;
use num_bigint::{BigInt, BigUint, Sign};
use num_traits::One;
/// Extension methods to compute the bit width needed to represent every value
/// of a sierra [`Range`].
pub trait RangeExt {
    /// Width in bits when the offset is zero (aka. the natural representation).
    fn zero_based_bit_width(&self) -> u32;
    /// Width in bits when the offset is not necessarily zero (aka. the compact representation).
    fn offset_bit_width(&self) -> u32;
}
impl RangeExt for Range {
    fn zero_based_bit_width(&self) -> u32 {
        // NOTE: `upper` is exclusive, hence the `upper - 1` computations below.
        //
        // Formula for unsigned integers:
        // x.bits()
        //
        // Formula for signed values:
        // - Positive: (x.magnitude() + BigUint::one()).bits()
        // - Negative: (x.magnitude() - BigUint::one()).bits() + 1
        // - Zero: 0
        let width = if self.lower.sign() == Sign::Minus {
            // Signed case: the width is the max of what the (negative) lower
            // bound and the (inclusive) upper bound each require.
            let lower_width = (self.lower.magnitude() - BigUint::one()).bits() + 1;
            let upper_width = {
                let upper = &self.upper - &BigInt::one();
                match upper.sign() {
                    Sign::Minus => (upper.magnitude() - BigUint::one()).bits() + 1,
                    Sign::NoSign => 0,
                    Sign::Plus => (upper.magnitude() + BigUint::one()).bits(),
                }
            };
            lower_width.max(upper_width) as u32
        } else {
            // Unsigned case: only the largest value matters.
            (&self.upper - &BigInt::one()).bits() as u32
        };
        // FIXME: Workaround for segfault in canonicalization (including LLVM 19).
        width.max(1)
    }
    fn offset_bit_width(&self) -> u32 {
        // Values are stored relative to `lower`, so only the range's size
        // determines the width.
        // FIXME: Workaround for segfault in canonicalization (including LLVM 19).
        ((self.size() - BigInt::one()).bits() as u32).max(1)
    }
}
| rust | Apache-2.0 | f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce | 2026-01-04T20:20:54.031924Z | false |
lambdaclass/cairo_native | https://github.com/lambdaclass/cairo_native/blob/f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce/src/utils/sierra_gen.rs | src/utils/sierra_gen.rs | #![cfg(test)]
use cairo_lang_sierra::{
extensions::{
branch_align::BranchAlignLibfunc,
core::CoreType,
enm::{EnumInitLibfunc, EnumType},
lib_func::{SierraApChange, SignatureSpecializationContext},
structure::{StructConstructLibfunc, StructType},
type_specialization_context::TypeSpecializationContext,
types::TypeInfo,
ConcreteType, GenericLibfunc, GenericType, NamedLibfunc, NamedType,
},
ids::{
ConcreteLibfuncId, ConcreteTypeId, FunctionId, GenericLibfuncId, GenericTypeId, UserTypeId,
VarId,
},
program::{
BranchInfo, BranchTarget, ConcreteLibfuncLongId, DeclaredTypeInfo, Function,
FunctionSignature, GenericArg, Invocation, LibfuncDeclaration, Param, Program, Statement,
StatementIdx, TypeDeclaration,
},
};
use std::{
cell::{OnceCell, RefCell},
iter::once,
marker::PhantomData,
};
/// Helper to generate a minimal sierra program exercising a single libfunc of
/// kind `T`.
///
/// The generated program declares the libfunc and wraps it in one entry point
/// whose signature mirrors the libfunc's, packing branch results as needed.
#[derive(Debug)]
pub struct SierraGenerator<T>
where
    T: GenericLibfunc,
{
    // Program being built incrementally.
    program: Program,
    // `T` is only used during specialization; no value of it is stored.
    phantom: PhantomData<T>,
}
impl<T> Default for SierraGenerator<T>
where
    T: GenericLibfunc,
{
    /// Creates a generator holding a completely empty sierra program.
    fn default() -> Self {
        let program = Program {
            type_declarations: vec![],
            libfunc_declarations: vec![],
            statements: vec![],
            funcs: vec![],
        };
        Self {
            program,
            phantom: PhantomData,
        }
    }
}
impl<T> SierraGenerator<T>
where
T: GenericLibfunc,
{
    /// Builds the program for the libfunc's single supported generic id.
    ///
    /// # Panics
    ///
    /// Panics if `T` supports more than one generic id; use
    /// `build_with_generic_id` directly in that case.
    pub fn build(self, generic_args: impl Into<Vec<GenericArg>>) -> Program {
        match T::supported_ids().as_slice() {
            [generic_id] => self.build_with_generic_id(generic_id.clone(), generic_args.into()),
            _ => panic!("multiple generic ids detected, please use build_with_generic_id directly"),
        }
    }
    /// Builds a one-function program that invokes a single specialization of
    /// the libfunc identified by `generic_id` with `generic_args`.
    ///
    /// The entry point takes the libfunc's parameters, invokes it, packs each
    /// branch's non-builtin results (tupling multiple values, enum-wrapping
    /// multi-branch results), and returns them after the builtins.
    pub fn build_with_generic_id(
        mut self,
        generic_id: GenericLibfuncId,
        generic_args: impl Into<Vec<GenericArg>>,
    ) -> Program {
        let generic_args = generic_args.into();
        let libfunc = T::by_id(&generic_id).unwrap();
        // Specializing the signature also declares (via the wrapper) any
        // concrete types the libfunc needs.
        let libfunc_signature = libfunc
            .specialize_signature(
                &SierraGeneratorWrapper(RefCell::new(&mut self)),
                &generic_args,
            )
            .unwrap();
        // Push the libfunc declaration.
        let libfunc_id = self
            .push_libfunc_declaration(ConcreteLibfuncLongId {
                generic_id,
                generic_args: generic_args.to_vec(),
            })
            .clone();
        // Generate packed types.
        // Leading builtin parameters (recognized by generic type name) are
        // threaded through unchanged to the return values.
        let num_builtins = libfunc_signature
            .param_signatures
            .iter()
            .take_while(|param_signature| {
                let long_id = &self
                    .program
                    .type_declarations
                    .iter()
                    .find(|type_declaration| type_declaration.id == param_signature.ty)
                    .unwrap()
                    .long_id;
                matches!(
                    long_id.generic_id.0.as_str(),
                    "Bitwise"
                        | "EcOp"
                        | "GasBuiltin"
                        | "BuiltinCosts"
                        | "RangeCheck"
                        | "RangeCheck96"
                        | "Pedersen"
                        | "Poseidon"
                        | "Coupon"
                        | "System"
                        | "SegmentArena"
                        | "AddMod"
                        | "MulMod"
                )
            })
            .count();
        let mut return_types = Vec::with_capacity(libfunc_signature.branch_signatures.len());
        let mut packed_unit_type_id = None;
        for branch_signature in &libfunc_signature.branch_signatures {
            // Every branch must return the builtins first, in parameter order.
            assert!(branch_signature
                .vars
                .iter()
                .zip(libfunc_signature.param_signatures.iter().take(num_builtins))
                .all(|(lhs, rhs)| lhs.ty == rhs.ty));
            return_types.push(match branch_signature.vars.len() - num_builtins {
                0 => match libfunc_signature.branch_signatures.len() {
                    1 => ResultVarType::Empty(None),
                    // Multi-branch empty results need a unit struct to put in
                    // the result enum; declare it lazily and only once.
                    _ => ResultVarType::Empty(Some(
                        packed_unit_type_id
                            .get_or_insert_with(|| {
                                self.push_type_declaration::<StructType>(&[GenericArg::UserType(
                                    UserTypeId::from_string("Tuple"),
                                )])
                                .clone()
                            })
                            .clone(),
                    )),
                },
                1 => ResultVarType::Single(branch_signature.vars[num_builtins].ty.clone()),
                // Multiple values get packed into a tuple struct.
                _ => ResultVarType::Multi(
                    self.push_type_declaration::<StructType>(
                        once(GenericArg::UserType(UserTypeId::from_string("Tuple")))
                            .chain(
                                branch_signature
                                    .vars
                                    .iter()
                                    .skip(num_builtins)
                                    .map(|var_info| GenericArg::Type(var_info.ty.clone())),
                            )
                            .collect::<Vec<_>>(),
                    )
                    .clone(),
                ),
            });
        }
        // Generate switch type: a single branch returns its (packed) value
        // directly; multiple branches are wrapped in an enum, one variant per
        // branch.
        let return_type = match return_types.len() {
            1 => match return_types[0].clone() {
                ResultVarType::Empty(ty) => ty.unwrap().clone(),
                ResultVarType::Single(ty) => ty.clone(),
                ResultVarType::Multi(ty) => ty.clone(),
            },
            _ => self
                .push_type_declaration::<EnumType>(
                    once(GenericArg::UserType(UserTypeId::from_string("Tuple")))
                        .chain(return_types.iter().map(|ty| {
                            GenericArg::Type(match ty {
                                ResultVarType::Empty(ty) => ty.clone().unwrap(),
                                ResultVarType::Single(ty) => ty.clone(),
                                ResultVarType::Multi(ty) => ty.clone(),
                            })
                        }))
                        .collect::<Vec<_>>(),
                )
                .clone(),
        };
        // Generate function declaration.
        self.program.funcs.push(Function {
            id: FunctionId::new(0),
            signature: FunctionSignature {
                param_types: libfunc_signature
                    .param_signatures
                    .iter()
                    .map(|param_signature| param_signature.ty.clone())
                    .collect(),
                ret_types: libfunc_signature.param_signatures[..num_builtins]
                    .iter()
                    .map(|param_signature| param_signature.ty.clone())
                    .chain(once(return_type.clone()))
                    .collect(),
            },
            params: libfunc_signature
                .param_signatures
                .iter()
                .enumerate()
                .map(|(idx, param_signature)| Param {
                    id: VarId::new(idx as u64),
                    ty: param_signature.ty.clone(),
                })
                .collect(),
            entry_point: StatementIdx(0),
        });
        // Generate statements.
        let mut libfunc_invocation = Invocation {
            libfunc_id,
            args: libfunc_signature
                .param_signatures
                .iter()
                .enumerate()
                .map(|(idx, _)| VarId::new(idx as u64))
                .collect(),
            branches: Vec::new(),
        };
        let branch_align_libfunc = OnceCell::new();
        let construct_unit_libfunc = packed_unit_type_id.map(|ty| {
            self.push_libfunc_declaration(ConcreteLibfuncLongId {
                generic_id: GenericLibfuncId::from_string(StructConstructLibfunc::STR_ID),
                generic_args: vec![GenericArg::Type(ty)],
            })
            .clone()
        });
        for (branch_index, branch_signature) in
            libfunc_signature.branch_signatures.iter().enumerate()
        {
            // Multi-branch libfuncs require `branch_align` as the first
            // statement of every branch.
            if libfunc_signature.branch_signatures.len() > 1 {
                let branch_align_libfunc_id = branch_align_libfunc
                    .get_or_init(|| {
                        self.push_libfunc_declaration(ConcreteLibfuncLongId {
                            generic_id: GenericLibfuncId::from_string(BranchAlignLibfunc::STR_ID),
                            generic_args: Vec::new(),
                        })
                        .clone()
                    })
                    .clone();
                self.program
                    .statements
                    .push(Statement::Invocation(Invocation {
                        libfunc_id: branch_align_libfunc_id,
                        args: Vec::new(),
                        branches: vec![BranchInfo {
                            target: BranchTarget::Fallthrough,
                            results: Vec::new(),
                        }],
                    }));
            }
            // NOTE: The target index is taken AFTER pushing `branch_align`, so
            // it is one past that statement's current position. This is
            // deliberate: inserting the libfunc invocation at index 0 at the
            // end of this function shifts every statement by one, making the
            // recorded index point exactly at the branch's `branch_align`.
            let branch_target = match branch_index {
                0 => BranchTarget::Fallthrough,
                _ => {
                    let statement_idx = StatementIdx(self.program.statements.len());
                    BranchTarget::Statement(statement_idx)
                }
            };
            // Maybe pack values.
            match &return_types[branch_index] {
                ResultVarType::Empty(Some(_)) => {
                    self.program
                        .statements
                        .push(Statement::Invocation(Invocation {
                            libfunc_id: construct_unit_libfunc.clone().unwrap(),
                            args: Vec::new(),
                            branches: vec![BranchInfo {
                                target: BranchTarget::Fallthrough,
                                results: vec![VarId::new(num_builtins as u64)],
                            }],
                        }));
                }
                ResultVarType::Multi(type_id) => {
                    let construct_libfunc_id = self
                        .push_libfunc_declaration(ConcreteLibfuncLongId {
                            generic_id: GenericLibfuncId::from_string(
                                StructConstructLibfunc::STR_ID,
                            ),
                            generic_args: vec![GenericArg::Type(type_id.clone())],
                        })
                        .clone();
                    self.program
                        .statements
                        .push(Statement::Invocation(Invocation {
                            libfunc_id: construct_libfunc_id,
                            args: (num_builtins..branch_signature.vars.len())
                                .map(|x| VarId::new(x as u64))
                                .collect(),
                            branches: vec![BranchInfo {
                                target: BranchTarget::Fallthrough,
                                results: vec![VarId::new(num_builtins as u64)],
                            }],
                        }));
                }
                _ => {}
            }
            // Maybe enum values.
            if libfunc_signature.branch_signatures.len() > 1 {
                let enum_libfunc_id = self
                    .push_libfunc_declaration(ConcreteLibfuncLongId {
                        generic_id: GenericLibfuncId::from_string(EnumInitLibfunc::STR_ID),
                        generic_args: vec![
                            GenericArg::Type(return_type.clone()),
                            GenericArg::Value(branch_index.into()),
                        ],
                    })
                    .clone();
                self.program
                    .statements
                    .push(Statement::Invocation(Invocation {
                        libfunc_id: enum_libfunc_id,
                        args: vec![VarId::new(num_builtins as u64)],
                        branches: vec![BranchInfo {
                            target: BranchTarget::Fallthrough,
                            results: vec![VarId::new(num_builtins as u64)],
                        }],
                    }));
            }
            // Return the builtins plus the packed value.
            self.program.statements.push(Statement::Return(
                (0..=num_builtins).map(|x| VarId::new(x as u64)).collect(),
            ));
            // Push the branch target.
            libfunc_invocation.branches.push(BranchInfo {
                target: branch_target,
                results: branch_signature
                    .vars
                    .iter()
                    .enumerate()
                    .map(|(idx, _)| VarId::new(idx as u64))
                    .collect(),
            });
        }
        // Prepend the libfunc invocation as the entry statement (see the
        // branch-target note above).
        self.program
            .statements
            .insert(0, Statement::Invocation(libfunc_invocation));
        self.program
    }
    /// Declares (or reuses) the concrete type `U` specialized with
    /// `generic_args`, returning its id.
    pub fn push_type_declaration<U>(
        &mut self,
        generic_args: impl Into<Vec<GenericArg>>,
    ) -> &ConcreteTypeId
    where
        U: NamedType,
    {
        self.push_type_declaration_with_generic_id::<U>(U::ID, generic_args)
    }
    /// Declares the concrete type for `generic_id` specialized with
    /// `generic_args`, returning its id.
    ///
    /// Declarations are deduplicated by their long id, so requesting the same
    /// specialization twice yields the same `ConcreteTypeId`.
    pub fn push_type_declaration_with_generic_id<U>(
        &mut self,
        generic_id: GenericTypeId,
        generic_args: impl Into<Vec<GenericArg>>,
    ) -> &ConcreteTypeId
    where
        U: GenericType,
    {
        let generic_args = generic_args.into();
        // Specialize to obtain the full type info (this may recursively
        // declare any dependency types through the wrapper context).
        let type_info = U::by_id(&generic_id)
            .unwrap()
            .specialize(&SierraGeneratorWrapper(RefCell::new(self)), &generic_args)
            .unwrap()
            .info()
            .clone();
        // Reuse an existing declaration with the same long id, if any.
        let current_index = self
            .program
            .type_declarations
            .iter()
            .enumerate()
            .find_map(|(idx, type_decl)| (type_decl.long_id == type_info.long_id).then_some(idx));
        let current_index = current_index.unwrap_or_else(|| {
            let idx = self.program.type_declarations.len();
            self.program.type_declarations.push(TypeDeclaration {
                id: ConcreteTypeId::new(idx as u64),
                long_id: type_info.long_id,
                declared_type_info: Some(DeclaredTypeInfo {
                    storable: type_info.storable,
                    droppable: type_info.droppable,
                    duplicatable: type_info.duplicatable,
                    zero_sized: type_info.zero_sized,
                }),
            });
            idx
        });
        &self.program.type_declarations[current_index].id
    }
fn push_libfunc_declaration(&mut self, long_id: ConcreteLibfuncLongId) -> &ConcreteLibfuncId {
let id = ConcreteLibfuncId::new(self.program.libfunc_declarations.len() as u64);
self.program
.libfunc_declarations
.push(LibfuncDeclaration { id, long_id });
&self.program.libfunc_declarations.last().unwrap().id
}
}
/// Adapter that lets a [`SierraGenerator`] act as the specialization context
/// required by the sierra extension traits.
///
/// A `RefCell` is used because the context traits take `&self` while pushing
/// type declarations needs mutable access to the generator.
struct SierraGeneratorWrapper<'a, T>(RefCell<&'a mut SierraGenerator<T>>)
where
    T: GenericLibfunc;
impl<T> SignatureSpecializationContext for SierraGeneratorWrapper<'_, T>
where
    T: GenericLibfunc,
{
    // Types requested during specialization are declared on the fly in the
    // wrapped generator's program.
    fn try_get_concrete_type(
        &self,
        id: GenericTypeId,
        generic_args: &[GenericArg],
    ) -> Option<ConcreteTypeId> {
        Some(
            self.0
                .borrow_mut()
                .push_type_declaration_with_generic_id::<CoreType>(id, generic_args)
                .clone(),
        )
    }
    // Function lookups are not implemented; generated programs never reference
    // user functions, so reaching these would panic.
    fn try_get_function_signature(&self, _function_id: &FunctionId) -> Option<FunctionSignature> {
        todo!()
    }
    fn try_get_function_ap_change(&self, _function_id: &FunctionId) -> Option<SierraApChange> {
        todo!()
    }
}
impl<T> TypeSpecializationContext for SierraGeneratorWrapper<'_, T>
where
    T: GenericLibfunc,
{
    /// Looks the type up among the declarations pushed so far, rebuilding its
    /// `TypeInfo` from the stored `DeclaredTypeInfo`.
    fn try_get_type_info(&self, id: ConcreteTypeId) -> Option<TypeInfo> {
        self.0
            .borrow()
            .program
            .type_declarations
            .iter()
            .find_map(|type_declaration| {
                (type_declaration.id == id).then(|| {
                    // All declarations pushed by the generator carry
                    // `declared_type_info`, so this unwrap holds.
                    let declared_type_info = type_declaration.declared_type_info.as_ref().unwrap();
                    TypeInfo {
                        long_id: type_declaration.long_id.clone(),
                        storable: declared_type_info.storable,
                        droppable: declared_type_info.droppable,
                        duplicatable: declared_type_info.duplicatable,
                        zero_sized: declared_type_info.zero_sized,
                    }
                })
            })
    }
}
/// Shape of a branch's non-builtin return values after packing.
#[derive(Clone)]
enum ResultVarType {
    /// No values. The type id, when present, is the unit struct used as the
    /// enum variant payload for multi-branch libfuncs.
    Empty(Option<ConcreteTypeId>),
    /// Exactly one value of the given type.
    Single(ConcreteTypeId),
    /// Several values packed into a tuple struct of the given type.
    Multi(ConcreteTypeId),
}
#[cfg(test)]
mod test {
    use super::*;
    use cairo_lang_sierra::extensions::{
        array::ArrayNewLibfunc,
        bounded_int::BoundedIntIsZeroLibfunc,
        bytes31::Bytes31FromFelt252Trait,
        int::{
            signed::{Sint8Traits, SintDiffLibfunc},
            unsigned::{Uint32Type, Uint64Traits, Uint8Type},
            unsigned128::U128GuaranteeMulLibfunc,
            IntConstLibfunc,
        },
        try_from_felt252::TryFromFelt252Libfunc,
    };
    /// Simplest case: single branch, single (constant) result.
    #[test]
    fn sierra_generator() {
        let program = SierraGenerator::<IntConstLibfunc<Uint64Traits>>::default()
            .build(&[GenericArg::Value(0.into())]);
        println!("{program}");
    }
    /// Single branch with multiple results (exercises struct packing).
    #[test]
    fn sierra_generator_multiret() {
        let program = SierraGenerator::<U128GuaranteeMulLibfunc>::default().build(&[]);
        println!("{program}");
    }
    /// Multiple branches (exercises branch_align + enum wrapping).
    #[test]
    fn sierra_generator_multibranch() {
        let program = SierraGenerator::<SintDiffLibfunc<Sint8Traits>>::default().build(&[]);
        println!("{program}");
    }
    /// Generic libfunc parameterized with a pre-declared type.
    #[test]
    fn sierra_generator_template() {
        let program = {
            let mut generator = SierraGenerator::<ArrayNewLibfunc>::default();
            let u8_type = generator.push_type_declaration::<Uint8Type>(&[]).clone();
            generator.build(&[GenericArg::Type(u8_type)])
        };
        println!("{program}");
    }
    /// Libfunc whose specialization queries type info of a declared type.
    #[test]
    fn sierra_generator_type_info() {
        let program = {
            let mut generator = SierraGenerator::<BoundedIntIsZeroLibfunc>::default();
            let u32_type = generator.push_type_declaration::<Uint32Type>(&[]).clone();
            generator.build(&[GenericArg::Type(u32_type)])
        };
        println!("{program}");
    }
    /// Multi-branch libfunc requiring branch alignment on fallible conversion.
    #[test]
    fn sierra_generator_branch_align() {
        let program =
            SierraGenerator::<TryFromFelt252Libfunc<Bytes31FromFelt252Trait>>::default().build(&[]);
        println!("{program}");
    }
    /// Nested type declarations (array of a previously declared type).
    #[test]
    fn sierra_generator_type_generation() {
        let mut generator =
            SierraGenerator::<cairo_lang_sierra::extensions::array::ArrayGetLibfunc>::default();
        let u32_ty = generator.push_type_declaration::<Uint32Type>(&[]).clone();
        let array_ty = generator
            .push_type_declaration::<cairo_lang_sierra::extensions::array::ArrayType>(&[
                GenericArg::Type(u32_ty),
            ])
            .clone();
        let program = generator.build(&[GenericArg::Type(array_ty)]);
        println!("{program}");
    }
}
| rust | Apache-2.0 | f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce | 2026-01-04T20:20:54.031924Z | false |
lambdaclass/cairo_native | https://github.com/lambdaclass/cairo_native/blob/f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce/src/types/struct.rs | src/types/struct.rs | //! # Struct type
//!
//! A struct is just a fixed collection of values that may have different types, which are known at
//! compile-time. Its fields are properly aligned and respect the declaration's field ordering.
//!
//! For example, the following struct would have a layout as described in the table below:
//!
//! ```cairo
//! struct MyStruct {
//! U8: u8,
//! U16: u16,
//! U32: u32,
//! U64: u64,
//! Felt: Felt,
//! }
//! ```
//!
//! | Index | Type | ABI (in Rust types) | Alignment | Size |
//! | ----- | ------ | ------------------- | --------- | ---- |
//! | 0 | `i8` | `u8` | 1 | 1 |
//! | N/A | N/A | `[u8; 1]` | 1 | 1 |
//! | 1 | `i16` | `u16` | 2 | 2 |
//! | N/A | N/A | `[u8; 2]` | 1 | 2 |
//! | 2 | `i32` | `u32` | 4 | 4 |
//! | N/A | N/A | `[u8; 4]` | 1 | 4 |
//! | 3 | `i64` | `u64` | 8 | 8 |
//! | 4 | `i252` | `[u64; 4]` | 8 | 8 |
//!
//! As inferred in the table above, the struct will have 8-byte alignment and a size of 30 bytes.
//! Since this way of generating structs is equivalent to the one used in C and C++, the same
//! effects apply. For example, if we invert the order of the fields the ABI will change but we
//! won't waste a single byte in padding; unless we're creating an array, in which case we'd waste
//! only a single byte per element.
use super::WithSelf;
use crate::{
error::Result,
metadata::{
drop_overrides::DropOverridesMeta, dup_overrides::DupOverridesMeta, MetadataStorage,
},
utils::ProgramRegistryExt,
};
use cairo_lang_sierra::{
extensions::{
core::{CoreLibfunc, CoreType},
structure::StructConcreteType,
},
program_registry::ProgramRegistry,
};
use melior::{
dialect::{func, llvm},
helpers::{BuiltinBlockExt, LlvmBlockExt},
ir::{Block, BlockLike, Location, Module, Region, Type},
Context,
};
/// Build the MLIR type.
///
/// Also registers dup/drop overrides for the struct when any member requires
/// them. Check out [the module](self) for more info.
pub fn build<'ctx>(
    context: &'ctx Context,
    module: &Module<'ctx>,
    registry: &ProgramRegistry<CoreType, CoreLibfunc>,
    metadata: &mut MetadataStorage,
    info: WithSelf<StructConcreteType>,
) -> Result<Type<'ctx>> {
    DupOverridesMeta::register_with(
        context,
        module,
        registry,
        metadata,
        info.self_ty(),
        |metadata| {
            // Build each member type first so its own overrides get
            // registered; a custom dup for the struct is only needed if at
            // least one member overrides dup.
            let mut needs_override = false;
            for member in &info.members {
                registry.build_type(context, module, metadata, member)?;
                if DupOverridesMeta::is_overriden(metadata, member) {
                    needs_override = true;
                    break;
                }
            }
            needs_override
                .then(|| build_dup(context, module, registry, metadata, &info))
                .transpose()
        },
    )?;
    DropOverridesMeta::register_with(
        context,
        module,
        registry,
        metadata,
        info.self_ty(),
        |metadata| {
            // Same as above, but for drop overrides.
            let mut needs_override = false;
            for member in &info.members {
                registry.build_type(context, module, metadata, member)?;
                if DropOverridesMeta::is_overriden(metadata, member) {
                    needs_override = true;
                    break;
                }
            }
            needs_override
                .then(|| build_drop(context, module, registry, metadata, &info))
                .transpose()
        },
    )?;
    // The struct is a plain (non-packed) LLVM struct of its member types, in
    // declaration order.
    let members = info
        .members
        .iter()
        .map(|member| registry.build_type(context, module, metadata, member))
        .collect::<Result<Vec<_>>>()?;
    Ok(llvm::r#type::r#struct(context, &members, false))
}
fn build_dup<'ctx>(
context: &'ctx Context,
module: &Module<'ctx>,
registry: &ProgramRegistry<CoreType, CoreLibfunc>,
metadata: &mut MetadataStorage,
info: &WithSelf<StructConcreteType>,
) -> Result<Region<'ctx>> {
let location = Location::unknown(context);
let self_ty = registry.build_type(context, module, metadata, info.self_ty())?;
let region = Region::new();
let entry = region.append_block(Block::new(&[(self_ty, location)]));
let mut src_value = entry.arg(0)?;
let mut dst_value = entry.append_op_result(llvm::undef(self_ty, location))?;
for (idx, member_id) in info.members.iter().enumerate() {
let member_ty = registry.build_type(context, module, metadata, member_id)?;
let member_val = entry.extract_value(context, location, src_value, member_ty, idx)?;
let values = DupOverridesMeta::invoke_override(
context, registry, module, &entry, &entry, location, metadata, member_id, member_val,
)?;
src_value = entry.insert_value(context, location, src_value, values.0, idx)?;
dst_value = entry.insert_value(context, location, dst_value, values.1, idx)?;
}
entry.append_operation(func::r#return(&[src_value, dst_value], location));
Ok(region)
}
fn build_drop<'ctx>(
context: &'ctx Context,
module: &Module<'ctx>,
registry: &ProgramRegistry<CoreType, CoreLibfunc>,
metadata: &mut MetadataStorage,
info: &WithSelf<StructConcreteType>,
) -> Result<Region<'ctx>> {
let location = Location::unknown(context);
let self_ty = registry.build_type(context, module, metadata, info.self_ty())?;
let region = Region::new();
let entry = region.append_block(Block::new(&[(self_ty, location)]));
let value = entry.arg(0)?;
for (idx, member_id) in info.members.iter().enumerate() {
let member_ty = registry.build_type(context, module, metadata, member_id)?;
let member_val = entry.extract_value(context, location, value, member_ty, idx)?;
DropOverridesMeta::invoke_override(
context, registry, module, &entry, &entry, location, metadata, member_id, member_val,
)?;
}
entry.append_operation(func::r#return(&[], location));
Ok(region)
}
| rust | Apache-2.0 | f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce | 2026-01-04T20:20:54.031924Z | false |
lambdaclass/cairo_native | https://github.com/lambdaclass/cairo_native/blob/f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce/src/types/uint128_mul_guarantee.rs | src/types/uint128_mul_guarantee.rs | //! # Unsigned 128-bit multiplication guarantee type
use super::WithSelf;
use crate::{error::Result, metadata::MetadataStorage};
use cairo_lang_sierra::{
extensions::{
core::{CoreLibfunc, CoreType},
types::InfoOnlyConcreteType,
},
program_registry::ProgramRegistry,
};
use melior::{
dialect::llvm,
ir::{r#type::IntegerType, Module, Type},
Context,
};
/// Build the MLIR type.
///
/// Check out [the module](self) for more info.
pub fn build<'ctx>(
context: &'ctx Context,
_module: &Module<'ctx>,
_registry: &ProgramRegistry<CoreType, CoreLibfunc>,
_metadata: &mut MetadataStorage,
_info: WithSelf<InfoOnlyConcreteType>,
) -> Result<Type<'ctx>> {
Ok(llvm::r#type::array(IntegerType::new(context, 8).into(), 0))
}
| rust | Apache-2.0 | f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce | 2026-01-04T20:20:54.031924Z | false |
lambdaclass/cairo_native | https://github.com/lambdaclass/cairo_native/blob/f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce/src/types/circuit.rs | src/types/circuit.rs | //! # `Circuit` type
use std::alloc::Layout;
use super::WithSelf;
use crate::{
error::{Result, SierraAssertError},
metadata::{
drop_overrides::DropOverridesMeta, dup_overrides::DupOverridesMeta,
realloc_bindings::ReallocBindingsMeta, MetadataStorage,
},
utils::{get_integer_layout, layout_repeat, ProgramRegistryExt},
};
use cairo_lang_sierra::{
extensions::{
circuit::{CircuitTypeConcrete, ConcreteU96LimbsLessThanGuarantee},
core::{CoreLibfunc, CoreType, CoreTypeConcrete},
types::InfoOnlyConcreteType,
},
program::GenericArg,
program_registry::ProgramRegistry,
};
use melior::{
dialect::{func, llvm},
helpers::{ArithBlockExt, BuiltinBlockExt, LlvmBlockExt},
ir::{r#type::IntegerType, Block, BlockLike, Location, Module, Region, Type, Value},
Context,
};
/// Build the MLIR type.
///
/// Check out [the module](self) for more info.
pub fn build<'ctx>(
context: &'ctx Context,
module: &Module<'ctx>,
registry: &ProgramRegistry<CoreType, CoreLibfunc>,
metadata: &mut MetadataStorage,
selector: WithSelf<CircuitTypeConcrete>,
) -> Result<Type<'ctx>> {
match &*selector {
CircuitTypeConcrete::CircuitModulus(_) => Ok(IntegerType::new(context, 384).into()),
CircuitTypeConcrete::U96Guarantee(_) => Ok(IntegerType::new(context, 96).into()),
CircuitTypeConcrete::CircuitInputAccumulator(info) => build_circuit_accumulator(
context,
module,
registry,
metadata,
WithSelf::new(selector.self_ty(), info),
),
CircuitTypeConcrete::CircuitData(info) => build_circuit_data(
context,
module,
registry,
metadata,
WithSelf::new(selector.self_ty(), info),
),
CircuitTypeConcrete::CircuitOutputs(info) => build_circuit_outputs(
context,
module,
registry,
metadata,
WithSelf::new(selector.self_ty(), info),
),
CircuitTypeConcrete::U96LimbsLessThanGuarantee(info) => {
build_u96_limbs_less_than_guarantee(
context,
module,
registry,
metadata,
WithSelf::new(selector.self_ty(), info),
)
}
// builtins
CircuitTypeConcrete::AddMod(_) | CircuitTypeConcrete::MulMod(_) => {
Ok(IntegerType::new(context, 64).into())
}
// noops
CircuitTypeConcrete::CircuitDescriptor(_)
| CircuitTypeConcrete::CircuitFailureGuarantee(_)
| CircuitTypeConcrete::CircuitPartialOutputs(_) => {
Ok(llvm::r#type::array(IntegerType::new(context, 8).into(), 0))
}
// phantoms
CircuitTypeConcrete::Circuit(_)
| CircuitTypeConcrete::AddModGate(_)
| CircuitTypeConcrete::SubModGate(_)
| CircuitTypeConcrete::MulModGate(_)
| CircuitTypeConcrete::InverseGate(_)
| CircuitTypeConcrete::CircuitInput(_) => {
Err(SierraAssertError::BadTypeInit(selector.self_ty.clone()))?
}
}
}
/// Builds the circuit accumulator type.
///
/// ## Layout:
///
/// Holds up to N_INPUTS elements. Where each element is an u384 integer.
///
/// ```txt
/// type = struct {
/// size: u64,
/// data: *u384,
/// }
/// ```
pub fn build_circuit_accumulator<'ctx>(
context: &'ctx Context,
module: &Module<'ctx>,
registry: &ProgramRegistry<CoreType, CoreLibfunc>,
metadata: &mut MetadataStorage,
info: WithSelf<InfoOnlyConcreteType>,
) -> Result<Type<'ctx>> {
let Some(GenericArg::Type(circuit_type_id)) = info.info.long_id.generic_args.first() else {
return Err(SierraAssertError::BadTypeInfo.into());
};
let CoreTypeConcrete::Circuit(CircuitTypeConcrete::Circuit(circuit)) =
registry.get_type(circuit_type_id)?
else {
return Err(SierraAssertError::BadTypeInfo.into());
};
DupOverridesMeta::register_with(
context,
module,
registry,
metadata,
info.self_ty(),
|metadata| {
let location = Location::unknown(context);
let region = Region::new();
let value_ty = registry.build_type(context, module, metadata, info.self_ty())?;
let entry = region.append_block(Block::new(&[(value_ty, location)]));
let accumulator = entry.arg(0)?;
let inputs_ptr = entry.extract_value(
context,
location,
accumulator,
llvm::r#type::pointer(context, 0),
1,
)?;
let u384_layout = get_integer_layout(384);
let new_inputs_ptr = build_array_dup(
context,
&entry,
location,
inputs_ptr,
circuit.circuit_info.n_inputs,
u384_layout,
)?;
let new_accumulator =
entry.insert_value(context, location, accumulator, new_inputs_ptr, 1)?;
entry.append_operation(func::r#return(&[accumulator, new_accumulator], location));
Ok(Some(region))
},
)?;
DropOverridesMeta::register_with(
context,
module,
registry,
metadata,
info.self_ty(),
|metadata| {
let location = Location::unknown(context);
let region = Region::new();
let value_ty = registry.build_type(context, module, metadata, info.self_ty())?;
let entry = region.append_block(Block::new(&[(value_ty, location)]));
let accumulator = entry.arg(0)?;
let inputs_ptr = entry.extract_value(
context,
location,
accumulator,
llvm::r#type::pointer(context, 0),
1,
)?;
entry.append_operation(ReallocBindingsMeta::free(context, inputs_ptr, location)?);
entry.append_operation(func::r#return(&[], location));
Ok(Some(region))
},
)?;
let fields = vec![
IntegerType::new(context, 64).into(),
llvm::r#type::pointer(context, 0),
];
Ok(llvm::r#type::r#struct(context, &fields, false))
}
/// Builds the circuit data type.
///
/// ## Layout:
///
/// Holds N_INPUTS elements. Where each element is an u384.
///
/// ```txt
/// type = *u384
/// ```
pub fn build_circuit_data<'ctx>(
context: &'ctx Context,
module: &Module<'ctx>,
registry: &ProgramRegistry<CoreType, CoreLibfunc>,
metadata: &mut MetadataStorage,
info: WithSelf<InfoOnlyConcreteType>,
) -> Result<Type<'ctx>> {
let Some(GenericArg::Type(circuit_type_id)) = info.info.long_id.generic_args.first() else {
return Err(SierraAssertError::BadTypeInfo.into());
};
let CoreTypeConcrete::Circuit(CircuitTypeConcrete::Circuit(circuit)) =
registry.get_type(circuit_type_id)?
else {
return Err(SierraAssertError::BadTypeInfo.into());
};
DupOverridesMeta::register_with(
context,
module,
registry,
metadata,
info.self_ty(),
|metadata| {
let location = Location::unknown(context);
let region = Region::new();
let value_ty = registry.build_type(context, module, metadata, info.self_ty())?;
let entry = region.append_block(Block::new(&[(value_ty, location)]));
let data_ptr = entry.arg(0)?;
let u384_layout = get_integer_layout(384);
let new_data_ptr = build_array_dup(
context,
&entry,
location,
data_ptr,
circuit.circuit_info.n_inputs,
u384_layout,
)?;
entry.append_operation(func::r#return(&[data_ptr, new_data_ptr], location));
Ok(Some(region))
},
)?;
DropOverridesMeta::register_with(
context,
module,
registry,
metadata,
info.self_ty(),
|metadata| {
let location = Location::unknown(context);
let region = Region::new();
let value_ty = registry.build_type(context, module, metadata, info.self_ty())?;
let entry = region.append_block(Block::new(&[(value_ty, location)]));
let data_ptr = entry.arg(0)?;
entry.append_operation(ReallocBindingsMeta::free(context, data_ptr, location)?);
entry.append_operation(func::r#return(&[], location));
Ok(Some(region))
},
)?;
Ok(llvm::r#type::pointer(context, 0))
}
/// Builds the circuit outputs type.
///
/// ## Layout:
///
/// Holds the evaluated circuit output gates and the circuit modulus.
/// - The data is stored as a dynamic array of u384 integers.
/// - The modulus is stored as a u384 in struct form (multi-limb).
///
/// ```txt
/// type = struct {
/// data: *u384,
/// modulus: u384struct,
/// };
///
/// u384struct = struct {
/// limb1: u96,
/// limb2: u96,
/// limb3: u96,
/// limb4: u96,
/// }
/// ```
pub fn build_circuit_outputs<'ctx>(
context: &'ctx Context,
module: &Module<'ctx>,
registry: &ProgramRegistry<CoreType, CoreLibfunc>,
metadata: &mut MetadataStorage,
info: WithSelf<InfoOnlyConcreteType>,
) -> Result<Type<'ctx>> {
let Some(GenericArg::Type(circuit_type_id)) = info.info.long_id.generic_args.first() else {
return Err(SierraAssertError::BadTypeInfo.into());
};
let CoreTypeConcrete::Circuit(CircuitTypeConcrete::Circuit(circuit)) =
registry.get_type(circuit_type_id)?
else {
return Err(SierraAssertError::BadTypeInfo.into());
};
DupOverridesMeta::register_with(
context,
module,
registry,
metadata,
info.self_ty(),
|metadata| {
let location = Location::unknown(context);
let region = Region::new();
let value_ty = registry.build_type(context, module, metadata, info.self_ty())?;
let entry = region.append_block(Block::new(&[(value_ty, location)]));
let outputs = entry.arg(0)?;
let gates_ptr = entry.extract_value(
context,
location,
outputs,
llvm::r#type::pointer(context, 0),
0,
)?;
let u384_integer_layout = get_integer_layout(384);
let new_gates_ptr = build_array_dup(
context,
&entry,
location,
gates_ptr,
circuit.circuit_info.values.len(),
u384_integer_layout,
)?;
let new_outputs = entry.insert_value(context, location, outputs, new_gates_ptr, 0)?;
entry.append_operation(func::r#return(&[outputs, new_outputs], location));
Ok(Some(region))
},
)?;
DropOverridesMeta::register_with(
context,
module,
registry,
metadata,
info.self_ty(),
|metadata| {
let location = Location::unknown(context);
let region = Region::new();
let value_ty = registry.build_type(context, module, metadata, info.self_ty())?;
let entry = region.append_block(Block::new(&[(value_ty, location)]));
let outputs = entry.arg(0)?;
let gates_ptr = entry.extract_value(
context,
location,
outputs,
llvm::r#type::pointer(context, 0),
0,
)?;
entry.append_operation(ReallocBindingsMeta::free(context, gates_ptr, location)?);
entry.append_operation(func::r#return(&[], location));
Ok(Some(region))
},
)?;
Ok(llvm::r#type::r#struct(
context,
&[
llvm::r#type::pointer(context, 0),
build_u384_struct_type(context),
],
false,
))
}
pub fn build_u96_limbs_less_than_guarantee<'ctx>(
context: &'ctx Context,
_module: &Module<'ctx>,
_registry: &ProgramRegistry<CoreType, CoreLibfunc>,
_metadata: &mut MetadataStorage,
info: WithSelf<ConcreteU96LimbsLessThanGuarantee>,
) -> Result<Type<'ctx>> {
let limbs = info.inner.limb_count;
let u96_type = IntegerType::new(context, 96).into();
let limb_struct_type = llvm::r#type::r#struct(context, &vec![u96_type; limbs], false);
Ok(llvm::r#type::r#struct(
context,
&[limb_struct_type, limb_struct_type],
false,
))
}
pub fn build_array_dup<'ctx, 'this>(
context: &'ctx Context,
block: &'this Block<'ctx>,
location: Location<'ctx>,
ptr: Value<'ctx, 'this>,
capacity: usize,
layout: Layout,
) -> Result<Value<'ctx, 'this>> {
let capacity_bytes = layout_repeat(&layout, capacity)?.0.pad_to_align().size();
let capacity_bytes_value = block.const_int(context, location, capacity_bytes, 64)?;
let new_inputs_ptr = {
let ptr_ty = llvm::r#type::pointer(context, 0);
let new_inputs_ptr = block.append_op_result(llvm::zero(ptr_ty, location))?;
block.append_op_result(ReallocBindingsMeta::realloc(
context,
new_inputs_ptr,
capacity_bytes_value,
location,
)?)?
};
block.memcpy(context, location, ptr, new_inputs_ptr, capacity_bytes_value);
Ok(new_inputs_ptr)
}
pub const fn is_complex(info: &CircuitTypeConcrete) -> bool {
match *info {
CircuitTypeConcrete::AddMod(_)
| CircuitTypeConcrete::MulMod(_)
| CircuitTypeConcrete::AddModGate(_)
| CircuitTypeConcrete::SubModGate(_)
| CircuitTypeConcrete::MulModGate(_)
| CircuitTypeConcrete::U96Guarantee(_)
| CircuitTypeConcrete::InverseGate(_)
| CircuitTypeConcrete::U96LimbsLessThanGuarantee(_)
| CircuitTypeConcrete::CircuitModulus(_)
| CircuitTypeConcrete::CircuitInput(_)
| CircuitTypeConcrete::Circuit(_)
| CircuitTypeConcrete::CircuitDescriptor(_)
| CircuitTypeConcrete::CircuitFailureGuarantee(_) => false,
CircuitTypeConcrete::CircuitInputAccumulator(_)
| CircuitTypeConcrete::CircuitPartialOutputs(_)
| CircuitTypeConcrete::CircuitData(_)
| CircuitTypeConcrete::CircuitOutputs(_) => true,
}
}
pub const fn is_zst(info: &CircuitTypeConcrete) -> bool {
match *info {
CircuitTypeConcrete::AddModGate(_)
| CircuitTypeConcrete::SubModGate(_)
| CircuitTypeConcrete::MulModGate(_)
| CircuitTypeConcrete::CircuitInput(_)
| CircuitTypeConcrete::InverseGate(_)
| CircuitTypeConcrete::U96LimbsLessThanGuarantee(_)
| CircuitTypeConcrete::Circuit(_)
| CircuitTypeConcrete::CircuitDescriptor(_)
| CircuitTypeConcrete::CircuitFailureGuarantee(_) => true,
CircuitTypeConcrete::AddMod(_)
| CircuitTypeConcrete::CircuitModulus(_)
| CircuitTypeConcrete::U96Guarantee(_)
| CircuitTypeConcrete::MulMod(_)
| CircuitTypeConcrete::CircuitInputAccumulator(_)
| CircuitTypeConcrete::CircuitPartialOutputs(_)
| CircuitTypeConcrete::CircuitData(_)
| CircuitTypeConcrete::CircuitOutputs(_) => false,
}
}
pub fn layout(
_registry: &ProgramRegistry<CoreType, CoreLibfunc>,
info: &CircuitTypeConcrete,
) -> Result<Layout> {
match info {
CircuitTypeConcrete::AddMod(_) | CircuitTypeConcrete::MulMod(_) => {
Ok(get_integer_layout(64))
}
CircuitTypeConcrete::CircuitModulus(_) => Ok(get_integer_layout(384)),
CircuitTypeConcrete::U96Guarantee(_) => Ok(get_integer_layout(96)),
CircuitTypeConcrete::AddModGate(_)
| CircuitTypeConcrete::SubModGate(_)
| CircuitTypeConcrete::MulModGate(_)
| CircuitTypeConcrete::CircuitInput(_)
| CircuitTypeConcrete::InverseGate(_)
| CircuitTypeConcrete::U96LimbsLessThanGuarantee(_)
| CircuitTypeConcrete::Circuit(_)
| CircuitTypeConcrete::CircuitDescriptor(_)
| CircuitTypeConcrete::CircuitFailureGuarantee(_) => Ok(Layout::new::<()>()),
CircuitTypeConcrete::CircuitData(_) => Ok(Layout::new::<*mut ()>()),
CircuitTypeConcrete::CircuitOutputs(_) => {
let u384_struct_layout = layout_repeat(&get_integer_layout(96), 4)?.0;
let pointer_layout = Layout::new::<*mut ()>();
Ok(pointer_layout.extend(u384_struct_layout)?.0)
}
CircuitTypeConcrete::CircuitInputAccumulator(_) => {
let integer_layout = get_integer_layout(64);
let pointer_layout = Layout::new::<*mut ()>();
Ok(integer_layout.extend(pointer_layout)?.0)
}
CircuitTypeConcrete::CircuitPartialOutputs(_) => Ok(Layout::new::<()>()),
}
}
pub fn build_u384_struct_type(context: &Context) -> Type<'_> {
llvm::r#type::r#struct(
context,
&[
IntegerType::new(context, 96).into(),
IntegerType::new(context, 96).into(),
IntegerType::new(context, 96).into(),
IntegerType::new(context, 96).into(),
],
false,
)
}
| rust | Apache-2.0 | f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce | 2026-01-04T20:20:54.031924Z | false |
lambdaclass/cairo_native | https://github.com/lambdaclass/cairo_native/blob/f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce/src/types/bitwise.rs | src/types/bitwise.rs | //! # Bitwise type
//!
//! The bitwise type is used in the VM for computing bitwise operations. Since this can be done
//! natively in MLIR, this type is effectively an unit type.
use super::WithSelf;
use crate::{error::Result, metadata::MetadataStorage};
use cairo_lang_sierra::{
extensions::{
core::{CoreLibfunc, CoreType},
types::InfoOnlyConcreteType,
},
program_registry::ProgramRegistry,
};
use melior::{
ir::{r#type::IntegerType, Module, Type},
Context,
};
/// Build the MLIR type.
///
/// Check out [the module](self) for more info.
pub fn build<'ctx>(
context: &'ctx Context,
_module: &Module<'ctx>,
_registry: &ProgramRegistry<CoreType, CoreLibfunc>,
_metadata: &mut MetadataStorage,
_info: WithSelf<InfoOnlyConcreteType>,
) -> Result<Type<'ctx>> {
Ok(IntegerType::new(context, 64).into())
}
| rust | Apache-2.0 | f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce | 2026-01-04T20:20:54.031924Z | false |
lambdaclass/cairo_native | https://github.com/lambdaclass/cairo_native/blob/f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce/src/types/array.rs | src/types/array.rs | //! # Array type
//!
//! An array type is a dynamically allocated list of items.
//!
//! ## Layout
//!
//! Being dynamically allocated, we just need to keep the pointer to the data, its length and
//! its capacity:
//!
//! | Index | Type | Description |
//! | ----- | -------------- | ------------------------ |
//! | 0 | `!llvm.ptr<T>` | Pointer to the data[^1]. |
//! | 1 | `i32` | Array start offset[^2]. |
//! | 1 | `i32` | Array end offset[^2]. |
//! | 2 | `i32` | Allocated capacity[^2]. |
//!
//! The pointer to the allocation (which is **not the data**) contains:
//! 1. Reference counter.
//! 2. Padding.
//! 3. Array data. Its address is the pointer to the data stored in the type.
//!
//! [^1]: When capacity is zero, this field is not guaranteed to be valid.
//! [^2]: Those numbers are number of items, **not bytes**.
use super::{TypeBuilder, WithSelf};
use crate::{
error::Result,
metadata::{
drop_overrides::DropOverridesMeta, dup_overrides::DupOverridesMeta,
realloc_bindings::ReallocBindingsMeta, MetadataStorage,
},
utils::{get_integer_layout, ProgramRegistryExt},
};
use cairo_lang_sierra::{
extensions::{
core::{CoreLibfunc, CoreType},
types::InfoAndTypeConcreteType,
},
program_registry::ProgramRegistry,
};
use melior::{
dialect::{arith, llvm},
ir::{r#type::IntegerType, Block, Location, Module, Type},
Context,
};
use melior::{
dialect::{arith::CmpiPredicate, func, scf},
ir::BlockLike,
};
use melior::{
helpers::{ArithBlockExt, BuiltinBlockExt, GepIndex, LlvmBlockExt},
ir::Region,
};
use std::alloc::Layout;
/// Build the MLIR type.
///
/// Check out [the module](self) for more info.
pub fn build<'ctx>(
context: &'ctx Context,
module: &Module<'ctx>,
registry: &ProgramRegistry<CoreType, CoreLibfunc>,
metadata: &mut MetadataStorage,
info: WithSelf<InfoAndTypeConcreteType>,
) -> Result<Type<'ctx>> {
DupOverridesMeta::register_with(
context,
module,
registry,
metadata,
info.self_ty(),
|metadata| {
// There's no need to build the type here because it'll always be built within
// `build_dup`.
Ok(Some(build_dup(context, module, registry, metadata, &info)?))
},
)?;
DropOverridesMeta::register_with(
context,
module,
registry,
metadata,
info.self_ty(),
|metadata| {
// There's no need to build the type here because it'll always be built within
// `build_drop`.
Ok(Some(build_drop(
context, module, registry, metadata, &info,
)?))
},
)?;
let ptr_ty = llvm::r#type::pointer(context, 0);
let len_ty = IntegerType::new(context, 32).into();
Ok(llvm::r#type::r#struct(
context,
&[ptr_ty, len_ty, len_ty, len_ty],
false,
))
}
/// This function clones the array shallowly. That is, it'll increment the reference counter but not
/// actually clone anything. The deep clone implementation is provided in `src/libfuncs/array.rs` as
/// part of some libfuncs's implementations.
fn build_dup<'ctx>(
context: &'ctx Context,
module: &Module<'ctx>,
registry: &ProgramRegistry<CoreType, CoreLibfunc>,
metadata: &mut MetadataStorage,
info: &WithSelf<InfoAndTypeConcreteType>,
) -> Result<Region<'ctx>> {
let location = Location::unknown(context);
let value_ty = registry.build_type(context, module, metadata, info.self_ty())?;
let elem_layout = registry.get_type(&info.ty)?.layout(registry)?;
let refcount_offset = calc_data_prefix_offset(elem_layout);
let region = Region::new();
let entry = region.append_block(Block::new(&[(value_ty, location)]));
let array_cap = entry.extract_value(
context,
location,
entry.argument(0)?.into(),
IntegerType::new(context, 32).into(),
3,
)?;
let k0 = entry.const_int(context, location, 0, 32)?;
let is_empty = entry.append_op_result(arith::cmpi(
context,
CmpiPredicate::Eq,
array_cap,
k0,
location,
))?;
entry.append_operation(scf::r#if(
is_empty,
&[],
{
let region = Region::new();
let block = region.append_block(Block::new(&[]));
block.append_operation(scf::r#yield(&[], location));
region
},
{
let region = Region::new();
let block = region.append_block(Block::new(&[]));
let array_ptr_ptr = block.extract_value(
context,
location,
entry.argument(0)?.into(),
llvm::r#type::pointer(context, 0),
0,
)?;
let array_ptr = block.load(
context,
location,
array_ptr_ptr,
llvm::r#type::pointer(context, 0),
)?;
let refcount_ptr = block.gep(
context,
location,
array_ptr,
&[GepIndex::Const(-(refcount_offset as i32))],
IntegerType::new(context, 8).into(),
)?;
let ref_count = block.load(
context,
location,
refcount_ptr,
IntegerType::new(context, 32).into(),
)?;
let k1 = block.const_int(context, location, 1, 32)?;
let ref_count = block.append_op_result(arith::addi(ref_count, k1, location))?;
block.store(context, location, refcount_ptr, ref_count)?;
block.append_operation(scf::r#yield(&[], location));
region
},
location,
));
entry.append_operation(func::r#return(
&[entry.argument(0)?.into(), entry.argument(0)?.into()],
location,
));
Ok(region)
}
/// This function decreases the reference counter of the array by one.
/// If the reference counter reaches zero, then all the resources are freed.
fn build_drop<'ctx>(
context: &'ctx Context,
module: &Module<'ctx>,
registry: &ProgramRegistry<CoreType, CoreLibfunc>,
metadata: &mut MetadataStorage,
info: &WithSelf<InfoAndTypeConcreteType>,
) -> Result<Region<'ctx>> {
let location = Location::unknown(context);
if metadata.get::<ReallocBindingsMeta>().is_none() {
metadata.insert(ReallocBindingsMeta::new(context, module));
}
let value_ty = registry.build_type(context, module, metadata, info.self_ty())?;
let elem_ty = registry.get_type(&info.ty)?;
let elem_stride = elem_ty.layout(registry)?.pad_to_align().size();
let elem_ty = elem_ty.build(context, module, registry, metadata, &info.ty)?;
let elem_layout = registry.get_type(&info.ty)?.layout(registry)?;
let refcount_offset = calc_data_prefix_offset(elem_layout);
let region = Region::new();
let entry = region.append_block(Block::new(&[(value_ty, location)]));
let array_ptr_ptr = entry.extract_value(
context,
location,
entry.argument(0)?.into(),
llvm::r#type::pointer(context, 0),
0,
)?;
let array_cap = entry.extract_value(
context,
location,
entry.argument(0)?.into(),
IntegerType::new(context, 32).into(),
3,
)?;
let k0 = entry.const_int(context, location, 0, 32)?;
let zero_capacity = entry.append_op_result(arith::cmpi(
context,
CmpiPredicate::Eq,
array_cap,
k0,
location,
))?;
entry.append_operation(scf::r#if(
zero_capacity,
&[],
{
// if the array has no capacity, do nothing, as there is no allocation
let region = Region::new();
let block = region.append_block(Block::new(&[]));
block.append_operation(scf::r#yield(&[], location));
region
},
{
// if the array has capacity, decrease the reference counter
// and, in case it reaches zero, free all the resources.
let region = Region::new();
let block = region.append_block(Block::new(&[]));
// obtain the reference counter
let array_ptr = block.load(
context,
location,
array_ptr_ptr,
llvm::r#type::pointer(context, 0),
)?;
let refcount_ptr = block.gep(
context,
location,
array_ptr,
&[GepIndex::Const(-(refcount_offset as i32))],
IntegerType::new(context, 8).into(),
)?;
let ref_count = block.load(
context,
location,
refcount_ptr,
IntegerType::new(context, 32).into(),
)?;
// if the reference counter is greater than 1, then it's shared
let k1 = block.const_int(context, location, 1, 32)?;
let is_shared = block.append_op_result(arith::cmpi(
context,
CmpiPredicate::Ne,
ref_count,
k1,
location,
))?;
block.append_operation(scf::r#if(
is_shared,
&[],
{
// if the array is shared, decrease the reference counter by one
let region = Region::new();
let block = region.append_block(Block::new(&[]));
let ref_count = block.append_op_result(arith::subi(ref_count, k1, location))?;
block.store(context, location, refcount_ptr, ref_count)?;
block.append_operation(scf::r#yield(&[], location));
region
},
{
// if the array is not shared, drop all elements and free the memory
let region = Region::new();
let block = region.append_block(Block::new(&[]));
if DropOverridesMeta::is_overriden(metadata, &info.ty) {
let k0 = block.const_int(context, location, 0, 64)?;
let elem_stride = block.const_int(context, location, elem_stride, 64)?;
let max_len_ptr = block.gep(
context,
location,
array_ptr,
&[GepIndex::Const(
-((refcount_offset - size_of::<u32>()) as i32),
)],
IntegerType::new(context, 8).into(),
)?;
let max_len = block.load(
context,
location,
max_len_ptr,
IntegerType::new(context, 32).into(),
)?;
let max_len =
block.extui(max_len, IntegerType::new(context, 64).into(), location)?;
let offset_end = block.muli(max_len, elem_stride, location)?;
// Drop each element in the array.
block.append_operation(scf::r#for(
k0,
offset_end,
elem_stride,
{
let region = Region::new();
let block = region.append_block(Block::new(&[(
IntegerType::new(context, 64).into(),
location,
)]));
let elem_offset = block.argument(0)?.into();
let elem_ptr = block.gep(
context,
location,
array_ptr,
&[GepIndex::Value(elem_offset)],
IntegerType::new(context, 8).into(),
)?;
let elem_val = block.load(context, location, elem_ptr, elem_ty)?;
DropOverridesMeta::invoke_override(
context, registry, module, &block, &block, location, metadata,
&info.ty, elem_val,
)?;
block.append_operation(scf::r#yield(&[], location));
region
},
location,
));
}
// finally, free the array allocation
block.append_operation(ReallocBindingsMeta::free(
context,
refcount_ptr,
location,
)?);
block.append_operation(ReallocBindingsMeta::free(
context,
array_ptr_ptr,
location,
)?);
block.append_operation(scf::r#yield(&[], location));
region
},
location,
));
block.append_operation(scf::r#yield(&[], location));
region
},
location,
));
entry.append_operation(func::r#return(&[], location));
Ok(region)
}
pub fn calc_data_prefix_offset(layout: Layout) -> usize {
get_integer_layout(32)
.extend(get_integer_layout(32))
.expect("creating a layout of two i32 should never fail")
.0
.align_to(layout.align())
.expect("layout size rounded up to the next multiple of layout alignment should never be greater than ISIZE::MAX")
.pad_to_align()
.size()
}
#[cfg(test)]
mod test {
use crate::{load_cairo, utils::testing::run_program, values::Value};
use pretty_assertions_sorted::assert_eq;
#[test]
fn test_array_snapshot_deep_clone() {
let program = load_cairo! {
fn run_test() -> @Array<Array<felt252>> {
let mut inputs: Array<Array<felt252>> = ArrayTrait::new();
inputs.append(array![1, 2, 3]);
inputs.append(array![4, 5, 6]);
@inputs
}
};
let result = run_program(&program, "run_test", &[]).return_value;
assert_eq!(
result,
Value::Array(vec![
Value::Array(vec![
Value::Felt252(1.into()),
Value::Felt252(2.into()),
Value::Felt252(3.into()),
]),
Value::Array(vec![
Value::Felt252(4.into()),
Value::Felt252(5.into()),
Value::Felt252(6.into()),
]),
]),
);
}
}
| rust | Apache-2.0 | f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce | 2026-01-04T20:20:54.031924Z | false |
lambdaclass/cairo_native | https://github.com/lambdaclass/cairo_native/blob/f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce/src/types/gas_builtin.rs | src/types/gas_builtin.rs | //! # Gas builtin type
//!
//! The gas builtin is just a number indicating how many
//! gas units currently remain.
use super::WithSelf;
use crate::{error::Result, metadata::MetadataStorage};
use cairo_lang_sierra::{
extensions::{
core::{CoreLibfunc, CoreType},
types::InfoOnlyConcreteType,
},
program_registry::ProgramRegistry,
};
use melior::{
ir::{r#type::IntegerType, Module, Type},
Context,
};
/// Build the MLIR type.
///
/// Check out [the module](self) for more info.
pub fn build<'ctx>(
context: &'ctx Context,
_module: &Module<'ctx>,
_registry: &ProgramRegistry<CoreType, CoreLibfunc>,
_metadata: &mut MetadataStorage,
_info: WithSelf<InfoOnlyConcreteType>,
) -> Result<Type<'ctx>> {
Ok(IntegerType::new(context, 64).into())
}
| rust | Apache-2.0 | f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce | 2026-01-04T20:20:54.031924Z | false |
lambdaclass/cairo_native | https://github.com/lambdaclass/cairo_native/blob/f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce/src/types/uint8.rs | src/types/uint8.rs | //! # Unsigned 8-bit integer type
use super::WithSelf;
use crate::{error::Result, metadata::MetadataStorage};
use cairo_lang_sierra::{
extensions::{
core::{CoreLibfunc, CoreType},
types::InfoOnlyConcreteType,
},
program_registry::ProgramRegistry,
};
use melior::{
ir::{r#type::IntegerType, Module, Type},
Context,
};
/// Build the MLIR type.
///
/// Check out [the module](self) for more info.
pub fn build<'ctx>(
context: &'ctx Context,
_module: &Module<'ctx>,
_registry: &ProgramRegistry<CoreType, CoreLibfunc>,
_metadata: &mut MetadataStorage,
_info: WithSelf<InfoOnlyConcreteType>,
) -> Result<Type<'ctx>> {
Ok(IntegerType::new(context, 8).into())
}
| rust | Apache-2.0 | f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce | 2026-01-04T20:20:54.031924Z | false |
lambdaclass/cairo_native | https://github.com/lambdaclass/cairo_native/blob/f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce/src/types/range_check.rs | src/types/range_check.rs | //! # Builtin costs type
//!
//! The range check type is used in the VM for checking whether values are in a specific range.
//! Since this can be done natively in MLIR, this type is effectively an unit type.
use super::WithSelf;
use crate::{error::Result, metadata::MetadataStorage};
use cairo_lang_sierra::{
extensions::{
core::{CoreLibfunc, CoreType},
types::InfoOnlyConcreteType,
},
program_registry::ProgramRegistry,
};
use melior::{
ir::{r#type::IntegerType, Module, Type},
Context,
};
/// Build the MLIR type.
///
/// Check out [the module](self) for more info.
pub fn build<'ctx>(
context: &'ctx Context,
_module: &Module<'ctx>,
_registry: &ProgramRegistry<CoreType, CoreLibfunc>,
_metadata: &mut MetadataStorage,
_info: WithSelf<InfoOnlyConcreteType>,
) -> Result<Type<'ctx>> {
Ok(IntegerType::new(context, 64).into())
}
| rust | Apache-2.0 | f0a4fdbbad8a1730dea3b20c0b2ea140b9b853ce | 2026-01-04T20:20:54.031924Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.